mirror of
https://github.com/openziti/zrok.git
synced 2025-06-21 10:17:51 +02:00
Merge branch 'main' of github.com:openziti/zrok into password-reset-fix
This commit is contained in:
commit
ec21146b4e
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
* text=auto eol=lf
|
2
.gitignore
vendored
2
.gitignore
vendored
@ -28,6 +28,8 @@ go.work
|
|||||||
go.work.sum
|
go.work.sum
|
||||||
zrok-venv
|
zrok-venv
|
||||||
|
|
||||||
|
# cache used by local dev cross-build script
|
||||||
|
/.npm
|
||||||
npm-debug.log*
|
npm-debug.log*
|
||||||
yarn-debug.log*
|
yarn-debug.log*
|
||||||
yarn-error.log*
|
yarn-error.log*
|
||||||
|
92
ACKNOWLEDGEMENTS.md
Normal file
92
ACKNOWLEDGEMENTS.md
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
# ACKNOWLEDGEMENTS
|
||||||
|
|
||||||
|
## github.com/openziti/zrok/endpoints/socks
|
||||||
|
|
||||||
|
Portions of the `socks` package is based on code from `https://github.com/tailscale/tailscale/blob/v1.58.2/net/socks5/socks5.go`, which included the following license:
|
||||||
|
|
||||||
|
> BSD 3-Clause License
|
||||||
|
>
|
||||||
|
> Copyright (c) 2020 Tailscale Inc & AUTHORS.
|
||||||
|
>
|
||||||
|
> Redistribution and use in source and binary forms, with or without
|
||||||
|
> modification, are permitted provided that the following conditions are met:
|
||||||
|
>
|
||||||
|
> 1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
> list of conditions and the following disclaimer.
|
||||||
|
>
|
||||||
|
> 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
> this list of conditions and the following disclaimer in the documentation
|
||||||
|
> and/or other materials provided with the distribution.
|
||||||
|
>
|
||||||
|
> 3. Neither the name of the copyright holder nor the names of its
|
||||||
|
> contributors may be used to endorse or promote products derived from
|
||||||
|
> this software without specific prior written permission.
|
||||||
|
>
|
||||||
|
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
> DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||||
|
> FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
> DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
> SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
> OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
## github.com/openziti/zrok/drives/davServer
|
||||||
|
|
||||||
|
The `davServer` package is based on code from `https://cs.opensource.google/go/go/`, which included the following license:
|
||||||
|
|
||||||
|
> Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
>
|
||||||
|
> Redistribution and use in source and binary forms, with or without
|
||||||
|
> modification, are permitted provided that the following conditions are
|
||||||
|
> met:
|
||||||
|
>
|
||||||
|
> * Redistributions of source code must retain the above copyright
|
||||||
|
> notice, this list of conditions and the following disclaimer.
|
||||||
|
> * Redistributions in binary form must reproduce the above
|
||||||
|
> copyright notice, this list of conditions and the following disclaimer
|
||||||
|
> in the documentation and/or other materials provided with the
|
||||||
|
> distribution.
|
||||||
|
> * Neither the name of Google Inc. nor the names of its
|
||||||
|
> contributors may be used to endorse or promote products derived from
|
||||||
|
> this software without specific prior written permission.
|
||||||
|
>
|
||||||
|
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
> A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
> OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
> SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
> LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
> DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
> THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
> (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
|
||||||
|
|
||||||
|
## github.com/openziti/zrok/drives/davClient
|
||||||
|
|
||||||
|
The `davClient` package is based on code from `github.com/emersion/go-webdav`, which included the following license:
|
||||||
|
|
||||||
|
> The MIT License (MIT)
|
||||||
|
>
|
||||||
|
> Copyright (c) 2020 Simon Ser
|
||||||
|
>
|
||||||
|
> Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
> of this software and associated documentation files (the "Software"), to deal
|
||||||
|
> in the Software without restriction, including without limitation the rights
|
||||||
|
> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
> copies of the Software, and to permit persons to whom the Software is
|
||||||
|
> furnished to do so, subject to the following conditions:
|
||||||
|
>
|
||||||
|
> The above copyright notice and this permission notice shall be included in all
|
||||||
|
> copies or substantial portions of the Software.
|
||||||
|
>
|
||||||
|
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
> SOFTWARE.
|
28
CHANGELOG.md
28
CHANGELOG.md
@ -1,11 +1,29 @@
|
|||||||
# CHANGELOG
|
# CHANGELOG
|
||||||
|
|
||||||
## v0.4.24
|
## v0.4.25
|
||||||
|
|
||||||
FIX: Updated password reset to handle multiple reset requests.
|
FIX: Updated password reset to handle multiple reset requests.
|
||||||
|
|
||||||
|
## v0.4.24
|
||||||
|
|
||||||
|
FEATURE: New `socks` backend mode for use with private sharing. Use `zrok share private --backend-mode socks` and then `zrok access private` that share from somewhere else... very lightweight VPN-like functionality (https://github.com/openziti/zrok/issues/558)
|
||||||
|
|
||||||
|
FEATURE: New `zrok admin create account` command that allows populating accounts directly into the underlying controller database (https://github.com/openziti/zrok/issues/551)
|
||||||
|
|
||||||
|
CHANGE: The `zrok test loopback public` utility to report non-`200` errors and also ensure that the listening side of the test is fully established before starting loopback testing.
|
||||||
|
|
||||||
|
CHANGE: The OpenZiti SDK for golang (https://github.com/openziti/sdk-golang) has been updated to version `v0.22.28`
|
||||||
|
|
||||||
## v0.4.23
|
## v0.4.23
|
||||||
|
|
||||||
|
FEATURE: New CLI commands have been implemented for working with the `drive` share backend mode (part of the "zrok Drives" functionality). These commands include `zrok cp`, `zrok mkdir` `zrok mv`, `zrok ls`, and `zrok rm`. These are initial, minimal versions of these commands and very likely contain bugs and ergonomic annoyances. There is a guide available at (`docs/guides/drives/cli.md`) that explains how to work with these tools in detail (https://github.com/openziti/zrok/issues/438)
|
||||||
|
|
||||||
|
FEATURE: Python SDK now has a decorator for integrating with various server side frameworks. See the `http-server` example.
|
||||||
|
|
||||||
|
FEATURE: Python SDK share and access handling now supports context management.
|
||||||
|
|
||||||
|
FEATURE: TLS for `zrok` controller and frontends. Add the `tls:` stanza to your controller configuration (see `etc/ctrl.yml`) to enable TLS support for the controller API. Add the `tls:` stanza to your frontend configuration (see `etc/frontend.yml`) to enable TLS support for frontends (be sure to check your `public` frontend template) (#24)(https://github.com/openziti/zrok/issues/24)
|
||||||
|
|
||||||
CHANGE: Improved OpenZiti resource cleanup resilience. Previous resource cleanup would stop when an error was encountered at any stage of the cleanup process (serps, sps, config, service). New cleanup implementation logs errors but continues to clean up anything that it can (https://github.com/openziti/zrok/issues/533)
|
CHANGE: Improved OpenZiti resource cleanup resilience. Previous resource cleanup would stop when an error was encountered at any stage of the cleanup process (serps, sps, config, service). New cleanup implementation logs errors but continues to clean up anything that it can (https://github.com/openziti/zrok/issues/533)
|
||||||
|
|
||||||
CHANGE: Instead of setting the `ListenOptions.MaxConnections` property to `64`, use the default value of `3`. This property actually controls the number of terminators created on the underlying OpenZiti network. This property is actually getting renamed to `ListenOptions.MaxTerminators` in an upcoming release of `github.com/openziti/sdk-golang` (https://github.com/openziti/zrok/issues/535)
|
CHANGE: Instead of setting the `ListenOptions.MaxConnections` property to `64`, use the default value of `3`. This property actually controls the number of terminators created on the underlying OpenZiti network. This property is actually getting renamed to `ListenOptions.MaxTerminators` in an upcoming release of `github.com/openziti/sdk-golang` (https://github.com/openziti/zrok/issues/535)
|
||||||
@ -14,14 +32,6 @@ CHANGE: Versioning for the Python SDK has been updated to use versioneer for man
|
|||||||
|
|
||||||
CHANGE: Python SDK package name has been renamed to `zrok`, dropping the `-sdk` postfix. [pypi](https://pypi.org/project/zrok).
|
CHANGE: Python SDK package name has been renamed to `zrok`, dropping the `-sdk` postfix. [pypi](https://pypi.org/project/zrok).
|
||||||
|
|
||||||
FEATURE: Python SDK now has a decorator for integrating with various server side frameworks. See the `http-server` example.
|
|
||||||
|
|
||||||
FEATURE: Python SDK share and access handling now supports context management.
|
|
||||||
|
|
||||||
FEATURE: TLS for `zrok` controller and acces endpoints. Add the specified stanza to your controller file (see `etc/ctrl.yml`). Your controller will now listen over TLS. (Note: you will need to update your client environments/configs to use the new https:// url). Likewise with `access` add the stanza to your frontend configuration (see `etc/frontend.yml`). Additionally you will have to update the frontend url template to emit a https:// scheme.
|
|
||||||
|
|
||||||
FEATURE: TLS for `zrok` controller and frontends. Add the `tls:` stanza to your controller configuration (see `etc/ctrl.yml`) to enable TLS support for the controller API. Add the `tls:` stanza to your frontend configuration (see `etc/frontend.yml`) to enable TLS support for frontends (be sure to check your `public` frontend template) (#24)(https://github.com/openziti/zrok/issues/24)
|
|
||||||
|
|
||||||
## v0.4.22
|
## v0.4.22
|
||||||
|
|
||||||
FIX: The goreleaser action is not updated to work with the latest golang build. Modifed `go.mod` to comply with what goreleaser expects
|
FIX: The goreleaser action is not updated to work with the latest golang build. Modifed `go.mod` to comply with what goreleaser expects
|
||||||
|
@ -143,6 +143,28 @@ func (cmd *accessPrivateCommand) run(_ *cobra.Command, args []string) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
case "socks":
|
||||||
|
fe, err := tcpTunnel.NewFrontend(&tcpTunnel.FrontendConfig{
|
||||||
|
BindAddress: cmd.bindAddress,
|
||||||
|
IdentityName: env.EnvironmentIdentityName(),
|
||||||
|
ShrToken: args[0],
|
||||||
|
RequestsChan: requests,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if !panicInstead {
|
||||||
|
tui.Error("unable to create private access", err)
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
if err := fe.Run(); err != nil {
|
||||||
|
if !panicInstead {
|
||||||
|
tui.Error("error starting access", err)
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
default:
|
default:
|
||||||
cfg := proxy.DefaultFrontendConfig(env.EnvironmentIdentityName())
|
cfg := proxy.DefaultFrontendConfig(env.EnvironmentIdentityName())
|
||||||
cfg.ShrToken = shrToken
|
cfg.ShrToken = shrToken
|
||||||
|
66
cmd/zrok/adminCreateAccount.go
Normal file
66
cmd/zrok/adminCreateAccount.go
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/controller"
|
||||||
|
"github.com/openziti/zrok/controller/config"
|
||||||
|
"github.com/openziti/zrok/controller/store"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
adminCreateCmd.AddCommand(newAdminCreateAccount().cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
type adminCreateAccount struct {
|
||||||
|
cmd *cobra.Command
|
||||||
|
}
|
||||||
|
|
||||||
|
func newAdminCreateAccount() *adminCreateAccount {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "account <configPath}> <email> <password>",
|
||||||
|
Short: "Pre-populate an account in the database; returns an enable token for the account",
|
||||||
|
Args: cobra.ExactArgs(3),
|
||||||
|
}
|
||||||
|
command := &adminCreateAccount{cmd: cmd}
|
||||||
|
cmd.Run = command.run
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *adminCreateAccount) run(_ *cobra.Command, args []string) {
|
||||||
|
cfg, err := config.LoadConfig(args[0])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
str, err := store.Open(cfg.Store)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
token, err := controller.CreateToken()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
hpwd, err := controller.HashPassword(args[2])
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
trx, err := str.Begin()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := trx.Commit(); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
a := &store.Account{
|
||||||
|
Email: args[1],
|
||||||
|
Salt: hpwd.Salt,
|
||||||
|
Password: hpwd.Password,
|
||||||
|
Token: token,
|
||||||
|
}
|
||||||
|
if _, err := str.CreateAccount(a, trx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fmt.Println(token)
|
||||||
|
}
|
106
cmd/zrok/copy.go
Normal file
106
cmd/zrok/copy.go
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/drives/sync"
|
||||||
|
"github.com/openziti/zrok/environment"
|
||||||
|
"github.com/openziti/zrok/sdk/golang/sdk"
|
||||||
|
"github.com/openziti/zrok/tui"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(newCopyCommand().cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
type copyCommand struct {
|
||||||
|
cmd *cobra.Command
|
||||||
|
sync bool
|
||||||
|
basicAuth string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newCopyCommand() *copyCommand {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "copy <source> [<target>] (<target> defaults to 'file://.`)",
|
||||||
|
Short: "Copy (unidirectional sync) zrok drive contents from <source> to <target> ('http://', 'file://', and 'zrok://' supported)",
|
||||||
|
Aliases: []string{"cp"},
|
||||||
|
Args: cobra.RangeArgs(1, 2),
|
||||||
|
}
|
||||||
|
command := ©Command{cmd: cmd}
|
||||||
|
cmd.Run = command.run
|
||||||
|
cmd.Flags().BoolVarP(&command.sync, "sync", "s", false, "Only copy modified files (one-way synchronize)")
|
||||||
|
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *copyCommand) run(_ *cobra.Command, args []string) {
|
||||||
|
if cmd.basicAuth == "" {
|
||||||
|
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
|
||||||
|
}
|
||||||
|
|
||||||
|
sourceUrl, err := url.Parse(args[0])
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("invalid source '%v'", args[0]), err)
|
||||||
|
}
|
||||||
|
if sourceUrl.Scheme == "" {
|
||||||
|
sourceUrl.Scheme = "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
targetStr := "."
|
||||||
|
if len(args) == 2 {
|
||||||
|
targetStr = args[1]
|
||||||
|
}
|
||||||
|
targetUrl, err := url.Parse(targetStr)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("invalid target '%v'", targetStr), err)
|
||||||
|
}
|
||||||
|
if targetUrl.Scheme == "" {
|
||||||
|
targetUrl.Scheme = "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := environment.LoadRoot()
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error loading root", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var allocatedAccesses []*sdk.Access
|
||||||
|
if sourceUrl.Scheme == "zrok" {
|
||||||
|
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: sourceUrl.Host})
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error creating access", err)
|
||||||
|
}
|
||||||
|
allocatedAccesses = append(allocatedAccesses, access)
|
||||||
|
}
|
||||||
|
if targetUrl.Scheme == "zrok" {
|
||||||
|
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error creating access", err)
|
||||||
|
}
|
||||||
|
allocatedAccesses = append(allocatedAccesses, access)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
for _, access := range allocatedAccesses {
|
||||||
|
err := sdk.DeleteAccess(root, access)
|
||||||
|
if err != nil {
|
||||||
|
tui.Warning("error deleting target access", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
source, err := sync.TargetForURL(sourceUrl, root, cmd.basicAuth)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("error creating target for '%v'", sourceUrl), err)
|
||||||
|
}
|
||||||
|
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := sync.OneWay(source, target, cmd.sync); err != nil {
|
||||||
|
tui.Error("error copying", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("copy complete!")
|
||||||
|
}
|
95
cmd/zrok/ls.go
Normal file
95
cmd/zrok/ls.go
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/jedib0t/go-pretty/v6/table"
|
||||||
|
"github.com/openziti/zrok/drives/sync"
|
||||||
|
"github.com/openziti/zrok/environment"
|
||||||
|
"github.com/openziti/zrok/sdk/golang/sdk"
|
||||||
|
"github.com/openziti/zrok/tui"
|
||||||
|
"github.com/openziti/zrok/util"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(newLsCommand().cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
type lsCommand struct {
|
||||||
|
cmd *cobra.Command
|
||||||
|
basicAuth string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLsCommand() *lsCommand {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "ls <target>",
|
||||||
|
Short: "List the contents of drive <target> ('http://', 'zrok://','file://')",
|
||||||
|
Aliases: []string{"dir"},
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
}
|
||||||
|
command := &lsCommand{cmd: cmd}
|
||||||
|
cmd.Run = command.run
|
||||||
|
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *lsCommand) run(_ *cobra.Command, args []string) {
|
||||||
|
if cmd.basicAuth == "" {
|
||||||
|
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
|
||||||
|
}
|
||||||
|
|
||||||
|
targetUrl, err := url.Parse(args[0])
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
|
||||||
|
}
|
||||||
|
if targetUrl.Scheme == "" {
|
||||||
|
targetUrl.Scheme = "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := environment.LoadRoot()
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error loading root", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if targetUrl.Scheme == "zrok" {
|
||||||
|
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error creating access", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := sdk.DeleteAccess(root, access); err != nil {
|
||||||
|
logrus.Warningf("error freeing access: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
objects, err := target.Dir("/")
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error listing directory", err)
|
||||||
|
}
|
||||||
|
sort.Slice(objects, func(i, j int) bool {
|
||||||
|
return objects[i].Path < objects[j].Path
|
||||||
|
})
|
||||||
|
|
||||||
|
tw := table.NewWriter()
|
||||||
|
tw.SetOutputMirror(os.Stdout)
|
||||||
|
tw.SetStyle(table.StyleLight)
|
||||||
|
tw.AppendHeader(table.Row{"type", "Name", "Size", "Modified"})
|
||||||
|
for _, object := range objects {
|
||||||
|
if object.IsDir {
|
||||||
|
tw.AppendRow(table.Row{"DIR", object.Path, "", ""})
|
||||||
|
} else {
|
||||||
|
tw.AppendRow(table.Row{"", object.Path, util.BytesToSize(object.Size), object.Modified.Local()})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tw.Render()
|
||||||
|
}
|
75
cmd/zrok/md.go
Normal file
75
cmd/zrok/md.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/drives/sync"
|
||||||
|
"github.com/openziti/zrok/environment"
|
||||||
|
"github.com/openziti/zrok/sdk/golang/sdk"
|
||||||
|
"github.com/openziti/zrok/tui"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(newMdCommand().cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
type mdCommand struct {
|
||||||
|
cmd *cobra.Command
|
||||||
|
basicAuth string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMdCommand() *mdCommand {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "md <target>",
|
||||||
|
Short: "Make directory at <target> ('http://', 'zrok://', 'file://')",
|
||||||
|
Aliases: []string{"mkdir"},
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
}
|
||||||
|
command := &mdCommand{cmd: cmd}
|
||||||
|
cmd.Run = command.run
|
||||||
|
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *mdCommand) run(_ *cobra.Command, args []string) {
|
||||||
|
if cmd.basicAuth == "" {
|
||||||
|
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
|
||||||
|
}
|
||||||
|
|
||||||
|
targetUrl, err := url.Parse(args[0])
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
|
||||||
|
}
|
||||||
|
if targetUrl.Scheme == "" {
|
||||||
|
targetUrl.Scheme = "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := environment.LoadRoot()
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error loading root", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if targetUrl.Scheme == "zrok" {
|
||||||
|
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error creating access", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := sdk.DeleteAccess(root, access); err != nil {
|
||||||
|
logrus.Warningf("error freeing access: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := target.Mkdir("/"); err != nil {
|
||||||
|
tui.Error("error creating directory", err)
|
||||||
|
}
|
||||||
|
}
|
75
cmd/zrok/mv.go
Normal file
75
cmd/zrok/mv.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/drives/sync"
|
||||||
|
"github.com/openziti/zrok/environment"
|
||||||
|
"github.com/openziti/zrok/sdk/golang/sdk"
|
||||||
|
"github.com/openziti/zrok/tui"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(newMvCommand().cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
type mvCommand struct {
|
||||||
|
cmd *cobra.Command
|
||||||
|
basicAuth string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMvCommand() *mvCommand {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "mv <target> <newPath>",
|
||||||
|
Short: "Move the drive <target> to <newPath> ('http://', 'zrok://', 'file://')",
|
||||||
|
Aliases: []string{"move"},
|
||||||
|
Args: cobra.ExactArgs(2),
|
||||||
|
}
|
||||||
|
command := &mvCommand{cmd: cmd}
|
||||||
|
cmd.Run = command.run
|
||||||
|
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *mvCommand) run(_ *cobra.Command, args []string) {
|
||||||
|
if cmd.basicAuth == "" {
|
||||||
|
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
|
||||||
|
}
|
||||||
|
|
||||||
|
targetUrl, err := url.Parse(args[0])
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
|
||||||
|
}
|
||||||
|
if targetUrl.Scheme == "" {
|
||||||
|
targetUrl.Scheme = "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := environment.LoadRoot()
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error loading root", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if targetUrl.Scheme == "zrok" {
|
||||||
|
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error creating access", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := sdk.DeleteAccess(root, access); err != nil {
|
||||||
|
logrus.Warningf("error freeing access: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := target.Move("/", args[1]); err != nil {
|
||||||
|
tui.Error("error moving", err)
|
||||||
|
}
|
||||||
|
}
|
@ -31,14 +31,14 @@ type reserveCommand struct {
|
|||||||
|
|
||||||
func newReserveCommand() *reserveCommand {
|
func newReserveCommand() *reserveCommand {
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "reserve <public|private> <target>",
|
Use: "reserve <public|private> [<target>]",
|
||||||
Short: "Create a reserved share",
|
Short: "Create a reserved share",
|
||||||
Args: cobra.ExactArgs(2),
|
Args: cobra.RangeArgs(1, 2),
|
||||||
}
|
}
|
||||||
command := &reserveCommand{cmd: cmd}
|
command := &reserveCommand{cmd: cmd}
|
||||||
cmd.Flags().StringVarP(&command.uniqueName, "unique-name", "n", "", "A unique name for the reserved share (defaults to generated identifier)")
|
cmd.Flags().StringVarP(&command.uniqueName, "unique-name", "n", "", "A unique name for the reserved share (defaults to generated identifier)")
|
||||||
cmd.Flags().StringArrayVar(&command.frontendSelection, "frontends", []string{"public"}, "Selected frontends to use for the share")
|
cmd.Flags().StringArrayVar(&command.frontendSelection, "frontends", []string{"public"}, "Selected frontends to use for the share")
|
||||||
cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode (public|private: proxy, web, caddy, drive) (private: tcpTunnel, udpTunnel)")
|
cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode (public|private: proxy, web, caddy, drive) (private: tcpTunnel, udpTunnel, socks)")
|
||||||
cmd.Flags().BoolVarP(&command.jsonOutput, "json-output", "j", false, "Emit JSON describing the created reserved share")
|
cmd.Flags().BoolVarP(&command.jsonOutput, "json-output", "j", false, "Emit JSON describing the created reserved share")
|
||||||
cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (<username:password>,...)")
|
cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (<username:password>,...)")
|
||||||
cmd.Flags().StringVar(&command.oauthProvider, "oauth-provider", "", "Enable OAuth provider [google, github]")
|
cmd.Flags().StringVar(&command.oauthProvider, "oauth-provider", "", "Enable OAuth provider [google, github]")
|
||||||
@ -52,7 +52,7 @@ func newReserveCommand() *reserveCommand {
|
|||||||
|
|
||||||
func (cmd *reserveCommand) run(_ *cobra.Command, args []string) {
|
func (cmd *reserveCommand) run(_ *cobra.Command, args []string) {
|
||||||
shareMode := sdk.ShareMode(args[0])
|
shareMode := sdk.ShareMode(args[0])
|
||||||
privateOnlyModes := []string{"tcpTunnel", "udpTunnel"}
|
privateOnlyModes := []string{"tcpTunnel", "udpTunnel", "socks"}
|
||||||
if shareMode != sdk.PublicShareMode && shareMode != sdk.PrivateShareMode {
|
if shareMode != sdk.PublicShareMode && shareMode != sdk.PrivateShareMode {
|
||||||
tui.Error("invalid sharing mode; expecting 'public' or 'private'", nil)
|
tui.Error("invalid sharing mode; expecting 'public' or 'private'", nil)
|
||||||
} else if shareMode == sdk.PublicShareMode && slices.Contains(privateOnlyModes, cmd.backendMode) {
|
} else if shareMode == sdk.PublicShareMode && slices.Contains(privateOnlyModes, cmd.backendMode) {
|
||||||
@ -66,6 +66,9 @@ func (cmd *reserveCommand) run(_ *cobra.Command, args []string) {
|
|||||||
var target string
|
var target string
|
||||||
switch cmd.backendMode {
|
switch cmd.backendMode {
|
||||||
case "proxy":
|
case "proxy":
|
||||||
|
if len(args) != 2 {
|
||||||
|
tui.Error("the 'proxy' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
v, err := parseUrl(args[1])
|
v, err := parseUrl(args[1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
tui.Error("invalid target endpoint URL", err)
|
tui.Error("invalid target endpoint URL", err)
|
||||||
@ -73,22 +76,42 @@ func (cmd *reserveCommand) run(_ *cobra.Command, args []string) {
|
|||||||
target = v
|
target = v
|
||||||
|
|
||||||
case "web":
|
case "web":
|
||||||
|
if len(args) != 2 {
|
||||||
|
tui.Error("the 'web' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[1]
|
target = args[1]
|
||||||
|
|
||||||
case "tcpTunnel":
|
case "tcpTunnel":
|
||||||
|
if len(args) != 2 {
|
||||||
|
tui.Error("the 'tcpTunnel' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[1]
|
target = args[1]
|
||||||
|
|
||||||
case "udpTunnel":
|
case "udpTunnel":
|
||||||
|
if len(args) != 2 {
|
||||||
|
tui.Error("the 'udpTunnel' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[1]
|
target = args[1]
|
||||||
|
|
||||||
case "caddy":
|
case "caddy":
|
||||||
|
if len(args) != 2 {
|
||||||
|
tui.Error("the 'caddy' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[1]
|
target = args[1]
|
||||||
|
|
||||||
case "drive":
|
case "drive":
|
||||||
|
if len(args) != 2 {
|
||||||
|
tui.Error("the 'drive' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[1]
|
target = args[1]
|
||||||
|
|
||||||
|
case "socks":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'socks' backend mode does not expect <target>", nil)
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive}", cmd.backendMode), nil)
|
tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive, socks}", cmd.backendMode), nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
env, err := environment.LoadRoot()
|
env, err := environment.LoadRoot()
|
||||||
|
75
cmd/zrok/rm.go
Normal file
75
cmd/zrok/rm.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/drives/sync"
|
||||||
|
"github.com/openziti/zrok/environment"
|
||||||
|
"github.com/openziti/zrok/sdk/golang/sdk"
|
||||||
|
"github.com/openziti/zrok/tui"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(newRmCommand().cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
type rmCommand struct {
|
||||||
|
cmd *cobra.Command
|
||||||
|
basicAuth string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRmCommand() *rmCommand {
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "rm <target>",
|
||||||
|
Short: "Remove (delete) the contents of drive <target> ('http://', 'zrok://', 'file://')",
|
||||||
|
Aliases: []string{"del"},
|
||||||
|
Args: cobra.ExactArgs(1),
|
||||||
|
}
|
||||||
|
command := &rmCommand{cmd: cmd}
|
||||||
|
cmd.Run = command.run
|
||||||
|
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *rmCommand) run(_ *cobra.Command, args []string) {
|
||||||
|
if cmd.basicAuth == "" {
|
||||||
|
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
|
||||||
|
}
|
||||||
|
|
||||||
|
targetUrl, err := url.Parse(args[0])
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
|
||||||
|
}
|
||||||
|
if targetUrl.Scheme == "" {
|
||||||
|
targetUrl.Scheme = "file"
|
||||||
|
}
|
||||||
|
|
||||||
|
root, err := environment.LoadRoot()
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error loading root", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if targetUrl.Scheme == "zrok" {
|
||||||
|
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
|
||||||
|
if err != nil {
|
||||||
|
tui.Error("error creating access", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := sdk.DeleteAccess(root, access); err != nil {
|
||||||
|
logrus.Warningf("error freeing access: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
|
||||||
|
if err != nil {
|
||||||
|
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := target.Rm("/"); err != nil {
|
||||||
|
tui.Error("error removing", err)
|
||||||
|
}
|
||||||
|
}
|
@ -6,6 +6,7 @@ import (
|
|||||||
"github.com/openziti/zrok/endpoints"
|
"github.com/openziti/zrok/endpoints"
|
||||||
"github.com/openziti/zrok/endpoints/drive"
|
"github.com/openziti/zrok/endpoints/drive"
|
||||||
"github.com/openziti/zrok/endpoints/proxy"
|
"github.com/openziti/zrok/endpoints/proxy"
|
||||||
|
"github.com/openziti/zrok/endpoints/socks"
|
||||||
"github.com/openziti/zrok/endpoints/tcpTunnel"
|
"github.com/openziti/zrok/endpoints/tcpTunnel"
|
||||||
"github.com/openziti/zrok/endpoints/udpTunnel"
|
"github.com/openziti/zrok/endpoints/udpTunnel"
|
||||||
"github.com/openziti/zrok/environment"
|
"github.com/openziti/zrok/environment"
|
||||||
@ -33,13 +34,13 @@ type sharePrivateCommand struct {
|
|||||||
|
|
||||||
func newSharePrivateCommand() *sharePrivateCommand {
|
func newSharePrivateCommand() *sharePrivateCommand {
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "private <target>",
|
Use: "private [<target>]",
|
||||||
Short: "Share a target resource privately",
|
Short: "Share a target resource privately",
|
||||||
Args: cobra.ExactArgs(1),
|
Args: cobra.RangeArgs(0, 1),
|
||||||
}
|
}
|
||||||
command := &sharePrivateCommand{cmd: cmd}
|
command := &sharePrivateCommand{cmd: cmd}
|
||||||
cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (<username:password>,...")
|
cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (<username:password>,...")
|
||||||
cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode {proxy, web, tcpTunnel, udpTunnel, caddy, drive}")
|
cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode {proxy, web, tcpTunnel, udpTunnel, caddy, drive, socks}")
|
||||||
cmd.Flags().BoolVar(&command.headless, "headless", false, "Disable TUI and run headless")
|
cmd.Flags().BoolVar(&command.headless, "headless", false, "Disable TUI and run headless")
|
||||||
cmd.Flags().BoolVar(&command.insecure, "insecure", false, "Enable insecure TLS certificate validation for <target>")
|
cmd.Flags().BoolVar(&command.insecure, "insecure", false, "Enable insecure TLS certificate validation for <target>")
|
||||||
cmd.Run = command.run
|
cmd.Run = command.run
|
||||||
@ -51,6 +52,9 @@ func (cmd *sharePrivateCommand) run(_ *cobra.Command, args []string) {
|
|||||||
|
|
||||||
switch cmd.backendMode {
|
switch cmd.backendMode {
|
||||||
case "proxy":
|
case "proxy":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'proxy' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
v, err := parseUrl(args[0])
|
v, err := parseUrl(args[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !panicInstead {
|
if !panicInstead {
|
||||||
@ -61,21 +65,41 @@ func (cmd *sharePrivateCommand) run(_ *cobra.Command, args []string) {
|
|||||||
target = v
|
target = v
|
||||||
|
|
||||||
case "web":
|
case "web":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'web' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[0]
|
target = args[0]
|
||||||
|
|
||||||
case "tcpTunnel":
|
case "tcpTunnel":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'tcpTunnel' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[0]
|
target = args[0]
|
||||||
|
|
||||||
case "udpTunnel":
|
case "udpTunnel":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'udpTunnel' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[0]
|
target = args[0]
|
||||||
|
|
||||||
case "caddy":
|
case "caddy":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'caddy' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[0]
|
target = args[0]
|
||||||
cmd.headless = true
|
cmd.headless = true
|
||||||
|
|
||||||
case "drive":
|
case "drive":
|
||||||
|
if len(args) != 1 {
|
||||||
|
tui.Error("the 'drive' backend mode expects a <target>", nil)
|
||||||
|
}
|
||||||
target = args[0]
|
target = args[0]
|
||||||
|
|
||||||
|
case "socks":
|
||||||
|
if len(args) != 0 {
|
||||||
|
tui.Error("the 'socks' backend mode does not expect <target>", nil)
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive}", cmd.backendMode), nil)
|
tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive}", cmd.backendMode), nil)
|
||||||
}
|
}
|
||||||
@ -264,6 +288,27 @@ func (cmd *sharePrivateCommand) run(_ *cobra.Command, args []string) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
case "socks":
|
||||||
|
cfg := &socks.BackendConfig{
|
||||||
|
IdentityPath: zif,
|
||||||
|
ShrToken: shr.Token,
|
||||||
|
Requests: requests,
|
||||||
|
}
|
||||||
|
|
||||||
|
be, err := socks.NewBackend(cfg)
|
||||||
|
if err != nil {
|
||||||
|
if !panicInstead {
|
||||||
|
tui.Error("error creating socks backend", err)
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := be.Run(); err != nil {
|
||||||
|
logrus.Errorf("error running socks backend: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
default:
|
default:
|
||||||
tui.Error("invalid backend mode", nil)
|
tui.Error("invalid backend mode", nil)
|
||||||
}
|
}
|
||||||
|
@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/openziti/zrok/endpoints"
|
"github.com/openziti/zrok/endpoints"
|
||||||
"github.com/openziti/zrok/endpoints/drive"
|
"github.com/openziti/zrok/endpoints/drive"
|
||||||
"github.com/openziti/zrok/endpoints/proxy"
|
"github.com/openziti/zrok/endpoints/proxy"
|
||||||
|
"github.com/openziti/zrok/endpoints/socks"
|
||||||
"github.com/openziti/zrok/endpoints/tcpTunnel"
|
"github.com/openziti/zrok/endpoints/tcpTunnel"
|
||||||
"github.com/openziti/zrok/endpoints/udpTunnel"
|
"github.com/openziti/zrok/endpoints/udpTunnel"
|
||||||
"github.com/openziti/zrok/environment"
|
"github.com/openziti/zrok/environment"
|
||||||
@ -92,23 +93,25 @@ func (cmd *shareReservedCommand) run(_ *cobra.Command, args []string) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Infof("sharing target: '%v'", target)
|
if resp.Payload.BackendMode != "socks" {
|
||||||
|
logrus.Infof("sharing target: '%v'", target)
|
||||||
|
|
||||||
if resp.Payload.BackendProxyEndpoint != target {
|
if resp.Payload.BackendProxyEndpoint != target {
|
||||||
upReq := share.NewUpdateShareParams()
|
upReq := share.NewUpdateShareParams()
|
||||||
upReq.Body = &rest_model_zrok.UpdateShareRequest{
|
upReq.Body = &rest_model_zrok.UpdateShareRequest{
|
||||||
ShrToken: shrToken,
|
ShrToken: shrToken,
|
||||||
BackendProxyEndpoint: target,
|
BackendProxyEndpoint: target,
|
||||||
}
|
|
||||||
if _, err := zrok.Share.UpdateShare(upReq, auth); err != nil {
|
|
||||||
if !panicInstead {
|
|
||||||
tui.Error("unable to update backend proxy endpoint", err)
|
|
||||||
}
|
}
|
||||||
panic(err)
|
if _, err := zrok.Share.UpdateShare(upReq, auth); err != nil {
|
||||||
|
if !panicInstead {
|
||||||
|
tui.Error("unable to update backend proxy endpoint", err)
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
logrus.Infof("updated backend proxy endpoint to: %v", target)
|
||||||
|
} else {
|
||||||
|
logrus.Infof("using existing backend proxy endpoint: %v", target)
|
||||||
}
|
}
|
||||||
logrus.Infof("updated backend proxy endpoint to: %v", target)
|
|
||||||
} else {
|
|
||||||
logrus.Infof("using existing backend proxy endpoint: %v", target)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var shareDescription string
|
var shareDescription string
|
||||||
@ -258,6 +261,27 @@ func (cmd *shareReservedCommand) run(_ *cobra.Command, args []string) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
case "socks":
|
||||||
|
cfg := &socks.BackendConfig{
|
||||||
|
IdentityPath: zif,
|
||||||
|
ShrToken: shrToken,
|
||||||
|
Requests: requests,
|
||||||
|
}
|
||||||
|
|
||||||
|
be, err := socks.NewBackend(cfg)
|
||||||
|
if err != nil {
|
||||||
|
if !panicInstead {
|
||||||
|
tui.Error("error creating socks backend", err)
|
||||||
|
}
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := be.Run(); err != nil {
|
||||||
|
logrus.Errorf("error running socks backend: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
default:
|
default:
|
||||||
tui.Error("invalid backend mode", nil)
|
tui.Error("invalid backend mode", nil)
|
||||||
}
|
}
|
||||||
|
@ -136,18 +136,19 @@ func (l *looper) run() {
|
|||||||
|
|
||||||
l.startup()
|
l.startup()
|
||||||
logrus.Infof("looper #%d, shrToken: %v, frontend: %v", l.id, l.shrToken, l.proxyEndpoint)
|
logrus.Infof("looper #%d, shrToken: %v, frontend: %v", l.id, l.shrToken, l.proxyEndpoint)
|
||||||
go l.serviceListener()
|
if l.serviceListener() {
|
||||||
l.dwell()
|
l.dwell()
|
||||||
l.iterate()
|
l.iterate()
|
||||||
|
}
|
||||||
logrus.Infof("looper #%d: complete", l.id)
|
logrus.Infof("looper #%d: complete", l.id)
|
||||||
l.shutdown()
|
l.shutdown()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *looper) serviceListener() {
|
func (l *looper) serviceListener() bool {
|
||||||
zcfg, err := ziti.NewConfigFromFile(l.zif)
|
zcfg, err := ziti.NewConfigFromFile(l.zif)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error opening ziti config '%v': %v", l.zif, err)
|
logrus.Errorf("error opening ziti config '%v': %v", l.zif, err)
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
options := ziti.ListenOptions{
|
options := ziti.ListenOptions{
|
||||||
ConnectTimeout: 5 * time.Minute,
|
ConnectTimeout: 5 * time.Minute,
|
||||||
@ -156,15 +157,21 @@ func (l *looper) serviceListener() {
|
|||||||
zctx, err := ziti.NewContext(zcfg)
|
zctx, err := ziti.NewContext(zcfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error loading ziti context: %v", err)
|
logrus.Errorf("error loading ziti context: %v", err)
|
||||||
return
|
return false
|
||||||
}
|
}
|
||||||
if l.listener, err = zctx.ListenWithOptions(l.shrToken, &options); err == nil {
|
|
||||||
|
if l.listener, err = zctx.ListenWithOptions(l.shrToken, &options); err != nil {
|
||||||
|
logrus.Errorf("looper #%d, error listening: %v", l.id, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
if err := http.Serve(l.listener, l); err != nil {
|
if err := http.Serve(l.listener, l); err != nil {
|
||||||
logrus.Errorf("looper #%d, error serving: %v", l.id, err)
|
logrus.Errorf("looper #%d, error serving: %v", l.id, err)
|
||||||
}
|
}
|
||||||
} else {
|
}()
|
||||||
logrus.Errorf("looper #%d, error listening: %v", l.id, err)
|
|
||||||
}
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *looper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
func (l *looper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
@ -239,6 +246,9 @@ func (l *looper) iterate() {
|
|||||||
if req, err := http.NewRequest("POST", l.proxyEndpoint, bytes.NewBufferString(outbase64)); err == nil {
|
if req, err := http.NewRequest("POST", l.proxyEndpoint, bytes.NewBufferString(outbase64)); err == nil {
|
||||||
client := &http.Client{Timeout: time.Second * time.Duration(l.cmd.timeoutSeconds)}
|
client := &http.Client{Timeout: time.Second * time.Duration(l.cmd.timeoutSeconds)}
|
||||||
if resp, err := client.Do(req); err == nil {
|
if resp, err := client.Do(req); err == nil {
|
||||||
|
if resp.StatusCode != 200 {
|
||||||
|
logrus.Errorf("looper #%d unexpected response status code %v!", l.id, resp.StatusCode)
|
||||||
|
}
|
||||||
inpayload := new(bytes.Buffer)
|
inpayload := new(bytes.Buffer)
|
||||||
io.Copy(inpayload, resp.Body)
|
io.Copy(inpayload, resp.Body)
|
||||||
inbase64 := inpayload.String()
|
inbase64 := inpayload.String()
|
||||||
|
@ -62,7 +62,7 @@ func (h *accessHandler) Handle(params share.AccessParams, principal *rest_model_
|
|||||||
return share.NewAccessNotFound()
|
return share.NewAccessNotFound()
|
||||||
}
|
}
|
||||||
|
|
||||||
feToken, err := createToken()
|
feToken, err := CreateToken()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Error(err)
|
logrus.Error(err)
|
||||||
return share.NewAccessInternalServerError()
|
return share.NewAccessInternalServerError()
|
||||||
|
@ -50,7 +50,7 @@ func (h *createFrontendHandler) Handle(params admin.CreateFrontendParams, princi
|
|||||||
}
|
}
|
||||||
defer func() { _ = tx.Rollback() }()
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
|
||||||
feToken, err := createToken()
|
feToken, err := CreateToken()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error creating frontend token: %v", err)
|
logrus.Errorf("error creating frontend token: %v", err)
|
||||||
return admin.NewCreateFrontendInternalServerError()
|
return admin.NewCreateFrontendInternalServerError()
|
||||||
|
@ -55,7 +55,7 @@ func (h *inviteHandler) Handle(params account.InviteParams) middleware.Responder
|
|||||||
logrus.Infof("using invite token '%v' to process invite request for '%v'", inviteToken.Token, params.Body.Email)
|
logrus.Infof("using invite token '%v' to process invite request for '%v'", inviteToken.Token, params.Body.Email)
|
||||||
}
|
}
|
||||||
|
|
||||||
token, err = createToken()
|
token, err = CreateToken()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Error(err)
|
logrus.Error(err)
|
||||||
return account.NewInviteInternalServerError()
|
return account.NewInviteInternalServerError()
|
||||||
|
@ -24,7 +24,7 @@ func salt() string {
|
|||||||
return base64.StdEncoding.EncodeToString(buf)
|
return base64.StdEncoding.EncodeToString(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hashPassword(password string) (*hashedPassword, error) {
|
func HashPassword(password string) (*hashedPassword, error) {
|
||||||
return rehashPassword(password, salt())
|
return rehashPassword(password, salt())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -38,7 +38,7 @@ func (h *registerHandler) Handle(params account.RegisterParams) middleware.Respo
|
|||||||
return account.NewRegisterNotFound()
|
return account.NewRegisterNotFound()
|
||||||
}
|
}
|
||||||
|
|
||||||
token, err := createToken()
|
token, err := CreateToken()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error creating token for request '%v' (%v): %v", params.Body.Token, ar.Email, err)
|
logrus.Errorf("error creating token for request '%v' (%v): %v", params.Body.Token, ar.Email, err)
|
||||||
return account.NewRegisterInternalServerError()
|
return account.NewRegisterInternalServerError()
|
||||||
@ -49,7 +49,7 @@ func (h *registerHandler) Handle(params account.RegisterParams) middleware.Respo
|
|||||||
return account.NewRegisterUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error()))
|
return account.NewRegisterUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error()))
|
||||||
}
|
}
|
||||||
|
|
||||||
hpwd, err := hashPassword(params.Body.Password)
|
hpwd, err := HashPassword(params.Body.Password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error hashing password for request '%v' (%v): %v", params.Body.Token, ar.Email, err)
|
logrus.Errorf("error hashing password for request '%v' (%v): %v", params.Body.Token, ar.Email, err)
|
||||||
return account.NewRegisterInternalServerError()
|
return account.NewRegisterInternalServerError()
|
||||||
|
@ -53,7 +53,7 @@ func (handler *resetPasswordHandler) Handle(params account.ResetPasswordParams)
|
|||||||
return account.NewResetPasswordUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error()))
|
return account.NewResetPasswordUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error()))
|
||||||
}
|
}
|
||||||
|
|
||||||
hpwd, err := hashPassword(params.Body.Password)
|
hpwd, err := HashPassword(params.Body.Password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error hashing password for '%v' (%v): %v", params.Body.Token, a.Email, err)
|
logrus.Errorf("error hashing password for '%v' (%v): %v", params.Body.Token, a.Email, err)
|
||||||
return account.NewResetPasswordRequestInternalServerError()
|
return account.NewResetPasswordRequestInternalServerError()
|
||||||
|
@ -34,7 +34,7 @@ func (handler *resetPasswordRequestHandler) Handle(params account.ResetPasswordR
|
|||||||
}
|
}
|
||||||
defer func() { _ = tx.Rollback() }()
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
|
||||||
token, err = createToken()
|
token, err = CreateToken()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Errorf("error creating token for '%v': %v", params.Body.EmailAddress, err)
|
logrus.Errorf("error creating token for '%v': %v", params.Body.EmailAddress, err)
|
||||||
return account.NewResetPasswordRequestInternalServerError()
|
return account.NewResetPasswordRequestInternalServerError()
|
||||||
|
@ -0,0 +1,3 @@
|
|||||||
|
-- +migrate Up
|
||||||
|
|
||||||
|
alter type backend_mode add value 'socks';
|
@ -0,0 +1,58 @@
|
|||||||
|
-- +migrate Up
|
||||||
|
|
||||||
|
alter table shares rename to shares_old;
|
||||||
|
create table shares (
|
||||||
|
id integer primary key,
|
||||||
|
environment_id integer constraint fk_environments_shares references environments on delete cascade,
|
||||||
|
z_id string not null unique,
|
||||||
|
token string not null,
|
||||||
|
share_mode string not null,
|
||||||
|
backend_mode string not null,
|
||||||
|
frontend_selection string,
|
||||||
|
frontend_endpoint string,
|
||||||
|
backend_proxy_endpoint string,
|
||||||
|
reserved boolean not null default(false),
|
||||||
|
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||||
|
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||||
|
deleted boolean not null default(false),
|
||||||
|
|
||||||
|
constraint chk_z_id check (z_id <> ''),
|
||||||
|
constraint chk_token check (token <> ''),
|
||||||
|
constraint chk_share_mode check (share_mode == 'public' or share_mode == 'private'),
|
||||||
|
constraint chk_backend_mode check (backend_mode == 'proxy' or backend_mode == 'web' or backend_mode == 'tcpTunnel' or backend_mode == 'udpTunnel' or backend_mode == 'caddy' or backend_mode == 'drive' or backend_mode == 'socks')
|
||||||
|
);
|
||||||
|
insert into shares select * from shares_old;
|
||||||
|
drop index shares_token_idx;
|
||||||
|
create unique index shares_token_idx ON shares(token) WHERE deleted is false;
|
||||||
|
|
||||||
|
alter table frontends rename to frontends_old;
|
||||||
|
create table frontends (
|
||||||
|
id integer primary key,
|
||||||
|
environment_id integer references environments(id),
|
||||||
|
token varchar(32) not null unique,
|
||||||
|
z_id varchar(32) not null,
|
||||||
|
public_name varchar(64) unique,
|
||||||
|
url_template varchar(1024),
|
||||||
|
reserved boolean not null default(false),
|
||||||
|
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||||
|
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||||
|
deleted boolean not null default(false),
|
||||||
|
private_share_id integer references shares(id)
|
||||||
|
);
|
||||||
|
insert into frontends select * from frontends_old;
|
||||||
|
drop table frontends_old;
|
||||||
|
|
||||||
|
alter table share_limit_journal rename to share_limit_journal_old;
|
||||||
|
create table share_limit_journal (
|
||||||
|
id integer primary key,
|
||||||
|
share_id integer references shares(id),
|
||||||
|
rx_bytes bigint not null,
|
||||||
|
tx_bytes bigint not null,
|
||||||
|
action limit_action_type not null,
|
||||||
|
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||||
|
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now'))
|
||||||
|
);
|
||||||
|
insert into share_limit_journal select * from share_limit_journal_old;
|
||||||
|
drop table share_limit_journal_old;
|
||||||
|
|
||||||
|
drop table shares_old;
|
@ -65,7 +65,7 @@ func createShareToken() (string, error) {
|
|||||||
return gen(), nil
|
return gen(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createToken() (string, error) {
|
func CreateToken() (string, error) {
|
||||||
gen, err := nanoid.CustomASCII("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", 12)
|
gen, err := nanoid.CustomASCII("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", 12)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
# Stage 1: Install Node.js with nvm
|
||||||
FROM debian:bullseye-slim
|
FROM debian:bullseye-slim
|
||||||
#
|
#
|
||||||
# this file mirrors the build params used in the GitHub Actions and enables
|
# this file mirrors the build params used in the GitHub Actions and enables
|
||||||
@ -12,23 +13,31 @@ ARG go_root=/usr/local/go
|
|||||||
ARG go_cache=/usr/share/go_cache
|
ARG go_cache=/usr/share/go_cache
|
||||||
ARG uid=1000
|
ARG uid=1000
|
||||||
ARG gid=1000
|
ARG gid=1000
|
||||||
RUN apt-get -y update
|
RUN apt-get -y update \
|
||||||
RUN apt-get -y install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf gcc-aarch64-linux-gnu
|
&& apt-get -y install \
|
||||||
RUN apt-get -y install wget build-essential
|
gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf gcc-aarch64-linux-gnu \
|
||||||
|
wget build-essential
|
||||||
|
|
||||||
COPY ./linux-build.sh /usr/local/bin/
|
|
||||||
RUN wget -q https://go.dev/dl/${go_distribution_file}
|
RUN wget -q https://go.dev/dl/${go_distribution_file}
|
||||||
RUN tar -xzf ${go_distribution_file} -C /usr/local/
|
RUN tar -xzf ${go_distribution_file} -C /usr/local/
|
||||||
|
|
||||||
|
RUN wget -qO- https://deb.nodesource.com/setup_18.x | bash \
|
||||||
|
&& apt-get -y update \
|
||||||
|
&& apt-get -y install \
|
||||||
|
nodejs
|
||||||
|
|
||||||
RUN mkdir ${go_path} ${go_cache}
|
RUN mkdir ${go_path} ${go_cache}
|
||||||
RUN chown -R ${uid}:${gid} ${go_path} ${go_cache}
|
RUN chown -R ${uid}:${gid} ${go_path} ${go_cache}
|
||||||
|
|
||||||
|
COPY ./linux-build.sh /usr/local/bin/
|
||||||
|
|
||||||
USER ${uid}:${gid}
|
USER ${uid}:${gid}
|
||||||
ENV TARGETARCH=${TARGETARCH}
|
ENV TARGETARCH=${TARGETARCH}
|
||||||
ENV GOPATH=${go_path}
|
ENV GOPATH=${go_path}
|
||||||
ENV GOROOT=${go_root}
|
ENV GOROOT=${go_root}
|
||||||
ENV GOCACHE=${go_cache}
|
ENV GOCACHE=${go_cache}
|
||||||
ENV PATH=${go_path}/bin:${go_root}/bin:$PATH
|
ENV PATH=${go_path}/bin:${go_root}/bin:$PATH
|
||||||
|
|
||||||
RUN go install github.com/mitchellh/gox@latest
|
RUN go install github.com/mitchellh/gox@latest
|
||||||
WORKDIR /mnt
|
WORKDIR /mnt
|
||||||
ENTRYPOINT ["linux-build.sh"]
|
ENTRYPOINT ["linux-build.sh"]
|
||||||
|
@ -6,7 +6,10 @@
|
|||||||
#
|
#
|
||||||
#
|
#
|
||||||
|
|
||||||
set -o pipefail -e -u
|
set -o errexit
|
||||||
|
set -o nounset
|
||||||
|
set -o pipefail
|
||||||
|
set -o xtrace
|
||||||
|
|
||||||
# if no architectures supplied then default list of three
|
# if no architectures supplied then default list of three
|
||||||
if (( ${#} )); then
|
if (( ${#} )); then
|
||||||
@ -31,6 +34,16 @@ else
|
|||||||
PROCS_PER_JOB=0 # invokes gox default to use all CPUs-1
|
PROCS_PER_JOB=0 # invokes gox default to use all CPUs-1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
(
|
||||||
|
HOME=/tmp/builder
|
||||||
|
# Navigate to the "ui" directory and run npm commands
|
||||||
|
npm config set cache /mnt/.npm
|
||||||
|
cd ./ui/
|
||||||
|
mkdir -p $HOME
|
||||||
|
npm install
|
||||||
|
npm run build
|
||||||
|
)
|
||||||
|
|
||||||
for ARCH in ${JOBS[@]}; do
|
for ARCH in ${JOBS[@]}; do
|
||||||
GOX_CMD="
|
GOX_CMD="
|
||||||
gox \
|
gox \
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
# this builds docker.io/openziti/zrok
|
# this builds docker.io/openziti/zrok
|
||||||
ARG ZITI_CLI_TAG="0.31.2"
|
ARG ZITI_CLI_TAG="0.32.1"
|
||||||
ARG ZITI_CLI_IMAGE="docker.io/openziti/ziti-cli"
|
ARG ZITI_CLI_IMAGE="docker.io/openziti/ziti-cli"
|
||||||
# this builds docker.io/openziti/ziti-controller
|
# this builds docker.io/openziti/ziti-controller
|
||||||
FROM ${ZITI_CLI_IMAGE}:${ZITI_CLI_TAG}
|
FROM ${ZITI_CLI_IMAGE}:${ZITI_CLI_TAG}
|
||||||
@ -20,8 +20,9 @@ LABEL name="openziti/zrok" \
|
|||||||
|
|
||||||
USER root
|
USER root
|
||||||
|
|
||||||
### install packages (jq introduced in source image in next release 0.30.6)
|
### install packages: findutils provides xargs which is used by the zrok Helm chart's controller bootstrapping script to
|
||||||
RUN INSTALL_PKGS="jq" && \
|
#create the default account enable token
|
||||||
|
RUN INSTALL_PKGS="findutils" && \
|
||||||
microdnf -y update --setopt=install_weak_deps=0 --setopt=tsflags=nodocs && \
|
microdnf -y update --setopt=install_weak_deps=0 --setopt=tsflags=nodocs && \
|
||||||
microdnf -y install --setopt=install_weak_deps=0 --setopt=tsflags=nodocs ${INSTALL_PKGS}
|
microdnf -y install --setopt=install_weak_deps=0 --setopt=tsflags=nodocs ${INSTALL_PKGS}
|
||||||
|
|
||||||
|
314
docs/guides/drives/cli.md
Normal file
314
docs/guides/drives/cli.md
Normal file
@ -0,0 +1,314 @@
|
|||||||
|
# The Drives CLI
|
||||||
|
|
||||||
|
The zrok drives CLI tools allow for simple, ergonomic management and synchronization of local and remote files.
|
||||||
|
|
||||||
|
## Sharing a Drive
|
||||||
|
|
||||||
|
Virtual drives are shared through the `zrok` CLI using the `--backend-mode drive` flag through the `zrok share` command, using either the `public` or `private` sharing modes. We'll use the `private` sharing mode for this example:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ mkdir /tmp/junk
|
||||||
|
$ zrok share private --headless --backend-mode drive /tmp/junk
|
||||||
|
[ 0.124] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[cf640aac-2706-49ae-9cc9-9a497d67d9c5]} new service session
|
||||||
|
[ 0.145] INFO main.(*sharePrivateCommand).run: allow other to access your share with the following command:
|
||||||
|
zrok access private wkcfb58vj51l
|
||||||
|
```
|
||||||
|
|
||||||
|
The command shown above creates an ephemeral, `private` drive share pointed at the local `/tmp/junk` folder.
|
||||||
|
|
||||||
|
Notice that the share token allocated by `zrok` is `wkcfb58vj51l`. We'll use that share token to identify our virtual drive in the following operations.
|
||||||
|
|
||||||
|
## Working with a Private Drive Share
|
||||||
|
|
||||||
|
First, let's copy a file into our virtual drive using the `zrok copy` command:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy LICENSE zrok://wkcfb58vj51l
|
||||||
|
[ 0.119] INFO zrok/drives/sync.OneWay: => /LICENSE
|
||||||
|
copy complete!
|
||||||
|
```
|
||||||
|
|
||||||
|
We used the URL scheme `zrok://<shareToken>` to refer to the private virtual drive we allocated above using the `zrok share private` command. Use `zrok://` URLs with the drives CLI tools to refer to contents of private virtual drives.
|
||||||
|
|
||||||
|
Next, let's get a directory listing of the virtual drive:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok ls zrok://wkcfb58vj51l
|
||||||
|
┌──────┬─────────┬─────────┬───────────────────────────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼─────────┼─────────┼───────────────────────────────┤
|
||||||
|
│ │ LICENSE │ 11.3 kB │ 2024-01-19 12:16:46 -0500 EST │
|
||||||
|
└──────┴─────────┴─────────┴───────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
We can make directories on the virtual drive:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok mkdir zrok://wkcfb58vj51l/stuff
|
||||||
|
$ zrok ls zrok://wkcfb58vj51l
|
||||||
|
┌──────┬─────────┬─────────┬───────────────────────────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼─────────┼─────────┼───────────────────────────────┤
|
||||||
|
│ │ LICENSE │ 11.3 kB │ 2024-01-19 12:16:46 -0500 EST │
|
||||||
|
│ DIR │ stuff │ │ │
|
||||||
|
└──────┴─────────┴─────────┴───────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
We can copy the contents of a local directory into the new directory on the virtual drive:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ ls -l util/
|
||||||
|
total 20
|
||||||
|
-rw-rw-r-- 1 michael michael 329 Jul 21 13:17 email.go
|
||||||
|
-rw-rw-r-- 1 michael michael 456 Jul 21 13:17 headers.go
|
||||||
|
-rw-rw-r-- 1 michael michael 609 Jul 21 13:17 proxy.go
|
||||||
|
-rw-rw-r-- 1 michael michael 361 Jul 21 13:17 size.go
|
||||||
|
-rw-rw-r-- 1 michael michael 423 Jan 2 11:57 uniqueName.go
|
||||||
|
$ zrok copy util/ zrok://wkcfb58vj51l/stuff
|
||||||
|
[ 0.123] INFO zrok/drives/sync.OneWay: => /email.go
|
||||||
|
[ 0.194] INFO zrok/drives/sync.OneWay: => /headers.go
|
||||||
|
[ 0.267] INFO zrok/drives/sync.OneWay: => /proxy.go
|
||||||
|
[ 0.337] INFO zrok/drives/sync.OneWay: => /size.go
|
||||||
|
[ 0.408] INFO zrok/drives/sync.OneWay: => /uniqueName.go
|
||||||
|
copy complete!
|
||||||
|
$ zrok ls zrok://wkcfb58vj51l/stuff
|
||||||
|
┌──────┬───────────────┬───────┬───────────────────────────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼───────────────┼───────┼───────────────────────────────┤
|
||||||
|
│ │ email.go │ 329 B │ 2024-01-19 12:26:45 -0500 EST │
|
||||||
|
│ │ headers.go │ 456 B │ 2024-01-19 12:26:45 -0500 EST │
|
||||||
|
│ │ proxy.go │ 609 B │ 2024-01-19 12:26:45 -0500 EST │
|
||||||
|
│ │ size.go │ 361 B │ 2024-01-19 12:26:45 -0500 EST │
|
||||||
|
│ │ uniqueName.go │ 423 B │ 2024-01-19 12:26:45 -0500 EST │
|
||||||
|
└──────┴───────────────┴───────┴───────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
And we can remove files and directories from the virtual drive:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok rm zrok://wkcfb58vj51l/LICENSE
|
||||||
|
$ zrok ls zrok://wkcfb58vj51l
|
||||||
|
┌──────┬───────┬──────┬──────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼───────┼──────┼──────────┤
|
||||||
|
│ DIR │ stuff │ │ │
|
||||||
|
└──────┴───────┴──────┴──────────┘
|
||||||
|
$ zrok rm zrok://wkcfb58vj51l/stuff
|
||||||
|
$ zrok ls zrok://wkcfb58vj51l
|
||||||
|
┌──────┬──────┬──────┬──────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼──────┼──────┼──────────┤
|
||||||
|
└──────┴──────┴──────┴──────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Working with Public Shares
|
||||||
|
|
||||||
|
Public shares work very similarly to private shares, they just use a different URL scheme:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok share public --headless --backend-mode drive /tmp/junk
|
||||||
|
[ 0.708] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[05e0f48b-242b-4fd9-8edb-259488535c47]} new service session
|
||||||
|
[ 0.878] INFO main.(*sharePublicCommand).run: access your zrok share at the following endpoints:
|
||||||
|
https://6kiww4bn7iok.share.zrok.io
|
||||||
|
```
|
||||||
|
|
||||||
|
The same commands, with a different URL scheme work with the `zrok` drives CLI:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy util/ https://6kiww4bn7iok.share.zrok.io
|
||||||
|
[ 0.268] INFO zrok/drives/sync.OneWay: => /email.go
|
||||||
|
[ 0.406] INFO zrok/drives/sync.OneWay: => /headers.go
|
||||||
|
[ 0.530] INFO zrok/drives/sync.OneWay: => /proxy.go
|
||||||
|
[ 0.655] INFO zrok/drives/sync.OneWay: => /size.go
|
||||||
|
[ 0.714] INFO zrok/drives/sync.OneWay: => /uniqueName.go
|
||||||
|
copy complete!
|
||||||
|
michael@fourtyfour Fri Jan 19 12:42:52 ~/Repos/nf/zrok
|
||||||
|
$ zrok ls https://6kiww4bn7iok.share.zrok.io
|
||||||
|
┌──────┬───────────────┬───────┬───────────────────────────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼───────────────┼───────┼───────────────────────────────┤
|
||||||
|
│ │ email.go │ 329 B │ 2023-07-21 13:17:56 -0400 EDT │
|
||||||
|
│ │ headers.go │ 456 B │ 2023-07-21 13:17:56 -0400 EDT │
|
||||||
|
│ │ proxy.go │ 609 B │ 2023-07-21 13:17:56 -0400 EDT │
|
||||||
|
│ │ size.go │ 361 B │ 2023-07-21 13:17:56 -0400 EDT │
|
||||||
|
│ │ uniqueName.go │ 423 B │ 2024-01-02 11:57:14 -0500 EST │
|
||||||
|
└──────┴───────────────┴───────┴───────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
For basic authentication provided by public shares, the `zrok` drives CLI offers the `--basic-auth` flag, which accepts a `<username>:<password>` parameter to specify the authentication for the public virtual drive (if it's required).
|
||||||
|
|
||||||
|
Alternatively, the authentication can be set using the `ZROK_DRIVES_BASIC_AUTH` environment variable:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ export ZROK_DRIVES_BASIC_AUTH=username:password
|
||||||
|
```
|
||||||
|
|
||||||
|
## One-way Synchronization
|
||||||
|
|
||||||
|
The `zrok copy` command includes a `--sync` flag, which only copies files detected as _modified_. `zrok` considers a file with the same modification timestamp and size to be the same. Of course, this is not a strong guarantee that the files are equivalent. Future `zrok` drives versions will provide a cryptographically strong mechanism (a-la `rsync` and friends) to guarantee that files and trees of files are synchronized.
|
||||||
|
|
||||||
|
For now, the `--sync` flag provides a convenience mechanism to allow resuming copies of large file trees and provide a reasonable guarantee that the trees are in sync.
|
||||||
|
|
||||||
|
Let's take a look at `zrok copy --sync` in action:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io
|
||||||
|
[ 0.636] INFO zrok/drives/sync.OneWay: => /_attic/
|
||||||
|
[ 0.760] INFO zrok/drives/sync.OneWay: => /_attic/network/
|
||||||
|
[ 0.816] INFO zrok/drives/sync.OneWay: => /_attic/network/_category_.json
|
||||||
|
[ 0.928] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/
|
||||||
|
[ 0.987] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-ctrl.service
|
||||||
|
[ 1.048] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-ctrl.yml
|
||||||
|
[ 1.107] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-router0.service
|
||||||
|
[ 1.167] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-router0.yml
|
||||||
|
[ 1.218] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-access-public.service
|
||||||
|
[ 1.273] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-ctrl.service
|
||||||
|
[ 1.328] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-ctrl.yml
|
||||||
|
[ 1.382] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok.io-network-skeleton.md
|
||||||
|
[ 1.447] INFO zrok/drives/sync.OneWay: => /_attic/overview.md
|
||||||
|
[ 1.572] INFO zrok/drives/sync.OneWay: => /_attic/sharing/
|
||||||
|
[ 1.622] INFO zrok/drives/sync.OneWay: => /_attic/sharing/_category_.json
|
||||||
|
[ 1.673] INFO zrok/drives/sync.OneWay: => /_attic/sharing/reserved_services.md
|
||||||
|
[ 1.737] INFO zrok/drives/sync.OneWay: => /_attic/sharing/sharing_modes.md
|
||||||
|
[ 1.793] INFO zrok/drives/sync.OneWay: => /_attic/v0.2_account_requests.md
|
||||||
|
[ 1.902] INFO zrok/drives/sync.OneWay: => /_attic/v0.4_limits.md
|
||||||
|
...
|
||||||
|
[ 9.691] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_empty_shares.png
|
||||||
|
[ 9.812] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_new_environment.png
|
||||||
|
[ 9.870] INFO zrok/drives/sync.OneWay: => /images/zrok_zoom_to_fit.png
|
||||||
|
copy complete!
|
||||||
|
```
|
||||||
|
|
||||||
|
Because the target drive was empty, `zrok copy --sync` copied the entire contents of the local `docs/` tree into the virtual drive. However, if we run that command again, we get:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io
|
||||||
|
copy complete!
|
||||||
|
```
|
||||||
|
|
||||||
|
The virtual drive contents are already in sync with the local filesystem tree, so there is nothing for it to copy.
|
||||||
|
|
||||||
|
Let's alter the contents of the drive and run the `--sync` again:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok rm https://glmv049c62p7.share.zrok.io/images
|
||||||
|
$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io
|
||||||
|
[ 0.364] INFO zrok/drives/sync.OneWay: => /images/
|
||||||
|
[ 0.456] INFO zrok/drives/sync.OneWay: => /images/zrok.png
|
||||||
|
[ 0.795] INFO zrok/drives/sync.OneWay: => /images/zrok_cover.png
|
||||||
|
[ 0.866] INFO zrok/drives/sync.OneWay: => /images/zrok_deployment.drawio
|
||||||
|
...
|
||||||
|
[ 2.254] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_empty_shares.png
|
||||||
|
[ 2.340] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_new_environment.png
|
||||||
|
[ 2.391] INFO zrok/drives/sync.OneWay: => /images/zrok_zoom_to_fit.png
|
||||||
|
copy complete!
|
||||||
|
```
|
||||||
|
|
||||||
|
Because we removed the `images/` tree from the virtual drive, `zrok copy --sync` detected this and copied the local `images/` tree back onto the virtual drive.
|
||||||
|
|
||||||
|
## Drive-to-Drive Copies and Synchronization
|
||||||
|
|
||||||
|
The `zrok copy` CLI can operate on pairs of virtual drives remotely, without ever having to store files locally. This allow for drive-to-drive copies and synchronization.
|
||||||
|
|
||||||
|
Here are a couple of examples:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy --sync https://glmv049c62p7.share.zrok.io https://glmv049c62p7.share.zrok.io
|
||||||
|
copy complete!
|
||||||
|
```
|
||||||
|
|
||||||
|
Specifying the same URL for both the source and the target of a `--sync` operation should always result in nothing being copied... they are the same drive with the same state.
|
||||||
|
|
||||||
|
We can copy files between two virtual drives with a single command:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy --sync https://glmv049c62p7.share.zrok.io zrok://hsml272j3xzf
|
||||||
|
[ 1.396] INFO zrok/drives/sync.OneWay: => /_attic/
|
||||||
|
[ 2.083] INFO zrok/drives/sync.OneWay: => /_attic/overview.md
|
||||||
|
[ 2.704] INFO zrok/drives/sync.OneWay: => /_attic/sharing/
|
||||||
|
...
|
||||||
|
[ 118.240] INFO zrok/drives/sync.OneWay: => /images/zrok_web_console_empty.png
|
||||||
|
[ 118.920] INFO zrok/drives/sync.OneWay: => /images/zrok_enable_modal.png
|
||||||
|
[ 119.589] INFO zrok/drives/sync.OneWay: => /images/zrok_cover.png
|
||||||
|
[ 120.214] INFO zrok/drives/sync.OneWay: => /getting-started.mdx
|
||||||
|
copy complete!
|
||||||
|
$ zrok copy --sync https://glmv049c62p7.share.zrok.io zrok://hsml272j3xzf
|
||||||
|
copy complete!
|
||||||
|
```
|
||||||
|
|
||||||
|
## Copying from Drives to the Local Filesystem
|
||||||
|
|
||||||
|
In the current version of the drives CLI, `zrok copy` always assumes the destination is a directory. There is currently no way to do:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy somefile someotherfile
|
||||||
|
```
|
||||||
|
|
||||||
|
What you'll end up with on the local filesystem is:
|
||||||
|
|
||||||
|
```
|
||||||
|
somefile
|
||||||
|
someotherfile/somefile
|
||||||
|
```
|
||||||
|
|
||||||
|
It's in the backlog to support file destinations in a future release of `zrok`. So, when using `zrok copy`, always take note of the destination.
|
||||||
|
|
||||||
|
`zrok copy` supports a default destination of `file://.`, so you can do single parameter `zrok copy` commands like this:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok ls https://azc47r3cwjds.share.zrok.io
|
||||||
|
┌──────┬─────────┬─────────┬───────────────────────────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼─────────┼─────────┼───────────────────────────────┤
|
||||||
|
│ │ LICENSE │ 11.3 kB │ 2023-07-21 13:17:56 -0400 EDT │
|
||||||
|
└──────┴─────────┴─────────┴───────────────────────────────┘
|
||||||
|
$ zrok copy https://azc47r3cwjds.share.zrok.io/LICENSE
|
||||||
|
[ 0.260] INFO zrok/drives/sync.OneWay: => /LICENSE
|
||||||
|
copy complete!
|
||||||
|
$ ls -l
|
||||||
|
total 12
|
||||||
|
-rw-rw-r-- 1 michael michael 11346 Jan 19 13:29 LICENSE
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also specify a local folder as the destination for your copy:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok copy https://azc47r3cwjds.share.zrok.io/LICENSE /tmp/inbox
|
||||||
|
[ 0.221] INFO zrok/drives/sync.OneWay: => /LICENSE
|
||||||
|
copy complete!
|
||||||
|
$ l /tmp/inbox
|
||||||
|
total 12
|
||||||
|
-rw-rw-r-- 1 michael michael 11346 Jan 19 13:30 LICENSE
|
||||||
|
```
|
||||||
|
|
||||||
|
## Unique Names and Reserved Shares
|
||||||
|
|
||||||
|
Private reserved shares with unque names can be particularly useful with the drives CLI:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok reserve private -b drive --unique-name mydrive /tmp/junk
|
||||||
|
[ 0.315] INFO main.(*reserveCommand).run: your reserved share token is 'mydrive'
|
||||||
|
$ zrok share reserved --headless mydrive
|
||||||
|
[ 0.289] INFO main.(*shareReservedCommand).run: sharing target: '/tmp/junk'
|
||||||
|
[ 0.289] INFO main.(*shareReservedCommand).run: using existing backend proxy endpoint: /tmp/junk
|
||||||
|
[ 0.767] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[d519a436-9fb5-4207-afd5-7cbc28fb779a]} new service session
|
||||||
|
[ 0.927] INFO main.(*shareReservedCommand).run: use this command to access your zrok share: 'zrok access private mydrive'
|
||||||
|
```
|
||||||
|
|
||||||
|
This makes working with `zrok://` URLs particularly convenient:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ zrok ls zrok://mydrive
|
||||||
|
┌──────┬─────────┬─────────┬───────────────────────────────┐
|
||||||
|
│ TYPE │ NAME │ SIZE │ MODIFIED │
|
||||||
|
├──────┼─────────┼─────────┼───────────────────────────────┤
|
||||||
|
│ │ LICENSE │ 11.3 kB │ 2023-07-21 13:17:56 -0400 EDT │
|
||||||
|
└──────┴─────────┴─────────┴───────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Future Enhancements
|
||||||
|
|
||||||
|
Coming in a future release of `zrok` drives are features like:
|
||||||
|
|
||||||
|
* two-way synchronization between multiple hosts... allowing for shared "dropbox-like" usage scenarios between multiple environments
|
||||||
|
* better ergonomics for single-file destinations
|
@ -29,7 +29,7 @@ import styles from '@site/src/css/download-card.module.css';
|
|||||||
|
|
||||||
```text
|
```text
|
||||||
$source = Join-Path -Path $env:TEMP -ChildPath "zrok\zrok.exe"
|
$source = Join-Path -Path $env:TEMP -ChildPath "zrok\zrok.exe"
|
||||||
$destination = Join-Path -Path $env:HOME -ChildPath "bin\zrok.exe"
|
$destination = Join-Path -Path $env:USERPROFILE -ChildPath "bin\zrok.exe"
|
||||||
New-Item -Path $destination -ItemType Directory -ErrorAction SilentlyContinue
|
New-Item -Path $destination -ItemType Directory -ErrorAction SilentlyContinue
|
||||||
Copy-Item -Path $source -Destination $destination
|
Copy-Item -Path $source -Destination $destination
|
||||||
$env:path += ";"+$destination
|
$env:path += ";"+$destination
|
||||||
|
305
drives/davClient/client.go
Normal file
305
drives/davClient/client.go
Normal file
@ -0,0 +1,305 @@
|
|||||||
|
package davClient
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/drives/davClient/internal"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPClient performs HTTP requests. It's implemented by *http.Client.
|
||||||
|
type HTTPClient interface {
|
||||||
|
Do(req *http.Request) (*http.Response, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type basicAuthHTTPClient struct {
|
||||||
|
c HTTPClient
|
||||||
|
username, password string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *basicAuthHTTPClient) Do(req *http.Request) (*http.Response, error) {
|
||||||
|
req.SetBasicAuth(c.username, c.password)
|
||||||
|
return c.c.Do(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPClientWithBasicAuth returns an HTTP client that adds basic
|
||||||
|
// authentication to all outgoing requests. If c is nil, http.DefaultClient is
|
||||||
|
// used.
|
||||||
|
func HTTPClientWithBasicAuth(c HTTPClient, username, password string) HTTPClient {
|
||||||
|
if c == nil {
|
||||||
|
c = http.DefaultClient
|
||||||
|
}
|
||||||
|
return &basicAuthHTTPClient{c, username, password}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client provides access to a remote WebDAV filesystem.
|
||||||
|
type Client struct {
|
||||||
|
ic *internal.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewClient(c HTTPClient, endpoint string) (*Client, error) {
|
||||||
|
ic, err := internal.NewClient(c, endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Client{ic}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) FindCurrentUserPrincipal(ctx context.Context) (string, error) {
|
||||||
|
propfind := internal.NewPropNamePropFind(internal.CurrentUserPrincipalName)
|
||||||
|
|
||||||
|
// TODO: consider retrying on the root URI "/" if this fails, as suggested
|
||||||
|
// by the RFC?
|
||||||
|
resp, err := c.ic.PropFindFlat(ctx, "", propfind)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
var prop internal.CurrentUserPrincipal
|
||||||
|
if err := resp.DecodeProp(&prop); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if prop.Unauthenticated != nil {
|
||||||
|
return "", fmt.Errorf("webdav: unauthenticated")
|
||||||
|
}
|
||||||
|
|
||||||
|
return prop.Href.Path, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileInfoPropFind = internal.NewPropNamePropFind(
|
||||||
|
internal.ResourceTypeName,
|
||||||
|
internal.GetContentLengthName,
|
||||||
|
internal.GetLastModifiedName,
|
||||||
|
internal.GetContentTypeName,
|
||||||
|
internal.GetETagName,
|
||||||
|
)
|
||||||
|
|
||||||
|
func fileInfoFromResponse(resp *internal.Response) (*FileInfo, error) {
|
||||||
|
path, err := resp.Path()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fi := &FileInfo{Path: path}
|
||||||
|
|
||||||
|
var resType internal.ResourceType
|
||||||
|
if err := resp.DecodeProp(&resType); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if resType.Is(internal.CollectionName) {
|
||||||
|
fi.IsDir = true
|
||||||
|
} else {
|
||||||
|
var getLen internal.GetContentLength
|
||||||
|
if err := resp.DecodeProp(&getLen); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var getType internal.GetContentType
|
||||||
|
if err := resp.DecodeProp(&getType); err != nil && !internal.IsNotFound(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var getETag internal.GetETag
|
||||||
|
if err := resp.DecodeProp(&getETag); err != nil && !internal.IsNotFound(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fi.Size = getLen.Length
|
||||||
|
fi.MIMEType = getType.Type
|
||||||
|
fi.ETag = string(getETag.ETag)
|
||||||
|
}
|
||||||
|
|
||||||
|
var getMod internal.GetLastModified
|
||||||
|
if err := resp.DecodeProp(&getMod); err != nil && !internal.IsNotFound(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fi.ModTime = time.Time(getMod.LastModified)
|
||||||
|
|
||||||
|
return fi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Stat(ctx context.Context, name string) (*FileInfo, error) {
|
||||||
|
resp, err := c.ic.PropFindFlat(ctx, name, fileInfoPropFind)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fileInfoFromResponse(resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Open(ctx context.Context, name string) (io.ReadCloser, error) {
|
||||||
|
req, err := c.ic.NewRequest(http.MethodGet, name, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp.Body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Readdir(ctx context.Context, name string, recursive bool) ([]FileInfo, error) {
|
||||||
|
depth := internal.DepthOne
|
||||||
|
if recursive {
|
||||||
|
depth = internal.DepthInfinity
|
||||||
|
}
|
||||||
|
|
||||||
|
ms, err := c.ic.PropFind(ctx, name, depth, fileInfoPropFind)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
l := make([]FileInfo, 0, len(ms.Responses))
|
||||||
|
for _, resp := range ms.Responses {
|
||||||
|
fi, err := fileInfoFromResponse(&resp)
|
||||||
|
if err != nil {
|
||||||
|
return l, err
|
||||||
|
}
|
||||||
|
l = append(l, *fi)
|
||||||
|
}
|
||||||
|
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileWriter struct {
|
||||||
|
pw *io.PipeWriter
|
||||||
|
done <-chan error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fw *fileWriter) Write(b []byte) (int, error) {
|
||||||
|
return fw.pw.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fw *fileWriter) Close() error {
|
||||||
|
if err := fw.pw.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return <-fw.done
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Create(ctx context.Context, name string) (io.WriteCloser, error) {
|
||||||
|
pr, pw := io.Pipe()
|
||||||
|
|
||||||
|
req, err := c.ic.NewRequest(http.MethodPut, name, pr)
|
||||||
|
if err != nil {
|
||||||
|
pw.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
done <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
done <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
return &fileWriter{pw, done}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) CreateWithModTime(ctx context.Context, name string, modTime time.Time) (io.WriteCloser, error) {
|
||||||
|
pr, pw := io.Pipe()
|
||||||
|
|
||||||
|
req, err := c.ic.NewRequest(http.MethodPut, name, pr)
|
||||||
|
if err != nil {
|
||||||
|
pw.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
req.Header.Set("Zrok-Modtime", fmt.Sprintf("%d", modTime.Unix()))
|
||||||
|
|
||||||
|
done := make(chan error, 1)
|
||||||
|
go func() {
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
done <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
done <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
return &fileWriter{pw, done}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Touch(ctx context.Context, path string, mtime time.Time) error {
|
||||||
|
status, err := c.ic.Touch(ctx, path, mtime)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, resp := range status.Responses {
|
||||||
|
if resp.Err() != nil {
|
||||||
|
return resp.Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) RemoveAll(ctx context.Context, name string) error {
|
||||||
|
req, err := c.ic.NewRequest(http.MethodDelete, name, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Mkdir(ctx context.Context, name string) error {
|
||||||
|
req, err := c.ic.NewRequest("MKCOL", name, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) CopyAll(ctx context.Context, name, dest string, overwrite bool) error {
|
||||||
|
req, err := c.ic.NewRequest("COPY", name, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Destination", c.ic.ResolveHref(dest).String())
|
||||||
|
req.Header.Set("Overwrite", internal.FormatOverwrite(overwrite))
|
||||||
|
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) MoveAll(ctx context.Context, name, dest string, overwrite bool) error {
|
||||||
|
req, err := c.ic.NewRequest("MOVE", name, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("Destination", c.ic.ResolveHref(dest).String())
|
||||||
|
req.Header.Set("Overwrite", internal.FormatOverwrite(overwrite))
|
||||||
|
|
||||||
|
resp, err := c.ic.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
286
drives/davClient/internal/client.go
Normal file
286
drives/davClient/internal/client.go
Normal file
@ -0,0 +1,286 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DiscoverContextURL performs a DNS-based CardDAV/CalDAV service discovery as
|
||||||
|
// described in RFC 6352 section 11. It returns the URL to the CardDAV server.
|
||||||
|
func DiscoverContextURL(ctx context.Context, service, domain string) (string, error) {
|
||||||
|
var resolver net.Resolver
|
||||||
|
|
||||||
|
// Only lookup TLS records, plaintext connections are insecure
|
||||||
|
_, addrs, err := resolver.LookupSRV(ctx, service+"s", "tcp", domain)
|
||||||
|
if dnsErr, ok := err.(*net.DNSError); ok {
|
||||||
|
if dnsErr.IsTemporary {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(addrs) == 0 {
|
||||||
|
return "", fmt.Errorf("webdav: domain doesn't have an SRV record")
|
||||||
|
}
|
||||||
|
addr := addrs[0]
|
||||||
|
|
||||||
|
target := strings.TrimSuffix(addr.Target, ".")
|
||||||
|
if target == "" {
|
||||||
|
return "", fmt.Errorf("webdav: empty target in SRV record")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: perform a TXT lookup, check for a "path" key in the response
|
||||||
|
u := url.URL{Scheme: "https"}
|
||||||
|
if addr.Port == 443 {
|
||||||
|
u.Host = target
|
||||||
|
} else {
|
||||||
|
u.Host = fmt.Sprintf("%v:%v", target, addr.Port)
|
||||||
|
}
|
||||||
|
u.Path = "/.well-known/" + service
|
||||||
|
return u.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPClient performs HTTP requests. It's implemented by *http.Client.
|
||||||
|
type HTTPClient interface {
|
||||||
|
Do(req *http.Request) (*http.Response, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Client struct {
|
||||||
|
http HTTPClient
|
||||||
|
endpoint *url.URL
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewClient(c HTTPClient, endpoint string) (*Client, error) {
|
||||||
|
if c == nil {
|
||||||
|
c = http.DefaultClient
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.Parse(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if u.Path == "" {
|
||||||
|
// This is important to avoid issues with path.Join
|
||||||
|
u.Path = "/"
|
||||||
|
}
|
||||||
|
return &Client{http: c, endpoint: u}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) ResolveHref(p string) *url.URL {
|
||||||
|
if !strings.HasPrefix(p, "/") {
|
||||||
|
p = path.Join(c.endpoint.Path, p)
|
||||||
|
}
|
||||||
|
return &url.URL{
|
||||||
|
Scheme: c.endpoint.Scheme,
|
||||||
|
User: c.endpoint.User,
|
||||||
|
Host: c.endpoint.Host,
|
||||||
|
Path: p,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) NewRequest(method string, path string, body io.Reader) (*http.Request, error) {
|
||||||
|
return http.NewRequest(method, c.ResolveHref(path).String(), body)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) NewXMLRequest(method string, path string, v interface{}) (*http.Request, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
buf.WriteString(xml.Header)
|
||||||
|
if err := xml.NewEncoder(&buf).Encode(v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := c.NewRequest(method, path, &buf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Add("Content-Type", "text/xml; charset=\"utf-8\"")
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Do(req *http.Request) (*http.Response, error) {
|
||||||
|
resp, err := c.http.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resp.StatusCode/100 != 2 {
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
contentType := resp.Header.Get("Content-Type")
|
||||||
|
if contentType == "" {
|
||||||
|
contentType = "text/plain"
|
||||||
|
}
|
||||||
|
|
||||||
|
var wrappedErr error
|
||||||
|
t, _, _ := mime.ParseMediaType(contentType)
|
||||||
|
if t == "application/xml" || t == "text/xml" {
|
||||||
|
var davErr Error
|
||||||
|
if err := xml.NewDecoder(resp.Body).Decode(&davErr); err != nil {
|
||||||
|
wrappedErr = err
|
||||||
|
} else {
|
||||||
|
wrappedErr = &davErr
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(t, "text/") {
|
||||||
|
lr := io.LimitedReader{R: resp.Body, N: 1024}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
io.Copy(&buf, &lr)
|
||||||
|
resp.Body.Close()
|
||||||
|
if s := strings.TrimSpace(buf.String()); s != "" {
|
||||||
|
if lr.N == 0 {
|
||||||
|
s += " […]"
|
||||||
|
}
|
||||||
|
wrappedErr = fmt.Errorf("%v", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, &HTTPError{Code: resp.StatusCode, Err: wrappedErr}
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) DoMultiStatus(req *http.Request) (*MultiStatus, error) {
|
||||||
|
resp, err := c.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusMultiStatus {
|
||||||
|
return nil, fmt.Errorf("HTTP multi-status request failed: %v", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: the response can be quite large, support streaming Response elements
|
||||||
|
var ms MultiStatus
|
||||||
|
if err := xml.NewDecoder(resp.Body).Decode(&ms); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ms, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) PropFind(ctx context.Context, path string, depth Depth, propfind *PropFind) (*MultiStatus, error) {
|
||||||
|
req, err := c.NewXMLRequest("PROPFIND", path, propfind)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Add("Depth", depth.String())
|
||||||
|
|
||||||
|
return c.DoMultiStatus(req.WithContext(ctx))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) Touch(ctx context.Context, path string, mtime time.Time) (*MultiStatus, error) {
|
||||||
|
tstr := fmt.Sprintf("%d", mtime.Unix())
|
||||||
|
var v []RawXMLValue
|
||||||
|
for _, c := range tstr {
|
||||||
|
v = append(v, RawXMLValue{tok: xml.CharData{byte(c)}})
|
||||||
|
}
|
||||||
|
pup := &PropertyUpdate{
|
||||||
|
Set: []Set{
|
||||||
|
{
|
||||||
|
Prop: Prop{
|
||||||
|
Raw: []RawXMLValue{
|
||||||
|
*NewRawXMLElement(xml.Name{Space: "zrok:", Local: "lastmodified"}, nil, v),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
status, err := c.PropUpdate(ctx, path, pup)
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) PropUpdate(ctx context.Context, path string, propupd *PropertyUpdate) (*MultiStatus, error) {
|
||||||
|
req, err := c.NewXMLRequest("PROPPATCH", path, propupd)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c.DoMultiStatus(req.WithContext(ctx))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PropfindFlat performs a PROPFIND request with a zero depth.
|
||||||
|
func (c *Client) PropFindFlat(ctx context.Context, path string, propfind *PropFind) (*Response, error) {
|
||||||
|
ms, err := c.PropFind(ctx, path, DepthZero, propfind)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the client followed a redirect, the Href might be different from the request path
|
||||||
|
if len(ms.Responses) != 1 {
|
||||||
|
return nil, fmt.Errorf("PROPFIND with Depth: 0 returned %d responses", len(ms.Responses))
|
||||||
|
}
|
||||||
|
return &ms.Responses[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseCommaSeparatedSet splits each header value on commas and whitespace
// and collects the tokens into a set, upper- or lower-cased per the upper
// flag (HTTP methods vs. DAV compliance classes).
func parseCommaSeparatedSet(values []string, upper bool) map[string]bool {
	set := make(map[string]bool)
	isSep := func(r rune) bool {
		return unicode.IsSpace(r) || r == ','
	}
	for _, value := range values {
		for _, token := range strings.FieldsFunc(value, isSep) {
			if upper {
				token = strings.ToUpper(token)
			} else {
				token = strings.ToLower(token)
			}
			set[token] = true
		}
	}
	return set
}
|
||||||
|
|
||||||
|
func (c *Client) Options(ctx context.Context, path string) (classes map[string]bool, methods map[string]bool, err error) {
|
||||||
|
req, err := c.NewRequest(http.MethodOptions, path, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := c.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
classes = parseCommaSeparatedSet(resp.Header["Dav"], false)
|
||||||
|
if !classes["1"] {
|
||||||
|
return nil, nil, fmt.Errorf("webdav: server doesn't support DAV class 1")
|
||||||
|
}
|
||||||
|
|
||||||
|
methods = parseCommaSeparatedSet(resp.Header["Allow"], true)
|
||||||
|
return classes, methods, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SyncCollection perform a `sync-collection` REPORT operation on a resource
|
||||||
|
func (c *Client) SyncCollection(ctx context.Context, path, syncToken string, level Depth, limit *Limit, prop *Prop) (*MultiStatus, error) {
|
||||||
|
q := SyncCollectionQuery{
|
||||||
|
SyncToken: syncToken,
|
||||||
|
SyncLevel: level.String(),
|
||||||
|
Limit: limit,
|
||||||
|
Prop: prop,
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := c.NewXMLRequest("REPORT", path, &q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ms, err := c.DoMultiStatus(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ms, nil
|
||||||
|
}
|
452
drives/davClient/internal/elements.go
Normal file
452
drives/davClient/internal/elements.go
Normal file
@ -0,0 +1,452 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const Namespace = "DAV:"

// Qualified XML names for the standard WebDAV properties used throughout
// this package. The field keys are spelled out so `go vet` does not flag the
// unkeyed xml.Name composite literals.
var (
	ResourceTypeName     = xml.Name{Space: Namespace, Local: "resourcetype"}
	DisplayNameName      = xml.Name{Space: Namespace, Local: "displayname"}
	GetContentLengthName = xml.Name{Space: Namespace, Local: "getcontentlength"}
	GetContentTypeName   = xml.Name{Space: Namespace, Local: "getcontenttype"}
	GetLastModifiedName  = xml.Name{Space: Namespace, Local: "getlastmodified"}
	GetETagName          = xml.Name{Space: Namespace, Local: "getetag"}

	CurrentUserPrincipalName = xml.Name{Space: Namespace, Local: "current-user-principal"}
)
|
||||||
|
|
||||||
|
// Status represents an HTTP status line as carried inside WebDAV XML bodies.
type Status struct {
	Code int
	Text string
}

// MarshalText renders the status as an HTTP/1.1 status line, deriving the
// reason phrase from the code when Text is empty.
func (s *Status) MarshalText() ([]byte, error) {
	reason := s.Text
	if reason == "" {
		reason = http.StatusText(s.Code)
	}
	return []byte(fmt.Sprintf("HTTP/1.1 %v %v", s.Code, reason)), nil
}
|
||||||
|
|
||||||
|
func (s *Status) UnmarshalText(b []byte) error {
|
||||||
|
if len(b) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.SplitN(string(b), " ", 3)
|
||||||
|
if len(parts) != 3 {
|
||||||
|
return fmt.Errorf("webdav: invalid HTTP status %q: expected 3 fields", s)
|
||||||
|
}
|
||||||
|
code, err := strconv.Atoi(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("webdav: invalid HTTP status %q: failed to parse code: %v", s, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Code = code
|
||||||
|
s.Text = parts[2]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Status) Err() error {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: handle 2xx, 3xx
|
||||||
|
if s.Code != http.StatusOK {
|
||||||
|
return &HTTPError{Code: s.Code}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Href is a url.URL that marshals to and from the text content of a WebDAV
// <href> element.
type Href url.URL

// String renders the href in standard URL form.
func (h *Href) String() string {
	return (*url.URL)(h).String()
}

// MarshalText implements encoding.TextMarshaler.
func (h *Href) MarshalText() ([]byte, error) {
	return []byte(h.String()), nil
}

// UnmarshalText implements encoding.TextUnmarshaler by parsing the bytes as
// a URL.
func (h *Href) UnmarshalText(b []byte) error {
	parsed, err := url.Parse(string(b))
	if err != nil {
		return err
	}
	*h = Href(*parsed)
	return nil
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.16
|
||||||
|
type MultiStatus struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: multistatus"`
|
||||||
|
Responses []Response `xml:"response"`
|
||||||
|
ResponseDescription string `xml:"responsedescription,omitempty"`
|
||||||
|
SyncToken string `xml:"sync-token,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMultiStatus(resps ...Response) *MultiStatus {
|
||||||
|
return &MultiStatus{Responses: resps}
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.24
|
||||||
|
type Response struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: response"`
|
||||||
|
Hrefs []Href `xml:"href"`
|
||||||
|
PropStats []PropStat `xml:"propstat,omitempty"`
|
||||||
|
ResponseDescription string `xml:"responsedescription,omitempty"`
|
||||||
|
Status *Status `xml:"status,omitempty"`
|
||||||
|
Error *Error `xml:"error,omitempty"`
|
||||||
|
Location *Location `xml:"location,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewOKResponse(path string) *Response {
|
||||||
|
href := Href{Path: path}
|
||||||
|
return &Response{
|
||||||
|
Hrefs: []Href{href},
|
||||||
|
Status: &Status{Code: http.StatusOK},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewErrorResponse(path string, err error) *Response {
|
||||||
|
code := http.StatusInternalServerError
|
||||||
|
var httpErr *HTTPError
|
||||||
|
if errors.As(err, &httpErr) {
|
||||||
|
code = httpErr.Code
|
||||||
|
}
|
||||||
|
|
||||||
|
var errElt *Error
|
||||||
|
errors.As(err, &errElt)
|
||||||
|
|
||||||
|
href := Href{Path: path}
|
||||||
|
return &Response{
|
||||||
|
Hrefs: []Href{href},
|
||||||
|
Status: &Status{Code: code},
|
||||||
|
ResponseDescription: err.Error(),
|
||||||
|
Error: errElt,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (resp *Response) Err() error {
|
||||||
|
if resp.Status == nil || resp.Status.Code/100 == 2 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
if resp.Error != nil {
|
||||||
|
err = resp.Error
|
||||||
|
}
|
||||||
|
if resp.ResponseDescription != "" {
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("%v (%w)", resp.ResponseDescription, err)
|
||||||
|
} else {
|
||||||
|
err = fmt.Errorf("%v", resp.ResponseDescription)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &HTTPError{
|
||||||
|
Code: resp.Status.Code,
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (resp *Response) Path() (string, error) {
|
||||||
|
err := resp.Err()
|
||||||
|
var path string
|
||||||
|
if len(resp.Hrefs) == 1 {
|
||||||
|
path = resp.Hrefs[0].Path
|
||||||
|
} else if err == nil {
|
||||||
|
err = fmt.Errorf("webdav: malformed response: expected exactly one href element, got %v", len(resp.Hrefs))
|
||||||
|
}
|
||||||
|
return path, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (resp *Response) DecodeProp(values ...interface{}) error {
|
||||||
|
for _, v := range values {
|
||||||
|
// TODO wrap errors with more context (XML name)
|
||||||
|
name, err := valueXMLName(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := resp.Err(); err != nil {
|
||||||
|
return newPropError(name, err)
|
||||||
|
}
|
||||||
|
for _, propstat := range resp.PropStats {
|
||||||
|
raw := propstat.Prop.Get(name)
|
||||||
|
if raw == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := propstat.Status.Err(); err != nil {
|
||||||
|
return newPropError(name, err)
|
||||||
|
}
|
||||||
|
if err := raw.Decode(v); err != nil {
|
||||||
|
return newPropError(name, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return newPropError(name, &HTTPError{
|
||||||
|
Code: http.StatusNotFound,
|
||||||
|
Err: fmt.Errorf("missing property"),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPropError(name xml.Name, err error) error {
|
||||||
|
return fmt.Errorf("property <%v %v>: %w", name.Space, name.Local, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (resp *Response) EncodeProp(code int, v interface{}) error {
|
||||||
|
raw, err := EncodeRawXMLElement(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range resp.PropStats {
|
||||||
|
propstat := &resp.PropStats[i]
|
||||||
|
if propstat.Status.Code == code {
|
||||||
|
propstat.Prop.Raw = append(propstat.Prop.Raw, *raw)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.PropStats = append(resp.PropStats, PropStat{
|
||||||
|
Status: Status{Code: code},
|
||||||
|
Prop: Prop{Raw: []RawXMLValue{*raw}},
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.9
|
||||||
|
type Location struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: location"`
|
||||||
|
Href Href `xml:"href"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.22
|
||||||
|
type PropStat struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: propstat"`
|
||||||
|
Prop Prop `xml:"prop"`
|
||||||
|
Status Status `xml:"status"`
|
||||||
|
ResponseDescription string `xml:"responsedescription,omitempty"`
|
||||||
|
Error *Error `xml:"error,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.18
|
||||||
|
type Prop struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: prop"`
|
||||||
|
Raw []RawXMLValue `xml:",any"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func EncodeProp(values ...interface{}) (*Prop, error) {
|
||||||
|
l := make([]RawXMLValue, len(values))
|
||||||
|
for i, v := range values {
|
||||||
|
raw, err := EncodeRawXMLElement(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
l[i] = *raw
|
||||||
|
}
|
||||||
|
return &Prop{Raw: l}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Prop) Get(name xml.Name) *RawXMLValue {
|
||||||
|
for i := range p.Raw {
|
||||||
|
raw := &p.Raw[i]
|
||||||
|
if n, ok := raw.XMLName(); ok && name == n {
|
||||||
|
return raw
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Prop) Decode(v interface{}) error {
|
||||||
|
name, err := valueXMLName(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
raw := p.Get(name)
|
||||||
|
if raw == nil {
|
||||||
|
return HTTPErrorf(http.StatusNotFound, "missing property %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return raw.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.20
|
||||||
|
type PropFind struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: propfind"`
|
||||||
|
Prop *Prop `xml:"prop,omitempty"`
|
||||||
|
AllProp *struct{} `xml:"allprop,omitempty"`
|
||||||
|
Include *Include `xml:"include,omitempty"`
|
||||||
|
PropName *struct{} `xml:"propname,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func xmlNamesToRaw(names []xml.Name) []RawXMLValue {
|
||||||
|
l := make([]RawXMLValue, len(names))
|
||||||
|
for i, name := range names {
|
||||||
|
l[i] = *NewRawXMLElement(name, nil, nil)
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPropNamePropFind(names ...xml.Name) *PropFind {
|
||||||
|
return &PropFind{Prop: &Prop{Raw: xmlNamesToRaw(names)}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.8
|
||||||
|
type Include struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: include"`
|
||||||
|
Raw []RawXMLValue `xml:",any"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-15.9
|
||||||
|
type ResourceType struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: resourcetype"`
|
||||||
|
Raw []RawXMLValue `xml:",any"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewResourceType(names ...xml.Name) *ResourceType {
|
||||||
|
return &ResourceType{Raw: xmlNamesToRaw(names)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ResourceType) Is(name xml.Name) bool {
|
||||||
|
for _, raw := range t.Raw {
|
||||||
|
if n, ok := raw.XMLName(); ok && name == n {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var CollectionName = xml.Name{Namespace, "collection"}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-15.4
|
||||||
|
type GetContentLength struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: getcontentlength"`
|
||||||
|
Length int64 `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-15.5
|
||||||
|
type GetContentType struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: getcontenttype"`
|
||||||
|
Type string `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Time time.Time
|
||||||
|
|
||||||
|
func (t *Time) UnmarshalText(b []byte) error {
|
||||||
|
tt, err := http.ParseTime(string(b))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = Time(tt)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Time) MarshalText() ([]byte, error) {
|
||||||
|
s := time.Time(*t).UTC().Format(http.TimeFormat)
|
||||||
|
return []byte(s), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-15.7
|
||||||
|
type GetLastModified struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: getlastmodified"`
|
||||||
|
LastModified Time `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-15.6
|
||||||
|
type GetETag struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: getetag"`
|
||||||
|
ETag ETag `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ETag string
|
||||||
|
|
||||||
|
func (etag *ETag) UnmarshalText(b []byte) error {
|
||||||
|
s, err := strconv.Unquote(string(b))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("webdav: failed to unquote ETag: %v", err)
|
||||||
|
}
|
||||||
|
*etag = ETag(s)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (etag ETag) MarshalText() ([]byte, error) {
|
||||||
|
return []byte(etag.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (etag ETag) String() string {
|
||||||
|
return fmt.Sprintf("%q", string(etag))
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.5
|
||||||
|
type Error struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: error"`
|
||||||
|
Raw []RawXMLValue `xml:",any"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (err *Error) Error() string {
|
||||||
|
b, _ := xml.Marshal(err)
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-15.2
|
||||||
|
type DisplayName struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: displayname"`
|
||||||
|
Name string `xml:",chardata"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc5397#section-3
|
||||||
|
type CurrentUserPrincipal struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: current-user-principal"`
|
||||||
|
Href Href `xml:"href,omitempty"`
|
||||||
|
Unauthenticated *struct{} `xml:"unauthenticated,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.19
|
||||||
|
type PropertyUpdate struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: propertyupdate"`
|
||||||
|
Remove []Remove `xml:"remove"`
|
||||||
|
Set []Set `xml:"set"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.23
|
||||||
|
type Remove struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: remove"`
|
||||||
|
Prop Prop `xml:"prop"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc4918#section-14.26
|
||||||
|
type Set struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: set"`
|
||||||
|
Prop Prop `xml:"prop"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc6578#section-6.1
|
||||||
|
type SyncCollectionQuery struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: sync-collection"`
|
||||||
|
SyncToken string `xml:"sync-token"`
|
||||||
|
Limit *Limit `xml:"limit,omitempty"`
|
||||||
|
SyncLevel string `xml:"sync-level"`
|
||||||
|
Prop *Prop `xml:"prop"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/rfc5323#section-5.17
|
||||||
|
type Limit struct {
|
||||||
|
XMLName xml.Name `xml:"DAV: limit"`
|
||||||
|
NResults uint `xml:"nresults"`
|
||||||
|
}
|
108
drives/davClient/internal/internal.go
Normal file
108
drives/davClient/internal/internal.go
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
package internal // Package internal provides low-level helpers for WebDAV clients and servers.
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Depth indicates whether a request applies to the resource's members. It's
// defined in RFC 4918 section 10.2.
type Depth int

const (
	// DepthZero indicates that the request applies only to the resource.
	DepthZero Depth = 0
	// DepthOne indicates that the request applies to the resource and its
	// internal members only.
	DepthOne Depth = 1
	// DepthInfinity indicates that the request applies to the resource and all
	// of its members.
	DepthInfinity Depth = -1
)

// ParseDepth parses a Depth header.
func ParseDepth(s string) (Depth, error) {
	switch s {
	case "0":
		return DepthZero, nil
	case "1":
		return DepthOne, nil
	case "infinity":
		return DepthInfinity, nil
	default:
		return 0, fmt.Errorf("webdav: invalid Depth value")
	}
}

// String formats the depth.
func (d Depth) String() string {
	switch d {
	case DepthZero:
		return "0"
	case DepthOne:
		return "1"
	case DepthInfinity:
		return "infinity"
	default:
		panic("webdav: invalid Depth value")
	}
}

// ParseOverwrite parses an Overwrite header.
func ParseOverwrite(s string) (bool, error) {
	switch s {
	case "T":
		return true, nil
	case "F":
		return false, nil
	default:
		return false, fmt.Errorf("webdav: invalid Overwrite value")
	}
}

// FormatOverwrite formats an Overwrite header.
func FormatOverwrite(overwrite bool) string {
	if overwrite {
		return "T"
	}
	return "F"
}

// HTTPError pairs an HTTP status code with an optional underlying error.
type HTTPError struct {
	Code int
	Err  error
}

// HTTPErrorFromError coerces err into an *HTTPError, wrapping unknown errors
// as 500 Internal Server Error. A nil error stays nil.
func HTTPErrorFromError(err error) *HTTPError {
	if err == nil {
		return nil
	}
	if httpErr, ok := err.(*HTTPError); ok {
		return httpErr
	}
	return &HTTPError{http.StatusInternalServerError, err}
}

// IsNotFound reports whether err's chain contains a 404 *HTTPError.
func IsNotFound(err error) bool {
	var httpErr *HTTPError
	return errors.As(err, &httpErr) && httpErr.Code == http.StatusNotFound
}

// HTTPErrorf builds an *HTTPError with a formatted underlying error.
func HTTPErrorf(code int, format string, a ...interface{}) *HTTPError {
	return &HTTPError{code, fmt.Errorf(format, a...)}
}

// Error implements the error interface, including the underlying error when
// one is present.
func (err *HTTPError) Error() string {
	s := fmt.Sprintf("%v %v", err.Code, http.StatusText(err.Code))
	if err.Err != nil {
		return fmt.Sprintf("%v: %v", s, err.Err)
	}
	return s
}

// Unwrap exposes the underlying error for errors.Is/As.
func (err *HTTPError) Unwrap() error {
	return err.Err
}
|
175
drives/davClient/internal/xml.go
Normal file
175
drives/davClient/internal/xml.go
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RawXMLValue is a raw XML value. It implements xml.Unmarshaler and
|
||||||
|
// xml.Marshaler and can be used to delay XML decoding or precompute an XML
|
||||||
|
// encoding.
|
||||||
|
type RawXMLValue struct {
|
||||||
|
tok xml.Token // guaranteed not to be xml.EndElement
|
||||||
|
children []RawXMLValue
|
||||||
|
|
||||||
|
// Unfortunately encoding/xml doesn't offer TokenWriter, so we need to
|
||||||
|
// cache outgoing data.
|
||||||
|
out interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRawXMLElement creates a new RawXMLValue for an element.
|
||||||
|
func NewRawXMLElement(name xml.Name, attr []xml.Attr, children []RawXMLValue) *RawXMLValue {
|
||||||
|
return &RawXMLValue{tok: xml.StartElement{name, attr}, children: children}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeRawXMLElement encodes a value into a new RawXMLValue. The XML value
|
||||||
|
// can only be used for marshalling.
|
||||||
|
func EncodeRawXMLElement(v interface{}) (*RawXMLValue, error) {
|
||||||
|
return &RawXMLValue{out: v}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalXML implements xml.Unmarshaler.
|
||||||
|
func (val *RawXMLValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||||
|
val.tok = start
|
||||||
|
val.children = nil
|
||||||
|
val.out = nil
|
||||||
|
|
||||||
|
for {
|
||||||
|
tok, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok := tok.(type) {
|
||||||
|
case xml.StartElement:
|
||||||
|
child := RawXMLValue{}
|
||||||
|
if err := child.UnmarshalXML(d, tok); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
val.children = append(val.children, child)
|
||||||
|
case xml.EndElement:
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
val.children = append(val.children, RawXMLValue{tok: xml.CopyToken(tok)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalXML implements xml.Marshaler.
|
||||||
|
func (val *RawXMLValue) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||||
|
if val.out != nil {
|
||||||
|
return e.Encode(val.out)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch tok := val.tok.(type) {
|
||||||
|
case xml.StartElement:
|
||||||
|
if err := e.EncodeToken(tok); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, child := range val.children {
|
||||||
|
// TODO: find a sensible value for the start argument?
|
||||||
|
if err := child.MarshalXML(e, xml.StartElement{}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e.EncodeToken(tok.End())
|
||||||
|
case xml.EndElement:
|
||||||
|
panic("unexpected end element")
|
||||||
|
default:
|
||||||
|
return e.EncodeToken(tok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ xml.Marshaler = (*RawXMLValue)(nil)
|
||||||
|
var _ xml.Unmarshaler = (*RawXMLValue)(nil)
|
||||||
|
|
||||||
|
func (val *RawXMLValue) Decode(v interface{}) error {
|
||||||
|
return xml.NewTokenDecoder(val.TokenReader()).Decode(&v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (val *RawXMLValue) XMLName() (name xml.Name, ok bool) {
|
||||||
|
if start, ok := val.tok.(xml.StartElement); ok {
|
||||||
|
return start.Name, true
|
||||||
|
}
|
||||||
|
return xml.Name{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TokenReader returns a stream of tokens for the XML value.
|
||||||
|
func (val *RawXMLValue) TokenReader() xml.TokenReader {
|
||||||
|
if val.out != nil {
|
||||||
|
panic("webdav: called RawXMLValue.TokenReader on a marshal-only XML value")
|
||||||
|
}
|
||||||
|
return &rawXMLValueReader{val: val}
|
||||||
|
}
|
||||||
|
|
||||||
|
type rawXMLValueReader struct {
|
||||||
|
val *RawXMLValue
|
||||||
|
start, end bool
|
||||||
|
child int
|
||||||
|
childReader xml.TokenReader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tr *rawXMLValueReader) Token() (xml.Token, error) {
|
||||||
|
if tr.end {
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
start, ok := tr.val.tok.(xml.StartElement)
|
||||||
|
if !ok {
|
||||||
|
tr.end = true
|
||||||
|
return tr.val.tok, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !tr.start {
|
||||||
|
tr.start = true
|
||||||
|
return start, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for tr.child < len(tr.val.children) {
|
||||||
|
if tr.childReader == nil {
|
||||||
|
tr.childReader = tr.val.children[tr.child].TokenReader()
|
||||||
|
}
|
||||||
|
|
||||||
|
tok, err := tr.childReader.Token()
|
||||||
|
if err == io.EOF {
|
||||||
|
tr.childReader = nil
|
||||||
|
tr.child++
|
||||||
|
} else {
|
||||||
|
return tok, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tr.end = true
|
||||||
|
return start.End(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ xml.TokenReader = (*rawXMLValueReader)(nil)
|
||||||
|
|
||||||
|
func valueXMLName(v interface{}) (xml.Name, error) {
|
||||||
|
t := reflect.TypeOf(v)
|
||||||
|
for t.Kind() == reflect.Ptr {
|
||||||
|
t = t.Elem()
|
||||||
|
}
|
||||||
|
if t.Kind() != reflect.Struct {
|
||||||
|
return xml.Name{}, fmt.Errorf("webdav: %T is not a struct", v)
|
||||||
|
}
|
||||||
|
nameField, ok := t.FieldByName("XMLName")
|
||||||
|
if !ok {
|
||||||
|
return xml.Name{}, fmt.Errorf("webdav: %T is missing an XMLName struct field", v)
|
||||||
|
}
|
||||||
|
if nameField.Type != reflect.TypeOf(xml.Name{}) {
|
||||||
|
return xml.Name{}, fmt.Errorf("webdav: %T.XMLName isn't an xml.Name", v)
|
||||||
|
}
|
||||||
|
tag := nameField.Tag.Get("xml")
|
||||||
|
if tag == "" {
|
||||||
|
return xml.Name{}, fmt.Errorf(`webdav: %T.XMLName is missing an "xml" tag`, v)
|
||||||
|
}
|
||||||
|
name := strings.Split(tag, ",")[0]
|
||||||
|
nameParts := strings.Split(name, " ")
|
||||||
|
if len(nameParts) != 2 {
|
||||||
|
return xml.Name{}, fmt.Errorf("webdav: expected a namespace and local name in %T.XMLName's xml tag", v)
|
||||||
|
}
|
||||||
|
return xml.Name{nameParts[0], nameParts[1]}, nil
|
||||||
|
}
|
119
drives/davClient/model.go
Normal file
119
drives/davClient/model.go
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
package davClient
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Depth indicates whether a request applies to the resource's members. It's
|
||||||
|
// defined in RFC 4918 section 10.2.
|
||||||
|
type Depth int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DepthZero indicates that the request applies only to the resource.
|
||||||
|
DepthZero Depth = 0
|
||||||
|
// DepthOne indicates that the request applies to the resource and its
|
||||||
|
// internal members only.
|
||||||
|
DepthOne Depth = 1
|
||||||
|
// DepthInfinity indicates that the request applies to the resource and all
|
||||||
|
// of its members.
|
||||||
|
DepthInfinity Depth = -1
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseDepth parses a Depth header.
|
||||||
|
func ParseDepth(s string) (Depth, error) {
|
||||||
|
switch s {
|
||||||
|
case "0":
|
||||||
|
return DepthZero, nil
|
||||||
|
case "1":
|
||||||
|
return DepthOne, nil
|
||||||
|
case "infinity":
|
||||||
|
return DepthInfinity, nil
|
||||||
|
}
|
||||||
|
return 0, fmt.Errorf("webdav: invalid Depth value")
|
||||||
|
}
|
||||||
|
|
||||||
|
// String formats the depth.
|
||||||
|
func (d Depth) String() string {
|
||||||
|
switch d {
|
||||||
|
case DepthZero:
|
||||||
|
return "0"
|
||||||
|
case DepthOne:
|
||||||
|
return "1"
|
||||||
|
case DepthInfinity:
|
||||||
|
return "infinity"
|
||||||
|
}
|
||||||
|
panic("webdav: invalid Depth value")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseOverwrite parses an Overwrite header.
func ParseOverwrite(s string) (bool, error) {
	if s == "T" {
		return true, nil
	}
	if s == "F" {
		return false, nil
	}
	return false, fmt.Errorf("webdav: invalid Overwrite value")
}

// FormatOverwrite formats an Overwrite header.
func FormatOverwrite(overwrite bool) string {
	if !overwrite {
		return "F"
	}
	return "T"
}
|
||||||
|
|
||||||
|
type HTTPError struct {
|
||||||
|
Code int
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func HTTPErrorFromError(err error) *HTTPError {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if httpErr, ok := err.(*HTTPError); ok {
|
||||||
|
return httpErr
|
||||||
|
} else {
|
||||||
|
return &HTTPError{http.StatusInternalServerError, err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsNotFound(err error) bool {
|
||||||
|
var httpErr *HTTPError
|
||||||
|
if errors.As(err, &httpErr) {
|
||||||
|
return httpErr.Code == http.StatusNotFound
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func HTTPErrorf(code int, format string, a ...interface{}) *HTTPError {
|
||||||
|
return &HTTPError{code, fmt.Errorf(format, a...)}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (err *HTTPError) Error() string {
|
||||||
|
s := fmt.Sprintf("%v %v", err.Code, http.StatusText(err.Code))
|
||||||
|
if err.Err != nil {
|
||||||
|
return fmt.Sprintf("%v: %v", s, err.Err)
|
||||||
|
} else {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (err *HTTPError) Unwrap() error {
|
||||||
|
return err.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo describes a single resource returned by the WebDAV client.
type FileInfo struct {
	// Path is the resource's path as reported by the server.
	Path string
	// Size is the resource's size in bytes.
	Size int64
	// ModTime is the last-modification time.
	ModTime time.Time
	// IsDir reports whether the resource is a collection (directory).
	IsDir bool
	// MIMEType is the content type reported by the server.
	MIMEType string
	// ETag is the entity tag, if the server provided one.
	ETag string
}
|
855
drives/davServer/file.go
Normal file
855
drives/davServer/file.go
Normal file
@ -0,0 +1,855 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/xml"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// slashClean is equivalent to but slightly more efficient than
// path.Clean("/" + name).
func slashClean(name string) string {
	if !strings.HasPrefix(name, "/") {
		name = "/" + name
	}
	return path.Clean(name)
}
|
||||||
|
|
||||||
|
// A FileSystem implements access to a collection of named files. The elements
// in a file path are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
//
// Each method has the same semantics as the os package's function of the same
// name.
//
// Note that the os.Rename documentation says that "OS-specific restrictions
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type FileSystem interface {
	Mkdir(ctx context.Context, name string, perm os.FileMode) error
	OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
	RemoveAll(ctx context.Context, name string) error
	Rename(ctx context.Context, oldName, newName string) error
	Stat(ctx context.Context, name string) (os.FileInfo, error)
}

// A File is returned by a FileSystem's OpenFile method and can be served by a
// Handler.
//
// A File may optionally implement the DeadPropsHolder interface, if it can
// load and save dead properties.
type File interface {
	http.File
	io.Writer
}

// webdavFile wraps an on-disk File so it can expose WebDAV dead properties
// (see DeadProps/Patch below). name is the resolved on-disk path of the file.
type webdavFile struct {
	File
	name string
}
|
||||||
|
|
||||||
|
// DeadProps returns the file's dead properties. The only property exposed is
// a custom "zrok:lastmodified" entry carrying the file's mtime as a Unix
// timestamp in decimal. Stat errors are deliberately ignored: the map is
// simply returned without that entry.
func (f *webdavFile) DeadProps() (map[xml.Name]Property, error) {
	var (
		xmlName    xml.Name
		property   Property
		properties = make(map[xml.Name]Property)
	)
	var stat fs.FileInfo
	stat, err := f.Stat()
	if err == nil {
		xmlName.Space = "zrok:"
		xmlName.Local = "lastmodified"
		property.XMLName = xmlName
		property.InnerXML = strconv.AppendInt(nil, stat.ModTime().Unix(), 10)
		properties[xmlName] = property
	}

	return properties, nil
}

// Patch applies PROPPATCH requests. Only the custom "zrok:lastmodified"
// property is honored: its InnerXML is parsed as a Unix timestamp and applied
// to the on-disk file's mtime. All patches are reported with a single
// StatusOK Propstat regardless of which properties were recognized.
func (f *webdavFile) Patch(patches []Proppatch) ([]Propstat, error) {
	var stat Propstat
	stat.Status = http.StatusOK
	for _, patch := range patches {
		for _, prop := range patch.Props {
			if prop.XMLName.Space == "zrok:" && prop.XMLName.Local == "lastmodified" {
				modtimeUnix, err := strconv.ParseInt(string(prop.InnerXML), 10, 64)
				if err != nil {
					return nil, err
				}
				if err := f.updateModtime(f.name, time.Unix(modtimeUnix, 0)); err != nil {
					return nil, err
				}
			}
		}
	}
	return []Propstat{stat}, nil
}

// updateModtime sets the file's modification time to modtime, using the
// current time as the access time.
//
// NOTE(review): the path parameter is ignored; the method always operates on
// f.name (the sole caller passes f.name anyway) — confirm before relying on
// the parameter.
func (f *webdavFile) updateModtime(path string, modtime time.Time) error {
	if err := os.Chtimes(f.name, time.Now(), modtime); err != nil {
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// A Dir implements FileSystem using the native file system restricted to a
// specific directory tree.
//
// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
// string value is a filename on the native file system, not a URL, so it is
// separated by filepath.Separator, which isn't necessarily '/'.
//
// An empty Dir is treated as ".".
type Dir string

// resolve maps a '/'-separated WebDAV path to a native filesystem path rooted
// at d. It returns "" for unsafe names (embedded OS path separators or NUL
// bytes), which callers treat as "does not exist".
func (d Dir) resolve(name string) string {
	// This implementation is based on Dir.Open's code in the standard net/http package.
	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
		strings.Contains(name, "\x00") {
		return ""
	}
	dir := string(d)
	if dir == "" {
		dir = "."
	}
	// slashClean confines ".." traversal before joining onto the root.
	return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
}
|
||||||
|
|
||||||
|
// Mkdir creates a directory under the Dir root. Unsafe names resolve to ""
// and report os.ErrNotExist.
func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
	if name = d.resolve(name); name == "" {
		return os.ErrNotExist
	}
	return os.Mkdir(name, perm)
}

// OpenFile opens a file under the Dir root and wraps it in a webdavFile so
// dead-property support is available.
func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
	if name = d.resolve(name); name == "" {
		return nil, os.ErrNotExist
	}
	f, err := os.OpenFile(name, flag, perm)
	if err != nil {
		return nil, err
	}
	return &webdavFile{f, name}, nil
}

// RemoveAll removes a file or directory tree under the Dir root. Removing the
// root itself is refused with os.ErrInvalid.
func (d Dir) RemoveAll(ctx context.Context, name string) error {
	if name = d.resolve(name); name == "" {
		return os.ErrNotExist
	}
	if name == filepath.Clean(string(d)) {
		// Prohibit removing the virtual root directory.
		return os.ErrInvalid
	}
	return os.RemoveAll(name)
}

// Rename renames a file or directory under the Dir root. Renaming from or to
// the root itself is refused with os.ErrInvalid.
func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
	if oldName = d.resolve(oldName); oldName == "" {
		return os.ErrNotExist
	}
	if newName = d.resolve(newName); newName == "" {
		return os.ErrNotExist
	}
	if root := filepath.Clean(string(d)); root == oldName || root == newName {
		// Prohibit renaming from or to the virtual root directory.
		return os.ErrInvalid
	}
	return os.Rename(oldName, newName)
}

// Stat stats a file under the Dir root.
func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
	if name = d.resolve(name); name == "" {
		return nil, os.ErrNotExist
	}
	return os.Stat(name)
}
|
||||||
|
|
||||||
|
// NewMemFS returns a new in-memory FileSystem implementation.
func NewMemFS() FileSystem {
	return &memFS{
		root: memFSNode{
			children: make(map[string]*memFSNode),
			mode:     0660 | os.ModeDir,
			modTime:  time.Now(),
		},
	}
}

// A memFS implements FileSystem, storing all metadata and actual file data
// in-memory. No limits on filesystem size are used, so it is not recommended
// this be used where the clients are untrusted.
//
// Concurrent access is permitted. The tree structure is protected by a mutex,
// and each node's contents and metadata are protected by a per-node mutex.
//
// TODO: Enforce file permissions.
type memFS struct {
	mu   sync.Mutex // guards the tree structure rooted at root
	root memFSNode
}
|
||||||
|
|
||||||
|
// TODO: clean up and rationalize the walk/find code.

// walk walks the directory tree for the fullname, calling f at each step. If f
// returns an error, the walk will be aborted and return that same error.
//
// dir is the directory at that step, frag is the name fragment, and final is
// whether it is the final step. For example, walking "/foo/bar/x" will result
// in 3 calls to f:
//   - "/", "foo", false
//   - "/foo/", "bar", false
//   - "/foo/bar/", "x", true
//
// The frag argument will be empty only if dir is the root node and the walk
// ends at that root node.
func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
	original := fullname
	fullname = slashClean(fullname)

	// Strip any leading "/"s to make fullname a relative path, as the walk
	// starts at fs.root.
	if fullname[0] == '/' {
		fullname = fullname[1:]
	}
	dir := &fs.root

	for {
		frag, remaining := fullname, ""
		i := strings.IndexRune(fullname, '/')
		final := i < 0
		if !final {
			frag, remaining = fullname[:i], fullname[i+1:]
		}
		if frag == "" && dir != &fs.root {
			panic("webdav: empty path fragment for a clean path")
		}
		if err := f(dir, frag, final); err != nil {
			// Errors from the callback are wrapped with the original
			// (uncleaned) path for diagnostics.
			return &os.PathError{
				Op:   op,
				Path: original,
				Err:  err,
			}
		}
		if final {
			break
		}
		// Descend into the next path component; it must exist and be a
		// directory for the walk to continue.
		child := dir.children[frag]
		if child == nil {
			return &os.PathError{
				Op:   op,
				Path: original,
				Err:  os.ErrNotExist,
			}
		}
		if !child.mode.IsDir() {
			return &os.PathError{
				Op:   op,
				Path: original,
				Err:  os.ErrInvalid,
			}
		}
		dir, fullname = child, remaining
	}
	return nil
}

// find returns the parent of the named node and the relative name fragment
// from the parent to the child. For example, if finding "/foo/bar/baz" then
// parent will be the node for "/foo/bar" and frag will be "baz".
//
// If the fullname names the root node, then parent, frag and err will be zero.
//
// find returns an error if the parent does not already exist or the parent
// isn't a directory, but it will not return an error per se if the child does
// not already exist. The error returned is either nil or an *os.PathError
// whose Op is op.
func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
	err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
		if !final {
			return nil
		}
		if frag0 != "" {
			parent, frag = parent0, frag0
		}
		return nil
	})
	return parent, frag, err
}
|
||||||
|
|
||||||
|
// Mkdir creates a directory node in the in-memory tree. The parent must
// already exist; creating the root or an existing name is an error.
func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	dir, frag, err := fs.find("mkdir", name)
	if err != nil {
		return err
	}
	if dir == nil {
		// We can't create the root.
		return os.ErrInvalid
	}
	if _, ok := dir.children[frag]; ok {
		return os.ErrExist
	}
	dir.children[frag] = &memFSNode{
		children: make(map[string]*memFSNode),
		mode:     perm.Perm() | os.ModeDir,
		modTime:  time.Now(),
	}
	return nil
}
|
||||||
|
|
||||||
|
// OpenFile opens (and optionally creates/truncates) a node in the in-memory
// tree, honoring a subset of the os.OpenFile flags: O_CREATE, O_EXCL,
// O_TRUNC, O_WRONLY and O_RDWR. O_SYNC and O_APPEND are rejected. The
// returned memFile carries a snapshot of the node's name and children.
func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	dir, frag, err := fs.find("open", name)
	if err != nil {
		return nil, err
	}
	var n *memFSNode
	if dir == nil {
		// We're opening the root.
		if runtime.GOOS == "zos" {
			if flag&os.O_WRONLY != 0 {
				return nil, os.ErrPermission
			}
		} else {
			if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
				return nil, os.ErrPermission
			}
		}
		n, frag = &fs.root, "/"

	} else {
		n = dir.children[frag]
		if flag&(os.O_SYNC|os.O_APPEND) != 0 {
			// memFile doesn't support these flags yet.
			return nil, os.ErrInvalid
		}
		if flag&os.O_CREATE != 0 {
			if flag&os.O_EXCL != 0 && n != nil {
				return nil, os.ErrExist
			}
			if n == nil {
				// Create a new empty file node (children stays nil,
				// marking it as a non-directory).
				n = &memFSNode{
					mode: perm.Perm(),
				}
				dir.children[frag] = n
			}
		}
		if n == nil {
			return nil, os.ErrNotExist
		}
		if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
			n.mu.Lock()
			n.data = nil
			n.mu.Unlock()
		}
	}

	// Snapshot the children under fs.mu so Readdir sees a consistent view.
	children := make([]os.FileInfo, 0, len(n.children))
	for cName, c := range n.children {
		children = append(children, c.stat(cName))
	}
	return &memFile{
		n:                n,
		nameSnapshot:     frag,
		childrenSnapshot: children,
	}, nil
}
|
||||||
|
|
||||||
|
// RemoveAll detaches the named node (and implicitly its whole subtree) from
// the in-memory tree. Removing the root is refused.
func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	dir, frag, err := fs.find("remove", name)
	if err != nil {
		return err
	}
	if dir == nil {
		// We can't remove the root.
		return os.ErrInvalid
	}
	delete(dir.children, frag)
	return nil
}

// Rename moves a node from oldName to newName. Renaming onto oneself is a
// no-op; renaming a node into its own subtree, or from/to the root, is
// rejected. A directory may only replace an existing directory if that
// directory is empty.
func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	oldName = slashClean(oldName)
	newName = slashClean(newName)
	if oldName == newName {
		return nil
	}
	if strings.HasPrefix(newName, oldName+"/") {
		// We can't rename oldName to be a sub-directory of itself.
		return os.ErrInvalid
	}

	oDir, oFrag, err := fs.find("rename", oldName)
	if err != nil {
		return err
	}
	if oDir == nil {
		// We can't rename from the root.
		return os.ErrInvalid
	}

	nDir, nFrag, err := fs.find("rename", newName)
	if err != nil {
		return err
	}
	if nDir == nil {
		// We can't rename to the root.
		return os.ErrInvalid
	}

	oNode, ok := oDir.children[oFrag]
	if !ok {
		return os.ErrNotExist
	}
	// A nil children map marks a file; non-nil marks a directory.
	if oNode.children != nil {
		if nNode, ok := nDir.children[nFrag]; ok {
			if nNode.children == nil {
				return errNotADirectory
			}
			if len(nNode.children) != 0 {
				return errDirectoryNotEmpty
			}
		}
	}
	delete(oDir.children, oFrag)
	nDir.children[nFrag] = oNode
	return nil
}

// Stat returns file info for the named node, or os.ErrNotExist if the node
// is absent.
func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
	fs.mu.Lock()
	defer fs.mu.Unlock()

	dir, frag, err := fs.find("stat", name)
	if err != nil {
		return nil, err
	}
	if dir == nil {
		// We're stat'ting the root.
		return fs.root.stat("/"), nil
	}
	if n, ok := dir.children[frag]; ok {
		return n.stat(path.Base(name)), nil
	}
	return nil, os.ErrNotExist
}
|
||||||
|
|
||||||
|
// A memFSNode represents a single entry in the in-memory filesystem and also
// implements os.FileInfo.
type memFSNode struct {
	// children is protected by memFS.mu. A nil map marks a regular file;
	// a non-nil map marks a directory.
	children map[string]*memFSNode

	mu        sync.Mutex // guards the fields below
	data      []byte
	mode      os.FileMode
	modTime   time.Time
	deadProps map[xml.Name]Property
}

// stat returns a point-in-time os.FileInfo snapshot of the node under the
// given display name.
func (n *memFSNode) stat(name string) *memFileInfo {
	n.mu.Lock()
	defer n.mu.Unlock()
	return &memFileInfo{
		name:    name,
		size:    int64(len(n.data)),
		mode:    n.mode,
		modTime: n.modTime,
	}
}

// DeadProps returns a copy of the node's dead-property map, or nil if the
// node has none.
func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	if len(n.deadProps) == 0 {
		return nil, nil
	}
	ret := make(map[xml.Name]Property, len(n.deadProps))
	for k, v := range n.deadProps {
		ret[k] = v
	}
	return ret, nil
}

// Patch applies PROPPATCH set/remove operations to the node's dead-property
// map. All property updates are reported under a single StatusOK Propstat.
func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
	n.mu.Lock()
	defer n.mu.Unlock()
	pstat := Propstat{Status: http.StatusOK}
	for _, patch := range patches {
		for _, p := range patch.Props {
			pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
			if patch.Remove {
				delete(n.deadProps, p.XMLName)
				continue
			}
			if n.deadProps == nil {
				n.deadProps = map[xml.Name]Property{}
			}
			n.deadProps[p.XMLName] = p
		}
	}
	return []Propstat{pstat}, nil
}
|
||||||
|
|
||||||
|
// memFileInfo is an immutable os.FileInfo snapshot of a memFSNode.
type memFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (f *memFileInfo) Name() string       { return f.name }
func (f *memFileInfo) Size() int64        { return f.size }
func (f *memFileInfo) Mode() os.FileMode  { return f.mode }
func (f *memFileInfo) ModTime() time.Time { return f.modTime }
func (f *memFileInfo) IsDir() bool        { return f.mode.IsDir() }
func (f *memFileInfo) Sys() interface{}   { return nil }

// A memFile is a File implementation for a memFSNode. It is a per-file (not
// per-node) read/write position, and a snapshot of the memFS' tree structure
// (a node's name and children) for that node.
type memFile struct {
	n                *memFSNode
	nameSnapshot     string
	childrenSnapshot []os.FileInfo
	// pos is protected by n.mu. It doubles as the Readdir cursor when the
	// node is a directory.
	pos int
}

// A *memFile implements the optional DeadPropsHolder interface.
var _ DeadPropsHolder = (*memFile)(nil)

// Dead-property operations delegate to the underlying node.
func (f *memFile) DeadProps() (map[xml.Name]Property, error)     { return f.n.DeadProps() }
func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
|
||||||
|
|
||||||
|
// Close is a no-op: a memFile holds no OS resources.
func (f *memFile) Close() error {
	return nil
}

// Read copies bytes from the node's data at the current position. Reading a
// directory returns os.ErrInvalid.
func (f *memFile) Read(p []byte) (int, error) {
	f.n.mu.Lock()
	defer f.n.mu.Unlock()
	if f.n.mode.IsDir() {
		return 0, os.ErrInvalid
	}
	if f.pos >= len(f.n.data) {
		return 0, io.EOF
	}
	n := copy(p, f.n.data[f.pos:])
	f.pos += n
	return n, nil
}

// Readdir returns directory entries from the children snapshot taken at open
// time, following the os.File Readdir contract for count.
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
	f.n.mu.Lock()
	defer f.n.mu.Unlock()
	if !f.n.mode.IsDir() {
		return nil, os.ErrInvalid
	}
	old := f.pos
	if old >= len(f.childrenSnapshot) {
		// The os.File Readdir docs say that at the end of a directory,
		// the error is io.EOF if count > 0 and nil if count <= 0.
		if count > 0 {
			return nil, io.EOF
		}
		return nil, nil
	}
	if count > 0 {
		f.pos += count
		if f.pos > len(f.childrenSnapshot) {
			f.pos = len(f.childrenSnapshot)
		}
	} else {
		// count <= 0: return everything remaining from the start.
		f.pos = len(f.childrenSnapshot)
		old = 0
	}
	return f.childrenSnapshot[old:f.pos], nil
}

// Seek repositions the read/write cursor per the io.Seeker contract.
// Seeking before the start returns os.ErrInvalid.
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
	f.n.mu.Lock()
	defer f.n.mu.Unlock()
	npos := f.pos
	// TODO: How to handle offsets greater than the size of system int?
	switch whence {
	case io.SeekStart:
		npos = int(offset)
	case io.SeekCurrent:
		npos += int(offset)
	case io.SeekEnd:
		npos = len(f.n.data) + int(offset)
	default:
		npos = -1
	}
	if npos < 0 {
		return 0, os.ErrInvalid
	}
	f.pos = npos
	return int64(f.pos), nil
}

// Stat returns a snapshot FileInfo using the name captured at open time.
func (f *memFile) Stat() (os.FileInfo, error) {
	return f.n.stat(f.nameSnapshot), nil
}
|
||||||
|
|
||||||
|
// Write writes p at the current position, growing the node's data as needed.
// Seeking past EOF before writing creates a zero-filled hole, matching
// os.File semantics. Always reports len(p) bytes written on success.
func (f *memFile) Write(p []byte) (int, error) {
	lenp := len(p)
	f.n.mu.Lock()
	defer f.n.mu.Unlock()

	if f.n.mode.IsDir() {
		return 0, os.ErrInvalid
	}
	if f.pos < len(f.n.data) {
		// Overwrite existing bytes first; any remainder of p is appended
		// below.
		n := copy(f.n.data[f.pos:], p)
		f.pos += n
		p = p[n:]
	} else if f.pos > len(f.n.data) {
		// Write permits the creation of holes, if we've seek'ed past the
		// existing end of file.
		if f.pos <= cap(f.n.data) {
			// Extend within capacity and zero the hole: the slice may
			// contain stale bytes from earlier truncation.
			oldLen := len(f.n.data)
			f.n.data = f.n.data[:f.pos]
			hole := f.n.data[oldLen:]
			for i := range hole {
				hole[i] = 0
			}
		} else {
			// Reallocate: make() zero-fills, with room reserved for p.
			d := make([]byte, f.pos, f.pos+len(p))
			copy(d, f.n.data)
			f.n.data = d
		}
	}

	if len(p) > 0 {
		// We should only get here if f.pos == len(f.n.data).
		f.n.data = append(f.n.data, p...)
		f.pos = len(f.n.data)
	}
	f.n.modTime = time.Now()
	return lenp, nil
}
|
||||||
|
|
||||||
|
// moveFiles moves files and/or directories from src to dst.
//
// See section 9.9.4 for when various HTTP status codes apply.
func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
	created := false
	if _, err := fs.Stat(ctx, dst); err != nil {
		if !os.IsNotExist(err) {
			return http.StatusForbidden, err
		}
		created = true
	} else if overwrite {
		// Section 9.9.3 says that "If a resource exists at the destination
		// and the Overwrite header is "T", then prior to performing the move,
		// the server must perform a DELETE with "Depth: infinity" on the
		// destination resource.
		if err := fs.RemoveAll(ctx, dst); err != nil {
			return http.StatusForbidden, err
		}
	} else {
		// Destination exists and Overwrite is "F".
		return http.StatusPreconditionFailed, os.ErrExist
	}
	if err := fs.Rename(ctx, src, dst); err != nil {
		return http.StatusForbidden, err
	}
	if created {
		return http.StatusCreated, nil
	}
	return http.StatusNoContent, nil
}

// copyProps copies WebDAV dead properties from src to dst. It is a no-op
// (not an error) when either file does not implement DeadPropsHolder.
func copyProps(dst, src File) error {
	d, ok := dst.(DeadPropsHolder)
	if !ok {
		return nil
	}
	s, ok := src.(DeadPropsHolder)
	if !ok {
		return nil
	}
	m, err := s.DeadProps()
	if err != nil {
		return err
	}
	props := make([]Property, 0, len(m))
	for _, prop := range m {
		props = append(props, prop)
	}
	_, err = d.Patch([]Proppatch{{Props: props}})
	return err
}
|
||||||
|
|
||||||
|
// copyFiles copies files and/or directories from src to dst.
//
// See section 9.8.5 for when various HTTP status codes apply.
func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
	// Hard cap on recursion depth to avoid unbounded stack growth.
	if recursion == 1000 {
		return http.StatusInternalServerError, errRecursionTooDeep
	}
	recursion++

	// TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
	// into /A/B/ could lead to infinite recursion if not handled correctly."

	srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
	if err != nil {
		if os.IsNotExist(err) {
			return http.StatusNotFound, err
		}
		return http.StatusInternalServerError, err
	}
	defer srcFile.Close()
	srcStat, err := srcFile.Stat()
	if err != nil {
		if os.IsNotExist(err) {
			return http.StatusNotFound, err
		}
		return http.StatusInternalServerError, err
	}
	srcPerm := srcStat.Mode() & os.ModePerm

	created := false
	if _, err := fs.Stat(ctx, dst); err != nil {
		if os.IsNotExist(err) {
			created = true
		} else {
			return http.StatusForbidden, err
		}
	} else {
		if !overwrite {
			return http.StatusPreconditionFailed, os.ErrExist
		}
		if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
			return http.StatusForbidden, err
		}
	}

	if srcStat.IsDir() {
		if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
			return http.StatusForbidden, err
		}
		if depth == infiniteDepth {
			children, err := srcFile.Readdir(-1)
			if err != nil {
				return http.StatusForbidden, err
			}
			for _, c := range children {
				name := c.Name()
				s := path.Join(src, name)
				d := path.Join(dst, name)
				cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
				if cErr != nil {
					// TODO: MultiStatus.
					return cStatus, cErr
				}
			}
		}

	} else {
		dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
		if err != nil {
			if os.IsNotExist(err) {
				return http.StatusConflict, err
			}
			return http.StatusForbidden, err

		}
		// Copy content, then dead properties, then close — and report the
		// first failure among the three.
		_, copyErr := io.Copy(dstFile, srcFile)
		propsErr := copyProps(dstFile, srcFile)
		closeErr := dstFile.Close()
		if copyErr != nil {
			return http.StatusInternalServerError, copyErr
		}
		if propsErr != nil {
			return http.StatusInternalServerError, propsErr
		}
		if closeErr != nil {
			return http.StatusInternalServerError, closeErr
		}
	}

	if created {
		return http.StatusCreated, nil
	}
	return http.StatusNoContent, nil
}
|
||||||
|
|
||||||
|
// walkFS traverses filesystem fs starting at name up to depth levels.
//
// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
// walkFS calls walkFn. If a visited file system node is a directory and
// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
	// This implementation is based on Walk's code in the standard path/filepath package.
	err := walkFn(name, info, nil)
	if err != nil {
		if info.IsDir() && err == filepath.SkipDir {
			return nil
		}
		return err
	}
	if !info.IsDir() || depth == 0 {
		return nil
	}
	if depth == 1 {
		// Children are visited, but not descended into.
		depth = 0
	}

	// Read directory names.
	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
	if err != nil {
		return walkFn(name, info, err)
	}
	fileInfos, err := f.Readdir(0)
	f.Close()
	if err != nil {
		return walkFn(name, info, err)
	}

	for _, fileInfo := range fileInfos {
		filename := path.Join(name, fileInfo.Name())
		// Re-stat: the Readdir snapshot may be stale.
		fileInfo, err := fs.Stat(ctx, filename)
		if err != nil {
			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
				return err
			}
		} else {
			err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
			if err != nil {
				if !fileInfo.IsDir() || err != filepath.SkipDir {
					return err
				}
			}
		}
	}
	return nil
}
|
1183
drives/davServer/file_test.go
Normal file
1183
drives/davServer/file_test.go
Normal file
File diff suppressed because it is too large
Load Diff
173
drives/davServer/if.go
Normal file
173
drives/davServer/if.go
Normal file
@ -0,0 +1,173 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
// The If header is covered by Section 10.4.
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#HEADER_If
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ifHeader is a disjunction (OR) of ifLists.
// It models the WebDAV "If" request header (RFC 4918 section 10.4):
// the request may proceed if any one of the lists is satisfied.
type ifHeader struct {
	lists []ifList
}

// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
// When resourceTag is empty the conditions apply to the request URI.
type ifList struct {
	resourceTag string      // URI from the preceding <resource> tag, if any
	conditions  []Condition // all must hold for this list to match
}
|
||||||
|
|
||||||
|
// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
|
||||||
|
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
|
||||||
|
// returned by req.Header.Get("If") for an http.Request req.
|
||||||
|
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
|
||||||
|
s := strings.TrimSpace(httpHeader)
|
||||||
|
switch tokenType, _, _ := lex(s); tokenType {
|
||||||
|
case '(':
|
||||||
|
return parseNoTagLists(s)
|
||||||
|
case angleTokenType:
|
||||||
|
return parseTaggedLists(s)
|
||||||
|
default:
|
||||||
|
return ifHeader{}, false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseNoTagLists(s string) (h ifHeader, ok bool) {
|
||||||
|
for {
|
||||||
|
l, remaining, ok := parseList(s)
|
||||||
|
if !ok {
|
||||||
|
return ifHeader{}, false
|
||||||
|
}
|
||||||
|
h.lists = append(h.lists, l)
|
||||||
|
if remaining == "" {
|
||||||
|
return h, true
|
||||||
|
}
|
||||||
|
s = remaining
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseTaggedLists parses a sequence of Tagged-list productions: each
// <resourceTag> must be followed by one or more parenthesized lists, and
// every parsed list is stamped with the most recent resource tag.
func parseTaggedLists(s string) (h ifHeader, ok bool) {
	// n counts the lists seen since the current resource tag was read.
	resourceTag, n := "", 0
	for first := true; ; first = false {
		tokenType, tokenStr, remaining := lex(s)
		switch tokenType {
		case angleTokenType:
			// A new resource tag. Except at the very start, the previous
			// tag must already have been followed by at least one list.
			if !first && n == 0 {
				return ifHeader{}, false
			}
			resourceTag, n = tokenStr, 0
			s = remaining
		case '(':
			n++
			// Re-parse from s (not remaining) so parseList sees the '('.
			l, remaining, ok := parseList(s)
			if !ok {
				return ifHeader{}, false
			}
			l.resourceTag = resourceTag
			h.lists = append(h.lists, l)
			if remaining == "" {
				return h, true
			}
			s = remaining
		default:
			return ifHeader{}, false
		}
	}
}
|
||||||
|
|
||||||
|
// parseList parses a single parenthesized list of conditions. It returns
// the parsed list, the unconsumed remainder of the input, and whether the
// parse succeeded. The grammar requires at least one condition per list.
func parseList(s string) (l ifList, remaining string, ok bool) {
	tokenType, _, s := lex(s)
	if tokenType != '(' {
		return ifList{}, "", false
	}
	for {
		// Peek at the next token to detect the closing ')'.
		tokenType, _, remaining = lex(s)
		if tokenType == ')' {
			// An empty list "()" is not allowed.
			if len(l.conditions) == 0 {
				return ifList{}, "", false
			}
			return l, remaining, true
		}
		// Not ')': re-parse from s as a full condition. Note that this
		// `:=` deliberately shadows the named returns inside the loop.
		c, remaining, ok := parseCondition(s)
		if !ok {
			return ifList{}, "", false
		}
		l.conditions = append(l.conditions, c)
		s = remaining
	}
}
|
||||||
|
|
||||||
|
func parseCondition(s string) (c Condition, remaining string, ok bool) {
|
||||||
|
tokenType, tokenStr, s := lex(s)
|
||||||
|
if tokenType == notTokenType {
|
||||||
|
c.Not = true
|
||||||
|
tokenType, tokenStr, s = lex(s)
|
||||||
|
}
|
||||||
|
switch tokenType {
|
||||||
|
case strTokenType, angleTokenType:
|
||||||
|
c.Token = tokenStr
|
||||||
|
case squareTokenType:
|
||||||
|
c.ETag = tokenStr
|
||||||
|
default:
|
||||||
|
return Condition{}, "", false
|
||||||
|
}
|
||||||
|
return c, s, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
	errTokenType    = rune(-1) // malformed token, e.g. unterminated '<' or '['
	eofTokenType    = rune(-2) // end of input
	strTokenType    = rune(-3) // bare string token
	notTokenType    = rune(-4) // the literal word "Not"
	angleTokenType  = rune(-5) // <angle-bracketed> state token / resource tag
	squareTokenType = rune(-6) // [square-bracketed] entity tag
)
|
||||||
|
|
||||||
|
func lex(s string) (tokenType rune, tokenStr string, remaining string) {
|
||||||
|
// The net/textproto Reader that parses the HTTP header will collapse
|
||||||
|
// Linear White Space that spans multiple "\r\n" lines to a single " ",
|
||||||
|
// so we don't need to look for '\r' or '\n'.
|
||||||
|
for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
|
||||||
|
s = s[1:]
|
||||||
|
}
|
||||||
|
if len(s) == 0 {
|
||||||
|
return eofTokenType, "", ""
|
||||||
|
}
|
||||||
|
i := 0
|
||||||
|
loop:
|
||||||
|
for ; i < len(s); i++ {
|
||||||
|
switch s[i] {
|
||||||
|
case '\t', ' ', '(', ')', '<', '>', '[', ']':
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i != 0 {
|
||||||
|
tokenStr, remaining = s[:i], s[i:]
|
||||||
|
if tokenStr == "Not" {
|
||||||
|
return notTokenType, "", remaining
|
||||||
|
}
|
||||||
|
return strTokenType, tokenStr, remaining
|
||||||
|
}
|
||||||
|
|
||||||
|
j := 0
|
||||||
|
switch s[0] {
|
||||||
|
case '<':
|
||||||
|
j, tokenType = strings.IndexByte(s, '>'), angleTokenType
|
||||||
|
case '[':
|
||||||
|
j, tokenType = strings.IndexByte(s, ']'), squareTokenType
|
||||||
|
default:
|
||||||
|
return rune(s[0]), "", s[1:]
|
||||||
|
}
|
||||||
|
if j < 0 {
|
||||||
|
return errTokenType, "", ""
|
||||||
|
}
|
||||||
|
return tokenType, s[1:j], s[j+1:]
|
||||||
|
}
|
322
drives/davServer/if_test.go
Normal file
322
drives/davServer/if_test.go
Normal file
@ -0,0 +1,322 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestParseIfHeader exercises parseIfHeader against malformed inputs (which
// must yield the zero ifHeader and ok == false) and the worked examples
// from RFC 4918.
func TestParseIfHeader(t *testing.T) {
	// The "section x.y.z" test cases come from section x.y.z of the spec at
	// http://www.webdav.org/specs/rfc4918.html
	testCases := []struct {
		desc  string
		input string
		want  ifHeader
	}{{
		"bad: empty",
		``,
		ifHeader{},
	}, {
		"bad: no parens",
		`foobar`,
		ifHeader{},
	}, {
		"bad: empty list #1",
		`()`,
		ifHeader{},
	}, {
		"bad: empty list #2",
		`(a) (b c) () (d)`,
		ifHeader{},
	}, {
		"bad: no list after resource #1",
		`<foo>`,
		ifHeader{},
	}, {
		"bad: no list after resource #2",
		`<foo> <bar> (a)`,
		ifHeader{},
	}, {
		"bad: no list after resource #3",
		`<foo> (a) (b) <bar>`,
		ifHeader{},
	}, {
		"bad: no-tag-list followed by tagged-list",
		`(a) (b) <foo> (c)`,
		ifHeader{},
	}, {
		"bad: unfinished list",
		`(a`,
		ifHeader{},
	}, {
		"bad: unfinished ETag",
		`([b`,
		ifHeader{},
	}, {
		"bad: unfinished Notted list",
		`(Not a`,
		ifHeader{},
	}, {
		"bad: double Not",
		`(Not Not a)`,
		ifHeader{},
	}, {
		"good: one list with a Token",
		`(a)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `a`,
				}},
			}},
		},
	}, {
		"good: one list with an ETag",
		`([a])`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					ETag: `a`,
				}},
			}},
		},
	}, {
		"good: one list with three Nots",
		`(Not a Not b Not [d])`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Not:   true,
					Token: `a`,
				}, {
					Not:   true,
					Token: `b`,
				}, {
					Not:  true,
					ETag: `d`,
				}},
			}},
		},
	}, {
		"good: two lists",
		`(a) (b)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `a`,
				}},
			}, {
				conditions: []Condition{{
					Token: `b`,
				}},
			}},
		},
	}, {
		"good: two Notted lists",
		`(Not a) (Not b)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Not:   true,
					Token: `a`,
				}},
			}, {
				conditions: []Condition{{
					Not:   true,
					Token: `b`,
				}},
			}},
		},
	}, {
		"section 7.5.1",
		`<http://www.example.com/users/f/fielding/index.html>
			(<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `http://www.example.com/users/f/fielding/index.html`,
				conditions: []Condition{{
					Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,
				}},
			}},
		},
	}, {
		"section 7.5.2 #1",
		`(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
				}},
			}},
		},
	}, {
		"section 7.5.2 #2",
		`<http://example.com/locked/>
			(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `http://example.com/locked/`,
				conditions: []Condition{{
					Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
				}},
			}},
		},
	}, {
		"section 7.5.2 #3",
		`<http://example.com/locked/member>
			(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `http://example.com/locked/member`,
				conditions: []Condition{{
					Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
				}},
			}},
		},
	}, {
		"section 9.9.6",
		`(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>)
			(<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,
				}},
			}, {
				conditions: []Condition{{
					Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,
				}},
			}},
		},
	}, {
		"section 9.10.8",
		`(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,
				}},
			}},
		},
	}, {
		"section 10.4.6",
		`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
			["I am an ETag"])
			(["I am another ETag"])`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
				}, {
					ETag: `"I am an ETag"`,
				}},
			}, {
				conditions: []Condition{{
					ETag: `"I am another ETag"`,
				}},
			}},
		},
	}, {
		"section 10.4.7",
		`(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
			<urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Not:   true,
					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
				}, {
					Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,
				}},
			}},
		},
	}, {
		"section 10.4.8",
		`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)
			(Not <DAV:no-lock>)`,
		ifHeader{
			lists: []ifList{{
				conditions: []Condition{{
					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
				}},
			}, {
				conditions: []Condition{{
					Not:   true,
					Token: `DAV:no-lock`,
				}},
			}},
		},
	}, {
		"section 10.4.9",
		`</resource1>
			(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
			[W/"A weak ETag"]) (["strong ETag"])`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `/resource1`,
				conditions: []Condition{{
					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
				}, {
					ETag: `W/"A weak ETag"`,
				}},
			}, {
				resourceTag: `/resource1`,
				conditions: []Condition{{
					ETag: `"strong ETag"`,
				}},
			}},
		},
	}, {
		"section 10.4.10",
		`<http://www.example.com/specs/>
			(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `http://www.example.com/specs/`,
				conditions: []Condition{{
					Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
				}},
			}},
		},
	}, {
		"section 10.4.11 #1",
		`</specs/rfc2518.doc> (["4217"])`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `/specs/rfc2518.doc`,
				conditions: []Condition{{
					ETag: `"4217"`,
				}},
			}},
		},
	}, {
		"section 10.4.11 #2",
		`</specs/rfc2518.doc> (Not ["4217"])`,
		ifHeader{
			lists: []ifList{{
				resourceTag: `/specs/rfc2518.doc`,
				conditions: []Condition{{
					Not:  true,
					ETag: `"4217"`,
				}},
			}},
		},
	}}

	for _, tc := range testCases {
		// Multi-line inputs above simulate header folding; a real header
		// value arrives with newlines already collapsed, so strip them.
		got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1))
		if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {
			t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok)
			continue
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%s:\ngot  %v\nwant %v", tc.desc, got, tc.want)
			continue
		}
	}
}
|
11
drives/davServer/internal/xml/README
Normal file
11
drives/davServer/internal/xml/README
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
This is a fork of the encoding/xml package at ca1d6c4, the last commit before
|
||||||
|
https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
|
||||||
|
space behavior" made late in the lead-up to the Go 1.5 release.
|
||||||
|
|
||||||
|
The list of encoding/xml changes is at
|
||||||
|
https://go.googlesource.com/go/+log/master/src/encoding/xml
|
||||||
|
|
||||||
|
This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
|
||||||
|
released.
|
||||||
|
|
||||||
|
See http://golang.org/issue/11841
|
56
drives/davServer/internal/xml/atom_test.go
Normal file
56
drives/davServer/internal/xml/atom_test.go
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xml
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// atomValue is the Go-side fixture: an Atom feed with one entry, mirroring
// the serialized form in atomXml below.
var atomValue = &Feed{
	XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
	Title:   "Example Feed",
	Link:    []Link{{Href: "http://example.org/"}},
	Updated: ParseTime("2003-12-13T18:30:02Z"),
	Author:  Person{Name: "John Doe"},
	Id:      "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",

	Entry: []Entry{
		{
			Title:   "Atom-Powered Robots Run Amok",
			Link:    []Link{{Href: "http://example.org/2003/12/13/atom03"}},
			Id:      "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
			Updated: ParseTime("2003-12-13T18:30:02Z"),
			Summary: NewText("Some text."),
		},
	},
}

// atomXml is the expected marshaled form of atomValue (no indentation).
var atomXml = `` +
	`<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` +
	`<title>Example Feed</title>` +
	`<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +
	`<link href="http://example.org/"></link>` +
	`<author><name>John Doe</name><uri></uri><email></email></author>` +
	`<entry>` +
	`<title>Atom-Powered Robots Run Amok</title>` +
	`<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +
	`<link href="http://example.org/2003/12/13/atom03"></link>` +
	`<updated>2003-12-13T18:30:02Z</updated>` +
	`<author><name></name><uri></uri><email></email></author>` +
	`<summary>Some text.</summary>` +
	`</entry>` +
	`</feed>`
||||||
|
|
||||||
|
func ParseTime(str string) time.Time {
|
||||||
|
t, err := time.Parse(time.RFC3339, str)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewText(text string) Text {
|
||||||
|
return Text{
|
||||||
|
Body: text,
|
||||||
|
}
|
||||||
|
}
|
151
drives/davServer/internal/xml/example_test.go
Normal file
151
drives/davServer/internal/xml/example_test.go
Normal file
@ -0,0 +1,151 @@
|
|||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xml_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ExampleMarshalIndent demonstrates pretty-printed marshaling of a struct,
// covering attribute ("id,attr"), nested-element ("name>first"), omitempty,
// embedded-struct and comment (",comment") fields.
func ExampleMarshalIndent() {
	type Address struct {
		City, State string
	}
	type Person struct {
		XMLName   xml.Name `xml:"person"`
		Id        int      `xml:"id,attr"`
		FirstName string   `xml:"name>first"`
		LastName  string   `xml:"name>last"`
		Age       int      `xml:"age"`
		Height    float32  `xml:"height,omitempty"`
		Married   bool
		Address
		Comment string `xml:",comment"`
	}

	v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
	v.Comment = " Need more details. "
	v.Address = Address{"Hanga Roa", "Easter Island"}

	// NOTE(review): the exact prefix/indent arguments were garbled in the
	// source dump; restored to match the upstream encoding/xml example.
	output, err := xml.MarshalIndent(v, "  ", "    ")
	if err != nil {
		fmt.Printf("error: %v\n", err)
	}

	os.Stdout.Write(output)
	// Output:
	//   <person id="13">
	//       <name>
	//           <first>John</first>
	//           <last>Doe</last>
	//       </name>
	//       <age>42</age>
	//       <Married>false</Married>
	//       <City>Hanga Roa</City>
	//       <State>Easter Island</State>
	//       <!-- Need more details. -->
	//   </person>
}
|
||||||
|
|
||||||
|
// ExampleEncoder produces the same indented output as ExampleMarshalIndent,
// but streams it through an Encoder instead of building a byte slice.
func ExampleEncoder() {
	type Address struct {
		City, State string
	}
	type Person struct {
		XMLName   xml.Name `xml:"person"`
		Id        int      `xml:"id,attr"`
		FirstName string   `xml:"name>first"`
		LastName  string   `xml:"name>last"`
		Age       int      `xml:"age"`
		Height    float32  `xml:"height,omitempty"`
		Married   bool
		Address
		Comment string `xml:",comment"`
	}

	v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
	v.Comment = " Need more details. "
	v.Address = Address{"Hanga Roa", "Easter Island"}

	enc := xml.NewEncoder(os.Stdout)
	// NOTE(review): prefix/indent arguments were garbled in the source dump;
	// restored to match the upstream encoding/xml example.
	enc.Indent("  ", "    ")
	if err := enc.Encode(v); err != nil {
		fmt.Printf("error: %v\n", err)
	}

	// Output:
	//   <person id="13">
	//       <name>
	//           <first>John</first>
	//           <last>Doe</last>
	//       </name>
	//       <age>42</age>
	//       <Married>false</Married>
	//       <City>Hanga Roa</City>
	//       <State>Easter Island</State>
	//       <!-- Need more details. -->
	//   </person>
}
|
||||||
|
|
||||||
|
// This example demonstrates unmarshaling an XML excerpt into a value with
// some preset fields. Note that the Phone field isn't modified and that
// the XML <Company> element is ignored. Also, the Groups field is assigned
// considering the element path provided in its tag.
func ExampleUnmarshal() {
	type Email struct {
		Where string `xml:"where,attr"`
		Addr  string
	}
	type Address struct {
		City, State string
	}
	type Result struct {
		XMLName xml.Name `xml:"Person"`
		Name    string   `xml:"FullName"`
		Phone   string
		Email   []Email
		Groups  []string `xml:"Group>Value"`
		Address
	}
	// Preset values survive unmarshaling unless overwritten by the input.
	v := Result{Name: "none", Phone: "none"}

	data := `
		<Person>
			<FullName>Grace R. Emlin</FullName>
			<Company>Example Inc.</Company>
			<Email where="home">
				<Addr>gre@example.com</Addr>
			</Email>
			<Email where='work'>
				<Addr>gre@work.com</Addr>
			</Email>
			<Group>
				<Value>Friends</Value>
				<Value>Squash</Value>
			</Group>
			<City>Hanga Roa</City>
			<State>Easter Island</State>
		</Person>
	`
	err := xml.Unmarshal([]byte(data), &v)
	if err != nil {
		fmt.Printf("error: %v", err)
		return
	}
	fmt.Printf("XMLName: %#v\n", v.XMLName)
	fmt.Printf("Name: %q\n", v.Name)
	fmt.Printf("Phone: %q\n", v.Phone)
	fmt.Printf("Email: %v\n", v.Email)
	fmt.Printf("Groups: %v\n", v.Groups)
	fmt.Printf("Address: %v\n", v.Address)
	// Output:
	// XMLName: xml.Name{Space:"", Local:"Person"}
	// Name: "Grace R. Emlin"
	// Phone: "none"
	// Email: [{home gre@example.com} {work gre@work.com}]
	// Groups: [Friends Squash]
	// Address: {Hanga Roa Easter Island}
}
|
1223
drives/davServer/internal/xml/marshal.go
Normal file
1223
drives/davServer/internal/xml/marshal.go
Normal file
File diff suppressed because it is too large
Load Diff
1939
drives/davServer/internal/xml/marshal_test.go
Normal file
1939
drives/davServer/internal/xml/marshal_test.go
Normal file
File diff suppressed because it is too large
Load Diff
691
drives/davServer/internal/xml/read.go
Normal file
691
drives/davServer/internal/xml/read.go
Normal file
@ -0,0 +1,691 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
|
||||||
|
// an XML element is an order-dependent collection of anonymous
|
||||||
|
// values, while a data structure is an order-independent collection
|
||||||
|
// of named values.
|
||||||
|
// See package json for a textual representation more suitable
|
||||||
|
// to data structures.
|
||||||
|
|
||||||
|
// Unmarshal parses the XML-encoded data and stores the result in
|
||||||
|
// the value pointed to by v, which must be an arbitrary struct,
|
||||||
|
// slice, or string. Well-formed data that does not fit into v is
|
||||||
|
// discarded.
|
||||||
|
//
|
||||||
|
// Because Unmarshal uses the reflect package, it can only assign
|
||||||
|
// to exported (upper case) fields. Unmarshal uses a case-sensitive
|
||||||
|
// comparison to match XML element names to tag values and struct
|
||||||
|
// field names.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element to a struct using the following rules.
|
||||||
|
// In the rules, the tag of a field refers to the value associated with the
|
||||||
|
// key 'xml' in the struct field's tag (see the example above).
|
||||||
|
//
|
||||||
|
// - If the struct has a field of type []byte or string with tag
|
||||||
|
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
|
||||||
|
// element in that field. The rest of the rules still apply.
|
||||||
|
//
|
||||||
|
// - If the struct has a field named XMLName of type xml.Name,
|
||||||
|
// Unmarshal records the element name in that field.
|
||||||
|
//
|
||||||
|
// - If the XMLName field has an associated tag of the form
|
||||||
|
// "name" or "namespace-URL name", the XML element must have
|
||||||
|
// the given name (and, optionally, name space) or else Unmarshal
|
||||||
|
// returns an error.
|
||||||
|
//
|
||||||
|
// - If the XML element has an attribute whose name matches a
|
||||||
|
// struct field name with an associated tag containing ",attr" or
|
||||||
|
// the explicit name in a struct field tag of the form "name,attr",
|
||||||
|
// Unmarshal records the attribute value in that field.
|
||||||
|
//
|
||||||
|
// - If the XML element contains character data, that data is
|
||||||
|
// accumulated in the first struct field that has tag ",chardata".
|
||||||
|
// The struct field may have type []byte or string.
|
||||||
|
// If there is no such field, the character data is discarded.
|
||||||
|
//
|
||||||
|
// - If the XML element contains comments, they are accumulated in
|
||||||
|
// the first struct field that has tag ",comment". The struct
|
||||||
|
// field may have type []byte or string. If there is no such
|
||||||
|
// field, the comments are discarded.
|
||||||
|
//
|
||||||
|
// - If the XML element contains a sub-element whose name matches
|
||||||
|
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
|
||||||
|
// will descend into the XML structure looking for elements with the
|
||||||
|
// given names, and will map the innermost elements to that struct
|
||||||
|
// field. A tag starting with ">" is equivalent to one starting
|
||||||
|
// with the field name followed by ">".
|
||||||
|
//
|
||||||
|
// - If the XML element contains a sub-element whose name matches
|
||||||
|
// a struct field's XMLName tag and the struct field has no
|
||||||
|
// explicit name tag as per the previous rule, unmarshal maps
|
||||||
|
// the sub-element to that struct field.
|
||||||
|
//
|
||||||
|
// - If the XML element contains a sub-element whose name matches a
|
||||||
|
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
|
||||||
|
// maps the sub-element to that struct field.
|
||||||
|
//
|
||||||
|
// - If the XML element contains a sub-element that hasn't matched any
|
||||||
|
// of the above rules and the struct has a field with tag ",any",
|
||||||
|
// unmarshal maps the sub-element to that struct field.
|
||||||
|
//
|
||||||
|
// - An anonymous struct field is handled as if the fields of its
|
||||||
|
// value were part of the outer struct.
|
||||||
|
//
|
||||||
|
// - A struct field with tag "-" is never unmarshalled into.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element to a string or []byte by saving the
|
||||||
|
// concatenation of that element's character data in the string or
|
||||||
|
// []byte. The saved []byte is never nil.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an attribute value to a string or []byte by saving
|
||||||
|
// the value in the string or slice.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element to a slice by extending the length of
|
||||||
|
// the slice and mapping the element to the newly created value.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element or attribute value to a bool by
|
||||||
|
// setting it to the boolean value represented by the string.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element or attribute value to an integer or
|
||||||
|
// floating-point field by setting the field to the result of
|
||||||
|
// interpreting the string value in decimal. There is no check for
|
||||||
|
// overflow.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element to an xml.Name by recording the
|
||||||
|
// element name.
|
||||||
|
//
|
||||||
|
// Unmarshal maps an XML element to a pointer by setting the pointer
|
||||||
|
// to a freshly allocated value and then mapping the element to that value.
|
||||||
|
func Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode works like xml.Unmarshal, except it reads the decoder
// stream to find the start element.
// Passing nil as the start element makes DecodeElement locate it itself.
func (d *Decoder) Decode(v interface{}) error {
	return d.DecodeElement(v, nil)
}
|
||||||
|
|
||||||
|
// DecodeElement works like xml.Unmarshal except that it takes
|
||||||
|
// a pointer to the start XML element to decode into v.
|
||||||
|
// It is useful when a client reads some raw XML tokens itself
|
||||||
|
// but also wants to defer to Unmarshal for some elements.
|
||||||
|
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
|
||||||
|
val := reflect.ValueOf(v)
|
||||||
|
if val.Kind() != reflect.Ptr {
|
||||||
|
return errors.New("non-pointer passed to Unmarshal")
|
||||||
|
}
|
||||||
|
return d.unmarshal(val.Elem(), start)
|
||||||
|
}
|
||||||
|
|
||||||
|
// An UnmarshalError represents an error in the unmarshalling process.
type UnmarshalError string

// Error implements the error interface by returning the message verbatim.
func (e UnmarshalError) Error() string { return string(e) }
|
||||||
|
|
||||||
|
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
	UnmarshalXML(d *Decoder, start StartElement) error
}
|
||||||
|
|
||||||
|
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type UnmarshalerAttr interface {
	UnmarshalXMLAttr(attr Attr) error
}
|
||||||
|
|
||||||
|
// receiverType returns the receiver type to use in an expression like
// "%s.MethodName". Named types are rendered as-is; unnamed types
// (pointers and other composites) are wrapped in parentheses.
func receiverType(val interface{}) string {
	typ := reflect.TypeOf(val)
	str := typ.String()
	if typ.Name() == "" {
		return "(" + str + ")"
	}
	return str
}
|
||||||
|
|
||||||
|
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
// It brackets the user's UnmarshalXML call with pushEOF/popEOF so the
// decoder can verify that the implementation consumed exactly the
// element delimited by start and its matching end tag.
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
	// Record that decoder must stop at end tag corresponding to start.
	p.pushEOF()

	p.unmarshalDepth++
	err := val.UnmarshalXML(p, *start)
	p.unmarshalDepth--
	if err != nil {
		// Pop unconditionally on failure so the EOF stack stays balanced.
		p.popEOF()
		return err
	}

	// popEOF reports whether the matching end tag was consumed; if not,
	// the user's UnmarshalXML violated its contract.
	if !p.popEOF() {
		return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
	}

	return nil
}
|
||||||
|
|
||||||
|
// unmarshalTextInterface unmarshals a single XML element into val.
|
||||||
|
// The chardata contained in the element (but not its children)
|
||||||
|
// is passed to the text unmarshaler.
|
||||||
|
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
|
||||||
|
var buf []byte
|
||||||
|
depth := 1
|
||||||
|
for depth > 0 {
|
||||||
|
t, err := p.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch t := t.(type) {
|
||||||
|
case CharData:
|
||||||
|
if depth == 1 {
|
||||||
|
buf = append(buf, t...)
|
||||||
|
}
|
||||||
|
case StartElement:
|
||||||
|
depth++
|
||||||
|
case EndElement:
|
||||||
|
depth--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return val.UnmarshalText(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalAttr unmarshals a single XML attribute into val.
|
||||||
|
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
|
||||||
|
if val.Kind() == reflect.Ptr {
|
||||||
|
if val.IsNil() {
|
||||||
|
val.Set(reflect.New(val.Type().Elem()))
|
||||||
|
}
|
||||||
|
val = val.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
|
||||||
|
// This is an unmarshaler with a non-pointer receiver,
|
||||||
|
// so it's likely to be incorrect, but we do what we're told.
|
||||||
|
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
|
||||||
|
}
|
||||||
|
if val.CanAddr() {
|
||||||
|
pv := val.Addr()
|
||||||
|
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
|
||||||
|
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
|
||||||
|
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
|
||||||
|
// This is an unmarshaler with a non-pointer receiver,
|
||||||
|
// so it's likely to be incorrect, but we do what we're told.
|
||||||
|
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
|
||||||
|
}
|
||||||
|
if val.CanAddr() {
|
||||||
|
pv := val.Addr()
|
||||||
|
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
|
||||||
|
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
copyValue(val, []byte(attr.Value))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cached reflect.Type values for the three unmarshalling interfaces,
// computed once so the repeated Implements checks during decoding are cheap.
var (
	unmarshalerType     = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
	unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
|
||||||
|
|
||||||
|
// Unmarshal a single XML element into val.
//
// If start is nil, tokens are read until the next StartElement.
// Custom unmarshalers (Unmarshaler, then encoding.TextUnmarshaler) are
// tried first; otherwise behavior is dispatched on val's kind: slices
// grow by one element and recurse, scalars and []byte accumulate
// character data, and structs match attributes and sub-elements against
// their cached typeInfo field table.
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
	// Find start element if we need it.
	if start == nil {
		for {
			tok, err := p.Token()
			if err != nil {
				return err
			}
			if t, ok := tok.(StartElement); ok {
				start = &t
				break
			}
		}
	}

	// Load value from interface, but only if the result will be
	// usefully addressable.
	if val.Kind() == reflect.Interface && !val.IsNil() {
		e := val.Elem()
		if e.Kind() == reflect.Ptr && !e.IsNil() {
			val = e
		}
	}

	// Allocate through a nil pointer so we always decode into a value.
	if val.Kind() == reflect.Ptr {
		if val.IsNil() {
			val.Set(reflect.New(val.Type().Elem()))
		}
		val = val.Elem()
	}

	if val.CanInterface() && val.Type().Implements(unmarshalerType) {
		// This is an unmarshaler with a non-pointer receiver,
		// so it's likely to be incorrect, but we do what we're told.
		return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
	}

	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
			return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
		}
	}

	if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
		return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
	}

	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
			return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
		}
	}

	var (
		data         []byte        // accumulated chardata for saveData
		saveData     reflect.Value // destination for chardata, if any
		comment      []byte        // accumulated comment text for saveComment
		saveComment  reflect.Value // destination for comments, if any
		saveXML      reflect.Value // destination for ",innerxml", if any
		saveXMLIndex int           // offset into p.saved where innerxml begins
		saveXMLData  []byte        // raw innerxml bytes, sliced out at EndElement
		saveAny      reflect.Value // destination for ",any" fields, if any
		sv           reflect.Value // the struct value being filled, if a struct
		tinfo        *typeInfo
		err          error
	)

	switch v := val; v.Kind() {
	default:
		return errors.New("unknown type " + v.Type().String())

	case reflect.Interface:
		// TODO: For now, simply ignore the field. In the near
		// future we may choose to unmarshal the start
		// element on it, if not nil.
		return p.Skip()

	case reflect.Slice:
		typ := v.Type()
		if typ.Elem().Kind() == reflect.Uint8 {
			// []byte: treated as chardata, not as a list of elements.
			saveData = v
			break
		}

		// Slice of element values.
		// Grow slice.
		n := v.Len()
		if n >= v.Cap() {
			ncap := 2 * n
			if ncap < 4 {
				ncap = 4
			}
			new := reflect.MakeSlice(typ, n, ncap)
			reflect.Copy(new, v)
			v.Set(new)
		}
		v.SetLen(n + 1)

		// Recur to read element into slice.
		if err := p.unmarshal(v.Index(n), start); err != nil {
			// Undo the speculative growth on failure.
			v.SetLen(n)
			return err
		}
		return nil

	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
		// Scalar kinds: collect chardata, parsed by copyValue below.
		saveData = v

	case reflect.Struct:
		typ := v.Type()
		if typ == nameType {
			// A bare xml.Name field receives the element name directly.
			v.Set(reflect.ValueOf(start.Name))
			break
		}

		sv = v
		tinfo, err = getTypeInfo(typ)
		if err != nil {
			return err
		}

		// Validate and assign element name.
		if tinfo.xmlname != nil {
			finfo := tinfo.xmlname
			if finfo.name != "" && finfo.name != start.Name.Local {
				return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
			}
			if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
				e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
				if start.Name.Space == "" {
					e += "no name space"
				} else {
					e += start.Name.Space
				}
				return UnmarshalError(e)
			}
			fv := finfo.value(sv)
			if _, ok := fv.Interface().(Name); ok {
				fv.Set(reflect.ValueOf(start.Name))
			}
		}

		// Assign attributes.
		// Also, determine whether we need to save character data or comments.
		for i := range tinfo.fields {
			finfo := &tinfo.fields[i]
			switch finfo.flags & fMode {
			case fAttr:
				strv := finfo.value(sv)
				// Look for attribute.
				for _, a := range start.Attr {
					if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
						if err := p.unmarshalAttr(strv, a); err != nil {
							return err
						}
						break
					}
				}

			case fCharData:
				// First matching field wins for each of the save slots below.
				if !saveData.IsValid() {
					saveData = finfo.value(sv)
				}

			case fComment:
				if !saveComment.IsValid() {
					saveComment = finfo.value(sv)
				}

			case fAny, fAny | fElement:
				if !saveAny.IsValid() {
					saveAny = finfo.value(sv)
				}

			case fInnerXml:
				if !saveXML.IsValid() {
					saveXML = finfo.value(sv)
					// p.saved buffers raw input so innerxml can be
					// sliced out verbatim later.
					if p.saved == nil {
						saveXMLIndex = 0
						p.saved = new(bytes.Buffer)
					} else {
						saveXMLIndex = p.savedOffset()
					}
				}
			}
		}
	}

	// Find end element.
	// Process sub-elements along the way.
Loop:
	for {
		var savedOffset int
		if saveXML.IsValid() {
			savedOffset = p.savedOffset()
		}
		tok, err := p.Token()
		if err != nil {
			return err
		}
		switch t := tok.(type) {
		case StartElement:
			consumed := false
			if sv.IsValid() {
				// Try to match the child element against struct fields.
				consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
				if err != nil {
					return err
				}
				if !consumed && saveAny.IsValid() {
					// Unmatched children fall through to the ",any" field.
					consumed = true
					if err := p.unmarshal(saveAny, &t); err != nil {
						return err
					}
				}
			}
			if !consumed {
				if err := p.Skip(); err != nil {
					return err
				}
			}

		case EndElement:
			if saveXML.IsValid() {
				// savedOffset was captured before this end tag was read,
				// so the end tag itself is excluded from innerxml.
				saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
				if saveXMLIndex == 0 {
					p.saved = nil
				}
			}
			break Loop

		case CharData:
			if saveData.IsValid() {
				data = append(data, t...)
			}

		case Comment:
			if saveComment.IsValid() {
				comment = append(comment, t...)
			}
		}
	}

	// Chardata destination may itself implement TextUnmarshaler
	// (value receiver first, then addressable pointer receiver).
	if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
		if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
			return err
		}
		saveData = reflect.Value{}
	}

	if saveData.IsValid() && saveData.CanAddr() {
		pv := saveData.Addr()
		if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
			if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
				return err
			}
			saveData = reflect.Value{}
		}
	}

	// copyValue tolerates an invalid saveData (no chardata field).
	if err := copyValue(saveData, data); err != nil {
		return err
	}

	switch t := saveComment; t.Kind() {
	case reflect.String:
		t.SetString(string(comment))
	case reflect.Slice:
		t.Set(reflect.ValueOf(comment))
	}

	switch t := saveXML; t.Kind() {
	case reflect.String:
		t.SetString(string(saveXMLData))
	case reflect.Slice:
		t.Set(reflect.ValueOf(saveXMLData))
	}

	return nil
}
|
||||||
|
|
||||||
|
// copyValue parses the accumulated byte data src into dst according to
// dst's kind. A nil pointer destination is allocated first. An invalid
// dst (no destination field) is silently ignored; kinds with no parsing
// rule report an error naming the original destination type.
func copyValue(dst reflect.Value, src []byte) (err error) {
	dst0 := dst

	if dst.Kind() == reflect.Ptr {
		if dst.IsNil() {
			dst.Set(reflect.New(dst.Type().Elem()))
		}
		dst = dst.Elem()
	}

	// Save accumulated data.
	switch dst.Kind() {
	case reflect.Invalid:
		// Probably a comment.
	default:
		return errors.New("cannot unmarshal into " + dst0.Type().String())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
		if err != nil {
			return err
		}
		dst.SetInt(itmp)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
		if err != nil {
			return err
		}
		dst.SetUint(utmp)
	case reflect.Float32, reflect.Float64:
		ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
		if err != nil {
			return err
		}
		dst.SetFloat(ftmp)
	case reflect.Bool:
		// Surrounding whitespace is tolerated for booleans only.
		value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
		if err != nil {
			return err
		}
		dst.SetBool(value)
	case reflect.String:
		dst.SetString(string(src))
	case reflect.Slice:
		if len(src) == 0 {
			// non-nil to flag presence
			src = []byte{}
		}
		dst.SetBytes(src)
	}
	return nil
}
|
||||||
|
|
||||||
|
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
// parents is the chain of element names already descended through;
// it is matched against each field's parent path from typeInfo.
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
	recurse := false
Loop:
	for i := range tinfo.fields {
		finfo := &tinfo.fields[i]
		// Only element fields whose parent path is at least as deep as
		// ours and whose namespace (if any) matches are candidates.
		if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
			continue
		}
		for j := range parents {
			if parents[j] != finfo.parents[j] {
				continue Loop
			}
		}
		if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
			// It's a perfect match, unmarshal the field.
			return true, p.unmarshal(finfo.value(sv), start)
		}
		if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
			// It's a prefix for the field. Break and recurse
			// since it's not ok for one field path to be itself
			// the prefix for another field path.
			recurse = true

			// We can reuse the same slice as long as we
			// don't try to append to it.
			parents = finfo.parents[:len(parents)+1]
			break
		}
	}
	if !recurse {
		// We have no business with this element.
		return false, nil
	}
	// The element is not a perfect match for any field, but one
	// or more fields have the path to this element as a parent
	// prefix. Recurse and attempt to match these.
	for {
		var tok Token
		tok, err = p.Token()
		if err != nil {
			return true, err
		}
		switch t := tok.(type) {
		case StartElement:
			consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
			if err != nil {
				return true, err
			}
			if !consumed2 {
				// Child matched nothing; discard it entirely.
				if err := p.Skip(); err != nil {
					return true, err
				}
			}
		case EndElement:
			return true, nil
		}
	}
}
|
||||||
|
|
||||||
|
// Skip reads tokens until it has consumed the end element
|
||||||
|
// matching the most recent start element already consumed.
|
||||||
|
// It recurs if it encounters a start element, so it can be used to
|
||||||
|
// skip nested structures.
|
||||||
|
// It returns nil if it finds an end element matching the start
|
||||||
|
// element; otherwise it returns an error describing the problem.
|
||||||
|
func (d *Decoder) Skip() error {
|
||||||
|
for {
|
||||||
|
tok, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch tok.(type) {
|
||||||
|
case StartElement:
|
||||||
|
if err := d.Skip(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case EndElement:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
744
drives/davServer/internal/xml/read_test.go
Normal file
744
drives/davServer/internal/xml/read_test.go
Normal file
@ -0,0 +1,744 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Stripped down Atom feed data structures.
|
||||||
|
|
||||||
|
// TestUnmarshalFeed decodes the canned Atom feed in atomFeedString and
// verifies the result matches the expected atomFeed value exactly.
func TestUnmarshalFeed(t *testing.T) {
	var f Feed
	if err := Unmarshal([]byte(atomFeedString), &f); err != nil {
		t.Fatalf("Unmarshal: %s", err)
	}
	if !reflect.DeepEqual(f, atomFeed) {
		t.Fatalf("have %#v\nwant %#v", f, atomFeed)
	}
}
|
||||||
|
|
||||||
|
// hget http://codereview.appspot.com/rss/mine/rsc
|
||||||
|
const atomFeedString = `
|
||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld<></name></author><entry><title>rietveld: an attempt at pubsubhubbub
|
||||||
|
</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
|
||||||
|
An attempt at adding pubsubhubbub support to Rietveld.
|
||||||
|
http://code.google.com/p/pubsubhubbub
|
||||||
|
http://code.google.com/p/rietveld/issues/detail?id=155
|
||||||
|
|
||||||
|
The server side of the protocol is trivial:
|
||||||
|
1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
|
||||||
|
feeds that will be pubsubhubbubbed.
|
||||||
|
2. every time one of those feeds changes, tell the hub
|
||||||
|
with a simple POST request.
|
||||||
|
|
||||||
|
I have tested this by adding debug prints to a local hub
|
||||||
|
server and checking that the server got the right publish
|
||||||
|
requests.
|
||||||
|
|
||||||
|
I can&#39;t quite get the server to work, but I think the bug
|
||||||
|
is not in my code. I think that the server expects to be
|
||||||
|
able to grab the feed and see the feed&#39;s actual URL in
|
||||||
|
the link rel=&quot;self&quot;, but the default value for that drops
|
||||||
|
the :port from the URL, and I cannot for the life of me
|
||||||
|
figure out how to get the Atom generator deep inside
|
||||||
|
django not to do that, or even where it is doing that,
|
||||||
|
or even what code is running to generate the Atom feed.
|
||||||
|
(I thought I knew but I added some assert False statements
|
||||||
|
and it kept running!)
|
||||||
|
|
||||||
|
Ignoring that particular problem, I would appreciate
|
||||||
|
feedback on the right way to get the two values at
|
||||||
|
the top of feeds.py marked NOTE(rsc).
|
||||||
|
|
||||||
|
|
||||||
|
</summary></entry><entry><title>rietveld: correct tab handling
|
||||||
|
</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
|
||||||
|
This fixes the buggy tab rendering that can be seen at
|
||||||
|
http://codereview.appspot.com/116075/diff/1/2
|
||||||
|
|
||||||
|
The fundamental problem was that the tab code was
|
||||||
|
not being told what column the text began in, so it
|
||||||
|
didn&#39;t know where to put the tab stops. Another problem
|
||||||
|
was that some of the code assumed that string byte
|
||||||
|
offsets were the same as column offsets, which is only
|
||||||
|
true if there are no tabs.
|
||||||
|
|
||||||
|
In the process of fixing this, I cleaned up the arguments
|
||||||
|
to Fold and ExpandTabs and renamed them Break and
|
||||||
|
_ExpandTabs so that I could be sure that I found all the
|
||||||
|
call sites. I also wanted to verify that ExpandTabs was
|
||||||
|
not being used from outside intra_region_diff.py.
|
||||||
|
|
||||||
|
|
||||||
|
</summary></entry></feed> `
|
||||||
|
|
||||||
|
// Feed is a stripped-down Atom feed used by TestUnmarshalFeed.
type Feed struct {
	XMLName Name      `xml:"http://www.w3.org/2005/Atom feed"`
	Title   string    `xml:"title"`
	Id      string    `xml:"id"`
	Link    []Link    `xml:"link"`
	Updated time.Time `xml:"updated,attr"`
	Author  Person    `xml:"author"`
	Entry   []Entry   `xml:"entry"`
}

// Entry is a single item within a Feed.
type Entry struct {
	Title   string    `xml:"title"`
	Id      string    `xml:"id"`
	Link    []Link    `xml:"link"`
	Updated time.Time `xml:"updated"`
	Author  Person    `xml:"author"`
	Summary Text      `xml:"summary"`
}

// Link models an Atom <link> element via its attributes.
type Link struct {
	Rel  string `xml:"rel,attr,omitempty"`
	Href string `xml:"href,attr"`
}

// Person models an Atom author; InnerXML captures the raw XML of the
// element body via the ",innerxml" option.
type Person struct {
	Name     string `xml:"name"`
	URI      string `xml:"uri"`
	Email    string `xml:"email"`
	InnerXML string `xml:",innerxml"`
}

// Text models a typed text construct; Body collects the element's
// character data via the ",chardata" option.
type Text struct {
	Type string `xml:"type,attr,omitempty"`
	Body string `xml:",chardata"`
}
|
||||||
|
|
||||||
|
var atomFeed = Feed{
|
||||||
|
XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
|
||||||
|
Title: "Code Review - My issues",
|
||||||
|
Link: []Link{
|
||||||
|
{Rel: "alternate", Href: "http://codereview.appspot.com/"},
|
||||||
|
{Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
|
||||||
|
},
|
||||||
|
Id: "http://codereview.appspot.com/",
|
||||||
|
Updated: ParseTime("2009-10-04T01:35:58+00:00"),
|
||||||
|
Author: Person{
|
||||||
|
Name: "rietveld<>",
|
||||||
|
InnerXML: "<name>rietveld<></name>",
|
||||||
|
},
|
||||||
|
Entry: []Entry{
|
||||||
|
{
|
||||||
|
Title: "rietveld: an attempt at pubsubhubbub\n",
|
||||||
|
Link: []Link{
|
||||||
|
{Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
|
||||||
|
},
|
||||||
|
Updated: ParseTime("2009-10-04T01:35:58+00:00"),
|
||||||
|
Author: Person{
|
||||||
|
Name: "email-address-removed",
|
||||||
|
InnerXML: "<name>email-address-removed</name>",
|
||||||
|
},
|
||||||
|
Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
|
||||||
|
Summary: Text{
|
||||||
|
Type: "html",
|
||||||
|
Body: `
|
||||||
|
An attempt at adding pubsubhubbub support to Rietveld.
|
||||||
|
http://code.google.com/p/pubsubhubbub
|
||||||
|
http://code.google.com/p/rietveld/issues/detail?id=155
|
||||||
|
|
||||||
|
The server side of the protocol is trivial:
|
||||||
|
1. add a <link rel="hub" href="hub-server"> tag to all
|
||||||
|
feeds that will be pubsubhubbubbed.
|
||||||
|
2. every time one of those feeds changes, tell the hub
|
||||||
|
with a simple POST request.
|
||||||
|
|
||||||
|
I have tested this by adding debug prints to a local hub
|
||||||
|
server and checking that the server got the right publish
|
||||||
|
requests.
|
||||||
|
|
||||||
|
I can't quite get the server to work, but I think the bug
|
||||||
|
is not in my code. I think that the server expects to be
|
||||||
|
able to grab the feed and see the feed's actual URL in
|
||||||
|
the link rel="self", but the default value for that drops
|
||||||
|
the :port from the URL, and I cannot for the life of me
|
||||||
|
figure out how to get the Atom generator deep inside
|
||||||
|
django not to do that, or even where it is doing that,
|
||||||
|
or even what code is running to generate the Atom feed.
|
||||||
|
(I thought I knew but I added some assert False statements
|
||||||
|
and it kept running!)
|
||||||
|
|
||||||
|
Ignoring that particular problem, I would appreciate
|
||||||
|
feedback on the right way to get the two values at
|
||||||
|
the top of feeds.py marked NOTE(rsc).
|
||||||
|
|
||||||
|
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Title: "rietveld: correct tab handling\n",
|
||||||
|
Link: []Link{
|
||||||
|
{Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
|
||||||
|
},
|
||||||
|
Updated: ParseTime("2009-10-03T23:02:17+00:00"),
|
||||||
|
Author: Person{
|
||||||
|
Name: "email-address-removed",
|
||||||
|
InnerXML: "<name>email-address-removed</name>",
|
||||||
|
},
|
||||||
|
Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
|
||||||
|
Summary: Text{
|
||||||
|
Type: "html",
|
||||||
|
Body: `
|
||||||
|
This fixes the buggy tab rendering that can be seen at
|
||||||
|
http://codereview.appspot.com/116075/diff/1/2
|
||||||
|
|
||||||
|
The fundamental problem was that the tab code was
|
||||||
|
not being told what column the text began in, so it
|
||||||
|
didn't know where to put the tab stops. Another problem
|
||||||
|
was that some of the code assumed that string byte
|
||||||
|
offsets were the same as column offsets, which is only
|
||||||
|
true if there are no tabs.
|
||||||
|
|
||||||
|
In the process of fixing this, I cleaned up the arguments
|
||||||
|
to Fold and ExpandTabs and renamed them Break and
|
||||||
|
_ExpandTabs so that I could be sure that I found all the
|
||||||
|
call sites. I also wanted to verify that ExpandTabs was
|
||||||
|
not being used from outside intra_region_diff.py.
|
||||||
|
|
||||||
|
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
const pathTestString = `
|
||||||
|
<Result>
|
||||||
|
<Before>1</Before>
|
||||||
|
<Items>
|
||||||
|
<Item1>
|
||||||
|
<Value>A</Value>
|
||||||
|
</Item1>
|
||||||
|
<Item2>
|
||||||
|
<Value>B</Value>
|
||||||
|
</Item2>
|
||||||
|
<Item1>
|
||||||
|
<Value>C</Value>
|
||||||
|
<Value>D</Value>
|
||||||
|
</Item1>
|
||||||
|
<_>
|
||||||
|
<Value>E</Value>
|
||||||
|
</_>
|
||||||
|
</Items>
|
||||||
|
<After>2</After>
|
||||||
|
</Result>
|
||||||
|
`
|
||||||
|
|
||||||
|
// PathTestItem is the leaf value type used by the path-matching tests.
type PathTestItem struct {
	Value string
}

// PathTestA exercises a path tag with an empty leading segment (">Item1"),
// which matches Item1 under any parent.
type PathTestA struct {
	Items         []PathTestItem `xml:">Item1"`
	Before, After string
}

// PathTestB exercises an explicit two-level path ("Items>Item1").
type PathTestB struct {
	Other         []PathTestItem `xml:"Items>Item1"`
	Before, After string
}

// PathTestC exercises two sibling three-level paths collecting strings.
type PathTestC struct {
	Values1       []string `xml:"Items>Item1>Value"`
	Values2       []string `xml:"Items>Item2>Value"`
	Before, After string
}

// PathTestSet is a nested struct target used by PathTestD.
type PathTestSet struct {
	Item1 []PathTestItem
}

// PathTestD exercises mapping an element to a nested struct field.
type PathTestD struct {
	Other         PathTestSet `xml:"Items"`
	Before, After string
}

// PathTestE exercises a path segment that is the element name "_".
type PathTestE struct {
	Underline     string `xml:"Items>_>Value"`
	Before, After string
}
|
||||||
|
|
||||||
|
// pathTests pairs each PathTest* type with the value expected after
// unmarshalling pathTestString into it.
var pathTests = []interface{}{
	&PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
	&PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
	&PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
	&PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
	&PathTestE{Underline: "E", Before: "1", After: "2"},
}
|
||||||
|
|
||||||
|
// TestUnmarshalPaths unmarshals pathTestString into a fresh zero value
// of each pathTests entry's type and compares against the expected value.
func TestUnmarshalPaths(t *testing.T) {
	for _, pt := range pathTests {
		// Allocate a new zero value of the same concrete type as pt.
		v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
		if err := Unmarshal([]byte(pathTestString), v); err != nil {
			t.Fatalf("Unmarshal: %s", err)
		}
		if !reflect.DeepEqual(v, pt) {
			t.Fatalf("have %#v\nwant %#v", v, pt)
		}
	}
}
|
||||||
|
|
||||||
|
// BadPathTestA has a field path ("items") that is a prefix of another
// field's path ("items>item1"), which is invalid.
type BadPathTestA struct {
	First  string `xml:"items>item1"`
	Other  string `xml:"items>item2"`
	Second string `xml:"items"`
}

// BadPathTestB has one path ("items>item1") that prefixes another
// ("items>item1>value"), which is invalid.
type BadPathTestB struct {
	Other  string `xml:"items>item2>value"`
	First  string `xml:"items>item1"`
	Second string `xml:"items>item1>value"`
}

// BadPathTestC has two fields resolving to the same element name "First".
type BadPathTestC struct {
	First  string
	Second string `xml:"First"`
}

// BadPathTestD reproduces the BadPathTestC conflict via embedding.
type BadPathTestD struct {
	BadPathEmbeddedA
	BadPathEmbeddedB
}

// BadPathEmbeddedA contributes the implicit "First" element.
type BadPathEmbeddedA struct {
	First string
}

// BadPathEmbeddedB contributes an explicit "First" element, conflicting
// with BadPathEmbeddedA when both are embedded.
type BadPathEmbeddedB struct {
	Second string `xml:"First"`
}
|
||||||
|
|
||||||
|
// badPathTests pairs each invalid destination type with the exact
// *TagPathError that Unmarshal is expected to return for it.
var badPathTests = []struct {
	v, e interface{}
}{
	{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
	{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
	{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
	{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
}
|
||||||
|
|
||||||
|
func TestUnmarshalBadPaths(t *testing.T) {
|
||||||
|
for _, tt := range badPathTests {
|
||||||
|
err := Unmarshal([]byte(pathTestString), tt.v)
|
||||||
|
if !reflect.DeepEqual(err, tt.e) {
|
||||||
|
t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const OK = "OK"
|
||||||
|
const withoutNameTypeData = `
|
||||||
|
<?xml version="1.0" charset="utf-8"?>
|
||||||
|
<Test3 Attr="OK" />`
|
||||||
|
|
||||||
|
type TestThree struct {
|
||||||
|
XMLName Name `xml:"Test3"`
|
||||||
|
Attr string `xml:",attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalWithoutNameType(t *testing.T) {
|
||||||
|
var x TestThree
|
||||||
|
if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {
|
||||||
|
t.Fatalf("Unmarshal: %s", err)
|
||||||
|
}
|
||||||
|
if x.Attr != OK {
|
||||||
|
t.Fatalf("have %v\nwant %v", x.Attr, OK)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalAttr(t *testing.T) {
|
||||||
|
type ParamVal struct {
|
||||||
|
Int int `xml:"int,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ParamPtr struct {
|
||||||
|
Int *int `xml:"int,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ParamStringPtr struct {
|
||||||
|
Int *string `xml:"int,attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
x := []byte(`<Param int="1" />`)
|
||||||
|
|
||||||
|
p1 := &ParamPtr{}
|
||||||
|
if err := Unmarshal(x, p1); err != nil {
|
||||||
|
t.Fatalf("Unmarshal: %s", err)
|
||||||
|
}
|
||||||
|
if p1.Int == nil {
|
||||||
|
t.Fatalf("Unmarshal failed in to *int field")
|
||||||
|
} else if *p1.Int != 1 {
|
||||||
|
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
p2 := &ParamVal{}
|
||||||
|
if err := Unmarshal(x, p2); err != nil {
|
||||||
|
t.Fatalf("Unmarshal: %s", err)
|
||||||
|
}
|
||||||
|
if p2.Int != 1 {
|
||||||
|
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
p3 := &ParamStringPtr{}
|
||||||
|
if err := Unmarshal(x, p3); err != nil {
|
||||||
|
t.Fatalf("Unmarshal: %s", err)
|
||||||
|
}
|
||||||
|
if p3.Int == nil {
|
||||||
|
t.Fatalf("Unmarshal failed in to *string field")
|
||||||
|
} else if *p3.Int != "1" {
|
||||||
|
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Tables struct {
|
||||||
|
HTable string `xml:"http://www.w3.org/TR/html4/ table"`
|
||||||
|
FTable string `xml:"http://www.w3schools.com/furniture table"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var tables = []struct {
|
||||||
|
xml string
|
||||||
|
tab Tables
|
||||||
|
ns string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
xml: `<Tables>` +
|
||||||
|
`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
|
||||||
|
`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{"hello", "world"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<Tables>` +
|
||||||
|
`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
|
||||||
|
`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{"hello", "world"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
|
||||||
|
`<f:table>world</f:table>` +
|
||||||
|
`<h:table>hello</h:table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{"hello", "world"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<Tables>` +
|
||||||
|
`<table>bogus</table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<Tables>` +
|
||||||
|
`<table>only</table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{HTable: "only"},
|
||||||
|
ns: "http://www.w3.org/TR/html4/",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<Tables>` +
|
||||||
|
`<table>only</table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{FTable: "only"},
|
||||||
|
ns: "http://www.w3schools.com/furniture",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<Tables>` +
|
||||||
|
`<table>only</table>` +
|
||||||
|
`</Tables>`,
|
||||||
|
tab: Tables{},
|
||||||
|
ns: "something else entirely",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalNS(t *testing.T) {
|
||||||
|
for i, tt := range tables {
|
||||||
|
var dst Tables
|
||||||
|
var err error
|
||||||
|
if tt.ns != "" {
|
||||||
|
d := NewDecoder(strings.NewReader(tt.xml))
|
||||||
|
d.DefaultSpace = tt.ns
|
||||||
|
err = d.Decode(&dst)
|
||||||
|
} else {
|
||||||
|
err = Unmarshal([]byte(tt.xml), &dst)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("#%d: Unmarshal: %v", i, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
want := tt.tab
|
||||||
|
if dst != want {
|
||||||
|
t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRoundTrip(t *testing.T) {
|
||||||
|
// From issue 7535
|
||||||
|
const s = `<ex:element xmlns:ex="http://example.com/schema"></ex:element>`
|
||||||
|
in := bytes.NewBufferString(s)
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
out := &bytes.Buffer{}
|
||||||
|
d := NewDecoder(in)
|
||||||
|
e := NewEncoder(out)
|
||||||
|
|
||||||
|
for {
|
||||||
|
t, err := d.Token()
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("failed:", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e.EncodeToken(t)
|
||||||
|
}
|
||||||
|
e.Flush()
|
||||||
|
in = out
|
||||||
|
}
|
||||||
|
if got := in.String(); got != s {
|
||||||
|
t.Errorf("have: %q\nwant: %q\n", got, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalNS(t *testing.T) {
|
||||||
|
dst := Tables{"hello", "world"}
|
||||||
|
data, err := Marshal(&dst)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Marshal: %v", err)
|
||||||
|
}
|
||||||
|
want := `<Tables><table xmlns="http://www.w3.org/TR/html4/">hello</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>`
|
||||||
|
str := string(data)
|
||||||
|
if str != want {
|
||||||
|
t.Errorf("have: %q\nwant: %q\n", str, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type TableAttrs struct {
|
||||||
|
TAttr TAttr
|
||||||
|
}
|
||||||
|
|
||||||
|
type TAttr struct {
|
||||||
|
HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"`
|
||||||
|
FTable string `xml:"http://www.w3schools.com/furniture table,attr"`
|
||||||
|
Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"`
|
||||||
|
Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"`
|
||||||
|
Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"`
|
||||||
|
Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"`
|
||||||
|
Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var tableAttrs = []struct {
|
||||||
|
xml string
|
||||||
|
tab TableAttrs
|
||||||
|
ns string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
|
||||||
|
`h:table="hello" f:table="world" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<TableAttrs><TAttr xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
|
||||||
|
`h:table="hello" f:table="world" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<TableAttrs><TAttr ` +
|
||||||
|
`h:table="hello" f:table="world" xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Default space does not apply to attribute names.
|
||||||
|
xml: `<TableAttrs xmlns="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
|
||||||
|
`h:table="hello" table="world" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Default space does not apply to attribute names.
|
||||||
|
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr xmlns="http://www.w3.org/TR/html4/" ` +
|
||||||
|
`table="hello" f:table="world" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<TableAttrs><TAttr ` +
|
||||||
|
`table="bogus" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Default space does not apply to attribute names.
|
||||||
|
xml: `<TableAttrs xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
|
||||||
|
`h:table="hello" table="world" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
|
||||||
|
ns: "http://www.w3schools.com/furniture",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Default space does not apply to attribute names.
|
||||||
|
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr ` +
|
||||||
|
`table="hello" f:table="world" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
|
||||||
|
ns: "http://www.w3.org/TR/html4/",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
xml: `<TableAttrs><TAttr ` +
|
||||||
|
`table="bogus" ` +
|
||||||
|
`/></TableAttrs>`,
|
||||||
|
tab: TableAttrs{},
|
||||||
|
ns: "something else entirely",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshalNSAttr(t *testing.T) {
|
||||||
|
for i, tt := range tableAttrs {
|
||||||
|
var dst TableAttrs
|
||||||
|
var err error
|
||||||
|
if tt.ns != "" {
|
||||||
|
d := NewDecoder(strings.NewReader(tt.xml))
|
||||||
|
d.DefaultSpace = tt.ns
|
||||||
|
err = d.Decode(&dst)
|
||||||
|
} else {
|
||||||
|
err = Unmarshal([]byte(tt.xml), &dst)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("#%d: Unmarshal: %v", i, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
want := tt.tab
|
||||||
|
if dst != want {
|
||||||
|
t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMarshalNSAttr(t *testing.T) {
|
||||||
|
src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}}
|
||||||
|
data, err := Marshal(&src)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Marshal: %v", err)
|
||||||
|
}
|
||||||
|
want := `<TableAttrs><TAttr xmlns:json_1="http://golang.org/2/json/" xmlns:json="http://golang.org/json/" xmlns:_xmlfoo="http://golang.org/xmlfoo/" xmlns:_xml="http://golang.org/xml/" xmlns:furniture="http://www.w3schools.com/furniture" xmlns:html4="http://www.w3.org/TR/html4/" html4:table="hello" furniture:table="world" xml:lang="en_US" _xml:other="other1" _xmlfoo:other="other2" json:other="other3" json_1:other="other4"></TAttr></TableAttrs>`
|
||||||
|
str := string(data)
|
||||||
|
if str != want {
|
||||||
|
t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
var dst TableAttrs
|
||||||
|
if err := Unmarshal(data, &dst); err != nil {
|
||||||
|
t.Errorf("Unmarshal: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if dst != src {
|
||||||
|
t.Errorf("Unmarshal = %q, want %q", dst, src)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type MyCharData struct {
|
||||||
|
body string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {
|
||||||
|
for {
|
||||||
|
t, err := d.Token()
|
||||||
|
if err == io.EOF { // found end of element
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if char, ok := t.(CharData); ok {
|
||||||
|
m.body += string(char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Unmarshaler = (*MyCharData)(nil)
|
||||||
|
|
||||||
|
func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {
|
||||||
|
panic("must not call")
|
||||||
|
}
|
||||||
|
|
||||||
|
type MyAttr struct {
|
||||||
|
attr string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {
|
||||||
|
m.attr = attr.Value
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ UnmarshalerAttr = (*MyAttr)(nil)
|
||||||
|
|
||||||
|
type MyStruct struct {
|
||||||
|
Data *MyCharData
|
||||||
|
Attr *MyAttr `xml:",attr"`
|
||||||
|
|
||||||
|
Data2 MyCharData
|
||||||
|
Attr2 MyAttr `xml:",attr"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnmarshaler(t *testing.T) {
|
||||||
|
xml := `<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<MyStruct Attr="attr1" Attr2="attr2">
|
||||||
|
<Data>hello <!-- comment -->world</Data>
|
||||||
|
<Data2>howdy <!-- comment -->world</Data2>
|
||||||
|
</MyStruct>
|
||||||
|
`
|
||||||
|
|
||||||
|
var m MyStruct
|
||||||
|
if err := Unmarshal([]byte(xml), &m); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" {
|
||||||
|
t.Errorf("m=%#+v\n", m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Pea struct {
|
||||||
|
Cotelydon string
|
||||||
|
}
|
||||||
|
|
||||||
|
type Pod struct {
|
||||||
|
Pea interface{} `xml:"Pea"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://golang.org/issue/6836
|
||||||
|
func TestUnmarshalIntoInterface(t *testing.T) {
|
||||||
|
pod := new(Pod)
|
||||||
|
pod.Pea = new(Pea)
|
||||||
|
xml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`
|
||||||
|
err := Unmarshal([]byte(xml), pod)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to unmarshal %q: %v", xml, err)
|
||||||
|
}
|
||||||
|
pea, ok := pod.Pea.(*Pea)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea)
|
||||||
|
}
|
||||||
|
have, want := pea.Cotelydon, "Green stuff"
|
||||||
|
if have != want {
|
||||||
|
t.Errorf("failed to unmarshal into interface, have %q want %q", have, want)
|
||||||
|
}
|
||||||
|
}
|
371
drives/davServer/internal/xml/typeinfo.go
Normal file
371
drives/davServer/internal/xml/typeinfo.go
Normal file
@ -0,0 +1,371 @@
|
|||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// typeInfo holds details for the xml representation of a type.
|
||||||
|
type typeInfo struct {
|
||||||
|
xmlname *fieldInfo
|
||||||
|
fields []fieldInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldInfo holds details for the xml representation of a single field.
|
||||||
|
type fieldInfo struct {
|
||||||
|
idx []int
|
||||||
|
name string
|
||||||
|
xmlns string
|
||||||
|
flags fieldFlags
|
||||||
|
parents []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type fieldFlags int

const (
	// Mutually exclusive mode bits: exactly one is set per field.
	fElement fieldFlags = 1 << iota
	fAttr
	fCharData
	fInnerXml
	fComment
	fAny

	// Modifier bit, combinable with fElement or fAttr.
	fOmitEmpty

	// fMode masks out the mode bits for validation.
	fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
|
||||||
|
|
||||||
|
var tinfoMap = make(map[reflect.Type]*typeInfo)
|
||||||
|
var tinfoLock sync.RWMutex
|
||||||
|
|
||||||
|
var nameType = reflect.TypeOf(Name{})
|
||||||
|
|
||||||
|
// getTypeInfo returns the typeInfo structure with details necessary
|
||||||
|
// for marshalling and unmarshalling typ.
|
||||||
|
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
|
||||||
|
tinfoLock.RLock()
|
||||||
|
tinfo, ok := tinfoMap[typ]
|
||||||
|
tinfoLock.RUnlock()
|
||||||
|
if ok {
|
||||||
|
return tinfo, nil
|
||||||
|
}
|
||||||
|
tinfo = &typeInfo{}
|
||||||
|
if typ.Kind() == reflect.Struct && typ != nameType {
|
||||||
|
n := typ.NumField()
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
f := typ.Field(i)
|
||||||
|
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
|
||||||
|
continue // Private field
|
||||||
|
}
|
||||||
|
|
||||||
|
// For embedded structs, embed its fields.
|
||||||
|
if f.Anonymous {
|
||||||
|
t := f.Type
|
||||||
|
if t.Kind() == reflect.Ptr {
|
||||||
|
t = t.Elem()
|
||||||
|
}
|
||||||
|
if t.Kind() == reflect.Struct {
|
||||||
|
inner, err := getTypeInfo(t)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if tinfo.xmlname == nil {
|
||||||
|
tinfo.xmlname = inner.xmlname
|
||||||
|
}
|
||||||
|
for _, finfo := range inner.fields {
|
||||||
|
finfo.idx = append([]int{i}, finfo.idx...)
|
||||||
|
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
finfo, err := structFieldInfo(typ, &f)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.Name == "XMLName" {
|
||||||
|
tinfo.xmlname = finfo
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the field if it doesn't conflict with other fields.
|
||||||
|
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tinfoLock.Lock()
|
||||||
|
tinfoMap[typ] = tinfo
|
||||||
|
tinfoLock.Unlock()
|
||||||
|
return tinfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// structFieldInfo builds and returns a fieldInfo for f.
|
||||||
|
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
|
||||||
|
finfo := &fieldInfo{idx: f.Index}
|
||||||
|
|
||||||
|
// Split the tag from the xml namespace if necessary.
|
||||||
|
tag := f.Tag.Get("xml")
|
||||||
|
if i := strings.Index(tag, " "); i >= 0 {
|
||||||
|
finfo.xmlns, tag = tag[:i], tag[i+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse flags.
|
||||||
|
tokens := strings.Split(tag, ",")
|
||||||
|
if len(tokens) == 1 {
|
||||||
|
finfo.flags = fElement
|
||||||
|
} else {
|
||||||
|
tag = tokens[0]
|
||||||
|
for _, flag := range tokens[1:] {
|
||||||
|
switch flag {
|
||||||
|
case "attr":
|
||||||
|
finfo.flags |= fAttr
|
||||||
|
case "chardata":
|
||||||
|
finfo.flags |= fCharData
|
||||||
|
case "innerxml":
|
||||||
|
finfo.flags |= fInnerXml
|
||||||
|
case "comment":
|
||||||
|
finfo.flags |= fComment
|
||||||
|
case "any":
|
||||||
|
finfo.flags |= fAny
|
||||||
|
case "omitempty":
|
||||||
|
finfo.flags |= fOmitEmpty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate the flags used.
|
||||||
|
valid := true
|
||||||
|
switch mode := finfo.flags & fMode; mode {
|
||||||
|
case 0:
|
||||||
|
finfo.flags |= fElement
|
||||||
|
case fAttr, fCharData, fInnerXml, fComment, fAny:
|
||||||
|
if f.Name == "XMLName" || tag != "" && mode != fAttr {
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// This will also catch multiple modes in a single field.
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
if finfo.flags&fMode == fAny {
|
||||||
|
finfo.flags |= fElement
|
||||||
|
}
|
||||||
|
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
|
||||||
|
valid = false
|
||||||
|
}
|
||||||
|
if !valid {
|
||||||
|
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
|
||||||
|
f.Name, typ, f.Tag.Get("xml"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use of xmlns without a name is not allowed.
|
||||||
|
if finfo.xmlns != "" && tag == "" {
|
||||||
|
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
|
||||||
|
f.Name, typ, f.Tag.Get("xml"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.Name == "XMLName" {
|
||||||
|
// The XMLName field records the XML element name. Don't
|
||||||
|
// process it as usual because its name should default to
|
||||||
|
// empty rather than to the field name.
|
||||||
|
finfo.name = tag
|
||||||
|
return finfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if tag == "" {
|
||||||
|
// If the name part of the tag is completely empty, get
|
||||||
|
// default from XMLName of underlying struct if feasible,
|
||||||
|
// or field name otherwise.
|
||||||
|
if xmlname := lookupXMLName(f.Type); xmlname != nil {
|
||||||
|
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
|
||||||
|
} else {
|
||||||
|
finfo.name = f.Name
|
||||||
|
}
|
||||||
|
return finfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
|
||||||
|
// If it's an element no namespace specified, get the default
|
||||||
|
// from the XMLName of enclosing struct if possible.
|
||||||
|
if xmlname := lookupXMLName(typ); xmlname != nil {
|
||||||
|
finfo.xmlns = xmlname.xmlns
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare field name and parents.
|
||||||
|
parents := strings.Split(tag, ">")
|
||||||
|
if parents[0] == "" {
|
||||||
|
parents[0] = f.Name
|
||||||
|
}
|
||||||
|
if parents[len(parents)-1] == "" {
|
||||||
|
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
|
||||||
|
}
|
||||||
|
finfo.name = parents[len(parents)-1]
|
||||||
|
if len(parents) > 1 {
|
||||||
|
if (finfo.flags & fElement) == 0 {
|
||||||
|
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
|
||||||
|
}
|
||||||
|
finfo.parents = parents[:len(parents)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the field type has an XMLName field, the names must match
|
||||||
|
// so that the behavior of both marshalling and unmarshalling
|
||||||
|
// is straightforward and unambiguous.
|
||||||
|
if finfo.flags&fElement != 0 {
|
||||||
|
ftyp := f.Type
|
||||||
|
xmlname := lookupXMLName(ftyp)
|
||||||
|
if xmlname != nil && xmlname.name != finfo.name {
|
||||||
|
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
|
||||||
|
finfo.name, typ, f.Name, xmlname.name, ftyp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return finfo, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookupXMLName returns the fieldInfo for typ's XMLName field
|
||||||
|
// in case it exists and has a valid xml field tag, otherwise
|
||||||
|
// it returns nil.
|
||||||
|
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
|
||||||
|
for typ.Kind() == reflect.Ptr {
|
||||||
|
typ = typ.Elem()
|
||||||
|
}
|
||||||
|
if typ.Kind() != reflect.Struct {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for i, n := 0, typ.NumField(); i < n; i++ {
|
||||||
|
f := typ.Field(i)
|
||||||
|
if f.Name != "XMLName" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
finfo, err := structFieldInfo(typ, &f)
|
||||||
|
if finfo.name != "" && err == nil {
|
||||||
|
return finfo
|
||||||
|
}
|
||||||
|
// Also consider errors as a non-existent field tag
|
||||||
|
// and let getTypeInfo itself report the error.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||||
|
|
||||||
|
// addFieldInfo adds finfo to tinfo.fields if there are no
|
||||||
|
// conflicts, or if conflicts arise from previous fields that were
|
||||||
|
// obtained from deeper embedded structures than finfo. In the latter
|
||||||
|
// case, the conflicting entries are dropped.
|
||||||
|
// A conflict occurs when the path (parent + name) to a field is
|
||||||
|
// itself a prefix of another path, or when two paths match exactly.
|
||||||
|
// It is okay for field paths to share a common, shorter prefix.
|
||||||
|
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
|
||||||
|
var conflicts []int
|
||||||
|
Loop:
|
||||||
|
// First, figure all conflicts. Most working code will have none.
|
||||||
|
for i := range tinfo.fields {
|
||||||
|
oldf := &tinfo.fields[i]
|
||||||
|
if oldf.flags&fMode != newf.flags&fMode {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
minl := min(len(newf.parents), len(oldf.parents))
|
||||||
|
for p := 0; p < minl; p++ {
|
||||||
|
if oldf.parents[p] != newf.parents[p] {
|
||||||
|
continue Loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(oldf.parents) > len(newf.parents) {
|
||||||
|
if oldf.parents[len(newf.parents)] == newf.name {
|
||||||
|
conflicts = append(conflicts, i)
|
||||||
|
}
|
||||||
|
} else if len(oldf.parents) < len(newf.parents) {
|
||||||
|
if newf.parents[len(oldf.parents)] == oldf.name {
|
||||||
|
conflicts = append(conflicts, i)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if newf.name == oldf.name {
|
||||||
|
conflicts = append(conflicts, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Without conflicts, add the new field and return.
|
||||||
|
if conflicts == nil {
|
||||||
|
tinfo.fields = append(tinfo.fields, *newf)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If any conflict is shallower, ignore the new field.
|
||||||
|
// This matches the Go field resolution on embedding.
|
||||||
|
for _, i := range conflicts {
|
||||||
|
if len(tinfo.fields[i].idx) < len(newf.idx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, if any of them is at the same depth level, it's an error.
|
||||||
|
for _, i := range conflicts {
|
||||||
|
oldf := &tinfo.fields[i]
|
||||||
|
if len(oldf.idx) == len(newf.idx) {
|
||||||
|
f1 := typ.FieldByIndex(oldf.idx)
|
||||||
|
f2 := typ.FieldByIndex(newf.idx)
|
||||||
|
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise, the new field is shallower, and thus takes precedence,
|
||||||
|
// so drop the conflicting fields from tinfo and append the new one.
|
||||||
|
for c := len(conflicts) - 1; c >= 0; c-- {
|
||||||
|
i := conflicts[c]
|
||||||
|
copy(tinfo.fields[i:], tinfo.fields[i+1:])
|
||||||
|
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
|
||||||
|
}
|
||||||
|
tinfo.fields = append(tinfo.fields, *newf)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
	Struct       reflect.Type // struct type whose tags conflict
	Field1, Tag1 string       // first conflicting field and its tag
	Field2, Tag2 string       // second conflicting field and its tag
}
|
||||||
|
|
||||||
|
func (e *TagPathError) Error() string {
|
||||||
|
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// value returns v's field value corresponding to finfo.
|
||||||
|
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
|
||||||
|
// and dereferences pointers as necessary.
|
||||||
|
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
|
||||||
|
for i, x := range finfo.idx {
|
||||||
|
if i > 0 {
|
||||||
|
t := v.Type()
|
||||||
|
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
|
||||||
|
if v.IsNil() {
|
||||||
|
v.Set(reflect.New(v.Type().Elem()))
|
||||||
|
}
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v = v.Field(x)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
1998
drives/davServer/internal/xml/xml.go
Normal file
1998
drives/davServer/internal/xml/xml.go
Normal file
File diff suppressed because it is too large
Load Diff
752
drives/davServer/internal/xml/xml_test.go
Normal file
752
drives/davServer/internal/xml/xml_test.go
Normal file
@ -0,0 +1,752 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package xml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
const testInput = `
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
||||||
|
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
|
||||||
|
<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
|
||||||
|
"\r\n\t" + ` >
|
||||||
|
<hello lang="en">World <>'" 白鵬翔</hello>
|
||||||
|
<query>&何; &is-it;</query>
|
||||||
|
<goodbye />
|
||||||
|
<outer foo:attr="value" xmlns:tag="ns4">
|
||||||
|
<inner/>
|
||||||
|
</outer>
|
||||||
|
<tag:name>
|
||||||
|
<![CDATA[Some text here.]]>
|
||||||
|
</tag:name>
|
||||||
|
</body><!-- missing final newline -->`
|
||||||
|
|
||||||
|
var testEntity = map[string]string{"何": "What", "is-it": "is it?"}
|
||||||
|
|
||||||
|
var rawTokens = []Token{
|
||||||
|
CharData("\n"),
|
||||||
|
ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
||||||
|
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
|
||||||
|
CharData("World <>'\" 白鵬翔"),
|
||||||
|
EndElement{Name{"", "hello"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"", "query"}, []Attr{}},
|
||||||
|
CharData("What is it?"),
|
||||||
|
EndElement{Name{"", "query"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"", "goodbye"}, []Attr{}},
|
||||||
|
EndElement{Name{"", "goodbye"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"", "inner"}, []Attr{}},
|
||||||
|
EndElement{Name{"", "inner"}},
|
||||||
|
CharData("\n "),
|
||||||
|
EndElement{Name{"", "outer"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"tag", "name"}, []Attr{}},
|
||||||
|
CharData("\n "),
|
||||||
|
CharData("Some text here."),
|
||||||
|
CharData("\n "),
|
||||||
|
EndElement{Name{"tag", "name"}},
|
||||||
|
CharData("\n"),
|
||||||
|
EndElement{Name{"", "body"}},
|
||||||
|
Comment(" missing final newline "),
|
||||||
|
}
|
||||||
|
|
||||||
|
var cookedTokens = []Token{
|
||||||
|
CharData("\n"),
|
||||||
|
ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
|
||||||
|
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
|
||||||
|
CharData("World <>'\" 白鵬翔"),
|
||||||
|
EndElement{Name{"ns2", "hello"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"ns2", "query"}, []Attr{}},
|
||||||
|
CharData("What is it?"),
|
||||||
|
EndElement{Name{"ns2", "query"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"ns2", "goodbye"}, []Attr{}},
|
||||||
|
EndElement{Name{"ns2", "goodbye"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"ns2", "inner"}, []Attr{}},
|
||||||
|
EndElement{Name{"ns2", "inner"}},
|
||||||
|
CharData("\n "),
|
||||||
|
EndElement{Name{"ns2", "outer"}},
|
||||||
|
CharData("\n "),
|
||||||
|
StartElement{Name{"ns3", "name"}, []Attr{}},
|
||||||
|
CharData("\n "),
|
||||||
|
CharData("Some text here."),
|
||||||
|
CharData("\n "),
|
||||||
|
EndElement{Name{"ns3", "name"}},
|
||||||
|
CharData("\n"),
|
||||||
|
EndElement{Name{"ns2", "body"}},
|
||||||
|
Comment(" missing final newline "),
|
||||||
|
}
|
||||||
|
|
||||||
|
const testInputAltEncoding = `
|
||||||
|
<?xml version="1.0" encoding="x-testing-uppercase"?>
|
||||||
|
<TAG>VALUE</TAG>`
|
||||||
|
|
||||||
|
var rawTokensAltEncoding = []Token{
|
||||||
|
CharData("\n"),
|
||||||
|
ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("value"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
}
|
||||||
|
|
||||||
|
var xmlInput = []string{
|
||||||
|
// unexpected EOF cases
|
||||||
|
"<",
|
||||||
|
"<t",
|
||||||
|
"<t ",
|
||||||
|
"<t/",
|
||||||
|
"<!",
|
||||||
|
"<!-",
|
||||||
|
"<!--",
|
||||||
|
"<!--c-",
|
||||||
|
"<!--c--",
|
||||||
|
"<!d",
|
||||||
|
"<t></",
|
||||||
|
"<t></t",
|
||||||
|
"<?",
|
||||||
|
"<?p",
|
||||||
|
"<t a",
|
||||||
|
"<t a=",
|
||||||
|
"<t a='",
|
||||||
|
"<t a=''",
|
||||||
|
"<t/><![",
|
||||||
|
"<t/><![C",
|
||||||
|
"<t/><![CDATA[d",
|
||||||
|
"<t/><![CDATA[d]",
|
||||||
|
"<t/><![CDATA[d]]",
|
||||||
|
|
||||||
|
// other Syntax errors
|
||||||
|
"<>",
|
||||||
|
"<t/a",
|
||||||
|
"<0 />",
|
||||||
|
"<?0 >",
|
||||||
|
// "<!0 >", // let the Token() caller handle
|
||||||
|
"</0>",
|
||||||
|
"<t 0=''>",
|
||||||
|
"<t a='&'>",
|
||||||
|
"<t a='<'>",
|
||||||
|
"<t> c;</t>",
|
||||||
|
"<t a>",
|
||||||
|
"<t a=>",
|
||||||
|
"<t a=v>",
|
||||||
|
// "<![CDATA[d]]>", // let the Token() caller handle
|
||||||
|
"<t></e>",
|
||||||
|
"<t></>",
|
||||||
|
"<t></t!",
|
||||||
|
"<t>cdata]]></t>",
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRawToken(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(testInput))
|
||||||
|
d.Entity = testEntity
|
||||||
|
testRawToken(t, d, testInput, rawTokens)
|
||||||
|
}
|
||||||
|
|
||||||
|
const nonStrictInput = `
|
||||||
|
<tag>non&entity</tag>
|
||||||
|
<tag>&unknown;entity</tag>
|
||||||
|
<tag>{</tag>
|
||||||
|
<tag>&#zzz;</tag>
|
||||||
|
<tag>&なまえ3;</tag>
|
||||||
|
<tag><-gt;</tag>
|
||||||
|
<tag>&;</tag>
|
||||||
|
<tag>&0a;</tag>
|
||||||
|
`
|
||||||
|
|
||||||
|
var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"}
|
||||||
|
|
||||||
|
var nonStrictTokens = []Token{
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("non&entity"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("&unknown;entity"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("{"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("&#zzz;"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("&なまえ3;"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("<-gt;"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("&;"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
StartElement{Name{"", "tag"}, []Attr{}},
|
||||||
|
CharData("&0a;"),
|
||||||
|
EndElement{Name{"", "tag"}},
|
||||||
|
CharData("\n"),
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNonStrictRawToken(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(nonStrictInput))
|
||||||
|
d.Strict = false
|
||||||
|
testRawToken(t, d, nonStrictInput, nonStrictTokens)
|
||||||
|
}
|
||||||
|
|
||||||
|
// downCaser is an io.ByteReader wrapper that folds ASCII upper-case
// bytes to lower case, standing in for a charset converter in the
// alternate-encoding tests.
type downCaser struct {
	t *testing.T
	r io.ByteReader
}

// ReadByte returns the next byte from the wrapped reader with 'A'-'Z'
// lowered to 'a'-'z'; errors from the underlying reader pass through.
func (d *downCaser) ReadByte() (c byte, err error) {
	c, err = d.r.ReadByte()
	if c >= 'A' && c <= 'Z' {
		c += 'a' - 'A'
	}
	return
}

// Read is deliberately unimplemented: the decoder is expected to use
// ReadByte when given an io.ByteReader, so any Read call fails the test.
func (d *downCaser) Read(p []byte) (int, error) {
	d.t.Fatalf("unexpected Read call on downCaser reader")
	panic("unreachable")
}
|
||||||
|
|
||||||
|
func TestRawTokenAltEncoding(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(testInputAltEncoding))
|
||||||
|
d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
|
||||||
|
if charset != "x-testing-uppercase" {
|
||||||
|
t.Fatalf("unexpected charset %q", charset)
|
||||||
|
}
|
||||||
|
return &downCaser{t, input.(io.ByteReader)}, nil
|
||||||
|
}
|
||||||
|
testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRawTokenAltEncodingNoConverter(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(testInputAltEncoding))
|
||||||
|
token, err := d.RawToken()
|
||||||
|
if token == nil {
|
||||||
|
t.Fatalf("expected a token on first RawToken call")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
token, err = d.RawToken()
|
||||||
|
if token != nil {
|
||||||
|
t.Errorf("expected a nil token; got %#v", token)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected an error on second RawToken call")
|
||||||
|
}
|
||||||
|
const encoding = "x-testing-uppercase"
|
||||||
|
if !strings.Contains(err.Error(), encoding) {
|
||||||
|
t.Errorf("expected error to contain %q; got error: %v",
|
||||||
|
encoding, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testRawToken drives d.RawToken() across raw and checks that each token
// matches rawTokens in order, and that InputOffset brackets every
// token's byte range consistently (monotonic, non-empty except for
// synthesized EndElements, within bounds, and aligned to markup).
func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) {
	lastEnd := int64(0)
	for i, want := range rawTokens {
		start := d.InputOffset() // offset before reading this token
		have, err := d.RawToken()
		end := d.InputOffset() // offset after reading this token
		if err != nil {
			t.Fatalf("token %d: unexpected error: %s", i, err)
		}
		if !reflect.DeepEqual(have, want) {
			// Render CharData as a quoted string so diffs stay readable.
			var shave, swant string
			if _, ok := have.(CharData); ok {
				shave = fmt.Sprintf("CharData(%q)", have)
			} else {
				shave = fmt.Sprintf("%#v", have)
			}
			if _, ok := want.(CharData); ok {
				swant = fmt.Sprintf("CharData(%q)", want)
			} else {
				swant = fmt.Sprintf("%#v", want)
			}
			t.Errorf("token %d = %s, want %s", i, shave, swant)
		}

		// Check that InputOffset returned actual token.
		switch {
		case start < lastEnd:
			t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have)
		case start >= end:
			// Special case: EndElement can be synthesized.
			if start == end && end == lastEnd {
				break
			}
			t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have)
		case end > int64(len(raw)):
			t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have)
		default:
			// A range containing markup characters must start with '<'
			// and end with '>' to be considered aligned.
			text := raw[start:end]
			if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) {
				t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have)
			}
		}
		lastEnd = end
	}
}
|
||||||
|
|
||||||
|
// Ensure that directives (specifically !DOCTYPE) include the complete
|
||||||
|
// text of any nested directives, noting that < and > do not change
|
||||||
|
// nesting depth if they are in single or double quotes.
|
||||||
|
|
||||||
|
var nestedDirectivesInput = `
|
||||||
|
<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
|
||||||
|
<!DOCTYPE [<!ENTITY xlt ">">]>
|
||||||
|
<!DOCTYPE [<!ENTITY xlt "<">]>
|
||||||
|
<!DOCTYPE [<!ENTITY xlt '>'>]>
|
||||||
|
<!DOCTYPE [<!ENTITY xlt '<'>]>
|
||||||
|
<!DOCTYPE [<!ENTITY xlt '">'>]>
|
||||||
|
<!DOCTYPE [<!ENTITY xlt "'<">]>
|
||||||
|
`
|
||||||
|
|
||||||
|
var nestedDirectivesTokens = []Token{
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY xlt ">">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY xlt "<">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY xlt '>'>]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY xlt '<'>]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY xlt '">'>]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY xlt "'<">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNestedDirectives(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(nestedDirectivesInput))
|
||||||
|
|
||||||
|
for i, want := range nestedDirectivesTokens {
|
||||||
|
have, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("token %d: unexpected error: %s", i, err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(have, want) {
|
||||||
|
t.Errorf("token %d = %#v want %#v", i, have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestToken(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(testInput))
|
||||||
|
d.Entity = testEntity
|
||||||
|
|
||||||
|
for i, want := range cookedTokens {
|
||||||
|
have, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("token %d: unexpected error: %s", i, err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(have, want) {
|
||||||
|
t.Errorf("token %d = %#v want %#v", i, have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSyntax(t *testing.T) {
|
||||||
|
for i := range xmlInput {
|
||||||
|
d := NewDecoder(strings.NewReader(xmlInput[i]))
|
||||||
|
var err error
|
||||||
|
for _, err = d.Token(); err == nil; _, err = d.Token() {
|
||||||
|
}
|
||||||
|
if _, ok := err.(*SyntaxError); !ok {
|
||||||
|
t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type allScalars struct {
|
||||||
|
True1 bool
|
||||||
|
True2 bool
|
||||||
|
False1 bool
|
||||||
|
False2 bool
|
||||||
|
Int int
|
||||||
|
Int8 int8
|
||||||
|
Int16 int16
|
||||||
|
Int32 int32
|
||||||
|
Int64 int64
|
||||||
|
Uint int
|
||||||
|
Uint8 uint8
|
||||||
|
Uint16 uint16
|
||||||
|
Uint32 uint32
|
||||||
|
Uint64 uint64
|
||||||
|
Uintptr uintptr
|
||||||
|
Float32 float32
|
||||||
|
Float64 float64
|
||||||
|
String string
|
||||||
|
PtrString *string
|
||||||
|
}
|
||||||
|
|
||||||
|
var all = allScalars{
|
||||||
|
True1: true,
|
||||||
|
True2: true,
|
||||||
|
False1: false,
|
||||||
|
False2: false,
|
||||||
|
Int: 1,
|
||||||
|
Int8: -2,
|
||||||
|
Int16: 3,
|
||||||
|
Int32: -4,
|
||||||
|
Int64: 5,
|
||||||
|
Uint: 6,
|
||||||
|
Uint8: 7,
|
||||||
|
Uint16: 8,
|
||||||
|
Uint32: 9,
|
||||||
|
Uint64: 10,
|
||||||
|
Uintptr: 11,
|
||||||
|
Float32: 13.0,
|
||||||
|
Float64: 14.0,
|
||||||
|
String: "15",
|
||||||
|
PtrString: &sixteen,
|
||||||
|
}
|
||||||
|
|
||||||
|
var sixteen = "16"
|
||||||
|
|
||||||
|
const testScalarsInput = `<allscalars>
|
||||||
|
<True1>true</True1>
|
||||||
|
<True2>1</True2>
|
||||||
|
<False1>false</False1>
|
||||||
|
<False2>0</False2>
|
||||||
|
<Int>1</Int>
|
||||||
|
<Int8>-2</Int8>
|
||||||
|
<Int16>3</Int16>
|
||||||
|
<Int32>-4</Int32>
|
||||||
|
<Int64>5</Int64>
|
||||||
|
<Uint>6</Uint>
|
||||||
|
<Uint8>7</Uint8>
|
||||||
|
<Uint16>8</Uint16>
|
||||||
|
<Uint32>9</Uint32>
|
||||||
|
<Uint64>10</Uint64>
|
||||||
|
<Uintptr>11</Uintptr>
|
||||||
|
<Float>12.0</Float>
|
||||||
|
<Float32>13.0</Float32>
|
||||||
|
<Float64>14.0</Float64>
|
||||||
|
<String>15</String>
|
||||||
|
<PtrString>16</PtrString>
|
||||||
|
</allscalars>`
|
||||||
|
|
||||||
|
func TestAllScalars(t *testing.T) {
|
||||||
|
var a allScalars
|
||||||
|
err := Unmarshal([]byte(testScalarsInput), &a)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(a, all) {
|
||||||
|
t.Errorf("have %+v want %+v", a, all)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// item is a minimal struct for the issue 569 regression test: field
// names containing underscores must unmarshal correctly.
type item struct {
	Field_a string
}
|
||||||
|
|
||||||
|
func TestIssue569(t *testing.T) {
|
||||||
|
data := `<item><Field_a>abcd</Field_a></item>`
|
||||||
|
var i item
|
||||||
|
err := Unmarshal([]byte(data), &i)
|
||||||
|
|
||||||
|
if err != nil || i.Field_a != "abcd" {
|
||||||
|
t.Fatal("Expecting abcd")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnquotedAttrs(t *testing.T) {
|
||||||
|
data := "<tag attr=azAZ09:-_\t>"
|
||||||
|
d := NewDecoder(strings.NewReader(data))
|
||||||
|
d.Strict = false
|
||||||
|
token, err := d.Token()
|
||||||
|
if _, ok := err.(*SyntaxError); ok {
|
||||||
|
t.Errorf("Unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if token.(StartElement).Name.Local != "tag" {
|
||||||
|
t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
|
||||||
|
}
|
||||||
|
attr := token.(StartElement).Attr[0]
|
||||||
|
if attr.Value != "azAZ09:-_" {
|
||||||
|
t.Errorf("Unexpected attribute value: %v", attr.Value)
|
||||||
|
}
|
||||||
|
if attr.Name.Local != "attr" {
|
||||||
|
t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestValuelessAttrs(t *testing.T) {
|
||||||
|
tests := [][3]string{
|
||||||
|
{"<p nowrap>", "p", "nowrap"},
|
||||||
|
{"<p nowrap >", "p", "nowrap"},
|
||||||
|
{"<input checked/>", "input", "checked"},
|
||||||
|
{"<input checked />", "input", "checked"},
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
d := NewDecoder(strings.NewReader(test[0]))
|
||||||
|
d.Strict = false
|
||||||
|
token, err := d.Token()
|
||||||
|
if _, ok := err.(*SyntaxError); ok {
|
||||||
|
t.Errorf("Unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if token.(StartElement).Name.Local != test[1] {
|
||||||
|
t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
|
||||||
|
}
|
||||||
|
attr := token.(StartElement).Attr[0]
|
||||||
|
if attr.Value != test[2] {
|
||||||
|
t.Errorf("Unexpected attribute value: %v", attr.Value)
|
||||||
|
}
|
||||||
|
if attr.Name.Local != test[2] {
|
||||||
|
t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopyTokenCharData(t *testing.T) {
|
||||||
|
data := []byte("same data")
|
||||||
|
var tok1 Token = CharData(data)
|
||||||
|
tok2 := CopyToken(tok1)
|
||||||
|
if !reflect.DeepEqual(tok1, tok2) {
|
||||||
|
t.Error("CopyToken(CharData) != CharData")
|
||||||
|
}
|
||||||
|
data[1] = 'o'
|
||||||
|
if reflect.DeepEqual(tok1, tok2) {
|
||||||
|
t.Error("CopyToken(CharData) uses same buffer.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCopyTokenStartElement(t *testing.T) {
|
||||||
|
elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
|
||||||
|
var tok1 Token = elt
|
||||||
|
tok2 := CopyToken(tok1)
|
||||||
|
if tok1.(StartElement).Attr[0].Value != "en" {
|
||||||
|
t.Error("CopyToken overwrote Attr[0]")
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(tok1, tok2) {
|
||||||
|
t.Error("CopyToken(StartElement) != StartElement")
|
||||||
|
}
|
||||||
|
tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
|
||||||
|
if reflect.DeepEqual(tok1, tok2) {
|
||||||
|
t.Error("CopyToken(CharData) uses same buffer.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSyntaxErrorLineNum(t *testing.T) {
|
||||||
|
testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
|
||||||
|
d := NewDecoder(strings.NewReader(testInput))
|
||||||
|
var err error
|
||||||
|
for _, err = d.Token(); err == nil; _, err = d.Token() {
|
||||||
|
}
|
||||||
|
synerr, ok := err.(*SyntaxError)
|
||||||
|
if !ok {
|
||||||
|
t.Error("Expected SyntaxError.")
|
||||||
|
}
|
||||||
|
if synerr.Line != 3 {
|
||||||
|
t.Error("SyntaxError didn't have correct line number.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTrailingRawToken(t *testing.T) {
|
||||||
|
input := `<FOO></FOO> `
|
||||||
|
d := NewDecoder(strings.NewReader(input))
|
||||||
|
var err error
|
||||||
|
for _, err = d.RawToken(); err == nil; _, err = d.RawToken() {
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTrailingToken(t *testing.T) {
|
||||||
|
input := `<FOO></FOO> `
|
||||||
|
d := NewDecoder(strings.NewReader(input))
|
||||||
|
var err error
|
||||||
|
for _, err = d.Token(); err == nil; _, err = d.Token() {
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEntityInsideCDATA(t *testing.T) {
|
||||||
|
input := `<test><![CDATA[ &val=foo ]]></test>`
|
||||||
|
d := NewDecoder(strings.NewReader(input))
|
||||||
|
var err error
|
||||||
|
for _, err = d.Token(); err == nil; _, err = d.Token() {
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var characterTests = []struct {
|
||||||
|
in string
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{"\x12<doc/>", "illegal character code U+0012"},
|
||||||
|
{"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
|
||||||
|
{"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
|
||||||
|
{"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
|
||||||
|
{"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
|
||||||
|
{"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"},
|
||||||
|
{"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"},
|
||||||
|
{"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"},
|
||||||
|
{"<doc>&hello;</doc>", "invalid character entity &hello;"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDisallowedCharacters(t *testing.T) {
|
||||||
|
|
||||||
|
for i, tt := range characterTests {
|
||||||
|
d := NewDecoder(strings.NewReader(tt.in))
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for err == nil {
|
||||||
|
_, err = d.Token()
|
||||||
|
}
|
||||||
|
synerr, ok := err.(*SyntaxError)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err)
|
||||||
|
}
|
||||||
|
if synerr.Msg != tt.err {
|
||||||
|
t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// procInstEncodingTest pairs an expected and an actual string.
// NOTE(review): not referenced by the tests visible here; possibly
// vestigial — confirm before removing.
type procInstEncodingTest struct {
	expect, got string
}

// procInstTests maps a <?xml ...?> attribute string to the expected
// "version" and "encoding" values extracted by procInst.
var procInstTests = []struct {
	input  string
	expect [2]string
}{
	{`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}},
	{`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}},
	{`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}},
	{`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, // unquoted value yields ""
	{`encoding="FOO" `, [2]string{"", "FOO"}},
}
|
||||||
|
|
||||||
|
func TestProcInstEncoding(t *testing.T) {
|
||||||
|
for _, test := range procInstTests {
|
||||||
|
if got := procInst("version", test.input); got != test.expect[0] {
|
||||||
|
t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0])
|
||||||
|
}
|
||||||
|
if got := procInst("encoding", test.input); got != test.expect[1] {
|
||||||
|
t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that directives with comments include the complete
|
||||||
|
// text of any nested directives.
|
||||||
|
|
||||||
|
var directivesWithCommentsInput = `
|
||||||
|
<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
|
||||||
|
<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]>
|
||||||
|
<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]>
|
||||||
|
`
|
||||||
|
|
||||||
|
var directivesWithCommentsTokens = []Token{
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE [<!ENTITY go "Golang">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
Directive(`DOCTYPE <!-> <!> [<!ENTITY go "Golang">]`),
|
||||||
|
CharData("\n"),
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDirectivesWithComments(t *testing.T) {
|
||||||
|
d := NewDecoder(strings.NewReader(directivesWithCommentsInput))
|
||||||
|
|
||||||
|
for i, want := range directivesWithCommentsTokens {
|
||||||
|
have, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("token %d: unexpected error: %s", i, err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(have, want) {
|
||||||
|
t.Errorf("token %d = %#v want %#v", i, have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer whose Write method always returns an error.
type errWriter struct{}

// Write always fails with "unwritable", letting tests exercise the
// error paths of functions that write to an io.Writer.
func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") }
|
||||||
|
|
||||||
|
func TestEscapeTextIOErrors(t *testing.T) {
|
||||||
|
expectErr := "unwritable"
|
||||||
|
err := EscapeText(errWriter{}, []byte{'A'})
|
||||||
|
|
||||||
|
if err == nil || err.Error() != expectErr {
|
||||||
|
t.Errorf("have %v, want %v", err, expectErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEscapeTextInvalidChar(t *testing.T) {
|
||||||
|
input := []byte("A \x00 terminated string.")
|
||||||
|
expected := "A \uFFFD terminated string."
|
||||||
|
|
||||||
|
buff := new(bytes.Buffer)
|
||||||
|
if err := EscapeText(buff, input); err != nil {
|
||||||
|
t.Fatalf("have %v, want nil", err)
|
||||||
|
}
|
||||||
|
text := buff.String()
|
||||||
|
|
||||||
|
if text != expected {
|
||||||
|
t.Errorf("have %v, want %v", text, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIssue5880(t *testing.T) {
|
||||||
|
type T []byte
|
||||||
|
data, err := Marshal(T{192, 168, 0, 1})
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Marshal error: %v", err)
|
||||||
|
}
|
||||||
|
if !utf8.Valid(data) {
|
||||||
|
t.Errorf("Marshal generated invalid UTF-8: %x", data)
|
||||||
|
}
|
||||||
|
}
|
94
drives/davServer/litmus_test_server.go
Normal file
94
drives/davServer/litmus_test_server.go
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
//go:build ignore
|
||||||
|
// +build ignore
|
||||||
|
|
||||||
|
/*
|
||||||
|
This program is a server for the WebDAV 'litmus' compliance test at
|
||||||
|
http://www.webdav.org/neon/litmus/
|
||||||
|
To run the test:
|
||||||
|
|
||||||
|
go run litmus_test_server.go
|
||||||
|
|
||||||
|
and separately, from the downloaded litmus-xxx directory:
|
||||||
|
|
||||||
|
make URL=http://localhost:9999/ check
|
||||||
|
*/
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/endpoints/drive/driveServer"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
var port = flag.Int("port", 9999, "server port")
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
log.SetFlags(0)
|
||||||
|
h := &driveServer.Handler{
|
||||||
|
FileSystem: driveServer.NewMemFS(),
|
||||||
|
LockSystem: driveServer.NewMemLS(),
|
||||||
|
Logger: func(r *http.Request, err error) {
|
||||||
|
litmus := r.Header.Get("X-Litmus")
|
||||||
|
if len(litmus) > 19 {
|
||||||
|
litmus = litmus[:16] + "..."
|
||||||
|
}
|
||||||
|
|
||||||
|
switch r.Method {
|
||||||
|
case "COPY", "MOVE":
|
||||||
|
dst := ""
|
||||||
|
if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
|
||||||
|
dst = u.Path
|
||||||
|
}
|
||||||
|
o := r.Header.Get("Overwrite")
|
||||||
|
log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
|
||||||
|
default:
|
||||||
|
log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// The next line would normally be:
|
||||||
|
// http.Handle("/", h)
|
||||||
|
// but we wrap that HTTP handler h to cater for a special case.
|
||||||
|
//
|
||||||
|
// The propfind_invalid2 litmus test case expects an empty namespace prefix
|
||||||
|
// declaration to be an error. The FAQ in the webdav litmus test says:
|
||||||
|
//
|
||||||
|
// "What does the "propfind_invalid2" test check for?...
|
||||||
|
//
|
||||||
|
// If a request was sent with an XML body which included an empty namespace
|
||||||
|
// prefix declaration (xmlns:ns1=""), then the server must reject that with
|
||||||
|
// a "400 Bad Request" response, as it is invalid according to the XML
|
||||||
|
// Namespace specification."
|
||||||
|
//
|
||||||
|
// On the other hand, the Go standard library's encoding/xml package
|
||||||
|
// accepts an empty xmlns namespace, as per the discussion at
|
||||||
|
// https://github.com/golang/go/issues/8068
|
||||||
|
//
|
||||||
|
// Empty namespaces seem disallowed in the second (2006) edition of the XML
|
||||||
|
// standard, but allowed in a later edition. The grammar differs between
|
||||||
|
// http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
|
||||||
|
// http://www.w3.org/TR/REC-xml-names/#dt-prefix
|
||||||
|
//
|
||||||
|
// Thus, we assume that the propfind_invalid2 test is obsolete, and
|
||||||
|
// hard-code the 400 Bad Request response that the test expects.
|
||||||
|
http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
|
||||||
|
http.Error(w, "400 Bad Request", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.ServeHTTP(w, r)
|
||||||
|
}))
|
||||||
|
|
||||||
|
addr := fmt.Sprintf(":%d", *port)
|
||||||
|
log.Printf("Serving %v", addr)
|
||||||
|
log.Fatal(http.ListenAndServe(addr, nil))
|
||||||
|
}
|
445
drives/davServer/lock.go
Normal file
445
drives/davServer/lock.go
Normal file
@ -0,0 +1,445 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/heap"
|
||||||
|
"errors"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
|
||||||
|
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
|
||||||
|
// ErrForbidden is returned by a LockSystem's Unlock method.
|
||||||
|
ErrForbidden = errors.New("webdav: forbidden")
|
||||||
|
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
|
||||||
|
ErrLocked = errors.New("webdav: locked")
|
||||||
|
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
|
||||||
|
ErrNoSuchLock = errors.New("webdav: no such lock")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Condition can match a WebDAV resource, based on a token or ETag.
|
||||||
|
// Exactly one of Token and ETag should be non-empty.
|
||||||
|
type Condition struct {
|
||||||
|
Not bool
|
||||||
|
Token string
|
||||||
|
ETag string
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockSystem manages access to a collection of named resources. The elements
|
||||||
|
// in a lock name are separated by slash ('/', U+002F) characters, regardless
|
||||||
|
// of host operating system convention.
|
||||||
|
type LockSystem interface {
|
||||||
|
// Confirm confirms that the caller can claim all of the locks specified by
|
||||||
|
// the given conditions, and that holding the union of all of those locks
|
||||||
|
// gives exclusive access to all of the named resources. Up to two resources
|
||||||
|
// can be named. Empty names are ignored.
|
||||||
|
//
|
||||||
|
// Exactly one of release and err will be non-nil. If release is non-nil,
|
||||||
|
// all of the requested locks are held until release is called. Calling
|
||||||
|
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
|
||||||
|
// Confirm has confirmed that a lock claim is valid, that lock cannot be
|
||||||
|
// Confirmed again until it has been released.
|
||||||
|
//
|
||||||
|
// If Confirm returns ErrConfirmationFailed then the Handler will continue
|
||||||
|
// to try any other set of locks presented (a WebDAV HTTP request can
|
||||||
|
// present more than one set of locks). If it returns any other non-nil
|
||||||
|
// error, the Handler will write a "500 Internal Server Error" HTTP status.
|
||||||
|
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
|
||||||
|
|
||||||
|
// Create creates a lock with the given depth, duration, owner and root
|
||||||
|
// (name). The depth will either be negative (meaning infinite) or zero.
|
||||||
|
//
|
||||||
|
// If Create returns ErrLocked then the Handler will write a "423 Locked"
|
||||||
|
// HTTP status. If it returns any other non-nil error, the Handler will
|
||||||
|
// write a "500 Internal Server Error" HTTP status.
|
||||||
|
//
|
||||||
|
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
|
||||||
|
// when to use each error.
|
||||||
|
//
|
||||||
|
// The token returned identifies the created lock. It should be an absolute
|
||||||
|
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
|
||||||
|
// contain whitespace.
|
||||||
|
Create(now time.Time, details LockDetails) (token string, err error)
|
||||||
|
|
||||||
|
// Refresh refreshes the lock with the given token.
|
||||||
|
//
|
||||||
|
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
|
||||||
|
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
|
||||||
|
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
|
||||||
|
// error, the Handler will write a "500 Internal Server Error" HTTP status.
|
||||||
|
//
|
||||||
|
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
|
||||||
|
// when to use each error.
|
||||||
|
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
|
||||||
|
|
||||||
|
// Unlock unlocks the lock with the given token.
|
||||||
|
//
|
||||||
|
// If Unlock returns ErrForbidden then the Handler will write a "403
|
||||||
|
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
|
||||||
|
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
|
||||||
|
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
|
||||||
|
// any other non-nil error, the Handler will write a "500 Internal Server
|
||||||
|
// Error" HTTP status.
|
||||||
|
//
|
||||||
|
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
|
||||||
|
// when to use each error.
|
||||||
|
Unlock(now time.Time, token string) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// LockDetails are a lock's metadata.
type LockDetails struct {
	// Root is the root resource name being locked. For a zero-depth lock, the
	// root is the only resource being locked.
	Root string
	// Duration is the lock timeout. A negative duration means infinite.
	Duration time.Duration
	// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
	//
	// TODO: does the "verbatim" nature play well with XML namespaces?
	// Does the OwnerXML field need to have more structure? See
	// https://codereview.appspot.com/175140043/#msg2
	OwnerXML string
	// ZeroDepth is whether the lock has zero depth. If it does not have zero
	// depth, it has infinite depth (it covers all descendent resources).
	ZeroDepth bool
}
|
||||||
|
|
||||||
|
// NewMemLS returns a new in-memory LockSystem.
func NewMemLS() LockSystem {
	return &memLS{
		byName:  make(map[string]*memLSNode),
		byToken: make(map[string]*memLSNode),
		// Seeding the token generator with the current time makes tokens
		// unlikely to repeat across process restarts.
		gen: uint64(time.Now().Unix()),
	}
}

// memLS is the in-memory LockSystem returned by NewMemLS. All fields are
// guarded by mu.
type memLS struct {
	mu sync.Mutex
	// byName maps a resource name to its lock node; it also contains
	// un-locked ancestor nodes that exist only to carry refCounts.
	byName map[string]*memLSNode
	// byToken maps a lock token to its explicitly locked node.
	byToken map[string]*memLSNode
	// gen is the monotonically increasing source of new tokens.
	gen uint64
	// byExpiry only contains those nodes whose LockDetails have a finite
	// Duration and are yet to expire.
	byExpiry byExpiry
}
|
||||||
|
|
||||||
|
func (m *memLS) nextToken() string {
|
||||||
|
m.gen++
|
||||||
|
return strconv.FormatUint(m.gen, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memLS) collectExpiredNodes(now time.Time) {
|
||||||
|
for len(m.byExpiry) > 0 {
|
||||||
|
if now.Before(m.byExpiry[0].expiry) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
m.remove(m.byExpiry[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Confirm implements LockSystem.Confirm: it checks that the given conditions
// authorize access to name0 and name1 (either may be empty), and marks the
// matching lock node(s) as held until the returned release function is
// called. Held locks cannot expire, be refreshed, or be unlocked.
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)

	var n0, n1 *memLSNode
	if name0 != "" {
		if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
			return nil, ErrConfirmationFailed
		}
	}
	if name1 != "" {
		if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
			return nil, ErrConfirmationFailed
		}
	}

	// Don't hold the same node twice.
	if n1 == n0 {
		n1 = nil
	}

	if n0 != nil {
		m.hold(n0)
	}
	if n1 != nil {
		m.hold(n1)
	}
	// The release closure re-acquires m.mu, so callers must not invoke it
	// while holding the mutex themselves.
	return func() {
		m.mu.Lock()
		defer m.mu.Unlock()
		if n1 != nil {
			m.unhold(n1)
		}
		if n0 != nil {
			m.unhold(n0)
		}
	}, nil
}
|
||||||
|
|
||||||
|
// lookup returns the node n that locks the named resource, provided that n
// matches at least one of the given conditions and that lock isn't held by
// another party. Otherwise, it returns nil.
//
// n may be a parent of the named resource, if n is an infinite depth lock.
//
// The caller must hold m.mu.
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
	// TODO: support Condition.Not and Condition.ETag.
	for _, c := range conditions {
		n = m.byToken[c.Token]
		// Skip unknown tokens and locks already held by another Confirm call.
		if n == nil || n.held {
			continue
		}
		if name == n.details.Root {
			return n
		}
		// A zero-depth lock covers only its root, so it cannot match a
		// descendent name.
		if n.details.ZeroDepth {
			continue
		}
		if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
			return n
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// hold marks n as held by a Confirm call. A finite-duration lock is pulled
// out of the expiry heap so it cannot expire while held.
// The caller must hold m.mu.
func (m *memLS) hold(n *memLSNode) {
	if n.held {
		panic("webdav: memLS inconsistent held state")
	}
	n.held = true
	// byExpiryIndex may already be -1 if the node was never pushed; only
	// remove entries that are actually in the heap.
	if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
		heap.Remove(&m.byExpiry, n.byExpiryIndex)
	}
}

// unhold releases a hold placed by hold, pushing finite-duration locks back
// onto the expiry heap. The caller must hold m.mu.
func (m *memLS) unhold(n *memLSNode) {
	if !n.held {
		panic("webdav: memLS inconsistent held state")
	}
	n.held = false
	if n.details.Duration >= 0 {
		heap.Push(&m.byExpiry, n)
	}
}
|
||||||
|
|
||||||
|
// Create implements LockSystem.Create: it creates a lock rooted at
// details.Root (path-cleaned) and returns its freshly generated token.
// It returns ErrLocked if a conflicting lock already exists.
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)
	details.Root = slashClean(details.Root)

	if !m.canCreate(details.Root, details.ZeroDepth) {
		return "", ErrLocked
	}
	n := m.create(details.Root)
	n.token = m.nextToken()
	m.byToken[n.token] = n
	n.details = details
	// A negative Duration means the lock never expires, so it is kept out
	// of the expiry heap.
	if n.details.Duration >= 0 {
		n.expiry = now.Add(n.details.Duration)
		heap.Push(&m.byExpiry, n)
	}
	return n.token, nil
}
|
||||||
|
|
||||||
|
// Refresh implements LockSystem.Refresh: it looks up the lock by token and
// resets its duration (and expiry-heap membership) relative to now.
// It returns ErrNoSuchLock for unknown tokens and ErrLocked for held locks.
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)

	n := m.byToken[token]
	if n == nil {
		return LockDetails{}, ErrNoSuchLock
	}
	if n.held {
		return LockDetails{}, ErrLocked
	}
	// Drop any existing heap entry before re-inserting with the new expiry.
	if n.byExpiryIndex >= 0 {
		heap.Remove(&m.byExpiry, n.byExpiryIndex)
	}
	n.details.Duration = duration
	if n.details.Duration >= 0 {
		n.expiry = now.Add(n.details.Duration)
		heap.Push(&m.byExpiry, n)
	}
	return n.details, nil
}
|
||||||
|
|
||||||
|
func (m *memLS) Unlock(now time.Time, token string) error {
|
||||||
|
m.mu.Lock()
|
||||||
|
defer m.mu.Unlock()
|
||||||
|
m.collectExpiredNodes(now)
|
||||||
|
|
||||||
|
n := m.byToken[token]
|
||||||
|
if n == nil {
|
||||||
|
return ErrNoSuchLock
|
||||||
|
}
|
||||||
|
if n.held {
|
||||||
|
return ErrLocked
|
||||||
|
}
|
||||||
|
m.remove(n)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// canCreate reports whether a new lock on name with the given depth would
// not conflict with existing locks. It walks from name up to the root,
// checking the target node itself and every ancestor.
// The caller must hold m.mu.
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
	return walkToRoot(name, func(name0 string, first bool) bool {
		n := m.byName[name0]
		if n == nil {
			return true
		}
		if first {
			if n.token != "" {
				// The target node is already locked.
				return false
			}
			if !zeroDepth {
				// The requested lock depth is infinite, and the fact that n exists
				// (n != nil) means that a descendent of the target node is locked.
				return false
			}
		} else if n.token != "" && !n.details.ZeroDepth {
			// An ancestor of the target node is locked with infinite depth.
			return false
		}
		return true
	})
}
|
||||||
|
|
||||||
|
// create returns the node for name, allocating it and every missing
// ancestor, and increments refCount along the whole chain up to the root.
// The caller must hold m.mu.
func (m *memLS) create(name string) (ret *memLSNode) {
	walkToRoot(name, func(name0 string, first bool) bool {
		n := m.byName[name0]
		if n == nil {
			// Freshly allocated nodes start outside the expiry heap.
			n = &memLSNode{
				details: LockDetails{
					Root: name0,
				},
				byExpiryIndex: -1,
			}
			m.byName[name0] = n
		}
		n.refCount++
		if first {
			ret = n
		}
		return true
	})
	return ret
}
|
||||||
|
|
||||||
|
// remove deletes n's lock: it clears the token, decrements refCount on n
// and every ancestor (freeing nodes that reach zero), and drops n from the
// expiry heap if present. The caller must hold m.mu.
func (m *memLS) remove(n *memLSNode) {
	delete(m.byToken, n.token)
	n.token = ""
	walkToRoot(n.details.Root, func(name0 string, first bool) bool {
		x := m.byName[name0]
		x.refCount--
		if x.refCount == 0 {
			delete(m.byName, name0)
		}
		return true
	})
	if n.byExpiryIndex >= 0 {
		heap.Remove(&m.byExpiry, n.byExpiryIndex)
	}
}
|
||||||
|
|
||||||
|
// walkToRoot calls f for name and then for each of name's ancestors, ending
// at "/". first is true only on the initial call. Walking stops early, and
// walkToRoot returns false, the first time f returns false.
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
	first := true
	for {
		if !f(name, first) {
			return false
		}
		if name == "/" {
			return true
		}
		// Strip the last path segment; an empty remainder means the parent
		// is the root itself.
		name = name[:strings.LastIndex(name, "/")]
		if len(name) == 0 {
			name = "/"
		}
		first = false
	}
}
|
||||||
|
|
||||||
|
// memLSNode is a node in memLS's byName tree; it exists for every locked
// resource and for every ancestor of a locked resource.
type memLSNode struct {
	// details are the lock metadata. Even if this node's name is not explicitly locked,
	// details.Root will still equal the node's name.
	details LockDetails
	// token is the unique identifier for this node's lock. An empty token means that
	// this node is not explicitly locked.
	token string
	// refCount is the number of self-or-descendent nodes that are explicitly locked.
	refCount int
	// expiry is when this node's lock expires.
	expiry time.Time
	// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
	// if this node does not expire, or has expired.
	byExpiryIndex int
	// held is whether this node's lock is actively held by a Confirm call.
	held bool
}
|
||||||
|
|
||||||
|
// byExpiry is a min-heap of lock nodes ordered by expiry time; it implements
// heap.Interface. Swap, Push and Pop keep each node's byExpiryIndex in sync
// with its position in the slice.
type byExpiry []*memLSNode

func (b *byExpiry) Len() int {
	return len(*b)
}

func (b *byExpiry) Less(i, j int) bool {
	return (*b)[i].expiry.Before((*b)[j].expiry)
}

func (b *byExpiry) Swap(i, j int) {
	(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
	(*b)[i].byExpiryIndex = i
	(*b)[j].byExpiryIndex = j
}

func (b *byExpiry) Push(x interface{}) {
	n := x.(*memLSNode)
	n.byExpiryIndex = len(*b)
	*b = append(*b, n)
}

func (b *byExpiry) Pop() interface{} {
	i := len(*b) - 1
	n := (*b)[i]
	(*b)[i] = nil // drop the reference so the backing array does not retain the node
	n.byExpiryIndex = -1
	*b = (*b)[:i]
	return n
}
|
||||||
|
|
||||||
|
const infiniteTimeout = -1
|
||||||
|
|
||||||
|
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
|
||||||
|
// empty, an infiniteTimeout is returned.
|
||||||
|
func parseTimeout(s string) (time.Duration, error) {
|
||||||
|
if s == "" {
|
||||||
|
return infiniteTimeout, nil
|
||||||
|
}
|
||||||
|
if i := strings.IndexByte(s, ','); i >= 0 {
|
||||||
|
s = s[:i]
|
||||||
|
}
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
if s == "Infinite" {
|
||||||
|
return infiniteTimeout, nil
|
||||||
|
}
|
||||||
|
const pre = "Second-"
|
||||||
|
if !strings.HasPrefix(s, pre) {
|
||||||
|
return 0, errInvalidTimeout
|
||||||
|
}
|
||||||
|
s = s[len(pre):]
|
||||||
|
if s == "" || s[0] < '0' || '9' < s[0] {
|
||||||
|
return 0, errInvalidTimeout
|
||||||
|
}
|
||||||
|
n, err := strconv.ParseInt(s, 10, 64)
|
||||||
|
if err != nil || 1<<32-1 < n {
|
||||||
|
return 0, errInvalidTimeout
|
||||||
|
}
|
||||||
|
return time.Duration(n) * time.Second, nil
|
||||||
|
}
|
735
drives/davServer/lock_test.go
Normal file
735
drives/davServer/lock_test.go
Normal file
@ -0,0 +1,735 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestWalkToRoot verifies that walkToRoot visits a name and each of its
// ancestors in order, ending at "/", and that first is true only once.
func TestWalkToRoot(t *testing.T) {
	testCases := []struct {
		name string
		want []string
	}{{
		"/a/b/c/d",
		[]string{
			"/a/b/c/d",
			"/a/b/c",
			"/a/b",
			"/a",
			"/",
		},
	}, {
		"/a",
		[]string{
			"/a",
			"/",
		},
	}, {
		"/",
		[]string{
			"/",
		},
	}}

	for _, tc := range testCases {
		var got []string
		if !walkToRoot(tc.name, func(name0 string, first bool) bool {
			// first must correspond exactly to the initial callback.
			if first != (len(got) == 0) {
				t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got))
				return false
			}
			got = append(got, name0)
			return true
		}) {
			continue
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want)
		}
	}
}
|
||||||
|
|
||||||
|
// lockTestDurations are the lock durations exercised by the randomized
// TestMemLS stress test.
var lockTestDurations = []time.Duration{
	infiniteTimeout, // infiniteTimeout means to never expire.
	0,               // A zero duration means to expire immediately.
	100 * time.Hour, // A very large duration will not expire in these tests.
}

// lockTestNames are the names of a set of mutually compatible locks. For each
// name fragment:
//   - _ means no explicit lock.
//   - i means an infinite-depth lock,
//   - z means a zero-depth lock,
var lockTestNames = []string{
	"/_/_/_/_/z",
	"/_/_/i",
	"/_/z",
	"/_/z/i",
	"/_/z/z",
	"/_/z/_/i",
	"/_/z/_/z",
	"/i",
	"/z",
	"/z/_/i",
	"/z/_/z",
}
|
||||||
|
|
||||||
|
// lockTestZeroDepth decodes a lockTestNames entry: a trailing 'z' means a
// zero-depth lock, a trailing 'i' means infinite depth. Any other name is a
// test-authoring error and panics.
func lockTestZeroDepth(name string) bool {
	last := name[len(name)-1]
	if last == 'z' {
		return true
	}
	if last == 'i' {
		return false
	}
	panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name))
}
|
||||||
|
|
||||||
|
// TestMemLSCanCreate cross-checks memLS.canCreate against an independent
// brute-force predicate, over every name of depth <= 6 built from the
// fragments '_', 'i' and 'z'.
func TestMemLSCanCreate(t *testing.T) {
	now := time.Unix(0, 0)
	m := NewMemLS().(*memLS)

	for _, name := range lockTestNames {
		_, err := m.Create(now, LockDetails{
			Root:      name,
			Duration:  infiniteTimeout,
			ZeroDepth: lockTestZeroDepth(name),
		})
		if err != nil {
			t.Fatalf("creating lock for %q: %v", name, err)
		}
	}

	// wantCanCreate is the oracle: a proposed lock conflicts if it equals an
	// existing lock, covers one (infinite depth), or is covered by one.
	wantCanCreate := func(name string, zeroDepth bool) bool {
		for _, n := range lockTestNames {
			switch {
			case n == name:
				// An existing lock has the same name as the proposed lock.
				return false
			case strings.HasPrefix(n, name):
				// An existing lock would be a child of the proposed lock,
				// which conflicts if the proposed lock has infinite depth.
				if !zeroDepth {
					return false
				}
			case strings.HasPrefix(name, n):
				// An existing lock would be an ancestor of the proposed lock,
				// which conflicts if the ancestor has infinite depth.
				if n[len(n)-1] == 'i' {
					return false
				}
			}
		}
		return true
	}

	var check func(int, string)
	check = func(recursion int, name string) {
		for _, zeroDepth := range []bool{false, true} {
			got := m.canCreate(name, zeroDepth)
			want := wantCanCreate(name, zeroDepth)
			if got != want {
				t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want)
			}
		}
		if recursion == 6 {
			return
		}
		if name != "/" {
			name += "/"
		}
		for _, c := range "_iz" {
			check(recursion+1, name+string(c))
		}
	}
	check(0, "/")
}
|
||||||
|
|
||||||
|
// TestMemLSLookup verifies that memLS.lookup matches names against lock
// tokens: a good token matches its own root (and descendants for
// infinite-depth locks), while an unknown token never matches.
func TestMemLSLookup(t *testing.T) {
	now := time.Unix(0, 0)
	m := NewMemLS().(*memLS)

	badToken := m.nextToken()
	t.Logf("badToken=%q", badToken)

	for _, name := range lockTestNames {
		token, err := m.Create(now, LockDetails{
			Root:      name,
			Duration:  infiniteTimeout,
			ZeroDepth: lockTestZeroDepth(name),
		})
		if err != nil {
			t.Fatalf("creating lock for %q: %v", name, err)
		}
		t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token)
	}

	baseNames := append([]string{"/a", "/b/c"}, lockTestNames...)
	for _, baseName := range baseNames {
		for _, suffix := range []string{"", "/0", "/1/2/3"} {
			name := baseName + suffix

			// goodToken is the token expected to match name, or "" when no
			// lock covers it (unlocked base, or zero-depth base + suffix).
			goodToken := ""
			base := m.byName[baseName]
			if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) {
				goodToken = base.token
			}

			for _, token := range []string{badToken, goodToken} {
				if token == "" {
					continue
				}

				got := m.lookup(name, Condition{Token: token})
				want := base
				if token == badToken {
					want = nil
				}
				if got != want {
					t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p",
						name, token, token == badToken, got, want)
				}
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestMemLSConfirm exercises Confirm/release interleavings: mismatched
// conditions, two names under one lock, overlapping holds, and the rule
// that a held lock cannot be unlocked. Internal invariants are checked via
// m.consistent after every step.
func TestMemLSConfirm(t *testing.T) {
	now := time.Unix(0, 0)
	m := NewMemLS().(*memLS)
	alice, err := m.Create(now, LockDetails{
		Root:      "/alice",
		Duration:  infiniteTimeout,
		ZeroDepth: false,
	})
	if err != nil {
		t.Fatalf("Create: %v", err)
	}

	tweedle, err := m.Create(now, LockDetails{
		Root:      "/tweedle",
		Duration:  infiniteTimeout,
		ZeroDepth: false,
	})
	if err != nil {
		t.Fatalf("Create: %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Create: inconsistent state: %v", err)
	}

	// Test a mismatch between name and condition.
	_, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice})
	if err != ErrConfirmationFailed {
		t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Confirm (mismatch): inconsistent state: %v", err)
	}

	// Test two names (that fall under the same lock) in the one Confirm call.
	release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle})
	if err != nil {
		t.Fatalf("Confirm (twins): %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Confirm (twins): inconsistent state: %v", err)
	}
	release()
	if err := m.consistent(); err != nil {
		t.Fatalf("release (twins): inconsistent state: %v", err)
	}

	// Test the same two names in overlapping Confirm / release calls.
	releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle})
	if err != nil {
		t.Fatalf("Confirm (sequence #0): %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err)
	}

	_, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
	if err != ErrConfirmationFailed {
		t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err)
	}

	releaseDee()
	if err := m.consistent(); err != nil {
		t.Fatalf("release (sequence #2): inconsistent state: %v", err)
	}

	releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
	if err != nil {
		t.Fatalf("Confirm (sequence #3): %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err)
	}

	// Test that you can't unlock a held lock.
	err = m.Unlock(now, tweedle)
	if err != ErrLocked {
		t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err)
	}

	releaseDum()
	if err := m.consistent(); err != nil {
		t.Fatalf("release (sequence #5): inconsistent state: %v", err)
	}

	err = m.Unlock(now, tweedle)
	if err != nil {
		t.Fatalf("Unlock (sequence #6): %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err)
	}
}
|
||||||
|
|
||||||
|
// TestMemLSNonCanonicalRoot verifies that Create cleans a messy Root path
// ("/foo/./bar//") and that the resulting lock can be unlocked by token.
func TestMemLSNonCanonicalRoot(t *testing.T) {
	now := time.Unix(0, 0)
	m := NewMemLS().(*memLS)
	token, err := m.Create(now, LockDetails{
		Root:     "/foo/./bar//",
		Duration: 1 * time.Second,
	})
	if err != nil {
		t.Fatalf("Create: %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Create: inconsistent state: %v", err)
	}
	if err := m.Unlock(now, token); err != nil {
		t.Fatalf("Unlock: %v", err)
	}
	if err := m.consistent(); err != nil {
		t.Fatalf("Unlock: inconsistent state: %v", err)
	}
}
|
||||||
|
|
||||||
|
// TestMemLSExpiry drives memLS through a scripted timeline. Each test case
// is "op arg": "setNow N" advances the clock to N seconds, "create /p.N" /
// "refresh /p.N" make a lock on /p expire at second N, and "want ..." lists
// the expected live locks as "root.expirySecond" pairs.
func TestMemLSExpiry(t *testing.T) {
	m := NewMemLS().(*memLS)
	testCases := []string{
		"setNow 0",
		"create /a.5",
		"want /a.5",
		"create /c.6",
		"want /a.5 /c.6",
		"create /a/b.7",
		"want /a.5 /a/b.7 /c.6",
		"setNow 4",
		"want /a.5 /a/b.7 /c.6",
		"setNow 5",
		"want /a/b.7 /c.6",
		"setNow 6",
		"want /a/b.7",
		"setNow 7",
		"want ",
		"setNow 8",
		"want ",
		"create /a.12",
		"create /b.13",
		"create /c.15",
		"create /a/d.16",
		"want /a.12 /a/d.16 /b.13 /c.15",
		"refresh /a.14",
		"want /a.14 /a/d.16 /b.13 /c.15",
		"setNow 12",
		"want /a.14 /a/d.16 /b.13 /c.15",
		"setNow 13",
		"want /a.14 /a/d.16 /c.15",
		"setNow 14",
		"want /a/d.16 /c.15",
		"refresh /a/d.20",
		"refresh /c.20",
		"want /a/d.20 /c.20",
		"setNow 20",
		"want ",
	}

	tokens := map[string]string{}
	zTime := time.Unix(0, 0)
	now := zTime
	for i, tc := range testCases {
		j := strings.IndexByte(tc, ' ')
		if j < 0 {
			t.Fatalf("test case #%d %q: invalid command", i, tc)
		}
		op, arg := tc[:j], tc[j+1:]
		switch op {
		default:
			t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)

		case "create", "refresh":
			parts := strings.Split(arg, ".")
			if len(parts) != 2 {
				t.Fatalf("test case #%d %q: invalid create", i, tc)
			}
			root := parts[0]
			d, err := strconv.Atoi(parts[1])
			if err != nil {
				t.Fatalf("test case #%d %q: invalid duration", i, tc)
			}
			// The script specifies an absolute expiry second; convert it to
			// a duration relative to the current scripted clock.
			dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now)

			switch op {
			case "create":
				token, err := m.Create(now, LockDetails{
					Root:      root,
					Duration:  dur,
					ZeroDepth: true,
				})
				if err != nil {
					t.Fatalf("test case #%d %q: Create: %v", i, tc, err)
				}
				tokens[root] = token

			case "refresh":
				token := tokens[root]
				if token == "" {
					t.Fatalf("test case #%d %q: no token for %q", i, tc, root)
				}
				got, err := m.Refresh(now, token, dur)
				if err != nil {
					t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err)
				}
				want := LockDetails{
					Root:      root,
					Duration:  dur,
					ZeroDepth: true,
				}
				if got != want {
					t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want)
				}
			}

		case "setNow":
			d, err := strconv.Atoi(arg)
			if err != nil {
				t.Fatalf("test case #%d %q: invalid duration", i, tc)
			}
			now = time.Unix(0, 0).Add(time.Duration(d) * time.Second)

		case "want":
			m.mu.Lock()
			m.collectExpiredNodes(now)
			got := make([]string, 0, len(m.byToken))
			for _, n := range m.byToken {
				got = append(got, fmt.Sprintf("%s.%d",
					n.details.Root, n.expiry.Sub(zTime)/time.Second))
			}
			m.mu.Unlock()
			sort.Strings(got)
			want := []string{}
			if arg != "" {
				want = strings.Split(arg, " ")
			}
			if !reflect.DeepEqual(got, want) {
				t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want)
			}
		}

		if err := m.consistent(); err != nil {
			t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err)
		}
	}
}
|
||||||
|
|
||||||
|
// TestMemLS is a randomized stress test: with a fixed seed it performs 2000
// Create/Confirm/Refresh/Unlock operations on compatible lock names and
// checks internal consistency after every step, plus a minimum count for
// each operation kind so all code paths are exercised.
func TestMemLS(t *testing.T) {
	now := time.Unix(0, 0)
	m := NewMemLS().(*memLS)
	rng := rand.New(rand.NewSource(0))
	tokens := map[string]string{}
	nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0
	const N = 2000

	for i := 0; i < N; i++ {
		name := lockTestNames[rng.Intn(len(lockTestNames))]
		duration := lockTestDurations[rng.Intn(len(lockTestDurations))]
		confirmed, unlocked := false, false

		// If the name was already locked, we randomly confirm/release, refresh
		// or unlock it. Otherwise, we create a lock.
		token := tokens[name]
		if token != "" {
			switch rng.Intn(3) {
			case 0:
				confirmed = true
				nConfirm++
				release, err := m.Confirm(now, name, "", Condition{Token: token})
				if err != nil {
					t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err)
				}
				if err := m.consistent(); err != nil {
					t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
				}
				release()

			case 1:
				nRefresh++
				if _, err := m.Refresh(now, token, duration); err != nil {
					t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err)
				}

			case 2:
				unlocked = true
				nUnlock++
				if err := m.Unlock(now, token); err != nil {
					t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err)
				}
			}

		} else {
			nCreate++
			var err error
			token, err = m.Create(now, LockDetails{
				Root:      name,
				Duration:  duration,
				ZeroDepth: lockTestZeroDepth(name),
			})
			if err != nil {
				t.Fatalf("iteration #%d: Create %q: %v", i, name, err)
			}
		}

		if !confirmed {
			if duration == 0 || unlocked {
				// A zero-duration lock should expire immediately and is
				// effectively equivalent to being unlocked.
				tokens[name] = ""
			} else {
				tokens[name] = token
			}
		}

		if err := m.consistent(); err != nil {
			t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
		}
	}

	if nConfirm < N/10 {
		t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10)
	}
	if nCreate < N/10 {
		t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10)
	}
	if nRefresh < N/10 {
		t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10)
	}
	if nUnlock < N/10 {
		t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10)
	}
}
|
||||||
|
|
||||||
|
// consistent is a test-only invariant checker: it cross-validates memLS's
// byName tree, byToken index and byExpiry heap against each other and
// returns a descriptive error for the first violation found.
func (m *memLS) consistent() error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// If m.byName is non-empty, then it must contain an entry for the root "/",
	// and its refCount should equal the number of locked nodes.
	if len(m.byName) > 0 {
		n := m.byName["/"]
		if n == nil {
			return fmt.Errorf(`non-empty m.byName does not contain the root "/"`)
		}
		if n.refCount != len(m.byToken) {
			return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken))
		}
	}

	for name, n := range m.byName {
		// The map keys should be consistent with the node's copy of the key.
		if n.details.Root != name {
			return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name)
		}

		// A name must be clean, and start with a "/".
		if len(name) == 0 || name[0] != '/' {
			return fmt.Errorf(`node name %q does not start with "/"`, name)
		}
		if name != path.Clean(name) {
			return fmt.Errorf(`node name %q is not clean`, name)
		}

		// A node's refCount should be positive.
		if n.refCount <= 0 {
			return fmt.Errorf("non-positive refCount for node at name %q", name)
		}

		// A node's refCount should be the number of self-or-descendents that
		// are locked (i.e. have a non-empty token).
		var list []string
		for name0, n0 := range m.byName {
			// All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z',
			// so strings.HasPrefix is equivalent to self-or-descendent name match.
			// We don't have to worry about "/foo/bar" being a false positive match
			// for "/foo/b".
			if strings.HasPrefix(name0, name) && n0.token != "" {
				list = append(list, name0)
			}
		}
		if n.refCount != len(list) {
			sort.Strings(list)
			return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)",
				name, n.refCount, list, len(list))
		}

		// A node n is in m.byToken if it has a non-empty token.
		if n.token != "" {
			if _, ok := m.byToken[n.token]; !ok {
				return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token)
			}
		}

		// A node n is in m.byExpiry if it has a non-negative byExpiryIndex.
		if n.byExpiryIndex >= 0 {
			if n.byExpiryIndex >= len(m.byExpiry) {
				return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry))
			}
			if n != m.byExpiry[n.byExpiryIndex] {
				return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex)
			}
		}
	}

	for token, n := range m.byToken {
		// The map keys should be consistent with the node's copy of the key.
		if n.token != token {
			return fmt.Errorf("node token %q != byToken map key %q", n.token, token)
		}

		// Every node in m.byToken is in m.byName.
		if _, ok := m.byName[n.details.Root]; !ok {
			return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root)
		}
	}

	for i, n := range m.byExpiry {
		// The slice indices should be consistent with the node's copy of the index.
		if n.byExpiryIndex != i {
			return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i)
		}

		// Every node in m.byExpiry is in m.byName.
		if _, ok := m.byName[n.details.Root]; !ok {
			return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root)
		}

		// No node in m.byExpiry should be held.
		if n.held {
			return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
func TestParseTimeout(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
s string
|
||||||
|
want time.Duration
|
||||||
|
wantErr error
|
||||||
|
}{{
|
||||||
|
"",
|
||||||
|
infiniteTimeout,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
"Infinite",
|
||||||
|
infiniteTimeout,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
"Infinitesimal",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"infinite",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second-0",
|
||||||
|
0 * time.Second,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
"Second-123",
|
||||||
|
123 * time.Second,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
" Second-456 ",
|
||||||
|
456 * time.Second,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
"Second-4100000000",
|
||||||
|
4100000000 * time.Second,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
"junk",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second-",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second--1",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second--123",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second-+123",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second-0x123",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"second-123",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
"Second-4294967295",
|
||||||
|
4294967295 * time.Second,
|
||||||
|
nil,
|
||||||
|
}, {
|
||||||
|
// Section 10.7 says that "The timeout value for TimeType "Second"
|
||||||
|
// must not be greater than 2^32-1."
|
||||||
|
"Second-4294967296",
|
||||||
|
0,
|
||||||
|
errInvalidTimeout,
|
||||||
|
}, {
|
||||||
|
// This test case comes from section 9.10.9 of the spec. It says,
|
||||||
|
//
|
||||||
|
// "In this request, the client has specified that it desires an
|
||||||
|
// infinite-length lock, if available, otherwise a timeout of 4.1
|
||||||
|
// billion seconds, if available."
|
||||||
|
//
|
||||||
|
// The Go WebDAV package always supports infinite length locks,
|
||||||
|
// and ignores the fallback after the comma.
|
||||||
|
"Infinite, Second-4100000000",
|
||||||
|
infiniteTimeout,
|
||||||
|
nil,
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
got, gotErr := parseTimeout(tc.s)
|
||||||
|
if got != tc.want || gotErr != tc.wantErr {
|
||||||
|
t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
469
drives/davServer/prop.go
Normal file
469
drives/davServer/prop.go
Normal file
@ -0,0 +1,469 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"mime"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Proppatch describes a property update instruction as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
//
// A PROPPATCH request is translated into a sequence of Proppatch values,
// each either setting or removing a group of properties.
type Proppatch struct {
	// Remove specifies whether this patch removes properties. If it does not
	// remove them, it sets them.
	// NOTE(review): when Remove is true, presumably only each Property's
	// XMLName is significant — confirm against DeadPropsHolder implementations.
	Remove bool
	// Props contains the properties to be set or removed.
	Props []Property
}
|
||||||
|
|
||||||
|
// Propstat describes a XML propstat element as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
//
// A propstat groups properties that share the same outcome: every entry in
// Props is reported with the single Status below.
type Propstat struct {
	// Props contains the properties for which Status applies.
	Props []Property

	// Status defines the HTTP status code of the properties in Prop.
	// Allowed values include, but are not limited to the WebDAV status
	// code extensions for HTTP/1.1.
	// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
	Status int

	// XMLError contains the XML representation of the optional error element.
	// XML content within this field must not rely on any predefined
	// namespace declarations or prefixes. If empty, the XML error element
	// is omitted.
	XMLError string

	// ResponseDescription contains the contents of the optional
	// responsedescription field. If empty, the XML element is omitted.
	ResponseDescription string
}
|
||||||
|
|
||||||
|
// makePropstats returns a slice containing those of x and y whose Props slice
|
||||||
|
// is non-empty. If both are empty, it returns a slice containing an otherwise
|
||||||
|
// zero Propstat whose HTTP status code is 200 OK.
|
||||||
|
func makePropstats(x, y Propstat) []Propstat {
|
||||||
|
pstats := make([]Propstat, 0, 2)
|
||||||
|
if len(x.Props) != 0 {
|
||||||
|
pstats = append(pstats, x)
|
||||||
|
}
|
||||||
|
if len(y.Props) != 0 {
|
||||||
|
pstats = append(pstats, y)
|
||||||
|
}
|
||||||
|
if len(pstats) == 0 {
|
||||||
|
pstats = append(pstats, Propstat{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return pstats
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeadPropsHolder holds the dead properties of a resource.
//
// Dead properties are those properties that are explicitly defined. In
// comparison, live properties, such as DAV:getcontentlength, are implicitly
// defined by the underlying resource, and cannot be explicitly overridden or
// removed. See the Terminology section of
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
//
// There is a whitelist of the names of live properties. This package handles
// all live properties, and will only pass non-whitelisted names to the Patch
// method of DeadPropsHolder implementations.
//
// This interface is optional: props, propnames and patch detect it via a
// type assertion on the File opened from the FileSystem.
type DeadPropsHolder interface {
	// DeadProps returns a copy of the dead properties held.
	DeadProps() (map[xml.Name]Property, error)

	// Patch patches the dead properties held.
	//
	// Patching is atomic; either all or no patches succeed. It returns (nil,
	// non-nil) if an internal server error occurred, otherwise the Propstats
	// collectively contain one Property for each proposed patch Property. If
	// all patches succeed, Patch returns a slice of length one and a Propstat
	// element with a 200 OK HTTP status code. If none succeed, for reasons
	// other than an internal server error, no Propstat has status 200 OK.
	//
	// For more details on when various HTTP status codes apply, see
	// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
	Patch([]Proppatch) ([]Propstat, error)
}
|
||||||
|
|
||||||
|
// liveProps contains all supported, protected DAV: properties.
//
// Entries with a nil findFn (creationdate, getcontentlanguage, lockdiscovery)
// are "hidden": props reports them as 404 Not Found and propnames omits them.
var liveProps = map[xml.Name]struct {
	// findFn implements the propfind function of this property. If nil,
	// it indicates a hidden property.
	findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
	// dir is true if the property applies to directories.
	dir bool
}{
	{Space: "DAV:", Local: "resourcetype"}: {
		findFn: findResourceType,
		dir:    true,
	},
	{Space: "DAV:", Local: "displayname"}: {
		findFn: findDisplayName,
		dir:    true,
	},
	{Space: "DAV:", Local: "getcontentlength"}: {
		findFn: findContentLength,
		dir:    false,
	},
	{Space: "DAV:", Local: "getlastmodified"}: {
		findFn: findLastModified,
		// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
		// suggests that getlastmodified should only apply to GETable
		// resources, and this package does not support GET on directories.
		//
		// Nonetheless, some WebDAV clients expect child directories to be
		// sortable by getlastmodified date, so this value is true, not false.
		// See golang.org/issue/15334.
		dir: true,
	},
	{Space: "DAV:", Local: "creationdate"}: {
		findFn: nil,
		dir:    false,
	},
	{Space: "DAV:", Local: "getcontentlanguage"}: {
		findFn: nil,
		dir:    false,
	},
	{Space: "DAV:", Local: "getcontenttype"}: {
		findFn: findContentType,
		dir:    false,
	},
	{Space: "DAV:", Local: "getetag"}: {
		findFn: findETag,
		// findETag implements ETag as the concatenated hex values of a file's
		// modification time and size. This is not a reliable synchronization
		// mechanism for directories, so we do not advertise getetag for DAV
		// collections.
		dir: false,
	},

	// TODO: The lockdiscovery property requires LockSystem to list the
	// active locks on a resource.
	{Space: "DAV:", Local: "lockdiscovery"}: {},
	{Space: "DAV:", Local: "supportedlock"}: {
		findFn: findSupportedLock,
		dir:    true,
	},
}
|
||||||
|
|
||||||
|
// TODO(nigeltao) merge props and allprop?
|
||||||
|
|
||||||
|
// props returns the status of the properties named pnames for resource name.
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element. Dead properties take precedence over live ones:
// a name found in the file's dead-property map is returned as-is and the
// live-property table is not consulted for it.
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	isDir := fi.IsDir()

	// Dead properties are only available when the opened file implements the
	// optional DeadPropsHolder interface.
	var deadProps map[xml.Name]Property
	if dph, ok := f.(DeadPropsHolder); ok {
		deadProps, err = dph.DeadProps()
		if err != nil {
			return nil, err
		}
	}

	// Properties are partitioned into exactly two outcomes: found (200) and
	// not found (404); makePropstats drops whichever group is empty.
	pstatOK := Propstat{Status: http.StatusOK}
	pstatNotFound := Propstat{Status: http.StatusNotFound}
	for _, pn := range pnames {
		// If this file has dead properties, check if they contain pn.
		if dp, ok := deadProps[pn]; ok {
			pstatOK.Props = append(pstatOK.Props, dp)
			continue
		}
		// Otherwise, it must either be a live property or we don't know it.
		// Hidden live properties (nil findFn) and file-only properties on a
		// directory fall through to the 404 group.
		if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
			innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
			if err != nil {
				return nil, err
			}
			pstatOK.Props = append(pstatOK.Props, Property{
				XMLName:  pn,
				InnerXML: []byte(innerXML),
			})
		} else {
			pstatNotFound.Props = append(pstatNotFound.Props, Property{
				XMLName: pn,
			})
		}
	}
	return makePropstats(pstatOK, pstatNotFound), nil
}
|
||||||
|
|
||||||
|
// propnames returns the property names defined for resource name.
|
||||||
|
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
|
||||||
|
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
fi, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
isDir := fi.IsDir()
|
||||||
|
|
||||||
|
var deadProps map[xml.Name]Property
|
||||||
|
if dph, ok := f.(DeadPropsHolder); ok {
|
||||||
|
deadProps, err = dph.DeadProps()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
|
||||||
|
for pn, prop := range liveProps {
|
||||||
|
if prop.findFn != nil && (prop.dir || !isDir) {
|
||||||
|
pnames = append(pnames, pn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for pn := range deadProps {
|
||||||
|
pnames = append(pnames, pn)
|
||||||
|
}
|
||||||
|
return pnames, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// allprop returns the properties defined for resource name and the properties
|
||||||
|
// named in include.
|
||||||
|
//
|
||||||
|
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
|
||||||
|
// within the RFC plus dead properties. Other live properties should only be
|
||||||
|
// returned if they are named in 'include'.
|
||||||
|
//
|
||||||
|
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
|
||||||
|
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
|
||||||
|
pnames, err := propnames(ctx, fs, ls, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Add names from include if they are not already covered in pnames.
|
||||||
|
nameset := make(map[xml.Name]bool)
|
||||||
|
for _, pn := range pnames {
|
||||||
|
nameset[pn] = true
|
||||||
|
}
|
||||||
|
for _, pn := range include {
|
||||||
|
if !nameset[pn] {
|
||||||
|
pnames = append(pnames, pn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return props(ctx, fs, ls, name, pnames)
|
||||||
|
}
|
||||||
|
|
||||||
|
// patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
//
// Patching is all-or-nothing: if any proposed property is a protected live
// property, the whole batch is rejected — the live properties get 403
// Forbidden and every other property gets 424 Failed Dependency.
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
	// First pass: detect whether any patch touches a protected live property.
	conflict := false
loop:
	for _, patch := range patches {
		for _, p := range patch.Props {
			if _, ok := liveProps[p.XMLName]; ok {
				conflict = true
				break loop
			}
		}
	}
	if conflict {
		pstatForbidden := Propstat{
			Status:   http.StatusForbidden,
			XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
		}
		pstatFailedDep := Propstat{
			Status: StatusFailedDependency,
		}
		// Second pass: assign every proposed property to one of the two
		// failure groups.
		for _, patch := range patches {
			for _, p := range patch.Props {
				if _, ok := liveProps[p.XMLName]; ok {
					pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
				} else {
					pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
				}
			}
		}
		return makePropstats(pstatForbidden, pstatFailedDep), nil
	}

	f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	if dph, ok := f.(DeadPropsHolder); ok {
		ret, err := dph.Patch(patches)
		if err != nil {
			return nil, err
		}
		// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
		// "The contents of the prop XML element must only list the names of
		// properties to which the result in the status element applies."
		for _, pstat := range ret {
			for i, p := range pstat.Props {
				pstat.Props[i] = Property{XMLName: p.XMLName}
			}
		}
		return ret, nil
	}
	// The file doesn't implement the optional DeadPropsHolder interface, so
	// all patches are forbidden.
	pstat := Propstat{Status: http.StatusForbidden}
	for _, patch := range patches {
		for _, p := range patch.Props {
			pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
		}
	}
	return []Propstat{pstat}, nil
}
|
||||||
|
|
||||||
|
// escapeXML returns s escaped for use as XML character data. As a fast path,
// a string consisting only of ASCII letters, digits, space, underscore and
// the bytes '+' through '9' (which include + , - . and /) is returned
// unchanged without allocating.
func escapeXML(s string) string {
	safe := func(c byte) bool {
		switch {
		case c == ' ', c == '_':
			return true
		case '+' <= c && c <= '9': // digits as well as + , - . and /
			return true
		case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
			return true
		}
		return false
	}
	for i := 0; i < len(s); i++ {
		if safe(s[i]) {
			continue
		}
		// Found a byte outside the safe set: run the full escaper.
		var buf bytes.Buffer
		xml.EscapeText(&buf, []byte(s))
		return buf.String()
	}
	return s
}
|
||||||
|
|
||||||
|
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||||
|
if fi.IsDir() {
|
||||||
|
return `<D:collection xmlns:D="DAV:"/>`, nil
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||||
|
if slashClean(name) == "/" {
|
||||||
|
// Hide the real name of a possibly prefixed root directory.
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
return escapeXML(fi.Name()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||||
|
return strconv.FormatInt(fi.Size(), 10), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||||
|
return fi.ModTime().UTC().Format(http.TimeFormat), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrNotImplemented should be returned by optional interfaces if they
// want the original implementation to be used.
//
// Callers (findContentType, findETag) compare against it with ==, so
// implementations must return this exact value, not a wrapped error.
var ErrNotImplemented = errors.New("not implemented")
|
||||||
|
|
||||||
|
// ContentTyper is an optional interface for the os.FileInfo
// objects returned by the FileSystem.
//
// If this interface is defined then it will be used to read the
// content type from the object. findContentType detects it via a type
// assertion on the os.FileInfo value.
//
// If this interface is not defined the file will be opened and the
// content type will be guessed from the initial contents of the file.
type ContentTyper interface {
	// ContentType returns the content type for the file.
	//
	// If this returns error ErrNotImplemented then the error will
	// be ignored and the base implementation will be used
	// instead.
	ContentType(ctx context.Context) (string, error)
}
|
||||||
|
|
||||||
|
// findContentType implements the DAV:getcontenttype live property. It tries,
// in order: the optional ContentTyper interface on fi, the MIME type implied
// by the file extension, and finally content sniffing of the first 512 bytes
// (after which the file is rewound to its start).
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
	if do, ok := fi.(ContentTyper); ok {
		ctype, err := do.ContentType(ctx)
		// ErrNotImplemented means "fall through to the default behavior";
		// any other result (including other errors) is returned as-is.
		if err != ErrNotImplemented {
			return ctype, err
		}
	}
	f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
	if err != nil {
		return "", err
	}
	defer f.Close()
	// This implementation is based on serveContent's code in the standard net/http package.
	ctype := mime.TypeByExtension(filepath.Ext(name))
	if ctype != "" {
		return ctype, nil
	}
	// Read a chunk to decide between utf-8 text and binary.
	var buf [512]byte
	n, err := io.ReadFull(f, buf[:])
	// A short read (EOF / unexpected EOF) is fine: sniff whatever we got.
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return "", err
	}
	ctype = http.DetectContentType(buf[:n])
	// Rewind file.
	_, err = f.Seek(0, io.SeekStart)
	return ctype, err
}
|
||||||
|
|
||||||
|
// ETager is an optional interface for the os.FileInfo objects
// returned by the FileSystem.
//
// If this interface is defined then it will be used to read the ETag
// for the object. findETag detects it via a type assertion on the
// os.FileInfo value.
//
// If this interface is not defined an ETag will be computed using the
// ModTime() and the Size() methods of the os.FileInfo object.
type ETager interface {
	// ETag returns an ETag for the file. This should be of the
	// form "value" or W/"value"
	//
	// If this returns error ErrNotImplemented then the error will
	// be ignored and the base implementation will be used
	// instead.
	ETag(ctx context.Context) (string, error)
}
|
||||||
|
|
||||||
|
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||||
|
if do, ok := fi.(ETager); ok {
|
||||||
|
etag, err := do.ETag(ctx)
|
||||||
|
if err != ErrNotImplemented {
|
||||||
|
return etag, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// The Apache http 2.4 web server by default concatenates the
|
||||||
|
// modification time and size of a file. We replicate the heuristic
|
||||||
|
// with nanosecond granularity.
|
||||||
|
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
|
||||||
|
return `` +
|
||||||
|
`<D:lockentry xmlns:D="DAV:">` +
|
||||||
|
`<D:lockscope><D:exclusive/></D:lockscope>` +
|
||||||
|
`<D:locktype><D:write/></D:locktype>` +
|
||||||
|
`</D:lockentry>`, nil
|
||||||
|
}
|
716
drives/davServer/prop_test.go
Normal file
716
drives/davServer/prop_test.go
Normal file
@ -0,0 +1,716 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMemPS(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
// calcProps calculates the getlastmodified and getetag DAV: property
|
||||||
|
// values in pstats for resource name in file-system fs.
|
||||||
|
calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {
|
||||||
|
fi, err := fs.Stat(ctx, name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, pst := range pstats {
|
||||||
|
for i, p := range pst.Props {
|
||||||
|
switch p.XMLName {
|
||||||
|
case xml.Name{Space: "DAV:", Local: "getlastmodified"}:
|
||||||
|
p.InnerXML = []byte(fi.ModTime().UTC().Format(http.TimeFormat))
|
||||||
|
pst.Props[i] = p
|
||||||
|
case xml.Name{Space: "DAV:", Local: "getetag"}:
|
||||||
|
if fi.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
etag, err := findETag(ctx, fs, ls, name, fi)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
p.InnerXML = []byte(etag)
|
||||||
|
pst.Props[i] = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
lockEntry = `` +
|
||||||
|
`<D:lockentry xmlns:D="DAV:">` +
|
||||||
|
`<D:lockscope><D:exclusive/></D:lockscope>` +
|
||||||
|
`<D:locktype><D:write/></D:locktype>` +
|
||||||
|
`</D:lockentry>`
|
||||||
|
statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`
|
||||||
|
)
|
||||||
|
|
||||||
|
type propOp struct {
|
||||||
|
op string
|
||||||
|
name string
|
||||||
|
pnames []xml.Name
|
||||||
|
patches []Proppatch
|
||||||
|
wantPnames []xml.Name
|
||||||
|
wantPropstats []Propstat
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
desc string
|
||||||
|
noDeadProps bool
|
||||||
|
buildfs []string
|
||||||
|
propOp []propOp
|
||||||
|
}{{
|
||||||
|
desc: "propname",
|
||||||
|
buildfs: []string{"mkdir /dir", "touch /file"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "propname",
|
||||||
|
name: "/dir",
|
||||||
|
wantPnames: []xml.Name{
|
||||||
|
{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
{Space: "DAV:", Local: "displayname"},
|
||||||
|
{Space: "DAV:", Local: "supportedlock"},
|
||||||
|
{Space: "DAV:", Local: "getlastmodified"},
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
op: "propname",
|
||||||
|
name: "/file",
|
||||||
|
wantPnames: []xml.Name{
|
||||||
|
{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
{Space: "DAV:", Local: "displayname"},
|
||||||
|
{Space: "DAV:", Local: "getcontentlength"},
|
||||||
|
{Space: "DAV:", Local: "getlastmodified"},
|
||||||
|
{Space: "DAV:", Local: "getcontenttype"},
|
||||||
|
{Space: "DAV:", Local: "getetag"},
|
||||||
|
{Space: "DAV:", Local: "supportedlock"},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "allprop dir and file",
|
||||||
|
buildfs: []string{"mkdir /dir", "write /file foobarbaz"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "allprop",
|
||||||
|
name: "/dir",
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
|
||||||
|
InnerXML: []byte("dir"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
|
||||||
|
InnerXML: nil, // Calculated during test.
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
|
||||||
|
InnerXML: []byte(lockEntry),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "allprop",
|
||||||
|
name: "/file",
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
InnerXML: []byte(""),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
|
||||||
|
InnerXML: []byte("file"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
|
||||||
|
InnerXML: []byte("9"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
|
||||||
|
InnerXML: nil, // Calculated during test.
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
|
||||||
|
InnerXML: []byte("text/plain; charset=utf-8"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
|
||||||
|
InnerXML: nil, // Calculated during test.
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
|
||||||
|
InnerXML: []byte(lockEntry),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "allprop",
|
||||||
|
name: "/file",
|
||||||
|
pnames: []xml.Name{
|
||||||
|
{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
{Space: "foo", Local: "bar"},
|
||||||
|
},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
InnerXML: []byte(""),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
|
||||||
|
InnerXML: []byte("file"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
|
||||||
|
InnerXML: []byte("9"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
|
||||||
|
InnerXML: nil, // Calculated during test.
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
|
||||||
|
InnerXML: []byte("text/plain; charset=utf-8"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
|
||||||
|
InnerXML: nil, // Calculated during test.
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
|
||||||
|
InnerXML: []byte(lockEntry),
|
||||||
|
}}}, {
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}}},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "propfind DAV:resourcetype",
|
||||||
|
buildfs: []string{"mkdir /dir", "touch /file"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "DAV:", Local: "resourcetype"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/file",
|
||||||
|
pnames: []xml.Name{{Space: "DAV:", Local: "resourcetype"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
InnerXML: []byte(""),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "propfind unsupported DAV properties",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "DAV:", Local: "getcontentlanguage"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "DAV:", Local: "creationdate"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "creationdate"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "propfind getetag for files but not for directories",
|
||||||
|
buildfs: []string{"mkdir /dir", "touch /file"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "DAV:", Local: "getetag"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/file",
|
||||||
|
pnames: []xml.Name{{Space: "DAV:", Local: "getetag"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
|
||||||
|
InnerXML: nil, // Calculated during test.
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "proppatch property on no-dead-properties file system",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
noDeadProps: true,
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusForbidden,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusForbidden,
|
||||||
|
XMLError: statForbiddenError,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "proppatch dead property",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
InnerXML: []byte("baz"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "foo", Local: "bar"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
InnerXML: []byte("baz"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "proppatch dead property with failed dependency",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
InnerXML: []byte("baz"),
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
|
||||||
|
InnerXML: []byte("xxx"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusForbidden,
|
||||||
|
XMLError: statForbiddenError,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
Status: StatusFailedDependency,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "foo", Local: "bar"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "proppatch remove dead property",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
InnerXML: []byte("baz"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "spam", Local: "ham"},
|
||||||
|
InnerXML: []byte("eggs"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "spam", Local: "ham"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{
|
||||||
|
{Space: "foo", Local: "bar"},
|
||||||
|
{Space: "spam", Local: "ham"},
|
||||||
|
},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
InnerXML: []byte("baz"),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "spam", Local: "ham"},
|
||||||
|
InnerXML: []byte("eggs"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Remove: true,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{
|
||||||
|
{Space: "foo", Local: "bar"},
|
||||||
|
{Space: "spam", Local: "ham"},
|
||||||
|
},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "spam", Local: "ham"},
|
||||||
|
InnerXML: []byte("eggs"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "propname with dead property",
|
||||||
|
buildfs: []string{"touch /file"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/file",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
InnerXML: []byte("baz"),
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
op: "propname",
|
||||||
|
name: "/file",
|
||||||
|
wantPnames: []xml.Name{
|
||||||
|
{Space: "DAV:", Local: "resourcetype"},
|
||||||
|
{Space: "DAV:", Local: "displayname"},
|
||||||
|
{Space: "DAV:", Local: "getcontentlength"},
|
||||||
|
{Space: "DAV:", Local: "getlastmodified"},
|
||||||
|
{Space: "DAV:", Local: "getcontenttype"},
|
||||||
|
{Space: "DAV:", Local: "getetag"},
|
||||||
|
{Space: "DAV:", Local: "supportedlock"},
|
||||||
|
{Space: "foo", Local: "bar"},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "proppatch remove unknown dead property",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "proppatch",
|
||||||
|
name: "/dir",
|
||||||
|
patches: []Proppatch{{
|
||||||
|
Remove: true,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusOK,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
desc: "bad: propfind unknown property",
|
||||||
|
buildfs: []string{"mkdir /dir"},
|
||||||
|
propOp: []propOp{{
|
||||||
|
op: "propfind",
|
||||||
|
name: "/dir",
|
||||||
|
pnames: []xml.Name{{Space: "foo:", Local: "bar"}},
|
||||||
|
wantPropstats: []Propstat{{
|
||||||
|
Status: http.StatusNotFound,
|
||||||
|
Props: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "foo:", Local: "bar"},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
fs, err := buildTestFS(tc.buildfs)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
|
||||||
|
}
|
||||||
|
if tc.noDeadProps {
|
||||||
|
fs = noDeadPropsFS{fs}
|
||||||
|
}
|
||||||
|
ls := NewMemLS()
|
||||||
|
for _, op := range tc.propOp {
|
||||||
|
desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name)
|
||||||
|
if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil {
|
||||||
|
t.Fatalf("%s: calcProps: %v", desc, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call property system.
|
||||||
|
var propstats []Propstat
|
||||||
|
switch op.op {
|
||||||
|
case "propname":
|
||||||
|
pnames, err := propnames(ctx, fs, ls, op.name)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: got error %v, want nil", desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sort.Sort(byXMLName(pnames))
|
||||||
|
sort.Sort(byXMLName(op.wantPnames))
|
||||||
|
if !reflect.DeepEqual(pnames, op.wantPnames) {
|
||||||
|
t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case "allprop":
|
||||||
|
propstats, err = allprop(ctx, fs, ls, op.name, op.pnames)
|
||||||
|
case "propfind":
|
||||||
|
propstats, err = props(ctx, fs, ls, op.name, op.pnames)
|
||||||
|
case "proppatch":
|
||||||
|
propstats, err = patch(ctx, fs, ls, op.name, op.patches)
|
||||||
|
default:
|
||||||
|
t.Fatalf("%s: %s not implemented", desc, op.op)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: got error %v, want nil", desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Compare return values from allprop, propfind or proppatch.
|
||||||
|
for _, pst := range propstats {
|
||||||
|
sort.Sort(byPropname(pst.Props))
|
||||||
|
}
|
||||||
|
for _, pst := range op.wantPropstats {
|
||||||
|
sort.Sort(byPropname(pst.Props))
|
||||||
|
}
|
||||||
|
sort.Sort(byStatus(propstats))
|
||||||
|
sort.Sort(byStatus(op.wantPropstats))
|
||||||
|
if !reflect.DeepEqual(propstats, op.wantPropstats) {
|
||||||
|
t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cmpXMLName(a, b xml.Name) bool {
|
||||||
|
if a.Space != b.Space {
|
||||||
|
return a.Space < b.Space
|
||||||
|
}
|
||||||
|
return a.Local < b.Local
|
||||||
|
}
|
||||||
|
|
||||||
|
type byXMLName []xml.Name
|
||||||
|
|
||||||
|
func (b byXMLName) Len() int { return len(b) }
|
||||||
|
func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) }
|
||||||
|
|
||||||
|
type byPropname []Property
|
||||||
|
|
||||||
|
func (b byPropname) Len() int { return len(b) }
|
||||||
|
func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) }
|
||||||
|
|
||||||
|
type byStatus []Propstat
|
||||||
|
|
||||||
|
func (b byStatus) Len() int { return len(b) }
|
||||||
|
func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status }
|
||||||
|
|
||||||
|
type noDeadPropsFS struct {
|
||||||
|
FileSystem
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
|
||||||
|
f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return noDeadPropsFile{f}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods
// provided by the underlying File implementation.
type noDeadPropsFile struct {
	f File // the wrapped file; every method below delegates to it
}

// Each method forwards directly to the wrapped File. Because
// noDeadPropsFile itself declares no dead-property methods, a type
// assertion on it for the optional dead-props interface fails even when
// the underlying File would satisfy it.
func (f noDeadPropsFile) Close() error                              { return f.f.Close() }
func (f noDeadPropsFile) Read(p []byte) (int, error)                { return f.f.Read(p) }
func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error)  { return f.f.Readdir(count) }
func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) }
func (f noDeadPropsFile) Stat() (os.FileInfo, error)                { return f.f.Stat() }
func (f noDeadPropsFile) Write(p []byte) (int, error)               { return f.f.Write(p) }
|
||||||
|
|
||||||
|
type overrideContentType struct {
|
||||||
|
os.FileInfo
|
||||||
|
contentType string
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *overrideContentType) ContentType(ctx context.Context) (string, error) {
|
||||||
|
return o.contentType, o.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFindContentTypeOverride verifies findContentType's three-way behavior:
// the default sniffed type, an explicit override via the optional
// ContentType method, and fallback to the default when the override
// returns ErrNotImplemented.
func TestFindContentTypeOverride(t *testing.T) {
	fs, err := buildTestFS([]string{"touch /file"})
	if err != nil {
		t.Fatalf("cannot create test filesystem: %v", err)
	}
	ctx := context.Background()
	fi, err := fs.Stat(ctx, "/file")
	if err != nil {
		t.Fatalf("cannot Stat /file: %v", err)
	}

	// Check non overridden case: a plain os.FileInfo has no ContentType
	// method, so findContentType falls back to its default detection.
	originalContentType, err := findContentType(ctx, fs, nil, "/file", fi)
	if err != nil {
		t.Fatalf("findContentType /file failed: %v", err)
	}
	if originalContentType != "text/plain; charset=utf-8" {
		t.Fatalf("ContentType wrong want %q got %q", "text/plain; charset=utf-8", originalContentType)
	}

	// Now try overriding the ContentType: the override value must be
	// returned unchanged when its error is nil.
	o := &overrideContentType{fi, "OverriddenContentType", nil}
	ContentType, err := findContentType(ctx, fs, nil, "/file", o)
	if err != nil {
		t.Fatalf("findContentType /file failed: %v", err)
	}
	if ContentType != o.contentType {
		t.Fatalf("ContentType wrong want %q got %q", o.contentType, ContentType)
	}

	// Now return ErrNotImplemented and check we get the original content
	// type: the override is ignored and detection runs as if absent.
	o = &overrideContentType{fi, "OverriddenContentType", ErrNotImplemented}
	ContentType, err = findContentType(ctx, fs, nil, "/file", o)
	if err != nil {
		t.Fatalf("findContentType /file failed: %v", err)
	}
	if ContentType != originalContentType {
		t.Fatalf("ContentType wrong want %q got %q", originalContentType, ContentType)
	}
}
|
||||||
|
|
||||||
|
type overrideETag struct {
|
||||||
|
os.FileInfo
|
||||||
|
eTag string
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o *overrideETag) ETag(ctx context.Context) (string, error) {
|
||||||
|
return o.eTag, o.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFindETagOverride verifies findETag's three-way behavior: the default
// computed ETag, an explicit override via the optional ETag method, and
// fallback to the default when the override returns ErrNotImplemented.
func TestFindETagOverride(t *testing.T) {
	fs, err := buildTestFS([]string{"touch /file"})
	if err != nil {
		t.Fatalf("cannot create test filesystem: %v", err)
	}
	ctx := context.Background()
	fi, err := fs.Stat(ctx, "/file")
	if err != nil {
		t.Fatalf("cannot Stat /file: %v", err)
	}

	// Check non overridden case: the default ETag is a quoted, possibly
	// negative, lower-case hex string of at least six digits.
	originalETag, err := findETag(ctx, fs, nil, "/file", fi)
	if err != nil {
		t.Fatalf("findETag /file failed: %v", err)
	}
	matchETag := regexp.MustCompile(`^"-?[0-9a-f]{6,}"$`)
	if !matchETag.MatchString(originalETag) {
		t.Fatalf("ETag wrong, wanted something matching %v got %q", matchETag, originalETag)
	}

	// Now try overriding the ETag: the override value must be returned
	// unchanged when its error is nil.
	o := &overrideETag{fi, `"OverriddenETag"`, nil}
	ETag, err := findETag(ctx, fs, nil, "/file", o)
	if err != nil {
		t.Fatalf("findETag /file failed: %v", err)
	}
	if ETag != o.eTag {
		t.Fatalf("ETag wrong want %q got %q", o.eTag, ETag)
	}

	// Now return ErrNotImplemented and check we get the original Etag:
	// the override is ignored and the default computation runs.
	o = &overrideETag{fi, `"OverriddenETag"`, ErrNotImplemented}
	ETag, err = findETag(ctx, fs, nil, "/file", o)
	if err != nil {
		t.Fatalf("findETag /file failed: %v", err)
	}
	if ETag != originalETag {
		t.Fatalf("ETag wrong want %q got %q", originalETag, ETag)
	}
}
|
754
drives/davServer/webdav.go
Normal file
754
drives/davServer/webdav.go
Normal file
@ -0,0 +1,754 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package davServer provides a WebDAV server implementation.
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Handler dispatches WebDAV (RFC 4918) requests against a FileSystem,
// mediated by a LockSystem. It implements http.Handler via ServeHTTP.
type Handler struct {
	// Prefix is the URL path prefix to strip from WebDAV resource paths.
	Prefix string
	// FileSystem is the virtual file system.
	FileSystem FileSystem
	// LockSystem is the lock management system.
	LockSystem LockSystem
	// Logger is an optional error logger. If non-nil, it will be called
	// for all HTTP requests (the error argument may be nil on success).
	Logger func(*http.Request, error)
}
|
||||||
|
|
||||||
|
func (h *Handler) stripPrefix(p string) (string, int, error) {
|
||||||
|
if h.Prefix == "" {
|
||||||
|
return p, http.StatusOK, nil
|
||||||
|
}
|
||||||
|
if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
|
||||||
|
return r, http.StatusOK, nil
|
||||||
|
}
|
||||||
|
return p, http.StatusNotFound, errPrefixMismatch
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServeHTTP dispatches a single WebDAV request to the per-method handler.
// Each handler returns (status, err); a handler that has already written
// the response (headers and body) returns status 0, which suppresses the
// generic status-text response below. An unrecognized method keeps the
// initial 400 errUnsupportedMethod pair.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	status, err := http.StatusBadRequest, errUnsupportedMethod
	if h.FileSystem == nil {
		status, err = http.StatusInternalServerError, errNoFileSystem
	} else if h.LockSystem == nil {
		status, err = http.StatusInternalServerError, errNoLockSystem
	} else {
		switch r.Method {
		case "OPTIONS":
			status, err = h.handleOptions(w, r)
		case "GET", "HEAD", "POST":
			status, err = h.handleGetHeadPost(w, r)
		case "DELETE":
			status, err = h.handleDelete(w, r)
		case "PUT":
			status, err = h.handlePut(w, r)
		case "MKCOL":
			status, err = h.handleMkcol(w, r)
		case "COPY", "MOVE":
			status, err = h.handleCopyMove(w, r)
		case "LOCK":
			status, err = h.handleLock(w, r)
		case "UNLOCK":
			status, err = h.handleUnlock(w, r)
		case "PROPFIND":
			status, err = h.handlePropfind(w, r)
		case "PROPPATCH":
			status, err = h.handleProppatch(w, r)
		}
	}

	// status != 0 means the handler did not write its own response:
	// emit the status code plus its text (except for 204 No Content,
	// which must have no body).
	if status != 0 {
		w.WriteHeader(status)
		if status != http.StatusNoContent {
			w.Write([]byte(StatusText(status)))
		}
	}
	// The logger is invoked for every request, success or failure.
	if h.Logger != nil {
		h.Logger(r, err)
	}
}
|
||||||
|
|
||||||
|
func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
|
||||||
|
token, err = h.LockSystem.Create(now, LockDetails{
|
||||||
|
Root: root,
|
||||||
|
Duration: infiniteTimeout,
|
||||||
|
ZeroDepth: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if err == ErrLocked {
|
||||||
|
return "", StatusLocked, err
|
||||||
|
}
|
||||||
|
return "", http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
return token, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// confirmLocks checks that the request may touch src and/or dst (either may
// be empty). With no "If" header it takes temporary locks on both paths so
// that another client's existing locks are detected; the returned release
// func drops them. With an "If" header it tries each submitted state list
// until the LockSystem confirms one, returning that confirmation's release
// func. On failure release is nil and status carries the HTTP code.
func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
	hdr := r.Header.Get("If")
	if hdr == "" {
		// An empty If header means that the client hasn't previously created locks.
		// Even if this client doesn't care about locks, we still need to check that
		// the resources aren't locked by another client, so we create temporary
		// locks that would conflict with another client's locks. These temporary
		// locks are unlocked at the end of the HTTP request.
		now, srcToken, dstToken := time.Now(), "", ""
		if src != "" {
			srcToken, status, err = h.lock(now, src)
			if err != nil {
				return nil, status, err
			}
		}
		if dst != "" {
			dstToken, status, err = h.lock(now, dst)
			if err != nil {
				// Roll back the already-taken src lock before failing.
				if srcToken != "" {
					h.LockSystem.Unlock(now, srcToken)
				}
				return nil, status, err
			}
		}

		return func() {
			if dstToken != "" {
				h.LockSystem.Unlock(now, dstToken)
			}
			if srcToken != "" {
				h.LockSystem.Unlock(now, srcToken)
			}
		}, 0, nil
	}

	ih, ok := parseIfHeader(hdr)
	if !ok {
		return nil, http.StatusBadRequest, errInvalidIfHeader
	}
	// ih is a disjunction (OR) of ifLists, so any ifList will do.
	for _, l := range ih.lists {
		lsrc := l.resourceTag
		if lsrc == "" {
			lsrc = src
		} else {
			// NOTE(review): err here is shadowed by the url.Parse result for
			// the rest of this else branch; the outer err is untouched.
			u, err := url.Parse(lsrc)
			if err != nil {
				// Unparseable resource tag: try the next state list.
				continue
			}
			if u.Host != r.Host {
				// Tag refers to a different host: not our resource.
				continue
			}
			lsrc, status, err = h.stripPrefix(u.Path)
			if err != nil {
				return nil, status, err
			}
		}
		release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
		if err == ErrConfirmationFailed {
			continue
		}
		if err != nil {
			return nil, http.StatusInternalServerError, err
		}
		return release, 0, nil
	}
	// Section 10.4.1 says that "If this header is evaluated and all state lists
	// fail, then the request must fail with a 412 (Precondition Failed) status."
	// We follow the spec even though the cond_put_corrupt_token test case from
	// the litmus test warns on seeing a 412 instead of a 423 (Locked).
	return nil, http.StatusPreconditionFailed, ErrLocked
}
|
||||||
|
|
||||||
|
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
ctx := r.Context()
|
||||||
|
allow := "OPTIONS, LOCK, PUT, MKCOL"
|
||||||
|
if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
|
||||||
|
if fi.IsDir() {
|
||||||
|
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
|
||||||
|
} else {
|
||||||
|
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.Header().Set("Allow", allow)
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
|
||||||
|
w.Header().Set("DAV", "1, 2")
|
||||||
|
// http://msdn.microsoft.com/en-au/library/cc250217.aspx
|
||||||
|
w.Header().Set("MS-Author-Via", "DAV")
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
// TODO: check locks for read-only access??
|
||||||
|
ctx := r.Context()
|
||||||
|
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusNotFound, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
fi, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusNotFound, err
|
||||||
|
}
|
||||||
|
if fi.IsDir() {
|
||||||
|
return http.StatusMethodNotAllowed, nil
|
||||||
|
}
|
||||||
|
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
w.Header().Set("ETag", etag)
|
||||||
|
// Let ServeContent determine the Content-Type header.
|
||||||
|
http.ServeContent(w, r, reqPath, fi.ModTime(), f)
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
defer release()
|
||||||
|
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
|
// TODO: return MultiStatus where appropriate.
|
||||||
|
|
||||||
|
// "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
|
||||||
|
// returns nil (no error)." WebDAV semantics are that it should return a
|
||||||
|
// "404 Not Found". We therefore have to Stat before we RemoveAll.
|
||||||
|
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return http.StatusNotFound, err
|
||||||
|
}
|
||||||
|
return http.StatusMethodNotAllowed, err
|
||||||
|
}
|
||||||
|
if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
|
||||||
|
return http.StatusMethodNotAllowed, err
|
||||||
|
}
|
||||||
|
return http.StatusNoContent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handlePut creates or truncates the target file and copies the request
// body into it. A zrok extension: the "Zrok-Modtime" header (Unix seconds)
// is applied to the file's modification time when present. On success the
// ETag header is set and 201 Created is returned.
func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
	reqPath, status, err := h.stripPrefix(r.URL.Path)
	if err != nil {
		return status, err
	}
	release, status, err := h.confirmLocks(r, reqPath, "")
	if err != nil {
		return status, err
	}
	defer release()
	// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
	// comments in http.checkEtag.
	ctx := r.Context()

	f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return http.StatusNotFound, err
	}
	// Copy and Stat errors are captured separately and reported only after
	// the modtime handling and Close below have run.
	_, copyErr := io.Copy(f, r.Body)
	fi, statErr := f.Stat()

	// zrok extension: honor a client-supplied modification time. Failures
	// here are logged but do not fail the PUT.
	modTimes := r.Header["Zrok-Modtime"]
	if len(modTimes) > 0 {
		// NOTE(review): err is shadowed by the ParseInt result inside this
		// if/else; the else branch logs the parse error, not the outer err.
		if modTimeV, err := strconv.ParseInt(modTimes[0], 10, 64); err == nil {
			if v, ok := f.(*webdavFile); ok {
				if err := v.updateModtime(reqPath, time.Unix(modTimeV, 0)); err != nil {
					logrus.Warn(err)
				}
			} else {
				// The file is not a *webdavFile, so the modtime cannot be applied.
				logrus.Error("!ok")
			}
		} else {
			logrus.Error(err)
		}
	}

	closeErr := f.Close()
	// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
	if copyErr != nil {
		return http.StatusMethodNotAllowed, copyErr
	}
	if statErr != nil {
		return http.StatusMethodNotAllowed, statErr
	}
	if closeErr != nil {
		return http.StatusMethodNotAllowed, closeErr
	}
	etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
	if err != nil {
		return http.StatusInternalServerError, err
	}
	w.Header().Set("ETag", etag)
	return http.StatusCreated, nil
}
|
||||||
|
|
||||||
|
func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
defer release()
|
||||||
|
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
|
if r.ContentLength > 0 {
|
||||||
|
return http.StatusUnsupportedMediaType, nil
|
||||||
|
}
|
||||||
|
if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return http.StatusConflict, err
|
||||||
|
}
|
||||||
|
return http.StatusMethodNotAllowed, err
|
||||||
|
}
|
||||||
|
return http.StatusCreated, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleCopyMove implements COPY and MOVE. The Destination header names the
// target; it must parse, refer to this host, and differ from the source.
// COPY locks only the destination and honors Depth 0/infinity; MOVE locks
// both paths and permits only Depth: infinity. The copyFiles/moveFiles
// helpers return the final (status, err) pair.
func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
	hdr := r.Header.Get("Destination")
	if hdr == "" {
		return http.StatusBadRequest, errInvalidDestination
	}
	u, err := url.Parse(hdr)
	if err != nil {
		return http.StatusBadRequest, errInvalidDestination
	}
	// A cross-host destination is refused; an empty host means same-host.
	if u.Host != "" && u.Host != r.Host {
		return http.StatusBadGateway, errInvalidDestination
	}

	src, status, err := h.stripPrefix(r.URL.Path)
	if err != nil {
		return status, err
	}

	dst, status, err := h.stripPrefix(u.Path)
	if err != nil {
		return status, err
	}

	if dst == "" {
		return http.StatusBadGateway, errInvalidDestination
	}
	if dst == src {
		return http.StatusForbidden, errDestinationEqualsSource
	}

	ctx := r.Context()

	if r.Method == "COPY" {
		// Section 7.5.1 says that a COPY only needs to lock the destination,
		// not both destination and source. Strictly speaking, this is racy,
		// even though a COPY doesn't modify the source, if a concurrent
		// operation modifies the source. However, the litmus test explicitly
		// checks that COPYing a locked-by-another source is OK.
		release, status, err := h.confirmLocks(r, "", dst)
		if err != nil {
			return status, err
		}
		defer release()

		// Section 9.8.3 says that "The COPY method on a collection without a Depth
		// header must act as if a Depth header with value "infinity" was included".
		depth := infiniteDepth
		if hdr := r.Header.Get("Depth"); hdr != "" {
			depth = parseDepth(hdr)
			if depth != 0 && depth != infiniteDepth {
				// Section 9.8.3 says that "A client may submit a Depth header on a
				// COPY on a collection with a value of "0" or "infinity"."
				return http.StatusBadRequest, errInvalidDepth
			}
		}
		// Overwrite defaults to true unless the header is explicitly "F".
		return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
	}

	release, status, err := h.confirmLocks(r, src, dst)
	if err != nil {
		return status, err
	}
	defer release()

	// Section 9.9.2 says that "The MOVE method on a collection must act as if
	// a "Depth: infinity" header was used on it. A client must not submit a
	// Depth header on a MOVE on a collection with any value but "infinity"."
	if hdr := r.Header.Get("Depth"); hdr != "" {
		if parseDepth(hdr) != infiniteDepth {
			return http.StatusBadRequest, errInvalidDepth
		}
	}
	// Note the asymmetry: MOVE overwrites only when the header is exactly "T".
	return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
}
|
||||||
|
|
||||||
|
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
|
||||||
|
duration, err := parseTimeout(r.Header.Get("Timeout"))
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusBadRequest, err
|
||||||
|
}
|
||||||
|
li, status, err := readLockInfo(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := r.Context()
|
||||||
|
token, ld, now, created := "", LockDetails{}, time.Now(), false
|
||||||
|
if li == (lockInfo{}) {
|
||||||
|
// An empty lockInfo means to refresh the lock.
|
||||||
|
ih, ok := parseIfHeader(r.Header.Get("If"))
|
||||||
|
if !ok {
|
||||||
|
return http.StatusBadRequest, errInvalidIfHeader
|
||||||
|
}
|
||||||
|
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
|
||||||
|
token = ih.lists[0].conditions[0].Token
|
||||||
|
}
|
||||||
|
if token == "" {
|
||||||
|
return http.StatusBadRequest, errInvalidLockToken
|
||||||
|
}
|
||||||
|
ld, err = h.LockSystem.Refresh(now, token, duration)
|
||||||
|
if err != nil {
|
||||||
|
if err == ErrNoSuchLock {
|
||||||
|
return http.StatusPreconditionFailed, err
|
||||||
|
}
|
||||||
|
return http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
|
||||||
|
// then the request MUST act as if a "Depth:infinity" had been submitted."
|
||||||
|
depth := infiniteDepth
|
||||||
|
if hdr := r.Header.Get("Depth"); hdr != "" {
|
||||||
|
depth = parseDepth(hdr)
|
||||||
|
if depth != 0 && depth != infiniteDepth {
|
||||||
|
// Section 9.10.3 says that "Values other than 0 or infinity must not be
|
||||||
|
// used with the Depth header on a LOCK method".
|
||||||
|
return http.StatusBadRequest, errInvalidDepth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
ld = LockDetails{
|
||||||
|
Root: reqPath,
|
||||||
|
Duration: duration,
|
||||||
|
OwnerXML: li.Owner.InnerXML,
|
||||||
|
ZeroDepth: depth == 0,
|
||||||
|
}
|
||||||
|
token, err = h.LockSystem.Create(now, ld)
|
||||||
|
if err != nil {
|
||||||
|
if err == ErrLocked {
|
||||||
|
return StatusLocked, err
|
||||||
|
}
|
||||||
|
return http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if retErr != nil {
|
||||||
|
h.LockSystem.Unlock(now, token)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Create the resource if it didn't previously exist.
|
||||||
|
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
|
||||||
|
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||||
|
if err != nil {
|
||||||
|
// TODO: detect missing intermediate dirs and return http.StatusConflict?
|
||||||
|
return http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
created = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
|
||||||
|
// Lock-Token value is a Coded-URL. We add angle brackets.
|
||||||
|
w.Header().Set("Lock-Token", "<"+token+">")
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
|
||||||
|
if created {
|
||||||
|
// This is "w.WriteHeader(http.StatusCreated)" and not "return
|
||||||
|
// http.StatusCreated, nil" because we write our own (XML) response to w
|
||||||
|
// and Handler.ServeHTTP would otherwise write "Created".
|
||||||
|
w.WriteHeader(http.StatusCreated)
|
||||||
|
}
|
||||||
|
writeLockInfo(w, token, ld)
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
|
||||||
|
// Lock-Token value is a Coded-URL. We strip its angle brackets.
|
||||||
|
t := r.Header.Get("Lock-Token")
|
||||||
|
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
|
||||||
|
return http.StatusBadRequest, errInvalidLockToken
|
||||||
|
}
|
||||||
|
t = t[1 : len(t)-1]
|
||||||
|
|
||||||
|
switch err = h.LockSystem.Unlock(time.Now(), t); err {
|
||||||
|
case nil:
|
||||||
|
return http.StatusNoContent, err
|
||||||
|
case ErrForbidden:
|
||||||
|
return http.StatusForbidden, err
|
||||||
|
case ErrLocked:
|
||||||
|
return StatusLocked, err
|
||||||
|
case ErrNoSuchLock:
|
||||||
|
return http.StatusConflict, err
|
||||||
|
default:
|
||||||
|
return http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
ctx := r.Context()
|
||||||
|
fi, err := h.FileSystem.Stat(ctx, reqPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return http.StatusNotFound, err
|
||||||
|
}
|
||||||
|
return http.StatusMethodNotAllowed, err
|
||||||
|
}
|
||||||
|
depth := infiniteDepth
|
||||||
|
if hdr := r.Header.Get("Depth"); hdr != "" {
|
||||||
|
depth = parseDepth(hdr)
|
||||||
|
if depth == invalidDepth {
|
||||||
|
return http.StatusBadRequest, errInvalidDepth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pf, status, err := readPropfind(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mw := multistatusWriter{w: w}
|
||||||
|
|
||||||
|
walkFn := func(reqPath string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return handlePropfindError(err, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
var pstats []Propstat
|
||||||
|
if pf.Propname != nil {
|
||||||
|
pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
|
||||||
|
if err != nil {
|
||||||
|
return handlePropfindError(err, info)
|
||||||
|
}
|
||||||
|
pstat := Propstat{Status: http.StatusOK}
|
||||||
|
for _, xmlname := range pnames {
|
||||||
|
pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
|
||||||
|
}
|
||||||
|
pstats = append(pstats, pstat)
|
||||||
|
} else if pf.Allprop != nil {
|
||||||
|
pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
|
||||||
|
} else {
|
||||||
|
pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return handlePropfindError(err, info)
|
||||||
|
}
|
||||||
|
href := path.Join(h.Prefix, reqPath)
|
||||||
|
if href != "/" && info.IsDir() {
|
||||||
|
href += "/"
|
||||||
|
}
|
||||||
|
return mw.write(makePropstatResponse(href, pstats))
|
||||||
|
}
|
||||||
|
|
||||||
|
walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
|
||||||
|
closeErr := mw.close()
|
||||||
|
if walkErr != nil {
|
||||||
|
return http.StatusInternalServerError, walkErr
|
||||||
|
}
|
||||||
|
if closeErr != nil {
|
||||||
|
return http.StatusInternalServerError, closeErr
|
||||||
|
}
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
|
||||||
|
reqPath, status, err := h.stripPrefix(r.URL.Path)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
release, status, err := h.confirmLocks(r, reqPath, "")
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
defer release()
|
||||||
|
|
||||||
|
ctx := r.Context()
|
||||||
|
|
||||||
|
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return http.StatusNotFound, err
|
||||||
|
}
|
||||||
|
return http.StatusMethodNotAllowed, err
|
||||||
|
}
|
||||||
|
patches, status, err := readProppatch(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
return status, err
|
||||||
|
}
|
||||||
|
pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusInternalServerError, err
|
||||||
|
}
|
||||||
|
mw := multistatusWriter{w: w}
|
||||||
|
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
|
||||||
|
closeErr := mw.close()
|
||||||
|
if writeErr != nil {
|
||||||
|
return http.StatusInternalServerError, writeErr
|
||||||
|
}
|
||||||
|
if closeErr != nil {
|
||||||
|
return http.StatusInternalServerError, closeErr
|
||||||
|
}
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func makePropstatResponse(href string, pstats []Propstat) *response {
|
||||||
|
resp := response{
|
||||||
|
Href: []string{(&url.URL{Path: href}).EscapedPath()},
|
||||||
|
Propstat: make([]propstat, 0, len(pstats)),
|
||||||
|
}
|
||||||
|
for _, p := range pstats {
|
||||||
|
var xmlErr *xmlError
|
||||||
|
if p.XMLError != "" {
|
||||||
|
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
|
||||||
|
}
|
||||||
|
resp.Propstat = append(resp.Propstat, propstat{
|
||||||
|
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
|
||||||
|
Prop: p.Props,
|
||||||
|
ResponseDescription: p.ResponseDescription,
|
||||||
|
Error: xmlErr,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return &resp
|
||||||
|
}
|
||||||
|
|
||||||
|
func handlePropfindError(err error, info os.FileInfo) error {
|
||||||
|
var skipResp error = nil
|
||||||
|
if info != nil && info.IsDir() {
|
||||||
|
skipResp = filepath.SkipDir
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, os.ErrPermission) {
|
||||||
|
// If the server cannot recurse into a directory because it is not allowed,
|
||||||
|
// then there is nothing more to say about it. Just skip sending anything.
|
||||||
|
return skipResp
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := err.(*os.PathError); ok {
|
||||||
|
// If the file is just bad, it couldn't be a proper WebDAV resource. Skip it.
|
||||||
|
return skipResp
|
||||||
|
}
|
||||||
|
|
||||||
|
// We need to be careful with other errors: there is no way to abort the xml stream
|
||||||
|
// part way through while returning a valid PROPFIND response. Returning only half
|
||||||
|
// the data would be misleading, but so would be returning results tainted by errors.
|
||||||
|
// The current behaviour by returning an error here leads to the stream being aborted,
|
||||||
|
// and the parent http server complaining about writing a spurious header. We should
|
||||||
|
// consider further enhancing this error handling to more gracefully fail, or perhaps
|
||||||
|
// buffer the entire response until we've walked the tree.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sentinel depth values; real depths are the non-negative 0 and 1.
const (
	infiniteDepth = -1
	invalidDepth  = -2
)

// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
// infiniteDepth. Parsing any other string returns invalidDepth.
//
// Different WebDAV methods have further constraints on valid depths:
//   - PROPFIND has no further restrictions, as per section 9.1.
//   - COPY accepts only "0" or "infinity", as per section 9.8.3.
//   - MOVE accepts only "infinity", as per section 9.9.2.
//   - LOCK accepts only "0" or "infinity", as per section 9.10.3.
//
// These constraints are enforced by the handleXxx methods.
func parseDepth(s string) int {
	switch s {
	case "0":
		return 0
	case "1":
		return 1
	case "infinity":
		return infiniteDepth
	default:
		return invalidDepth
	}
}
|
||||||
|
|
||||||
|
// WebDAV status codes not defined by net/http.
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
const (
	StatusMulti               = 207
	StatusUnprocessableEntity = 422
	StatusLocked              = 423
	StatusFailedDependency    = 424
	StatusInsufficientStorage = 507
)

// StatusText returns the canonical reason phrase for code, covering the
// WebDAV-specific codes above and deferring to http.StatusText otherwise.
func StatusText(code int) string {
	switch code {
	case StatusMulti:
		return "Multi-Status"
	case StatusUnprocessableEntity:
		return "Unprocessable Entity"
	case StatusLocked:
		return "Locked"
	case StatusFailedDependency:
		return "Failed Dependency"
	case StatusInsufficientStorage:
		return "Insufficient Storage"
	}
	return http.StatusText(code)
}
|
||||||
|
|
||||||
|
// Sentinel errors returned by the handleXxx methods and their helpers.
var (
	errDestinationEqualsSource = errors.New("webdav: destination equals source")
	errDirectoryNotEmpty       = errors.New("webdav: directory not empty")
	errInvalidDepth            = errors.New("webdav: invalid depth")
	errInvalidDestination      = errors.New("webdav: invalid destination")
	errInvalidIfHeader         = errors.New("webdav: invalid If header")
	errInvalidLockInfo         = errors.New("webdav: invalid lock info")
	errInvalidLockToken        = errors.New("webdav: invalid lock token")
	errInvalidPropfind         = errors.New("webdav: invalid propfind")
	errInvalidProppatch        = errors.New("webdav: invalid proppatch")
	errInvalidResponse         = errors.New("webdav: invalid response")
	errInvalidTimeout          = errors.New("webdav: invalid timeout")
	errNoFileSystem            = errors.New("webdav: no file system")
	errNoLockSystem            = errors.New("webdav: no lock system")
	errNotADirectory           = errors.New("webdav: not a directory")
	errPrefixMismatch          = errors.New("webdav: prefix mismatch")
	errRecursionTooDeep        = errors.New("webdav: recursion too deep")
	errUnsupportedLockInfo     = errors.New("webdav: unsupported lock info")
	errUnsupportedMethod       = errors.New("webdav: unsupported method")
)
|
349
drives/davServer/webdav_test.go
Normal file
349
drives/davServer/webdav_test.go
Normal file
@ -0,0 +1,349 @@
|
|||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: add tests to check XML responses with the expected prefix path
|
||||||
|
func TestPrefix(t *testing.T) {
|
||||||
|
const dst, blah = "Destination", "blah blah blah"
|
||||||
|
|
||||||
|
// createLockBody comes from the example in Section 9.10.7.
|
||||||
|
const createLockBody = `<?xml version="1.0" encoding="utf-8" ?>
|
||||||
|
<D:lockinfo xmlns:D='DAV:'>
|
||||||
|
<D:lockscope><D:exclusive/></D:lockscope>
|
||||||
|
<D:locktype><D:write/></D:locktype>
|
||||||
|
<D:owner>
|
||||||
|
<D:href>http://example.org/~ejw/contact.html</D:href>
|
||||||
|
</D:owner>
|
||||||
|
</D:lockinfo>
|
||||||
|
`
|
||||||
|
|
||||||
|
do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) {
|
||||||
|
var bodyReader io.Reader
|
||||||
|
if body != "" {
|
||||||
|
bodyReader = strings.NewReader(body)
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(method, urlStr, bodyReader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for len(headers) >= 2 {
|
||||||
|
req.Header.Add(headers[0], headers[1])
|
||||||
|
headers = headers[2:]
|
||||||
|
}
|
||||||
|
res, err := http.DefaultTransport.RoundTrip(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode != wantStatusCode {
|
||||||
|
return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode)
|
||||||
|
}
|
||||||
|
return res.Header, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
prefixes := []string{
|
||||||
|
"/",
|
||||||
|
"/a/",
|
||||||
|
"/a/b/",
|
||||||
|
"/a/b/c/",
|
||||||
|
}
|
||||||
|
ctx := context.Background()
|
||||||
|
for _, prefix := range prefixes {
|
||||||
|
fs := NewMemFS()
|
||||||
|
h := &Handler{
|
||||||
|
FileSystem: fs,
|
||||||
|
LockSystem: NewMemLS(),
|
||||||
|
}
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
if prefix != "/" {
|
||||||
|
h.Prefix = prefix
|
||||||
|
}
|
||||||
|
mux.Handle(prefix, h)
|
||||||
|
srv := httptest.NewServer(mux)
|
||||||
|
defer srv.Close()
|
||||||
|
|
||||||
|
// The script is:
|
||||||
|
// MKCOL /a
|
||||||
|
// MKCOL /a/b
|
||||||
|
// PUT /a/b/c
|
||||||
|
// COPY /a/b/c /a/b/d
|
||||||
|
// MKCOL /a/b/e
|
||||||
|
// MOVE /a/b/d /a/b/e/f
|
||||||
|
// LOCK /a/b/e/g
|
||||||
|
// PUT /a/b/e/g
|
||||||
|
// which should yield the (possibly stripped) filenames /a/b/c,
|
||||||
|
// /a/b/e/f and /a/b/e/g, plus their parent directories.
|
||||||
|
|
||||||
|
wantA := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusMovedPermanently,
|
||||||
|
"/a/b/": http.StatusNotFound,
|
||||||
|
"/a/b/c/": http.StatusNotFound,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wantB := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusMovedPermanently,
|
||||||
|
"/a/b/c/": http.StatusNotFound,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wantC := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusCreated,
|
||||||
|
"/a/b/c/": http.StatusMovedPermanently,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wantD := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusCreated,
|
||||||
|
"/a/b/c/": http.StatusMovedPermanently,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wantE := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusCreated,
|
||||||
|
"/a/b/c/": http.StatusNotFound,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wantF := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusCreated,
|
||||||
|
"/a/b/c/": http.StatusNotFound,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var lockToken string
|
||||||
|
wantG := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusCreated,
|
||||||
|
"/a/b/c/": http.StatusNotFound,
|
||||||
|
}[prefix]
|
||||||
|
if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
lockToken = h.Get("Lock-Token")
|
||||||
|
}
|
||||||
|
|
||||||
|
ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken)
|
||||||
|
wantH := map[string]int{
|
||||||
|
"/": http.StatusCreated,
|
||||||
|
"/a/": http.StatusCreated,
|
||||||
|
"/a/b/": http.StatusCreated,
|
||||||
|
"/a/b/c/": http.StatusNotFound,
|
||||||
|
}[prefix]
|
||||||
|
if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil {
|
||||||
|
t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
got, err := find(ctx, nil, fs, "/")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("prefix=%-9q find: %v", prefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sort.Strings(got)
|
||||||
|
want := map[string][]string{
|
||||||
|
"/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"},
|
||||||
|
"/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"},
|
||||||
|
"/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"},
|
||||||
|
"/a/b/c/": {"/"},
|
||||||
|
}[prefix]
|
||||||
|
if !reflect.DeepEqual(got, want) {
|
||||||
|
t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEscapeXML(t *testing.T) {
|
||||||
|
// These test cases aren't exhaustive, and there is more than one way to
|
||||||
|
// escape e.g. a quot (as """ or """) or an apos. We presume that
|
||||||
|
// the encoding/xml package tests xml.EscapeText more thoroughly. This test
|
||||||
|
// here is just a sanity check for this package's escapeXML function, and
|
||||||
|
// its attempt to provide a fast path (and avoid a bytes.Buffer allocation)
|
||||||
|
// when escaping filenames is obviously a no-op.
|
||||||
|
testCases := map[string]string{
|
||||||
|
"": "",
|
||||||
|
" ": " ",
|
||||||
|
"&": "&",
|
||||||
|
"*": "*",
|
||||||
|
"+": "+",
|
||||||
|
",": ",",
|
||||||
|
"-": "-",
|
||||||
|
".": ".",
|
||||||
|
"/": "/",
|
||||||
|
"0": "0",
|
||||||
|
"9": "9",
|
||||||
|
":": ":",
|
||||||
|
"<": "<",
|
||||||
|
">": ">",
|
||||||
|
"A": "A",
|
||||||
|
"_": "_",
|
||||||
|
"a": "a",
|
||||||
|
"~": "~",
|
||||||
|
"\u0201": "\u0201",
|
||||||
|
"&": "&amp;",
|
||||||
|
"foo&<b/ar>baz": "foo&<b/ar>baz",
|
||||||
|
}
|
||||||
|
|
||||||
|
for in, want := range testCases {
|
||||||
|
if got := escapeXML(in); got != want {
|
||||||
|
t.Errorf("in=%q: got %q, want %q", in, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilenameEscape(t *testing.T) {
|
||||||
|
hrefRe := regexp.MustCompile(`<D:href>([^<]*)</D:href>`)
|
||||||
|
displayNameRe := regexp.MustCompile(`<D:displayname>([^<]*)</D:displayname>`)
|
||||||
|
do := func(method, urlStr string) (string, string, error) {
|
||||||
|
req, err := http.NewRequest(method, urlStr, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
res, err := http.DefaultClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
|
||||||
|
b, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
hrefMatch := hrefRe.FindStringSubmatch(string(b))
|
||||||
|
if len(hrefMatch) != 2 {
|
||||||
|
return "", "", errors.New("D:href not found")
|
||||||
|
}
|
||||||
|
displayNameMatch := displayNameRe.FindStringSubmatch(string(b))
|
||||||
|
if len(displayNameMatch) != 2 {
|
||||||
|
return "", "", errors.New("D:displayname not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return hrefMatch[1], displayNameMatch[1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name, wantHref, wantDisplayName string
|
||||||
|
}{{
|
||||||
|
name: `/foo%bar`,
|
||||||
|
wantHref: `/foo%25bar`,
|
||||||
|
wantDisplayName: `foo%bar`,
|
||||||
|
}, {
|
||||||
|
name: `/こんにちわ世界`,
|
||||||
|
wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`,
|
||||||
|
wantDisplayName: `こんにちわ世界`,
|
||||||
|
}, {
|
||||||
|
name: `/Program Files/`,
|
||||||
|
wantHref: `/Program%20Files/`,
|
||||||
|
wantDisplayName: `Program Files`,
|
||||||
|
}, {
|
||||||
|
name: `/go+lang`,
|
||||||
|
wantHref: `/go+lang`,
|
||||||
|
wantDisplayName: `go+lang`,
|
||||||
|
}, {
|
||||||
|
name: `/go&lang`,
|
||||||
|
wantHref: `/go&lang`,
|
||||||
|
wantDisplayName: `go&lang`,
|
||||||
|
}, {
|
||||||
|
name: `/go<lang`,
|
||||||
|
wantHref: `/go%3Clang`,
|
||||||
|
wantDisplayName: `go<lang`,
|
||||||
|
}, {
|
||||||
|
name: `/`,
|
||||||
|
wantHref: `/`,
|
||||||
|
wantDisplayName: ``,
|
||||||
|
}}
|
||||||
|
ctx := context.Background()
|
||||||
|
fs := NewMemFS()
|
||||||
|
for _, tc := range testCases {
|
||||||
|
if tc.name != "/" {
|
||||||
|
if strings.HasSuffix(tc.name, "/") {
|
||||||
|
if err := fs.Mkdir(ctx, tc.name, 0755); err != nil {
|
||||||
|
t.Fatalf("name=%q: Mkdir: %v", tc.name, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
f, err := fs.OpenFile(ctx, tc.name, os.O_CREATE, 0644)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("name=%q: OpenFile: %v", tc.name, err)
|
||||||
|
}
|
||||||
|
f.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := httptest.NewServer(&Handler{
|
||||||
|
FileSystem: fs,
|
||||||
|
LockSystem: NewMemLS(),
|
||||||
|
})
|
||||||
|
defer srv.Close()
|
||||||
|
|
||||||
|
u, err := url.Parse(srv.URL)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
u.Path = tc.name
|
||||||
|
gotHref, gotDisplayName, err := do("PROPFIND", u.String())
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("name=%q: PROPFIND: %v", tc.name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if gotHref != tc.wantHref {
|
||||||
|
t.Errorf("name=%q: got href %q, want %q", tc.name, gotHref, tc.wantHref)
|
||||||
|
}
|
||||||
|
if gotDisplayName != tc.wantDisplayName {
|
||||||
|
t.Errorf("name=%q: got dispayname %q, want %q", tc.name, gotDisplayName, tc.wantDisplayName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
519
drives/davServer/xml.go
Normal file
519
drives/davServer/xml.go
Normal file
@ -0,0 +1,519 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
// The XML encoding is covered by Section 14.
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
|
||||||
|
// in July 2015, this package uses an internal fork of the standard
|
||||||
|
// library's encoding/xml package, due to changes in the way namespaces
|
||||||
|
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
|
||||||
|
// rolled back in response to https://github.com/golang/go/issues/11841
|
||||||
|
//
|
||||||
|
// However, this package's exported API, specifically the Property and
|
||||||
|
// DeadPropsHolder types, need to refer to the standard library's version
|
||||||
|
// of the xml.Name type, as code that imports this package cannot refer to
|
||||||
|
// the internal version.
|
||||||
|
//
|
||||||
|
// This file therefore imports both the internal and external versions, as
|
||||||
|
// ixml and xml, and converts between them.
|
||||||
|
//
|
||||||
|
// In the long term, this package should use the standard library's version
|
||||||
|
// only, and the internal fork deleted, once
|
||||||
|
// https://github.com/golang/go/issues/13400 is resolved.
|
||||||
|
ixml "github.com/openziti/zrok/drives/davServer/internal/xml"
|
||||||
|
)
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
|
||||||
|
type lockInfo struct {
|
||||||
|
XMLName ixml.Name `xml:"lockinfo"`
|
||||||
|
Exclusive *struct{} `xml:"lockscope>exclusive"`
|
||||||
|
Shared *struct{} `xml:"lockscope>shared"`
|
||||||
|
Write *struct{} `xml:"locktype>write"`
|
||||||
|
Owner owner `xml:"owner"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
|
||||||
|
type owner struct {
|
||||||
|
InnerXML string `xml:",innerxml"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
|
||||||
|
c := &countingReader{r: r}
|
||||||
|
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
if c.n == 0 {
|
||||||
|
// An empty body means to refresh the lock.
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
|
||||||
|
return lockInfo{}, 0, nil
|
||||||
|
}
|
||||||
|
err = errInvalidLockInfo
|
||||||
|
}
|
||||||
|
return lockInfo{}, http.StatusBadRequest, err
|
||||||
|
}
|
||||||
|
// We only support exclusive (non-shared) write locks. In practice, these are
|
||||||
|
// the only types of locks that seem to matter.
|
||||||
|
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
|
||||||
|
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
|
||||||
|
}
|
||||||
|
return li, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type countingReader struct {
|
||||||
|
n int
|
||||||
|
r io.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *countingReader) Read(p []byte) (int, error) {
|
||||||
|
n, err := c.r.Read(p)
|
||||||
|
c.n += n
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
|
||||||
|
depth := "infinity"
|
||||||
|
if ld.ZeroDepth {
|
||||||
|
depth = "0"
|
||||||
|
}
|
||||||
|
timeout := ld.Duration / time.Second
|
||||||
|
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
|
||||||
|
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
|
||||||
|
" <D:locktype><D:write/></D:locktype>\n"+
|
||||||
|
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
|
||||||
|
" <D:depth>%s</D:depth>\n"+
|
||||||
|
" <D:owner>%s</D:owner>\n"+
|
||||||
|
" <D:timeout>Second-%d</D:timeout>\n"+
|
||||||
|
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
|
||||||
|
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
|
||||||
|
"</D:activelock></D:lockdiscovery></D:prop>",
|
||||||
|
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func escape(s string) string {
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
switch s[i] {
|
||||||
|
case '"', '&', '\'', '<', '>':
|
||||||
|
b := bytes.NewBuffer(nil)
|
||||||
|
ixml.EscapeText(b, []byte(s))
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// next returns the next token, if any, in the XML stream of d.
|
||||||
|
// RFC 4918 requires to ignore comments, processing instructions
|
||||||
|
// and directives.
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#property_values
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
|
||||||
|
func next(d *ixml.Decoder) (ixml.Token, error) {
|
||||||
|
for {
|
||||||
|
t, err := d.Token()
|
||||||
|
if err != nil {
|
||||||
|
return t, err
|
||||||
|
}
|
||||||
|
switch t.(type) {
|
||||||
|
case ixml.Comment, ixml.Directive, ixml.ProcInst:
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
type propfindProps []xml.Name

// UnmarshalXML appends the property names enclosed within start to pn.
//
// It returns an error if start does not contain any properties or if
// properties contain values. Character data between properties is ignored.
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
	for {
		t, err := next(d)
		if err != nil {
			return err
		}
		switch t.(type) {
		case ixml.EndElement:
			// Closing tag of the enclosing element: by now at least one
			// property name must have been collected.
			if len(*pn) == 0 {
				return fmt.Errorf("%s must not be empty", start.Name.Local)
			}
			return nil
		case ixml.StartElement:
			name := t.(ixml.StartElement).Name
			// A property inside a propfind prop list must be empty: the
			// very next content token has to be its matching end element.
			t, err = next(d)
			if err != nil {
				return err
			}
			if _, ok := t.(ixml.EndElement); !ok {
				return fmt.Errorf("unexpected token %T", t)
			}
			*pn = append(*pn, xml.Name(name))
		}
		// Other token kinds (e.g. character data) fall through and are ignored.
	}
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
//
// Exactly one of Allprop, Propname or Prop is set after a successful parse;
// Include is only valid in combination with Allprop (see readPropfind).
type propfind struct {
	XMLName  ixml.Name     `xml:"DAV: propfind"`
	Allprop  *struct{}     `xml:"DAV: allprop"`  // request all properties
	Propname *struct{}     `xml:"DAV: propname"` // request property names only
	Prop     propfindProps `xml:"DAV: prop"`     // request these named properties
	Include  propfindProps `xml:"DAV: include"`  // extra properties on top of allprop
}
|
||||||
|
|
||||||
|
// readPropfind parses a PROPFIND request body from r. On failure it returns
// a non-zero suggested HTTP status code alongside the error.
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
	c := countingReader{r: r}
	if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
		if err == io.EOF {
			if c.n == 0 {
				// An empty body means to propfind allprop.
				// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
				return propfind{Allprop: new(struct{})}, 0, nil
			}
			// Non-empty but truncated XML is a client error, not plain EOF.
			err = errInvalidPropfind
		}
		return propfind{}, http.StatusBadRequest, err
	}

	// RFC 4918 section 9.1: exactly one of propname, allprop or prop may be
	// present, and include is only meaningful together with allprop.
	if pf.Allprop == nil && pf.Include != nil {
		return propfind{}, http.StatusBadRequest, errInvalidPropfind
	}
	if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
		return propfind{}, http.StatusBadRequest, errInvalidPropfind
	}
	if pf.Prop != nil && pf.Propname != nil {
		return propfind{}, http.StatusBadRequest, errInvalidPropfind
	}
	if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
		return propfind{}, http.StatusBadRequest, errInvalidPropfind
	}
	return pf, 0, nil
}
|
||||||
|
|
||||||
|
// Property represents a single DAV resource property as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
type Property struct {
	// XMLName is the fully qualified name that identifies this property.
	XMLName xml.Name

	// Lang is an optional xml:lang attribute.
	Lang string `xml:"xml:lang,attr,omitempty"`

	// InnerXML contains the XML representation of the property value.
	// See http://www.webdav.org/specs/rfc4918.html#property_values
	//
	// Property values of complex type or mixed-content must have fully
	// expanded XML namespaces or be self-contained with according
	// XML namespace declarations. They must not rely on any XML
	// namespace declarations within the scope of the XML document,
	// even including the DAV: namespace.
	InnerXML []byte `xml:",innerxml"`
}
|
||||||
|
|
||||||
|
// ixmlProperty is the same as the Property type except it holds an ixml.Name
// instead of an xml.Name. It exists so properties can be encoded with the
// internal XML encoder used by multistatusWriter.
type ixmlProperty struct {
	XMLName  ixml.Name
	Lang     string `xml:"xml:lang,attr,omitempty"`
	InnerXML []byte `xml:",innerxml"`
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
// See multistatusWriter for the "D:" namespace prefix.
type xmlError struct {
	XMLName  ixml.Name `xml:"D:error"`
	InnerXML []byte    `xml:",innerxml"` // precondition/postcondition element(s), written verbatim
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
// See multistatusWriter for the "D:" namespace prefix.
type propstat struct {
	Prop                []Property `xml:"D:prop>_ignored_"` // inner tag name is replaced during MarshalXML
	Status              string     `xml:"D:status"`
	Error               *xmlError  `xml:"D:error"`
	ResponseDescription string     `xml:"D:responsedescription,omitempty"`
}
|
||||||
|
|
||||||
|
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
// instead of an xml.Name. propstat.MarshalXML converts into this type before
// encoding with the internal XML encoder.
type ixmlPropstat struct {
	Prop                []ixmlProperty `xml:"D:prop>_ignored_"`
	Status              string         `xml:"D:status"`
	Error               *xmlError      `xml:"D:error"`
	ResponseDescription string         `xml:"D:responsedescription,omitempty"`
}
|
||||||
|
|
||||||
|
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
// before encoding. See multistatusWriter.
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
	// Convert from a propstat to an ixmlPropstat.
	ixmlPs := ixmlPropstat{
		Prop:                make([]ixmlProperty, len(ps.Prop)),
		Status:              ps.Status,
		Error:               ps.Error,
		ResponseDescription: ps.ResponseDescription,
	}
	for k, prop := range ps.Prop {
		ixmlPs.Prop[k] = ixmlProperty{
			XMLName:  ixml.Name(prop.XMLName),
			Lang:     prop.Lang,
			InnerXML: prop.InnerXML,
		}
	}

	// Rewrite DAV:-namespace property names to carry the literal "D:"
	// prefix, clearing the namespace so the encoder emits them as-is.
	// See the multistatusWriter comment for why this workaround exists.
	for k, prop := range ixmlPs.Prop {
		if prop.XMLName.Space == "DAV:" {
			prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
			ixmlPs.Prop[k] = prop
		}
	}
	// Distinct type to avoid infinite recursion of MarshalXML.
	type newpropstat ixmlPropstat
	return e.EncodeElement(newpropstat(ixmlPs), start)
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
// See multistatusWriter for the "D:" namespace prefix.
//
// A response carries either one Href plus Propstat entries, one Href plus a
// Status, or several Hrefs plus a Status; multistatusWriter.write validates
// those combinations.
type response struct {
	XMLName             ixml.Name  `xml:"D:response"`
	Href                []string   `xml:"D:href"`
	Propstat            []propstat `xml:"D:propstat"`
	Status              string     `xml:"D:status,omitempty"`
	Error               *xmlError  `xml:"D:error"`
	ResponseDescription string     `xml:"D:responsedescription,omitempty"`
}
|
||||||
|
|
||||||
|
// MultistatusWriter marshals one or more Responses into a XML
// multistatus response.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
// "DAV:" on this element, is prepended on the nested response, as well as on all
// its nested elements. All property names in the DAV: namespace are prefixed as
// well. This is because some versions of Mini-Redirector (on windows 7) ignore
// elements with a default namespace (no prefixed namespace). A less intrusive fix
// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
type multistatusWriter struct {
	// ResponseDescription contains the optional responsedescription
	// of the multistatus XML element. Only the latest content before
	// close will be emitted. Empty response descriptions are not
	// written.
	responseDescription string

	// w receives the marshalled multistatus document.
	w http.ResponseWriter
	// enc is nil until writeHeader has emitted the multistatus start
	// element; it doubles as the "header already written" flag.
	enc *ixml.Encoder
}
|
||||||
|
|
||||||
|
// Write validates and emits a DAV response as part of a multistatus response
// element.
//
// It sets the HTTP status code of its underlying http.ResponseWriter to 207
// (Multi-Status) and populates the Content-Type header. If r is the
// first, valid response to be written, Write prepends the XML representation
// of r with a multistatus tag. Callers must call close after the last response
// has been written.
func (w *multistatusWriter) write(r *response) error {
	switch len(r.Href) {
	case 0:
		// Every response element needs at least one href.
		return errInvalidResponse
	case 1:
		// A single href must carry exactly one of: propstats, or a status.
		if len(r.Propstat) > 0 != (r.Status == "") {
			return errInvalidResponse
		}
	default:
		// Multiple hrefs require a status and forbid propstats.
		if len(r.Propstat) > 0 || r.Status == "" {
			return errInvalidResponse
		}
	}
	// Lazily emit the 207 header and the multistatus start element.
	err := w.writeHeader()
	if err != nil {
		return err
	}
	return w.enc.Encode(r)
}
|
||||||
|
|
||||||
|
// writeHeader writes a XML multistatus start element on w's underlying
// http.ResponseWriter and returns the result of the write operation.
// After the first write attempt, writeHeader becomes a no-op.
func (w *multistatusWriter) writeHeader() error {
	// A non-nil encoder means the header was already written.
	if w.enc != nil {
		return nil
	}
	w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
	w.w.WriteHeader(StatusMulti)
	_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
	if err != nil {
		return err
	}
	w.enc = ixml.NewEncoder(w.w)
	// Open <D:multistatus xmlns:D="DAV:">; close() emits the matching end element.
	return w.enc.EncodeToken(ixml.StartElement{
		Name: ixml.Name{
			Space: "DAV:",
			Local: "multistatus",
		},
		Attr: []ixml.Attr{{
			Name:  ixml.Name{Space: "xmlns", Local: "D"},
			Value: "DAV:",
		}},
	})
}
|
||||||
|
|
||||||
|
// Close completes the marshalling of the multistatus response. It returns
// an error if the multistatus response could not be completed. If both the
// return value and field enc of w are nil, then no multistatus response has
// been written.
func (w *multistatusWriter) close() error {
	// Nothing was ever written (writeHeader never ran): nothing to close.
	if w.enc == nil {
		return nil
	}
	var end []ixml.Token
	// Emit the optional responsedescription just before the closing tag.
	if w.responseDescription != "" {
		name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
		end = append(end,
			ixml.StartElement{Name: name},
			ixml.CharData(w.responseDescription),
			ixml.EndElement{Name: name},
		)
	}
	end = append(end, ixml.EndElement{
		Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
	})
	for _, t := range end {
		err := w.enc.EncodeToken(t)
		if err != nil {
			return err
		}
	}
	// Flush pushes any buffered tokens to the underlying ResponseWriter.
	return w.enc.Flush()
}
|
||||||
|
|
||||||
|
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
|
||||||
|
|
||||||
|
func xmlLang(s ixml.StartElement, d string) string {
|
||||||
|
for _, attr := range s.Attr {
|
||||||
|
if attr.Name == xmlLangName {
|
||||||
|
return attr.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
// xmlValue holds the raw, namespace-complete inner XML of a property value.
type xmlValue []byte

// UnmarshalXML captures the inner XML of the element started by start into v.
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
	// The XML value of a property can be arbitrary, mixed-content XML.
	// To make sure that the unmarshalled value contains all required
	// namespaces, we encode all the property value XML tokens into a
	// buffer. This forces the encoder to redeclare any used namespaces.
	var b bytes.Buffer
	e := ixml.NewEncoder(&b)
	for {
		t, err := next(d)
		if err != nil {
			return err
		}
		// Stop (without re-encoding) at the end element matching start.
		if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
			break
		}
		if err = e.EncodeToken(t); err != nil {
			return err
		}
	}
	err := e.Flush()
	if err != nil {
		return err
	}
	*v = b.Bytes()
	return nil
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
type proppatchProps []Property

// UnmarshalXML appends the property names and values enclosed within start
// to ps.
//
// An xml:lang attribute that is defined either on the DAV:prop or property
// name XML element is propagated to the property's Lang field.
//
// UnmarshalXML returns an error if start does not contain any properties or if
// property values contain syntactically incorrect XML.
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
	// Language inherited from the enclosing <prop> element, if any.
	lang := xmlLang(start, "")
	for {
		t, err := next(d)
		if err != nil {
			return err
		}
		switch elem := t.(type) {
		case ixml.EndElement:
			// Closing the enclosing <prop>: it must not have been empty.
			if len(*ps) == 0 {
				return fmt.Errorf("%s must not be empty", start.Name.Local)
			}
			return nil
		case ixml.StartElement:
			p := Property{
				XMLName: xml.Name(t.(ixml.StartElement).Name),
				// The property's own xml:lang wins over the inherited one.
				Lang: xmlLang(t.(ixml.StartElement), lang),
			}
			// Capture the property's raw inner XML via xmlValue.
			err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
			if err != nil {
				return err
			}
			*ps = append(*ps, p)
		}
	}
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
//
// setRemove decodes either a DAV:set or a DAV:remove element; XMLName
// records which one it was (readProppatch dispatches on it).
type setRemove struct {
	XMLName ixml.Name
	Lang    string         `xml:"xml:lang,attr,omitempty"`
	Prop    proppatchProps `xml:"DAV: prop"`
}
|
||||||
|
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
type propertyupdate struct {
	XMLName ixml.Name `xml:"DAV: propertyupdate"`
	Lang    string    `xml:"xml:lang,attr,omitempty"`
	// SetRemove preserves document order of the set/remove children.
	SetRemove []setRemove `xml:",any"`
}
|
||||||
|
|
||||||
|
// readProppatch parses a PROPPATCH request body from r into an ordered list
// of Proppatch operations. On failure it returns a non-zero suggested HTTP
// status code alongside the error.
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
	var pu propertyupdate
	if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
		return nil, http.StatusBadRequest, err
	}
	for _, op := range pu.SetRemove {
		remove := false
		switch op.XMLName {
		case ixml.Name{Space: "DAV:", Local: "set"}:
			// No-op.
		case ixml.Name{Space: "DAV:", Local: "remove"}:
			// Removed properties must be named only; values are invalid here.
			for _, p := range op.Prop {
				if len(p.InnerXML) > 0 {
					return nil, http.StatusBadRequest, errInvalidProppatch
				}
			}
			remove = true
		default:
			// Any child other than DAV:set / DAV:remove is malformed.
			return nil, http.StatusBadRequest, errInvalidProppatch
		}
		patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
	}
	return patches, 0, nil
}
|
905
drives/davServer/xml_test.go
Normal file
905
drives/davServer/xml_test.go
Normal file
@ -0,0 +1,905 @@
|
|||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package davServer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
ixml "github.com/openziti/zrok/drives/davServer/internal/xml"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestReadLockInfo exercises readLockInfo over malformed and well-formed
// LOCK request bodies, checking both the parsed lockInfo and the suggested
// HTTP status code.
func TestReadLockInfo(t *testing.T) {
	// The "section x.y.z" test cases come from section x.y.z of the spec at
	// http://www.webdav.org/specs/rfc4918.html
	testCases := []struct {
		desc       string
		input      string
		wantLI     lockInfo
		wantStatus int
	}{{
		"bad: junk",
		"xxx",
		lockInfo{},
		http.StatusBadRequest,
	}, {
		"bad: invalid owner XML",
		"" +
			"<D:lockinfo xmlns:D='DAV:'>\n" +
			"  <D:lockscope><D:exclusive/></D:lockscope>\n" +
			"  <D:locktype><D:write/></D:locktype>\n" +
			"  <D:owner>\n" +
			"    <D:href>   no end tag   \n" +
			"  </D:owner>\n" +
			"</D:lockinfo>",
		lockInfo{},
		http.StatusBadRequest,
	}, {
		"bad: invalid UTF-8",
		"" +
			"<D:lockinfo xmlns:D='DAV:'>\n" +
			"  <D:lockscope><D:exclusive/></D:lockscope>\n" +
			"  <D:locktype><D:write/></D:locktype>\n" +
			"  <D:owner>\n" +
			"    <D:href>   \xff   </D:href>\n" +
			"  </D:owner>\n" +
			"</D:lockinfo>",
		lockInfo{},
		http.StatusBadRequest,
	}, {
		"bad: unfinished XML #1",
		"" +
			"<D:lockinfo xmlns:D='DAV:'>\n" +
			"  <D:lockscope><D:exclusive/></D:lockscope>\n" +
			"  <D:locktype><D:write/></D:locktype>\n",
		lockInfo{},
		http.StatusBadRequest,
	}, {
		"bad: unfinished XML #2",
		"" +
			"<D:lockinfo xmlns:D='DAV:'>\n" +
			"  <D:lockscope><D:exclusive/></D:lockscope>\n" +
			"  <D:locktype><D:write/></D:locktype>\n" +
			"  <D:owner>\n",
		lockInfo{},
		http.StatusBadRequest,
	}, {
		// An empty body is valid: it means a lock refresh.
		"good: empty",
		"",
		lockInfo{},
		0,
	}, {
		"good: plain-text owner",
		"" +
			"<D:lockinfo xmlns:D='DAV:'>\n" +
			"  <D:lockscope><D:exclusive/></D:lockscope>\n" +
			"  <D:locktype><D:write/></D:locktype>\n" +
			"  <D:owner>gopher</D:owner>\n" +
			"</D:lockinfo>",
		lockInfo{
			XMLName:   ixml.Name{Space: "DAV:", Local: "lockinfo"},
			Exclusive: new(struct{}),
			Write:     new(struct{}),
			Owner: owner{
				InnerXML: "gopher",
			},
		},
		0,
	}, {
		"section 9.10.7",
		"" +
			"<D:lockinfo xmlns:D='DAV:'>\n" +
			"  <D:lockscope><D:exclusive/></D:lockscope>\n" +
			"  <D:locktype><D:write/></D:locktype>\n" +
			"  <D:owner>\n" +
			"    <D:href>http://example.org/~ejw/contact.html</D:href>\n" +
			"  </D:owner>\n" +
			"</D:lockinfo>",
		lockInfo{
			XMLName:   ixml.Name{Space: "DAV:", Local: "lockinfo"},
			Exclusive: new(struct{}),
			Write:     new(struct{}),
			Owner: owner{
				InnerXML: "\n    <D:href>http://example.org/~ejw/contact.html</D:href>\n  ",
			},
		},
		0,
	}}

	for _, tc := range testCases {
		li, status, err := readLockInfo(strings.NewReader(tc.input))
		if tc.wantStatus != 0 {
			// Failure cases: any non-nil error is acceptable.
			if err == nil {
				t.Errorf("%s: got nil error, want non-nil", tc.desc)
				continue
			}
		} else if err != nil {
			t.Errorf("%s: %v", tc.desc, err)
			continue
		}
		if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus {
			t.Errorf("%s:\ngot  lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v",
				tc.desc, li, status, tc.wantLI, tc.wantStatus)
			continue
		}
	}
}
|
||||||
|
|
||||||
|
// TestReadPropfind exercises readPropfind over valid PROPFIND bodies
// (propname, allprop, allprop+include, prop) and the invalid combinations
// rejected by RFC 4918 sections 9.1 and A.3/A.4.
func TestReadPropfind(t *testing.T) {
	testCases := []struct {
		desc       string
		input      string
		wantPF     propfind
		wantStatus int
	}{{
		desc: "propfind: propname",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:propname/>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName:  ixml.Name{Space: "DAV:", Local: "propfind"},
			Propname: new(struct{}),
		},
	}, {
		desc:  "propfind: empty body means allprop",
		input: "",
		wantPF: propfind{
			Allprop: new(struct{}),
		},
	}, {
		desc: "propfind: allprop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"   <A:allprop/>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Allprop: new(struct{}),
		},
	}, {
		desc: "propfind: allprop followed by include",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:allprop/>\n" +
			"  <A:include><A:displayname/></A:include>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Allprop: new(struct{}),
			Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
		},
	}, {
		desc: "propfind: include followed by allprop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:include><A:displayname/></A:include>\n" +
			"  <A:allprop/>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Allprop: new(struct{}),
			Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
		},
	}, {
		desc: "propfind: propfind",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop><A:displayname/></A:prop>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Prop:    propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
		},
	}, {
		desc: "propfind: prop with ignored comments",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop>\n" +
			"    <!-- ignore -->\n" +
			"    <A:displayname><!-- ignore --></A:displayname>\n" +
			"  </A:prop>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Prop:    propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
		},
	}, {
		desc: "propfind: propfind with ignored whitespace",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop>   <A:displayname/></A:prop>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Prop:    propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
		},
	}, {
		desc: "propfind: propfind with ignored mixed-content",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop>foo<A:displayname/>bar</A:prop>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
			Prop:    propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
		},
	}, {
		desc: "propfind: propname with ignored element (section A.4)",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:propname/>\n" +
			"  <E:leave-out xmlns:E='E:'>*boss*</E:leave-out>\n" +
			"</A:propfind>",
		wantPF: propfind{
			XMLName:  ixml.Name{Space: "DAV:", Local: "propfind"},
			Propname: new(struct{}),
		},
	}, {
		desc:       "propfind: bad: junk",
		input:      "xxx",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: propname and allprop (section A.3)",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:propname/>" +
			"  <A:allprop/>" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: propname and prop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop><A:displayname/></A:prop>\n" +
			"  <A:propname/>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: allprop and prop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:allprop/>\n" +
			"  <A:prop><A:foo/><A:/prop>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: empty propfind with ignored element (section A.4)",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <E:expired-props/>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: empty prop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop/>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: prop with just chardata",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop>foo</A:prop>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "bad: interrupted prop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop><A:foo></A:prop>\n",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "bad: malformed end element prop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop><A:foo/></A:bar></A:prop>\n",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: property with chardata value",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop><A:foo>bar</A:foo></A:prop>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: property with whitespace value",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:prop><A:foo> </A:foo></A:prop>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "propfind: bad: include without allprop",
		input: "" +
			"<A:propfind xmlns:A='DAV:'>\n" +
			"  <A:include><A:foo/></A:include>\n" +
			"</A:propfind>",
		wantStatus: http.StatusBadRequest,
	}}

	for _, tc := range testCases {
		pf, status, err := readPropfind(strings.NewReader(tc.input))
		if tc.wantStatus != 0 {
			// Failure cases: any non-nil error is acceptable.
			if err == nil {
				t.Errorf("%s: got nil error, want non-nil", tc.desc)
				continue
			}
		} else if err != nil {
			t.Errorf("%s: %v", tc.desc, err)
			continue
		}
		if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus {
			t.Errorf("%s:\ngot  propfind=%v, status=%v\nwant propfind=%v, status=%v",
				tc.desc, pf, status, tc.wantPF, tc.wantStatus)
			continue
		}
	}
}
|
||||||
|
|
||||||
|
func TestMultistatusWriter(t *testing.T) {
|
||||||
|
///The "section x.y.z" test cases come from section x.y.z of the spec at
|
||||||
|
// http://www.webdav.org/specs/rfc4918.html
|
||||||
|
testCases := []struct {
|
||||||
|
desc string
|
||||||
|
responses []response
|
||||||
|
respdesc string
|
||||||
|
writeHeader bool
|
||||||
|
wantXML string
|
||||||
|
wantCode int
|
||||||
|
wantErr error
|
||||||
|
}{{
|
||||||
|
desc: "section 9.2.2 (failed dependency)",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{"http://example.com/foo"},
|
||||||
|
Propstat: []propstat{{
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{
|
||||||
|
Space: "http://ns.example.com/",
|
||||||
|
Local: "Authors",
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 424 Failed Dependency",
|
||||||
|
}, {
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{
|
||||||
|
Space: "http://ns.example.com/",
|
||||||
|
Local: "Copyright-Owner",
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 409 Conflict",
|
||||||
|
}},
|
||||||
|
ResponseDescription: "Copyright Owner cannot be deleted or altered.",
|
||||||
|
}},
|
||||||
|
wantXML: `` +
|
||||||
|
`<?xml version="1.0" encoding="UTF-8"?>` +
|
||||||
|
`<multistatus xmlns="DAV:">` +
|
||||||
|
` <response>` +
|
||||||
|
` <href>http://example.com/foo</href>` +
|
||||||
|
` <propstat>` +
|
||||||
|
` <prop>` +
|
||||||
|
` <Authors xmlns="http://ns.example.com/"></Authors>` +
|
||||||
|
` </prop>` +
|
||||||
|
` <status>HTTP/1.1 424 Failed Dependency</status>` +
|
||||||
|
` </propstat>` +
|
||||||
|
` <propstat xmlns="DAV:">` +
|
||||||
|
` <prop>` +
|
||||||
|
` <Copyright-Owner xmlns="http://ns.example.com/"></Copyright-Owner>` +
|
||||||
|
` </prop>` +
|
||||||
|
` <status>HTTP/1.1 409 Conflict</status>` +
|
||||||
|
` </propstat>` +
|
||||||
|
` <responsedescription>Copyright Owner cannot be deleted or altered.</responsedescription>` +
|
||||||
|
`</response>` +
|
||||||
|
`</multistatus>`,
|
||||||
|
wantCode: StatusMulti,
|
||||||
|
}, {
|
||||||
|
desc: "section 9.6.2 (lock-token-submitted)",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{"http://example.com/foo"},
|
||||||
|
Status: "HTTP/1.1 423 Locked",
|
||||||
|
Error: &xmlError{
|
||||||
|
InnerXML: []byte(`<lock-token-submitted xmlns="DAV:"/>`),
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
wantXML: `` +
|
||||||
|
`<?xml version="1.0" encoding="UTF-8"?>` +
|
||||||
|
`<multistatus xmlns="DAV:">` +
|
||||||
|
` <response>` +
|
||||||
|
` <href>http://example.com/foo</href>` +
|
||||||
|
` <status>HTTP/1.1 423 Locked</status>` +
|
||||||
|
` <error><lock-token-submitted xmlns="DAV:"/></error>` +
|
||||||
|
` </response>` +
|
||||||
|
`</multistatus>`,
|
||||||
|
wantCode: StatusMulti,
|
||||||
|
}, {
|
||||||
|
desc: "section 9.1.3",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{"http://example.com/foo"},
|
||||||
|
Propstat: []propstat{{
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"},
|
||||||
|
InnerXML: []byte(`` +
|
||||||
|
`<BoxType xmlns="http://ns.example.com/boxschema/">` +
|
||||||
|
`Box type A` +
|
||||||
|
`</BoxType>`),
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"},
|
||||||
|
InnerXML: []byte(`` +
|
||||||
|
`<Name xmlns="http://ns.example.com/boxschema/">` +
|
||||||
|
`J.J. Johnson` +
|
||||||
|
`</Name>`),
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 200 OK",
|
||||||
|
}, {
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"},
|
||||||
|
}, {
|
||||||
|
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"},
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 403 Forbidden",
|
||||||
|
ResponseDescription: "The user does not have access to the DingALing property.",
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
respdesc: "There has been an access violation error.",
|
||||||
|
wantXML: `` +
|
||||||
|
`<?xml version="1.0" encoding="UTF-8"?>` +
|
||||||
|
`<multistatus xmlns="DAV:" xmlns:B="http://ns.example.com/boxschema/">` +
|
||||||
|
` <response>` +
|
||||||
|
` <href>http://example.com/foo</href>` +
|
||||||
|
` <propstat>` +
|
||||||
|
` <prop>` +
|
||||||
|
` <B:bigbox><B:BoxType>Box type A</B:BoxType></B:bigbox>` +
|
||||||
|
` <B:author><B:Name>J.J. Johnson</B:Name></B:author>` +
|
||||||
|
` </prop>` +
|
||||||
|
` <status>HTTP/1.1 200 OK</status>` +
|
||||||
|
` </propstat>` +
|
||||||
|
` <propstat>` +
|
||||||
|
` <prop>` +
|
||||||
|
` <B:DingALing/>` +
|
||||||
|
` <B:Random/>` +
|
||||||
|
` </prop>` +
|
||||||
|
` <status>HTTP/1.1 403 Forbidden</status>` +
|
||||||
|
` <responsedescription>The user does not have access to the DingALing property.</responsedescription>` +
|
||||||
|
` </propstat>` +
|
||||||
|
` </response>` +
|
||||||
|
` <responsedescription>There has been an access violation error.</responsedescription>` +
|
||||||
|
`</multistatus>`,
|
||||||
|
wantCode: StatusMulti,
|
||||||
|
}, {
|
||||||
|
desc: "no response written",
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}, {
|
||||||
|
desc: "no response written (with description)",
|
||||||
|
respdesc: "too bad",
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}, {
|
||||||
|
desc: "empty multistatus with header",
|
||||||
|
writeHeader: true,
|
||||||
|
wantXML: `<multistatus xmlns="DAV:"></multistatus>`,
|
||||||
|
wantCode: StatusMulti,
|
||||||
|
}, {
|
||||||
|
desc: "bad: no href",
|
||||||
|
responses: []response{{
|
||||||
|
Propstat: []propstat{{
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{
|
||||||
|
Space: "http://example.com/",
|
||||||
|
Local: "foo",
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 200 OK",
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantErr: errInvalidResponse,
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}, {
|
||||||
|
desc: "bad: multiple hrefs and no status",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{"http://example.com/foo", "http://example.com/bar"},
|
||||||
|
}},
|
||||||
|
wantErr: errInvalidResponse,
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}, {
|
||||||
|
desc: "bad: one href and no propstat",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{"http://example.com/foo"},
|
||||||
|
}},
|
||||||
|
wantErr: errInvalidResponse,
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}, {
|
||||||
|
desc: "bad: status with one href and propstat",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{"http://example.com/foo"},
|
||||||
|
Propstat: []propstat{{
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{
|
||||||
|
Space: "http://example.com/",
|
||||||
|
Local: "foo",
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 200 OK",
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 200 OK",
|
||||||
|
}},
|
||||||
|
wantErr: errInvalidResponse,
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}, {
|
||||||
|
desc: "bad: multiple hrefs and propstat",
|
||||||
|
responses: []response{{
|
||||||
|
Href: []string{
|
||||||
|
"http://example.com/foo",
|
||||||
|
"http://example.com/bar",
|
||||||
|
},
|
||||||
|
Propstat: []propstat{{
|
||||||
|
Prop: []Property{{
|
||||||
|
XMLName: xml.Name{
|
||||||
|
Space: "http://example.com/",
|
||||||
|
Local: "foo",
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
Status: "HTTP/1.1 200 OK",
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
wantErr: errInvalidResponse,
|
||||||
|
// default of http.responseWriter
|
||||||
|
wantCode: http.StatusOK,
|
||||||
|
}}
|
||||||
|
|
||||||
|
n := xmlNormalizer{omitWhitespace: true}
|
||||||
|
loop:
|
||||||
|
for _, tc := range testCases {
|
||||||
|
rec := httptest.NewRecorder()
|
||||||
|
w := multistatusWriter{w: rec, responseDescription: tc.respdesc}
|
||||||
|
if tc.writeHeader {
|
||||||
|
if err := w.writeHeader(); err != nil {
|
||||||
|
t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, r := range tc.responses {
|
||||||
|
if err := w.write(&r); err != nil {
|
||||||
|
if err != tc.wantErr {
|
||||||
|
t.Errorf("%s: got write error %v, want %v",
|
||||||
|
tc.desc, err, tc.wantErr)
|
||||||
|
}
|
||||||
|
continue loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := w.close(); err != tc.wantErr {
|
||||||
|
t.Errorf("%s: got close error %v, want %v",
|
||||||
|
tc.desc, err, tc.wantErr)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if rec.Code != tc.wantCode {
|
||||||
|
t.Errorf("%s: got HTTP status code %d, want %d\n",
|
||||||
|
tc.desc, rec.Code, tc.wantCode)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
gotXML := rec.Body.String()
|
||||||
|
eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: equalXML: %v", tc.desc, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !eq {
|
||||||
|
t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReadProppatch exercises readProppatch against PROPPATCH bodies from
// RFC 4918 (section 9.2) plus deliberately malformed requests, checking
// both the parsed []Proppatch and the HTTP status for bad input.
func TestReadProppatch(t *testing.T) {
	// ppStr renders a []Proppatch in a stable, human-readable form for
	// failure messages.
	ppStr := func(pps []Proppatch) string {
		var outer []string
		for _, pp := range pps {
			var inner []string
			for _, p := range pp.Props {
				inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}",
					p.XMLName, p.Lang, p.InnerXML))
			}
			outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}",
				pp.Remove, strings.Join(inner, ", ")))
		}
		return "[" + strings.Join(outer, ", ") + "]"
	}

	testCases := []struct {
		desc       string
		input      string
		wantPP     []Proppatch
		wantStatus int // non-zero means an error (and this status) is expected
	}{{
		desc: "proppatch: section 9.2 (with simple property value)",
		input: `` +
			`<?xml version="1.0" encoding="utf-8" ?>` +
			`<D:propertyupdate xmlns:D="DAV:"` +
			` xmlns:Z="http://ns.example.com/z/">` +
			` <D:set>` +
			` <D:prop><Z:Authors>somevalue</Z:Authors></D:prop>` +
			` </D:set>` +
			` <D:remove>` +
			` <D:prop><Z:Copyright-Owner/></D:prop>` +
			` </D:remove>` +
			`</D:propertyupdate>`,
		wantPP: []Proppatch{{
			Props: []Property{{
				xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"},
				"",
				[]byte(`somevalue`),
			}},
		}, {
			Remove: true,
			Props: []Property{{
				xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"},
				"",
				nil,
			}},
		}},
	}, {
		desc: "proppatch: lang attribute on prop",
		input: `` +
			`<?xml version="1.0" encoding="utf-8" ?>` +
			`<D:propertyupdate xmlns:D="DAV:">` +
			` <D:set>` +
			` <D:prop xml:lang="en">` +
			` <foo xmlns="http://example.com/ns"/>` +
			` </D:prop>` +
			` </D:set>` +
			`</D:propertyupdate>`,
		wantPP: []Proppatch{{
			Props: []Property{{
				xml.Name{Space: "http://example.com/ns", Local: "foo"},
				"en",
				nil,
			}},
		}},
	}, {
		// A <D:remove> must not carry property values.
		desc: "bad: remove with value",
		input: `` +
			`<?xml version="1.0" encoding="utf-8" ?>` +
			`<D:propertyupdate xmlns:D="DAV:"` +
			` xmlns:Z="http://ns.example.com/z/">` +
			` <D:remove>` +
			` <D:prop>` +
			` <Z:Authors>` +
			` <Z:Author>Jim Whitehead</Z:Author>` +
			` </Z:Authors>` +
			` </D:prop>` +
			` </D:remove>` +
			`</D:propertyupdate>`,
		wantStatus: http.StatusBadRequest,
	}, {
		// Note: the start tag below is deliberately unterminated, so the
		// XML itself is malformed.
		desc: "bad: empty propertyupdate",
		input: `` +
			`<?xml version="1.0" encoding="utf-8" ?>` +
			`<D:propertyupdate xmlns:D="DAV:"` +
			`</D:propertyupdate>`,
		wantStatus: http.StatusBadRequest,
	}, {
		desc: "bad: empty prop",
		input: `` +
			`<?xml version="1.0" encoding="utf-8" ?>` +
			`<D:propertyupdate xmlns:D="DAV:"` +
			` xmlns:Z="http://ns.example.com/z/">` +
			` <D:remove>` +
			` <D:prop/>` +
			` </D:remove>` +
			`</D:propertyupdate>`,
		wantStatus: http.StatusBadRequest,
	}}

	for _, tc := range testCases {
		pp, status, err := readProppatch(strings.NewReader(tc.input))
		if tc.wantStatus != 0 {
			// Error cases: an error must be reported.
			if err == nil {
				t.Errorf("%s: got nil error, want non-nil", tc.desc)
				continue
			}
		} else if err != nil {
			t.Errorf("%s: %v", tc.desc, err)
			continue
		}
		if status != tc.wantStatus {
			t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus)
			continue
		}
		if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus {
			t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP))
		}
	}
}
|
||||||
|
|
||||||
|
// TestUnmarshalXMLValue checks that decoding into xmlValue preserves inner
// XML content — namespaces, whitespace, and mixed content — by comparing
// the decoded bytes against the expected XML via xmlNormalizer.
func TestUnmarshalXMLValue(t *testing.T) {
	testCases := []struct {
		desc    string
		input   string
		wantVal string
	}{{
		desc:    "simple char data",
		input:   "<root>foo</root>",
		wantVal: "foo",
	}, {
		desc:    "empty element",
		input:   "<root><foo/></root>",
		wantVal: "<foo/>",
	}, {
		desc:    "preserve namespace",
		input:   `<root><foo xmlns="bar"/></root>`,
		wantVal: `<foo xmlns="bar"/>`,
	}, {
		// Prefixes declared on the root must survive as declarations on
		// the inner element.
		desc:    "preserve root element namespace",
		input:   `<root xmlns:bar="bar"><bar:foo/></root>`,
		wantVal: `<foo xmlns="bar"/>`,
	}, {
		desc:    "preserve whitespace",
		input:   "<root> \t </root>",
		wantVal: " \t ",
	}, {
		desc:    "preserve mixed content",
		input:   `<root xmlns="bar"> <foo>a<bam xmlns="baz"/> </foo> </root>`,
		wantVal: ` <foo xmlns="bar">a<bam xmlns="baz"/> </foo> `,
	}, {
		desc: "section 9.2",
		input: `` +
			`<Z:Authors xmlns:Z="http://ns.example.com/z/">` +
			` <Z:Author>Jim Whitehead</Z:Author>` +
			` <Z:Author>Roy Fielding</Z:Author>` +
			`</Z:Authors>`,
		wantVal: `` +
			` <Author xmlns="http://ns.example.com/z/">Jim Whitehead</Author>` +
			` <Author xmlns="http://ns.example.com/z/">Roy Fielding</Author>`,
	}, {
		desc: "section 4.3.1 (mixed content)",
		input: `` +
			`<x:author ` +
			` xmlns:x='http://example.com/ns' ` +
			` xmlns:D="DAV:">` +
			` <x:name>Jane Doe</x:name>` +
			` <!-- Jane's contact info -->` +
			` <x:uri type='email'` +
			` added='2005-11-26'>mailto:jane.doe@example.com</x:uri>` +
			` <x:uri type='web'` +
			` added='2005-11-27'>http://www.example.com</x:uri>` +
			` <x:notes xmlns:h='http://www.w3.org/1999/xhtml'>` +
			` Jane has been working way <h:em>too</h:em> long on the` +
			` long-awaited revision of <![CDATA[<RFC2518>]]>.` +
			` </x:notes>` +
			`</x:author>`,
		wantVal: `` +
			` <name xmlns="http://example.com/ns">Jane Doe</name>` +
			` ` +
			` <uri type='email'` +
			` xmlns="http://example.com/ns" ` +
			` added='2005-11-26'>mailto:jane.doe@example.com</uri>` +
			` <uri added='2005-11-27'` +
			` type='web'` +
			` xmlns="http://example.com/ns">http://www.example.com</uri>` +
			` <notes xmlns="http://example.com/ns" ` +
			` xmlns:h="http://www.w3.org/1999/xhtml">` +
			` Jane has been working way <h:em>too</h:em> long on the` +
			` long-awaited revision of <RFC2518>.` +
			` </notes>`,
	}}

	// Default normalizer: keep whitespace and comments.
	var n xmlNormalizer
	for _, tc := range testCases {
		d := ixml.NewDecoder(strings.NewReader(tc.input))
		var v xmlValue
		if err := d.Decode(&v); err != nil {
			t.Errorf("%s: got error %v, want nil", tc.desc, err)
			continue
		}
		// Compare structurally, not byte-for-byte: attribute order and
		// prefix names may legitimately differ.
		eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal))
		if err != nil {
			t.Errorf("%s: equalXML: %v", tc.desc, err)
			continue
		}
		if !eq {
			t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal)
		}
	}
}
|
||||||
|
|
||||||
|
// xmlNormalizer normalizes XML so that two documents can be compared for
// structural equality regardless of prefix names or attribute order.
type xmlNormalizer struct {
	// omitWhitespace instructs to ignore whitespace between element tags.
	omitWhitespace bool
	// omitComments instructs to ignore XML comments.
	omitComments bool
}
|
||||||
|
|
||||||
|
// normalize writes the normalized XML content of r to w. It applies the
// following rules
//
//   - Rename namespace prefixes according to an internal heuristic.
//   - Remove unnecessary namespace declarations.
//   - Sort attributes in XML start elements in lexical order of their
//     fully qualified name.
//   - Remove XML directives and processing instructions.
//   - Remove CDATA between XML tags that only contains whitespace, if
//     instructed to do so.
//   - Remove comments, if instructed to do so.
func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error {
	d := ixml.NewDecoder(r)
	e := ixml.NewEncoder(w)
	for {
		t, err := d.Token()
		if err != nil {
			// A nil token with io.EOF is the normal end of input.
			if t == nil && err == io.EOF {
				break
			}
			return err
		}
		switch val := t.(type) {
		case ixml.Directive, ixml.ProcInst:
			// Always dropped from the normalized output.
			continue
		case ixml.Comment:
			if n.omitComments {
				continue
			}
		case ixml.CharData:
			// Optionally drop whitespace-only text between tags.
			if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 {
				continue
			}
		case ixml.StartElement:
			// Copy before mutating: the decoder may reuse token memory.
			start, _ := ixml.CopyToken(val).(ixml.StartElement)
			attr := start.Attr[:0]
			for _, a := range start.Attr {
				// Strip explicit namespace declarations; the encoder
				// re-emits whatever declarations are needed.
				if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" {
					continue
				}
				attr = append(attr, a)
			}
			sort.Sort(byName(attr))
			start.Attr = attr
			t = start
		}
		err = e.EncodeToken(t)
		if err != nil {
			return err
		}
	}
	return e.Flush()
}
|
||||||
|
|
||||||
|
// equalXML tests for equality of the normalized XML contents of a and b.
|
||||||
|
func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := n.normalize(&buf, a); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
normA := buf.String()
|
||||||
|
buf.Reset()
|
||||||
|
if err := n.normalize(&buf, b); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
normB := buf.String()
|
||||||
|
return normA == normB, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type byName []ixml.Attr
|
||||||
|
|
||||||
|
func (a byName) Len() int { return len(a) }
|
||||||
|
func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
func (a byName) Less(i, j int) bool {
|
||||||
|
if a[i].Name.Space != a[j].Name.Space {
|
||||||
|
return a[i].Name.Space < a[j].Name.Space
|
||||||
|
}
|
||||||
|
return a[i].Name.Local < a[j].Name.Local
|
||||||
|
}
|
152
drives/sync/filesystem.go
Normal file
152
drives/sync/filesystem.go
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/drives/davServer"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FilesystemTargetConfig configures a FilesystemTarget.
type FilesystemTargetConfig struct {
	// Root is the base path on the local filesystem; all target paths
	// are resolved relative to it.
	Root string
}
|
||||||
|
|
||||||
|
// FilesystemTarget is a sync Target backed by the local filesystem.
type FilesystemTarget struct {
	cfg  *FilesystemTargetConfig
	root fs.FS     // read-only view rooted at cfg.Root
	tree []*Object // accumulator filled by recurse during Inventory
}
|
||||||
|
|
||||||
|
func NewFilesystemTarget(cfg *FilesystemTargetConfig) *FilesystemTarget {
|
||||||
|
root := os.DirFS(cfg.Root)
|
||||||
|
return &FilesystemTarget{cfg: cfg, root: root}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inventory returns the recursive listing of the target. A missing root
// yields (nil, nil). When the root is a single file, the inventory is that
// one object and cfg.Root is rewritten to its parent directory so that
// subsequent path joins resolve correctly.
func (t *FilesystemTarget) Inventory() ([]*Object, error) {
	fi, err := os.Stat(t.cfg.Root)
	if os.IsNotExist(err) {
		// Nothing at the root is not an error: empty inventory.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	if !fi.IsDir() {
		// Side effect: cfg.Root now points at the containing directory.
		t.cfg.Root = filepath.Dir(t.cfg.Root)
		return []*Object{{
			Path:     "/" + fi.Name(),
			IsDir:    false,
			Size:     fi.Size(),
			Modified: fi.ModTime(),
		}}, nil
	}

	// Reset the accumulator; recurse appends into t.tree.
	t.tree = nil
	if err := fs.WalkDir(t.root, ".", t.recurse); err != nil {
		return nil, err
	}
	return t.tree, nil
}
|
||||||
|
|
||||||
|
// Dir lists the immediate children of the target root.
// NOTE(review): the path parameter is ignored — this always reads
// t.cfg.Root regardless of the requested path; confirm that is intended.
func (t *FilesystemTarget) Dir(path string) ([]*Object, error) {
	des, err := os.ReadDir(t.cfg.Root)
	if err != nil {
		return nil, err
	}
	var objects []*Object
	for _, de := range des {
		fi, err := de.Info()
		if err != nil {
			return nil, err
		}
		objects = append(objects, &Object{
			Path:     de.Name(),
			IsDir:    de.IsDir(),
			Size:     fi.Size(),
			Modified: fi.ModTime(),
		})
	}
	return objects, nil
}
|
||||||
|
|
||||||
|
func (t *FilesystemTarget) Mkdir(path string) error {
|
||||||
|
return os.MkdirAll(filepath.Join(t.cfg.Root, path), os.ModePerm)
|
||||||
|
}
|
||||||
|
|
||||||
|
// recurse is the fs.WalkDir callback used by Inventory. It appends one
// Object per visited entry (except the root ".") to t.tree, computing an
// ETag either from the file info itself (when it implements
// davServer.ETager) or from the modification time and size.
func (t *FilesystemTarget) recurse(path string, d fs.DirEntry, err error) error {
	if err != nil {
		return err
	}
	fi, err := d.Info()
	if err != nil {
		return err
	}
	etag := ""
	if v, ok := fi.(davServer.ETager); ok {
		etag, err = v.ETag(context.Background())
		if err != nil {
			return err
		}
	} else {
		// Fallback ETag: quoted hex of mtime (UTC, ns) and size.
		etag = fmt.Sprintf(`"%x%x"`, fi.ModTime().UTC().UnixNano(), fi.Size())
	}
	if path != "." {
		outPath := "/" + path
		// Directories are marked with a trailing slash.
		if fi.IsDir() {
			outPath = outPath + "/"
		}
		t.tree = append(t.tree, &Object{
			Path:     outPath,
			IsDir:    fi.IsDir(),
			Size:     fi.Size(),
			Modified: fi.ModTime(),
			ETag:     etag,
		})
	}
	return nil
}
|
||||||
|
|
||||||
|
func (t *FilesystemTarget) ReadStream(path string) (io.ReadCloser, error) {
|
||||||
|
return os.Open(filepath.Join(t.cfg.Root, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *FilesystemTarget) WriteStream(path string, stream io.Reader, mode os.FileMode) error {
|
||||||
|
targetPath := filepath.Join(t.cfg.Root, path)
|
||||||
|
|
||||||
|
if err := os.MkdirAll(filepath.Dir(targetPath), mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f, err := os.Create(targetPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = io.Copy(f, stream)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteStreamWithModTime writes stream to path like WriteStream.
// NOTE(review): modTime is ignored here — the written file keeps the time
// of the write itself; confirm callers do not rely on it being applied.
func (t *FilesystemTarget) WriteStreamWithModTime(path string, stream io.Reader, mode os.FileMode, modTime time.Time) error {
	return t.WriteStream(path, stream, mode)
}
|
||||||
|
|
||||||
|
// Move renames src (resolved against the root) to dest.
// NOTE(review): dest is joined to filepath.Dir(t.cfg.Root) — the PARENT of
// the root — while src is joined to the root itself. Confirm this
// asymmetry is intentional and not a bug.
func (t *FilesystemTarget) Move(src, dest string) error {
	return os.Rename(filepath.Join(t.cfg.Root, src), filepath.Join(filepath.Dir(t.cfg.Root), dest))
}
|
||||||
|
|
||||||
|
func (t *FilesystemTarget) Rm(path string) error {
|
||||||
|
return os.RemoveAll(filepath.Join(t.cfg.Root, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *FilesystemTarget) SetModificationTime(path string, mtime time.Time) error {
|
||||||
|
targetPath := filepath.Join(t.cfg.Root, path)
|
||||||
|
if err := os.Chtimes(targetPath, time.Now(), mtime); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
27
drives/sync/model.go
Normal file
27
drives/sync/model.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Object describes a single file or directory in a Target's inventory.
type Object struct {
	Path     string    // path relative to the target root
	IsDir    bool      // true for directories
	Size     int64     // size in bytes
	Modified time.Time // last modification time
	ETag     string    // entity tag, when the backing store provides one
}
|
||||||
|
|
||||||
|
// Target abstracts a tree of files that can be inventoried, read, and
// written. Implementations in this package: filesystem, WebDAV, and zrok.
type Target interface {
	// Inventory returns the full recursive listing of the target.
	Inventory() ([]*Object, error)
	// Dir lists the immediate children of path.
	Dir(path string) ([]*Object, error)
	// Mkdir creates a directory at path.
	Mkdir(path string) error
	// ReadStream opens path for reading; the caller closes the stream.
	ReadStream(path string) (io.ReadCloser, error)
	// WriteStream writes stream to path with the given mode.
	WriteStream(path string, stream io.Reader, mode os.FileMode) error
	// WriteStreamWithModTime writes stream to path, preserving modTime
	// where the backend supports it.
	WriteStreamWithModTime(path string, stream io.Reader, mode os.FileMode, modTime time.Time) error
	// Move renames src to dest.
	Move(src, dest string) error
	// Rm removes path recursively.
	Rm(path string) error
	// SetModificationTime sets the modification time of path.
	SetModificationTime(path string, mtime time.Time) error
}
|
59
drives/sync/synchronizer.go
Normal file
59
drives/sync/synchronizer.go
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func OneWay(src, dst Target, sync bool) error {
|
||||||
|
srcTree, err := src.Inventory()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "error creating source inventory")
|
||||||
|
}
|
||||||
|
|
||||||
|
var dstTree []*Object
|
||||||
|
if sync {
|
||||||
|
dstTree, err = dst.Inventory()
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "error creating destination inventory")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dstIndex := make(map[string]*Object)
|
||||||
|
for _, f := range dstTree {
|
||||||
|
dstIndex[f.Path] = f
|
||||||
|
}
|
||||||
|
|
||||||
|
var copyList []*Object
|
||||||
|
for _, srcF := range srcTree {
|
||||||
|
if dstF, found := dstIndex[srcF.Path]; found {
|
||||||
|
if !srcF.IsDir && (dstF.Size != srcF.Size || dstF.Modified.Unix() != srcF.Modified.Unix()) {
|
||||||
|
logrus.Debugf("%v <- dstF.Size = '%d', srcF.Size = '%d', dstF.Modified.UTC = '%d', srcF.Modified.UTC = '%d'", srcF.Path, dstF.Size, srcF.Size, dstF.Modified.Unix(), srcF.Modified.Unix())
|
||||||
|
copyList = append(copyList, srcF)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logrus.Debugf("%v <- !found", srcF.Path)
|
||||||
|
copyList = append(copyList, srcF)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, copyPath := range copyList {
|
||||||
|
if copyPath.IsDir {
|
||||||
|
if err := dst.Mkdir(copyPath.Path); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ss, err := src.ReadStream(copyPath.Path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := dst.WriteStreamWithModTime(copyPath.Path, ss, os.ModePerm, copyPath.Modified); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
logrus.Infof("=> %v", copyPath.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
34
drives/sync/target.go
Normal file
34
drives/sync/target.go
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/openziti/zrok/environment/env_core"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TargetForURL(url *url.URL, root env_core.Root, basicAuth string) (Target, error) {
|
||||||
|
switch url.Scheme {
|
||||||
|
case "file":
|
||||||
|
return NewFilesystemTarget(&FilesystemTargetConfig{Root: url.Path}), nil
|
||||||
|
|
||||||
|
case "zrok":
|
||||||
|
return NewZrokTarget(&ZrokTargetConfig{URL: url, Root: root})
|
||||||
|
|
||||||
|
case "http", "https":
|
||||||
|
var username string
|
||||||
|
var password string
|
||||||
|
if basicAuth != "" {
|
||||||
|
authTokens := strings.Split(basicAuth, ":")
|
||||||
|
if len(authTokens) != 2 {
|
||||||
|
return nil, errors.Errorf("invalid basic authentication (expect 'username:password')")
|
||||||
|
}
|
||||||
|
username = authTokens[0]
|
||||||
|
password = authTokens[1]
|
||||||
|
}
|
||||||
|
return NewWebDAVTarget(&WebDAVTargetConfig{URL: url, Username: username, Password: password})
|
||||||
|
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("unknown URL scheme '%v'", url.Scheme)
|
||||||
|
}
|
||||||
|
}
|
144
drives/sync/webdav.go
Normal file
144
drives/sync/webdav.go
Normal file
@ -0,0 +1,144 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/openziti/zrok/drives/davClient"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WebDAVTargetConfig configures a WebDAVTarget.
type WebDAVTargetConfig struct {
	URL      *url.URL // base URL of the WebDAV server
	Username string   // basic-auth username (optional)
	Password string   // basic-auth password (optional)
}
|
||||||
|
|
||||||
|
// WebDAVTarget is a sync Target backed by a remote WebDAV server.
type WebDAVTarget struct {
	cfg *WebDAVTargetConfig
	dc  *davClient.Client // WebDAV client bound to cfg.URL
}
|
||||||
|
|
||||||
|
func NewWebDAVTarget(cfg *WebDAVTargetConfig) (*WebDAVTarget, error) {
|
||||||
|
var httpClient davClient.HTTPClient
|
||||||
|
httpClient = http.DefaultClient
|
||||||
|
if cfg.Username != "" || cfg.Password != "" {
|
||||||
|
httpClient = davClient.HTTPClientWithBasicAuth(httpClient, cfg.Username, cfg.Password)
|
||||||
|
}
|
||||||
|
dc, err := davClient.NewClient(httpClient, cfg.URL.String())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &WebDAVTarget{cfg: cfg, dc: dc}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inventory returns the recursive listing of the remote tree. When the
// configured URL points at a single file, the inventory is that one object
// and cfg.URL.Path is rewritten to its parent directory so that subsequent
// path joins resolve correctly.
func (t *WebDAVTarget) Inventory() ([]*Object, error) {
	rootFi, err := t.dc.Stat(context.Background(), t.cfg.URL.Path)
	if err != nil {
		return nil, err
	}

	if !rootFi.IsDir {
		base := filepath.Base(t.cfg.URL.Path)
		// Side effect: cfg.URL.Path now points at the containing dir.
		t.cfg.URL.Path = filepath.Dir(t.cfg.URL.Path)
		return []*Object{{
			Path:     "/" + base,
			IsDir:    false,
			Size:     rootFi.Size,
			Modified: rootFi.ModTime,
		}}, nil
	}

	// Recursive listing relative to the client's base URL.
	fis, err := t.dc.Readdir(context.Background(), "", true)
	if err != nil {
		return nil, err
	}
	var objects []*Object
	for _, fi := range fis {
		// Skip the root entry itself.
		if fi.Path != "/" {
			objects = append(objects, &Object{
				Path:     fi.Path,
				IsDir:    fi.IsDir,
				Size:     fi.Size,
				Modified: fi.ModTime,
			})
		}
	}
	return objects, nil
}
|
||||||
|
|
||||||
|
// Dir lists the immediate children of the configured base path.
// NOTE(review): the path parameter is ignored — this always lists
// t.cfg.URL.Path; confirm that is intended.
func (t *WebDAVTarget) Dir(path string) ([]*Object, error) {
	fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, false)
	if err != nil {
		return nil, err
	}
	var objects []*Object
	for _, fi := range fis {
		// Skip the root and the listed directory itself.
		if fi.Path != "/" && fi.Path != t.cfg.URL.Path+"/" {
			objects = append(objects, &Object{
				Path:     filepath.Base(fi.Path),
				IsDir:    fi.IsDir,
				Size:     fi.Size,
				Modified: fi.ModTime,
			})
		}
	}
	return objects, nil
}
|
||||||
|
|
||||||
|
// Mkdir creates a directory at path beneath the base URL. It is a no-op
// when a directory already exists there, and an error when a
// non-directory occupies the path.
func (t *WebDAVTarget) Mkdir(path string) error {
	fi, err := t.dc.Stat(context.Background(), filepath.Join(t.cfg.URL.Path, path))
	if err == nil {
		if fi.IsDir {
			return nil
		}
		return errors.Errorf("'%v' already exists; not directory", path)
	}
	// Stat failed (typically: does not exist) — attempt the MKCOL.
	return t.dc.Mkdir(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
|
||||||
|
|
||||||
|
func (t *WebDAVTarget) ReadStream(path string) (io.ReadCloser, error) {
|
||||||
|
return t.dc.Open(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *WebDAVTarget) WriteStream(path string, rs io.Reader, _ os.FileMode) error {
|
||||||
|
ws, err := t.dc.Create(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() { _ = ws.Close() }()
|
||||||
|
_, err = io.Copy(ws, rs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *WebDAVTarget) WriteStreamWithModTime(path string, rs io.Reader, _ os.FileMode, modTime time.Time) error {
|
||||||
|
ws, err := t.dc.CreateWithModTime(context.Background(), filepath.Join(t.cfg.URL.Path, path), modTime)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() { _ = ws.Close() }()
|
||||||
|
_, err = io.Copy(ws, rs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move renames src (resolved against the base path) to dest, overwriting
// any existing destination.
// NOTE(review): dest is passed through unjoined while src is joined to the
// base path — confirm callers pass a fully resolved destination.
func (t *WebDAVTarget) Move(src, dest string) error {
	return t.dc.MoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, src), dest, true)
}
|
||||||
|
|
||||||
|
func (t *WebDAVTarget) Rm(path string) error {
|
||||||
|
return t.dc.RemoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetModificationTime sets the modification time of path beneath the base
// URL via the client's Touch operation.
func (t *WebDAVTarget) SetModificationTime(path string, mtime time.Time) error {
	return t.dc.Touch(context.Background(), filepath.Join(t.cfg.URL.Path, path), mtime)
}
|
156
drives/sync/zrok.go
Normal file
156
drives/sync/zrok.go
Normal file
@ -0,0 +1,156 @@
|
|||||||
|
package sync
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/openziti/zrok/drives/davClient"
|
||||||
|
"github.com/openziti/zrok/environment/env_core"
|
||||||
|
"github.com/openziti/zrok/sdk/golang/sdk"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ZrokTargetConfig configures a ZrokTarget.
type ZrokTargetConfig struct {
	URL  *url.URL      // zrok:// share URL
	Root env_core.Root // zrok environment used to dial the share
}
|
||||||
|
|
||||||
|
// ZrokTarget is a sync Target backed by a WebDAV server reached over a
// zrok share.
type ZrokTarget struct {
	cfg *ZrokTargetConfig
	dc  *davClient.Client // WebDAV client routed through the zrok share
}
|
||||||
|
|
||||||
|
// zrokDialContext adapts sdk.NewDialer to the http.Transport DialContext
// signature, so HTTP requests are carried over a zrok share.
type zrokDialContext struct {
	root env_core.Root
}

// Dial treats the host portion of addr (everything before the first ':')
// as the zrok share token and dials it through the SDK.
func (zdc *zrokDialContext) Dial(_ context.Context, _, addr string) (net.Conn, error) {
	share := strings.Split(addr, ":")[0]
	return sdk.NewDialer(share, zdc.root)
}
|
||||||
|
|
||||||
|
// NewZrokTarget builds a WebDAV client whose transport dials through the
// zrok share named in cfg.URL. The "zrok:" scheme is rewritten to "http:"
// for the underlying client.
// NOTE(review): TLS verification is disabled on this transport
// (InsecureSkipVerify) — presumably because the share itself provides the
// secure channel; confirm.
func NewZrokTarget(cfg *ZrokTargetConfig) (*ZrokTarget, error) {
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.DialContext = (&zrokDialContext{cfg.Root}).Dial
	transport.TLSClientConfig.InsecureSkipVerify = true
	httpUrl := strings.Replace(cfg.URL.String(), "zrok:", "http:", 1)
	dc, err := davClient.NewClient(&http.Client{Transport: transport}, httpUrl)
	if err != nil {
		return nil, err
	}
	return &ZrokTarget{cfg: cfg, dc: dc}, nil
}
|
||||||
|
|
||||||
|
// Inventory returns the recursive listing of the share. When the
// configured URL points at a single file, the inventory is that one object
// and cfg.URL.Path is rewritten to its parent directory so that subsequent
// path joins resolve correctly. Unlike the plain WebDAV target, ETags from
// the server are carried through.
func (t *ZrokTarget) Inventory() ([]*Object, error) {
	rootFi, err := t.dc.Stat(context.Background(), t.cfg.URL.Path)
	if err != nil {
		return nil, err
	}

	if !rootFi.IsDir {
		base := filepath.Base(t.cfg.URL.Path)
		// Side effect: cfg.URL.Path now points at the containing dir.
		t.cfg.URL.Path = filepath.Dir(t.cfg.URL.Path)
		return []*Object{{
			Path:     "/" + base,
			IsDir:    false,
			Size:     rootFi.Size,
			Modified: rootFi.ModTime,
		}}, nil
	}

	fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, true)
	if err != nil {
		return nil, err
	}
	var objects []*Object
	for _, fi := range fis {
		// Skip the root entry itself.
		if fi.Path != "/" {
			objects = append(objects, &Object{
				Path:     fi.Path,
				IsDir:    fi.IsDir,
				Size:     fi.Size,
				Modified: fi.ModTime,
				ETag:     fi.ETag,
			})
		}
	}
	return objects, nil
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) Dir(path string) ([]*Object, error) {
|
||||||
|
fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var objects []*Object
|
||||||
|
for _, fi := range fis {
|
||||||
|
if fi.Path != "/" && fi.Path != t.cfg.URL.Path+"/" {
|
||||||
|
objects = append(objects, &Object{
|
||||||
|
Path: filepath.Base(fi.Path),
|
||||||
|
IsDir: fi.IsDir,
|
||||||
|
Size: fi.Size,
|
||||||
|
Modified: fi.ModTime,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return objects, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) Mkdir(path string) error {
|
||||||
|
fi, err := t.dc.Stat(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
if err == nil {
|
||||||
|
if fi.IsDir {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.Errorf("'%v' already exists; not directory", path)
|
||||||
|
}
|
||||||
|
return t.dc.Mkdir(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) ReadStream(path string) (io.ReadCloser, error) {
|
||||||
|
return t.dc.Open(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) WriteStream(path string, rs io.Reader, _ os.FileMode) error {
|
||||||
|
ws, err := t.dc.Create(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() { _ = ws.Close() }()
|
||||||
|
_, err = io.Copy(ws, rs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) WriteStreamWithModTime(path string, rs io.Reader, _ os.FileMode, modTime time.Time) error {
|
||||||
|
ws, err := t.dc.CreateWithModTime(context.Background(), filepath.Join(t.cfg.URL.Path, path), modTime)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() { _ = ws.Close() }()
|
||||||
|
_, err = io.Copy(ws, rs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) Move(src, dest string) error {
|
||||||
|
return t.dc.MoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, src), dest, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) Rm(path string) error {
|
||||||
|
return t.dc.RemoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, path))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *ZrokTarget) SetModificationTime(path string, mtime time.Time) error {
|
||||||
|
return t.dc.Touch(context.Background(), filepath.Join(t.cfg.URL.Path, path), mtime)
|
||||||
|
}
|
@ -4,9 +4,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"github.com/openziti/sdk-golang/ziti"
|
"github.com/openziti/sdk-golang/ziti"
|
||||||
"github.com/openziti/sdk-golang/ziti/edge"
|
"github.com/openziti/sdk-golang/ziti/edge"
|
||||||
|
"github.com/openziti/zrok/drives/davServer"
|
||||||
"github.com/openziti/zrok/endpoints"
|
"github.com/openziti/zrok/endpoints"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"golang.org/x/net/webdav"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
@ -42,9 +42,9 @@ func NewBackend(cfg *BackendConfig) (*Backend, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
handler := &webdav.Handler{
|
handler := &davServer.Handler{
|
||||||
FileSystem: webdav.Dir(cfg.DriveRoot),
|
FileSystem: davServer.Dir(cfg.DriveRoot),
|
||||||
LockSystem: webdav.NewMemLS(),
|
LockSystem: davServer.NewMemLS(),
|
||||||
Logger: func(r *http.Request, err error) {
|
Logger: func(r *http.Request, err error) {
|
||||||
if cfg.Requests != nil {
|
if cfg.Requests != nil {
|
||||||
cfg.Requests <- &endpoints.Request{
|
cfg.Requests <- &endpoints.Request{
|
||||||
|
53
endpoints/socks/backend.go
Normal file
53
endpoints/socks/backend.go
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
package socks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/openziti/sdk-golang/ziti"
|
||||||
|
"github.com/openziti/sdk-golang/ziti/edge"
|
||||||
|
"github.com/openziti/zrok/endpoints"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type BackendConfig struct {
|
||||||
|
IdentityPath string
|
||||||
|
ShrToken string
|
||||||
|
Requests chan *endpoints.Request
|
||||||
|
}
|
||||||
|
|
||||||
|
type Backend struct {
|
||||||
|
cfg *BackendConfig
|
||||||
|
listener edge.Listener
|
||||||
|
server *Server
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBackend(cfg *BackendConfig) (*Backend, error) {
|
||||||
|
options := ziti.ListenOptions{
|
||||||
|
ConnectTimeout: 5 * time.Minute,
|
||||||
|
WaitForNEstablishedListeners: 1,
|
||||||
|
}
|
||||||
|
zcfg, err := ziti.NewConfigFromFile(cfg.IdentityPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "error loading ziti identity")
|
||||||
|
}
|
||||||
|
zctx, err := ziti.NewContext(zcfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "error loading ziti context")
|
||||||
|
}
|
||||||
|
listener, err := zctx.ListenWithOptions(cfg.ShrToken, &options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Backend{
|
||||||
|
cfg: cfg,
|
||||||
|
listener: listener,
|
||||||
|
server: &Server{Requests: cfg.Requests},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Backend) Run() error {
|
||||||
|
if err := b.server.Serve(b.listener); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
415
endpoints/socks/socks5.go
Executable file
415
endpoints/socks/socks5.go
Executable file
@ -0,0 +1,415 @@
|
|||||||
|
// Copyright (c) Tailscale Inc & AUTHORS
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Package socks5 is a SOCKS5 server implementation.
|
||||||
|
//
|
||||||
|
// This is used for userspace networking in Tailscale. Specifically,
|
||||||
|
// this is used for dialing out of the machine to other nodes, without
|
||||||
|
// the host kernel's involvement, so it doesn't proper routing tables,
|
||||||
|
// TUN, IPv6, etc. This package is meant to only handle the SOCKS5 protocol
|
||||||
|
// details and not any integration with Tailscale internals itself.
|
||||||
|
//
|
||||||
|
// The glue between this package and Tailscale is in net/socks5/tssocks.
|
||||||
|
package socks
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"github.com/openziti/zrok/endpoints"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Authentication METHODs described in RFC 1928, section 3.
|
||||||
|
const (
|
||||||
|
noAuthRequired byte = 0
|
||||||
|
passwordAuth byte = 2
|
||||||
|
noAcceptableAuth byte = 255
|
||||||
|
)
|
||||||
|
|
||||||
|
// passwordAuthVersion is the auth version byte described in RFC 1929.
|
||||||
|
const passwordAuthVersion = 1
|
||||||
|
|
||||||
|
// socks5Version is the byte that represents the SOCKS version
|
||||||
|
// in requests.
|
||||||
|
const socks5Version byte = 5
|
||||||
|
|
||||||
|
// commandType are the bytes sent in SOCKS5 packets
|
||||||
|
// that represent the kind of connection the client needs.
|
||||||
|
type commandType byte
|
||||||
|
|
||||||
|
// The set of valid SOCKS5 commands as described in RFC 1928.
|
||||||
|
const (
|
||||||
|
connect commandType = 1
|
||||||
|
bind commandType = 2
|
||||||
|
udpAssociate commandType = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
// addrType are the bytes sent in SOCKS5 packets
|
||||||
|
// that represent particular address types.
|
||||||
|
type addrType byte
|
||||||
|
|
||||||
|
// The set of valid SOCKS5 address types as defined in RFC 1928.
|
||||||
|
const (
|
||||||
|
ipv4 addrType = 1
|
||||||
|
domainName addrType = 3
|
||||||
|
ipv6 addrType = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
// replyCode are the bytes sent in SOCKS5 packets
|
||||||
|
// that represent replies from the server to a client
|
||||||
|
// request.
|
||||||
|
type replyCode byte
|
||||||
|
|
||||||
|
// The set of valid SOCKS5 reply types as per the RFC 1928.
|
||||||
|
const (
|
||||||
|
success replyCode = 0
|
||||||
|
generalFailure replyCode = 1
|
||||||
|
connectionNotAllowed replyCode = 2
|
||||||
|
networkUnreachable replyCode = 3
|
||||||
|
hostUnreachable replyCode = 4
|
||||||
|
connectionRefused replyCode = 5
|
||||||
|
ttlExpired replyCode = 6
|
||||||
|
commandNotSupported replyCode = 7
|
||||||
|
addrTypeNotSupported replyCode = 8
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server is a SOCKS5 proxy server.
|
||||||
|
type Server struct {
|
||||||
|
// Dialer optionally specifies the dialer to use for outgoing connections.
|
||||||
|
// If nil, the net package's standard dialer is used.
|
||||||
|
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||||
|
|
||||||
|
// Username and Password, if set, are the credential clients must provide.
|
||||||
|
Username string
|
||||||
|
Password string
|
||||||
|
|
||||||
|
// For notifying user-facing components about activity
|
||||||
|
Requests chan *endpoints.Request
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) dial(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||||
|
dial := s.Dialer
|
||||||
|
if dial == nil {
|
||||||
|
dialer := &net.Dialer{}
|
||||||
|
dial = dialer.DialContext
|
||||||
|
}
|
||||||
|
return dial(ctx, network, addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve accepts and handles incoming connections on the given listener.
|
||||||
|
func (s *Server) Serve(l net.Listener) error {
|
||||||
|
defer l.Close()
|
||||||
|
for {
|
||||||
|
c, err := l.Accept()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
defer c.Close()
|
||||||
|
conn := &Conn{clientConn: c, srv: s}
|
||||||
|
err := conn.Run()
|
||||||
|
if err != nil {
|
||||||
|
logrus.Infof("client connection failed: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conn is a SOCKS5 connection for client to reach
|
||||||
|
// server.
|
||||||
|
type Conn struct {
|
||||||
|
// The struct is filled by each of the internal
|
||||||
|
// methods in turn as the transaction progresses.
|
||||||
|
|
||||||
|
srv *Server
|
||||||
|
clientConn net.Conn
|
||||||
|
request *request
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run starts the new connection.
|
||||||
|
func (c *Conn) Run() error {
|
||||||
|
needAuth := c.srv.Username != "" || c.srv.Password != ""
|
||||||
|
authMethod := noAuthRequired
|
||||||
|
if needAuth {
|
||||||
|
authMethod = passwordAuth
|
||||||
|
}
|
||||||
|
|
||||||
|
err := parseClientGreeting(c.clientConn, authMethod)
|
||||||
|
if err != nil {
|
||||||
|
c.clientConn.Write([]byte{socks5Version, noAcceptableAuth})
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientConn.Write([]byte{socks5Version, authMethod})
|
||||||
|
if !needAuth {
|
||||||
|
return c.handleRequest()
|
||||||
|
}
|
||||||
|
|
||||||
|
user, pwd, err := parseClientAuth(c.clientConn)
|
||||||
|
if err != nil || user != c.srv.Username || pwd != c.srv.Password {
|
||||||
|
c.clientConn.Write([]byte{1, 1}) // auth error
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientConn.Write([]byte{1, 0}) // auth success
|
||||||
|
|
||||||
|
return c.handleRequest()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Conn) handleRequest() error {
|
||||||
|
req, err := parseClientRequest(c.clientConn)
|
||||||
|
if err != nil {
|
||||||
|
res := &response{reply: generalFailure}
|
||||||
|
buf, _ := res.marshal()
|
||||||
|
c.clientConn.Write(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if req.command != connect {
|
||||||
|
res := &response{reply: commandNotSupported}
|
||||||
|
buf, _ := res.marshal()
|
||||||
|
c.clientConn.Write(buf)
|
||||||
|
return fmt.Errorf("unsupported command %v", req.command)
|
||||||
|
}
|
||||||
|
c.request = req
|
||||||
|
|
||||||
|
if c.srv.Requests != nil {
|
||||||
|
c.srv.Requests <- &endpoints.Request{
|
||||||
|
Stamp: time.Now(),
|
||||||
|
Method: "CONNECT",
|
||||||
|
Path: fmt.Sprintf("%v:%d", c.request.destination, c.request.port),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
srv, err := c.srv.dial(
|
||||||
|
ctx,
|
||||||
|
"tcp",
|
||||||
|
net.JoinHostPort(c.request.destination, strconv.Itoa(int(c.request.port))),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
res := &response{reply: generalFailure}
|
||||||
|
buf, _ := res.marshal()
|
||||||
|
c.clientConn.Write(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer srv.Close()
|
||||||
|
serverAddr, serverPortStr, err := net.SplitHostPort(srv.LocalAddr().String())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
serverPort, _ := strconv.Atoi(serverPortStr)
|
||||||
|
|
||||||
|
var bindAddrType addrType
|
||||||
|
if ip := net.ParseIP(serverAddr); ip != nil {
|
||||||
|
if ip.To4() != nil {
|
||||||
|
bindAddrType = ipv4
|
||||||
|
} else {
|
||||||
|
bindAddrType = ipv6
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
bindAddrType = domainName
|
||||||
|
}
|
||||||
|
res := &response{
|
||||||
|
reply: success,
|
||||||
|
bindAddrType: bindAddrType,
|
||||||
|
bindAddr: serverAddr,
|
||||||
|
bindPort: uint16(serverPort),
|
||||||
|
}
|
||||||
|
buf, err := res.marshal()
|
||||||
|
if err != nil {
|
||||||
|
res = &response{reply: generalFailure}
|
||||||
|
buf, _ = res.marshal()
|
||||||
|
}
|
||||||
|
c.clientConn.Write(buf)
|
||||||
|
|
||||||
|
errc := make(chan error, 2)
|
||||||
|
go func() {
|
||||||
|
_, err := io.Copy(c.clientConn, srv)
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("from backend to client: %w", err)
|
||||||
|
}
|
||||||
|
errc <- err
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
_, err := io.Copy(srv, c.clientConn)
|
||||||
|
if err != nil {
|
||||||
|
err = fmt.Errorf("from client to backend: %w", err)
|
||||||
|
}
|
||||||
|
errc <- err
|
||||||
|
}()
|
||||||
|
return <-errc
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseClientGreeting parses a request initiation packet.
|
||||||
|
func parseClientGreeting(r io.Reader, authMethod byte) error {
|
||||||
|
var hdr [2]byte
|
||||||
|
_, err := io.ReadFull(r, hdr[:])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not read packet header")
|
||||||
|
}
|
||||||
|
if hdr[0] != socks5Version {
|
||||||
|
return fmt.Errorf("incompatible SOCKS version")
|
||||||
|
}
|
||||||
|
count := int(hdr[1])
|
||||||
|
methods := make([]byte, count)
|
||||||
|
_, err = io.ReadFull(r, methods)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not read methods")
|
||||||
|
}
|
||||||
|
for _, m := range methods {
|
||||||
|
if m == authMethod {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("no acceptable auth methods")
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseClientAuth(r io.Reader) (usr, pwd string, err error) {
|
||||||
|
var hdr [2]byte
|
||||||
|
if _, err := io.ReadFull(r, hdr[:]); err != nil {
|
||||||
|
return "", "", fmt.Errorf("could not read auth packet header")
|
||||||
|
}
|
||||||
|
if hdr[0] != passwordAuthVersion {
|
||||||
|
return "", "", fmt.Errorf("bad SOCKS auth version")
|
||||||
|
}
|
||||||
|
usrLen := int(hdr[1])
|
||||||
|
usrBytes := make([]byte, usrLen)
|
||||||
|
if _, err := io.ReadFull(r, usrBytes); err != nil {
|
||||||
|
return "", "", fmt.Errorf("could not read auth packet username")
|
||||||
|
}
|
||||||
|
var hdrPwd [1]byte
|
||||||
|
if _, err := io.ReadFull(r, hdrPwd[:]); err != nil {
|
||||||
|
return "", "", fmt.Errorf("could not read auth packet password length")
|
||||||
|
}
|
||||||
|
pwdLen := int(hdrPwd[0])
|
||||||
|
pwdBytes := make([]byte, pwdLen)
|
||||||
|
if _, err := io.ReadFull(r, pwdBytes); err != nil {
|
||||||
|
return "", "", fmt.Errorf("could not read auth packet password")
|
||||||
|
}
|
||||||
|
return string(usrBytes), string(pwdBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// request represents data contained within a SOCKS5
|
||||||
|
// connection request packet.
|
||||||
|
type request struct {
|
||||||
|
command commandType
|
||||||
|
destination string
|
||||||
|
port uint16
|
||||||
|
destAddrType addrType
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseClientRequest converts raw packet bytes into a
|
||||||
|
// SOCKS5Request struct.
|
||||||
|
func parseClientRequest(r io.Reader) (*request, error) {
|
||||||
|
var hdr [4]byte
|
||||||
|
_, err := io.ReadFull(r, hdr[:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read packet header")
|
||||||
|
}
|
||||||
|
cmd := hdr[1]
|
||||||
|
destAddrType := addrType(hdr[3])
|
||||||
|
|
||||||
|
var destination string
|
||||||
|
var port uint16
|
||||||
|
|
||||||
|
if destAddrType == ipv4 {
|
||||||
|
var ip [4]byte
|
||||||
|
_, err = io.ReadFull(r, ip[:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read IPv4 address")
|
||||||
|
}
|
||||||
|
destination = net.IP(ip[:]).String()
|
||||||
|
} else if destAddrType == domainName {
|
||||||
|
var dstSizeByte [1]byte
|
||||||
|
_, err = io.ReadFull(r, dstSizeByte[:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read domain name size")
|
||||||
|
}
|
||||||
|
dstSize := int(dstSizeByte[0])
|
||||||
|
domainName := make([]byte, dstSize)
|
||||||
|
_, err = io.ReadFull(r, domainName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read domain name")
|
||||||
|
}
|
||||||
|
destination = string(domainName)
|
||||||
|
} else if destAddrType == ipv6 {
|
||||||
|
var ip [16]byte
|
||||||
|
_, err = io.ReadFull(r, ip[:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read IPv6 address")
|
||||||
|
}
|
||||||
|
destination = net.IP(ip[:]).String()
|
||||||
|
} else {
|
||||||
|
return nil, fmt.Errorf("unsupported address type")
|
||||||
|
}
|
||||||
|
var portBytes [2]byte
|
||||||
|
_, err = io.ReadFull(r, portBytes[:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read port")
|
||||||
|
}
|
||||||
|
port = binary.BigEndian.Uint16(portBytes[:])
|
||||||
|
|
||||||
|
return &request{
|
||||||
|
command: commandType(cmd),
|
||||||
|
destination: destination,
|
||||||
|
port: port,
|
||||||
|
destAddrType: destAddrType,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// response contains the contents of
|
||||||
|
// a response packet sent from the proxy
|
||||||
|
// to the client.
|
||||||
|
type response struct {
|
||||||
|
reply replyCode
|
||||||
|
bindAddrType addrType
|
||||||
|
bindAddr string
|
||||||
|
bindPort uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshal converts a SOCKS5Response struct into
|
||||||
|
// a packet. If res.reply == Success, it may throw an error on
|
||||||
|
// receiving an invalid bind address. Otherwise, it will not throw.
|
||||||
|
func (res *response) marshal() ([]byte, error) {
|
||||||
|
pkt := make([]byte, 4)
|
||||||
|
pkt[0] = socks5Version
|
||||||
|
pkt[1] = byte(res.reply)
|
||||||
|
pkt[2] = 0 // null reserved byte
|
||||||
|
pkt[3] = byte(res.bindAddrType)
|
||||||
|
|
||||||
|
if res.reply != success {
|
||||||
|
return pkt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var addr []byte
|
||||||
|
switch res.bindAddrType {
|
||||||
|
case ipv4:
|
||||||
|
addr = net.ParseIP(res.bindAddr).To4()
|
||||||
|
if addr == nil {
|
||||||
|
return nil, fmt.Errorf("invalid IPv4 address for binding")
|
||||||
|
}
|
||||||
|
case domainName:
|
||||||
|
if len(res.bindAddr) > 255 {
|
||||||
|
return nil, fmt.Errorf("invalid domain name for binding")
|
||||||
|
}
|
||||||
|
addr = make([]byte, 0, len(res.bindAddr)+1)
|
||||||
|
addr = append(addr, byte(len(res.bindAddr)))
|
||||||
|
addr = append(addr, []byte(res.bindAddr)...)
|
||||||
|
case ipv6:
|
||||||
|
addr = net.ParseIP(res.bindAddr).To16()
|
||||||
|
if addr == nil {
|
||||||
|
return nil, fmt.Errorf("invalid IPv6 address for binding")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported address type")
|
||||||
|
}
|
||||||
|
|
||||||
|
pkt = append(pkt, addr...)
|
||||||
|
pkt = binary.BigEndian.AppendUint16(pkt, uint16(res.bindPort))
|
||||||
|
|
||||||
|
return pkt, nil
|
||||||
|
}
|
56
go.mod
56
go.mod
@ -9,14 +9,14 @@ require (
|
|||||||
github.com/charmbracelet/bubbletea v0.23.1
|
github.com/charmbracelet/bubbletea v0.23.1
|
||||||
github.com/charmbracelet/lipgloss v0.6.0
|
github.com/charmbracelet/lipgloss v0.6.0
|
||||||
github.com/go-openapi/errors v0.21.0
|
github.com/go-openapi/errors v0.21.0
|
||||||
github.com/go-openapi/loads v0.21.3
|
github.com/go-openapi/loads v0.21.5
|
||||||
github.com/go-openapi/runtime v0.26.2
|
github.com/go-openapi/runtime v0.27.1
|
||||||
github.com/go-openapi/spec v0.20.12
|
github.com/go-openapi/spec v0.20.14
|
||||||
github.com/go-openapi/strfmt v0.21.10
|
github.com/go-openapi/strfmt v0.22.0
|
||||||
github.com/go-openapi/swag v0.22.5
|
github.com/go-openapi/swag v0.22.9
|
||||||
github.com/go-openapi/validate v0.22.4
|
github.com/go-openapi/validate v0.22.6
|
||||||
github.com/golang-jwt/jwt/v5 v5.0.0
|
github.com/golang-jwt/jwt/v5 v5.2.0
|
||||||
github.com/google/uuid v1.5.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/gorilla/websocket v1.5.1
|
github.com/gorilla/websocket v1.5.1
|
||||||
github.com/iancoleman/strcase v0.2.0
|
github.com/iancoleman/strcase v0.2.0
|
||||||
github.com/influxdata/influxdb-client-go/v2 v2.11.0
|
github.com/influxdata/influxdb-client-go/v2 v2.11.0
|
||||||
@ -30,25 +30,25 @@ require (
|
|||||||
github.com/michaelquigley/pfxlog v0.6.10
|
github.com/michaelquigley/pfxlog v0.6.10
|
||||||
github.com/muesli/reflow v0.3.0
|
github.com/muesli/reflow v0.3.0
|
||||||
github.com/nxadm/tail v1.4.8
|
github.com/nxadm/tail v1.4.8
|
||||||
github.com/openziti/channel/v2 v2.0.113
|
github.com/openziti/channel/v2 v2.0.119
|
||||||
github.com/openziti/edge-api v0.26.7
|
github.com/openziti/edge-api v0.26.10
|
||||||
github.com/openziti/fabric v0.23.26
|
github.com/openziti/fabric v0.23.26
|
||||||
github.com/openziti/identity v1.0.68
|
github.com/openziti/identity v1.0.70
|
||||||
github.com/openziti/sdk-golang v0.22.6
|
github.com/openziti/sdk-golang v0.22.28
|
||||||
github.com/openziti/transport/v2 v2.0.119
|
github.com/openziti/transport/v2 v2.0.122
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/rabbitmq/amqp091-go v1.8.1
|
github.com/rabbitmq/amqp091-go v1.8.1
|
||||||
github.com/rubenv/sql-migrate v1.6.0
|
github.com/rubenv/sql-migrate v1.6.0
|
||||||
github.com/shirou/gopsutil/v3 v3.23.11
|
github.com/shirou/gopsutil/v3 v3.24.1
|
||||||
github.com/sirupsen/logrus v1.9.3
|
github.com/sirupsen/logrus v1.9.3
|
||||||
github.com/spf13/cobra v1.8.0
|
github.com/spf13/cobra v1.8.0
|
||||||
github.com/stretchr/testify v1.8.4
|
github.com/stretchr/testify v1.8.4
|
||||||
github.com/wneessen/go-mail v0.2.7
|
github.com/wneessen/go-mail v0.2.7
|
||||||
github.com/zitadel/oidc/v2 v2.12.0
|
github.com/zitadel/oidc/v2 v2.12.0
|
||||||
go.uber.org/zap v1.25.0
|
go.uber.org/zap v1.25.0
|
||||||
golang.org/x/crypto v0.17.0
|
golang.org/x/crypto v0.18.0
|
||||||
golang.org/x/net v0.19.0
|
golang.org/x/net v0.20.0
|
||||||
golang.org/x/oauth2 v0.13.0
|
golang.org/x/oauth2 v0.16.0
|
||||||
golang.org/x/time v0.3.0
|
golang.org/x/time v0.3.0
|
||||||
nhooyr.io/websocket v1.8.10
|
nhooyr.io/websocket v1.8.10
|
||||||
)
|
)
|
||||||
@ -97,13 +97,13 @@ require (
|
|||||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||||
github.com/go-kit/kit v0.10.0 // indirect
|
github.com/go-kit/kit v0.10.0 // indirect
|
||||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||||
github.com/go-logr/logr v1.3.0 // indirect
|
github.com/go-logr/logr v1.4.1 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||||
github.com/go-openapi/analysis v0.21.5 // indirect
|
github.com/go-openapi/analysis v0.22.2 // indirect
|
||||||
github.com/go-openapi/jsonpointer v0.20.1 // indirect
|
github.com/go-openapi/jsonpointer v0.20.2 // indirect
|
||||||
github.com/go-openapi/jsonreference v0.20.3 // indirect
|
github.com/go-openapi/jsonreference v0.20.4 // indirect
|
||||||
github.com/go-resty/resty/v2 v2.10.0 // indirect
|
github.com/go-resty/resty/v2 v2.11.0 // indirect
|
||||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||||
github.com/golang/glog v1.1.2 // indirect
|
github.com/golang/glog v1.1.2 // indirect
|
||||||
@ -161,9 +161,9 @@ require (
|
|||||||
github.com/oklog/ulid v1.3.1 // indirect
|
github.com/oklog/ulid v1.3.1 // indirect
|
||||||
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
|
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
|
||||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||||
github.com/openziti/foundation/v2 v2.0.35 // indirect
|
github.com/openziti/foundation/v2 v2.0.37 // indirect
|
||||||
github.com/openziti/metrics v1.2.41 // indirect
|
github.com/openziti/metrics v1.2.45 // indirect
|
||||||
github.com/openziti/secretstream v0.1.15 // indirect
|
github.com/openziti/secretstream v0.1.16 // indirect
|
||||||
github.com/openziti/storage v0.2.6 // indirect
|
github.com/openziti/storage v0.2.6 // indirect
|
||||||
github.com/orcaman/concurrent-map/v2 v2.0.1 // indirect
|
github.com/orcaman/concurrent-map/v2 v2.0.1 // indirect
|
||||||
github.com/parallaxsecond/parsec-client-go v0.0.0-20221025095442-f0a77d263cf9 // indirect
|
github.com/parallaxsecond/parsec-client-go v0.0.0-20221025095442-f0a77d263cf9 // indirect
|
||||||
@ -224,9 +224,9 @@ require (
|
|||||||
go.uber.org/multierr v1.11.0 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
|
||||||
golang.org/x/mod v0.12.0 // indirect
|
golang.org/x/mod v0.12.0 // indirect
|
||||||
golang.org/x/sync v0.4.0 // indirect
|
golang.org/x/sync v0.5.0 // indirect
|
||||||
golang.org/x/sys v0.15.0 // indirect
|
golang.org/x/sys v0.17.0 // indirect
|
||||||
golang.org/x/term v0.15.0 // indirect
|
golang.org/x/term v0.17.0 // indirect
|
||||||
golang.org/x/text v0.14.0 // indirect
|
golang.org/x/text v0.14.0 // indirect
|
||||||
golang.org/x/tools v0.13.0 // indirect
|
golang.org/x/tools v0.13.0 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
|
178
go.sum
178
go.sum
@ -100,7 +100,6 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
|
|||||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||||
github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw=
|
github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw=
|
||||||
github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
|
||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||||
@ -182,7 +181,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
|
|||||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
|
||||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
@ -262,63 +260,36 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
|
|||||||
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
|
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
|
||||||
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||||
github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
|
github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
|
||||||
github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
|
github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo=
|
||||||
github.com/go-openapi/analysis v0.21.5 h1:3tHfEBh6Ia8eKc4M7khOGjPOAlWKJ10d877Cr9teujI=
|
|
||||||
github.com/go-openapi/analysis v0.21.5/go.mod h1:25YcZosX9Lwz2wBsrFrrsL8bmjjXdlyP6zsr2AMy29M=
|
|
||||||
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
|
||||||
github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
|
|
||||||
github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
|
|
||||||
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
|
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
|
||||||
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
|
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
|
||||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
|
||||||
github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
|
github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
|
||||||
github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
|
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
|
||||||
github.com/go-openapi/jsonpointer v0.20.1 h1:MkK4VEIEZMj4wT9PmjaUmGflVBr9nvud4Q4UVFbDoBE=
|
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
|
||||||
github.com/go-openapi/jsonpointer v0.20.1/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
|
github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=
|
||||||
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
|
github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8=
|
||||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto=
|
||||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU=
|
||||||
github.com/go-openapi/jsonreference v0.20.3 h1:EjGcjTW8pD1mRis6+w/gmoBdqv5+RbE9B85D1NgDOVQ=
|
github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do=
|
||||||
github.com/go-openapi/jsonreference v0.20.3/go.mod h1:FviDZ46i9ivh810gqzFLl5NttD5q3tSlMLqLr6okedM=
|
github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
|
||||||
github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
|
github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
|
||||||
github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
|
github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
|
||||||
github.com/go-openapi/loads v0.21.3 h1:8sSH2FIm/SnbDUGv572md4YqVMFne/a9Eubvcd3anew=
|
|
||||||
github.com/go-openapi/loads v0.21.3/go.mod h1:Y3aMR24iHbKHppOj91nQ/SHc0cuPbAr4ndY4a02xydc=
|
|
||||||
github.com/go-openapi/runtime v0.26.2 h1:elWyB9MacRzvIVgAZCBJmqTi7hBzU0hlKD4IvfX0Zl0=
|
|
||||||
github.com/go-openapi/runtime v0.26.2/go.mod h1:O034jyRZ557uJKzngbMDJXkcKJVzXJiymdSfgejrcRw=
|
|
||||||
github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
|
|
||||||
github.com/go-openapi/spec v0.20.11 h1:J/TzFDLTt4Rcl/l1PmyErvkqlJDncGvPTMnCI39I4gY=
|
|
||||||
github.com/go-openapi/spec v0.20.11/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
|
|
||||||
github.com/go-openapi/spec v0.20.12 h1:cgSLbrsmziAP2iais+Vz7kSazwZ8rsUZd6TUzdDgkVI=
|
|
||||||
github.com/go-openapi/spec v0.20.12/go.mod h1:iSCgnBcwbMW9SfzJb8iYynXvcY6C/QFrI7otzF7xGM4=
|
|
||||||
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
|
|
||||||
github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs=
|
|
||||||
github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA=
|
|
||||||
github.com/go-openapi/strfmt v0.21.10 h1:JIsly3KXZB/Qf4UzvzJpg4OELH/0ASDQsyk//TTBDDk=
|
|
||||||
github.com/go-openapi/strfmt v0.21.10/go.mod h1:vNDMwbilnl7xKiO/Ve/8H8Bb2JIInBnH+lqiw6QWgis=
|
|
||||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||||
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
||||||
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
||||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
github.com/go-openapi/validate v0.22.6 h1:+NhuwcEYpWdO5Nm4bmvhGLW0rt1Fcc532Mu3wpypXfo=
|
||||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
github.com/go-openapi/validate v0.22.6/go.mod h1:eaddXSqKeTg5XpSmj1dYyFTK/95n/XHwcOY+BMxKMyM=
|
||||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
|
||||||
github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys=
|
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
|
||||||
github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
|
|
||||||
github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI=
|
|
||||||
github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M=
|
|
||||||
github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8=
|
|
||||||
github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A=
|
|
||||||
github.com/go-resty/resty/v2 v2.10.0 h1:Qla4W/+TMmv0fOeeRqzEpXPLfTUnR5HZ1+lGs+CkiCo=
|
|
||||||
github.com/go-resty/resty/v2 v2.10.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
|
|
||||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||||
@ -336,8 +307,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
|
|||||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
|
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
|
||||||
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||||
@ -433,10 +404,8 @@ github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8
|
|||||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
|
|
||||||
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
|
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
|
||||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
|
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
@ -602,7 +571,6 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
|||||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
@ -633,7 +601,6 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
|
|||||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
|
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
|
||||||
@ -703,7 +670,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
|
|||||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
|
||||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
@ -729,6 +695,8 @@ github.com/muesli/termenv v0.13.0 h1:wK20DRpJdDX8b7Ek2QfhvqhRQFZ237RGRO0RQ/Iqdy0
|
|||||||
github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc=
|
github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc=
|
||||||
github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM=
|
github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM=
|
||||||
github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM=
|
github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM=
|
||||||
|
github.com/muhlemmer/httpforwarded v0.1.0 h1:x4DLrzXdliq8mprgUMR0olDvHGkou5BJsK/vWUetyzY=
|
||||||
|
github.com/muhlemmer/httpforwarded v0.1.0/go.mod h1:yo9czKedo2pdZhoXe+yDkGVbU0TJ0q9oQ90BVoDEtw0=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||||
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
|
||||||
@ -771,36 +739,26 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
|
|||||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||||
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||||
github.com/openziti/channel/v2 v2.0.111 h1:ZZDyUUFcyshitXjUqAMjdAKbaDMpgV7oX1Jp1I35Rc4=
|
github.com/openziti/channel/v2 v2.0.119 h1:stfSrnDqoTi78LMvQA3+NSivHjQnRrYKrgij5NaOENI=
|
||||||
github.com/openziti/channel/v2 v2.0.111/go.mod h1:abw0qwT0MzWvh1eI2P6D6CD17PRHL8EEo1d3DHCyCdM=
|
github.com/openziti/channel/v2 v2.0.119/go.mod h1:lSRJwqmbkE34DgXYEmUhVCzwcQcx65vZGE8nuBNK458=
|
||||||
github.com/openziti/channel/v2 v2.0.113 h1:J7GdiwusrwpHtbQKAgQErRe4RJdeqTUQhIZNgsZpDn4=
|
github.com/openziti/edge-api v0.26.10 h1:LEDuJHZsExi0PBVO9iVuIdZWJ7eFo/i4TJhXoSFmfOU=
|
||||||
github.com/openziti/channel/v2 v2.0.113/go.mod h1:1cVOKm52klHza4WxRgcT4Ec/NlFrGwjXPZt/V7bSE+o=
|
github.com/openziti/edge-api v0.26.10/go.mod h1:FQLjav9AfqxQYSL0xKPDZ/JWTSZXApkk7jM2/iczGXM=
|
||||||
github.com/openziti/edge-api v0.26.6 h1:qM5XRaYYZxRIkb3xDkAU1D2EkQx8a+oHvCWbd+v0fbA=
|
|
||||||
github.com/openziti/edge-api v0.26.6/go.mod h1:DX+fKivCRgnl5oxuLLoEx044arhjlgI7peGlqxN+Iks=
|
|
||||||
github.com/openziti/edge-api v0.26.7 h1:dHLH7+O+Yp3HPmhgAbvq8z93EcZDypiSOHm/PVVUCoc=
|
|
||||||
github.com/openziti/edge-api v0.26.7/go.mod h1:yKQYyc3zhHRM7Y2BRd5R7gT9zelh46julW4chjv2W9I=
|
|
||||||
github.com/openziti/fabric v0.23.26 h1:wEPNh8m3qcq9sw1Zmg5YgFZw1FovsKGu53rRf8qzI7A=
|
github.com/openziti/fabric v0.23.26 h1:wEPNh8m3qcq9sw1Zmg5YgFZw1FovsKGu53rRf8qzI7A=
|
||||||
github.com/openziti/fabric v0.23.26/go.mod h1:0MtkZqIHs3cJPP4DB88xsWUemDm77nN/GvWBBfq7peo=
|
github.com/openziti/fabric v0.23.26/go.mod h1:0MtkZqIHs3cJPP4DB88xsWUemDm77nN/GvWBBfq7peo=
|
||||||
github.com/openziti/foundation/v2 v2.0.35 h1:4VaMwZ2kAy6jwBYeQIBN2m8rcGroTDX4A2Jp7NAKb6M=
|
github.com/openziti/foundation/v2 v2.0.37 h1:7pa4vWrlwllEoLXaK2rx91AffLQJ8k5pvc92oWANavA=
|
||||||
github.com/openziti/foundation/v2 v2.0.35/go.mod h1:Xnb3IxP1e1UcgLggqSunEpCjH3iHozkPi9Bd9GESbwo=
|
github.com/openziti/foundation/v2 v2.0.37/go.mod h1:2NxzCnJbMw35U9RrFcdEaiXdxIMfBHOUNPngpyhvKeY=
|
||||||
github.com/openziti/identity v1.0.68 h1:SaFr7BeFQBoWQDiT28vUb8D9w7v6lIAK6/9RkwmV0OU=
|
github.com/openziti/identity v1.0.70 h1:JNwtJHmIS0DcXookm2xuXyh4z92T1O21GQvuO8PmHWs=
|
||||||
github.com/openziti/identity v1.0.68/go.mod h1:HbOu3TQ032v8xE6xZWjO51azF4fUxRLjO/l/oGqJwUI=
|
github.com/openziti/identity v1.0.70/go.mod h1:jsKBL4G1BsmDSCIfhK4jha5B3Sevgy1jyZq0GtFKhSk=
|
||||||
github.com/openziti/metrics v1.2.40 h1:gySRgR8prCPqaEjmUtX0eXFs7NkI9uPAzp+z6A8+JqA=
|
github.com/openziti/metrics v1.2.45 h1:+3zqszLWyFdTgzbsQD90V0yJcC9Ek77qKaIGMQXkAXs=
|
||||||
github.com/openziti/metrics v1.2.40/go.mod h1:HXdVryf3xpZfnY4VcaOjMxiBv+qw0wJlEJNLbooB9hY=
|
github.com/openziti/metrics v1.2.45/go.mod h1:g6CgAEbFes2UtdfGrsR8AKkuoBVL5dkU61843uQvllM=
|
||||||
github.com/openziti/metrics v1.2.41 h1:JShcFb6qJPA2cMiWQLtcSXiJjsrhEWpH+aVcjT/Mcbs=
|
github.com/openziti/sdk-golang v0.22.28 h1:s159CT42dXug4GiJiN/kM6/ol+N2LFZ2tUk6bOpbgiI=
|
||||||
github.com/openziti/metrics v1.2.41/go.mod h1:L9h0NrliMA3+p7+ascKgvx28qoKHymN9l+CMA+Q+sZc=
|
github.com/openziti/sdk-golang v0.22.28/go.mod h1:BLaLvcLqAgf3JFoDPWLTj3j3X5rndo6ZejdDdkMlihQ=
|
||||||
github.com/openziti/sdk-golang v0.21.2 h1:P66cslOAmQX37VFan+df+MoD2PqaFjHWDNMpuhhXHSo=
|
github.com/openziti/secretstream v0.1.16 h1:tVanF7OpJL1MJ1gvWaRlR2i+kAbrGsxr3q6EXFOS08U=
|
||||||
github.com/openziti/sdk-golang v0.21.2/go.mod h1:mepEUD39DsBm/v1WVLedYRoYCFdet5mmJ5Sxqm/zkFI=
|
github.com/openziti/secretstream v0.1.16/go.mod h1:bvjGBUW/0e5MzD5S3FW3rhGASRNWAi+kTkTENZ9qRDE=
|
||||||
github.com/openziti/sdk-golang v0.22.6 h1:AG0FNnh3QHTmqgL7Igl+ib2V65h0I2GskZ4Xi97eOo8=
|
|
||||||
github.com/openziti/sdk-golang v0.22.6/go.mod h1:eq0Ww3cX8SVUyhAVGlZSZPIGBrMEnUioVBRME/IZ7rU=
|
|
||||||
github.com/openziti/secretstream v0.1.14 h1:Ta+nB5Prcct+L5LIKUA1nE56QhWS6lMPQYTlpxUltU0=
|
|
||||||
github.com/openziti/secretstream v0.1.14/go.mod h1:/hhuLfu+GIv0+cnapfsu/VOnXEvmTt3GKtCu+lQ0RIw=
|
|
||||||
github.com/openziti/secretstream v0.1.15 h1:bGoPlT5zmZ+BiLKFMlaARG3gfiUzuhX/kmK6OInaghU=
|
|
||||||
github.com/openziti/secretstream v0.1.15/go.mod h1:LyghB5JOlgvFASkLYPiBgjj5rcFXKiLD4qwHYRfBxnU=
|
|
||||||
github.com/openziti/storage v0.2.6 h1:/pbIRzDwrczMWRVkN75PfwAXFbArplIqhpRsUrsUOBc=
|
github.com/openziti/storage v0.2.6 h1:/pbIRzDwrczMWRVkN75PfwAXFbArplIqhpRsUrsUOBc=
|
||||||
github.com/openziti/storage v0.2.6/go.mod h1:JnjCofrnPcajwn6VIB2CgI7pVVUFBL7evbezIsQ4AgA=
|
github.com/openziti/storage v0.2.6/go.mod h1:JnjCofrnPcajwn6VIB2CgI7pVVUFBL7evbezIsQ4AgA=
|
||||||
github.com/openziti/transport/v2 v2.0.119 h1:KOgHU+9EZUVPvv8ncifqHmNEcFUHbJHigo3jyPvWnOc=
|
github.com/openziti/transport/v2 v2.0.122 h1:XWwZ6JcSO1nvbZgfp6kdf8aR5LEEN343mpZlhSihirk=
|
||||||
github.com/openziti/transport/v2 v2.0.119/go.mod h1:H2IIBP6ed9isE/eJHGXtAZL0d73ApYOpLG9sSvutNNI=
|
github.com/openziti/transport/v2 v2.0.122/go.mod h1:07Ak2jMsyZmi7/ECxGNfMXk8cF1Vj5qtKj90FoAnK8A=
|
||||||
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
|
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
|
||||||
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
|
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
|
||||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
|
||||||
@ -874,9 +832,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
|||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE=
|
|
||||||
github.com/rs/cors v1.9.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
|
||||||
github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=
|
github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo=
|
||||||
|
github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||||
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||||
@ -895,10 +852,8 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh
|
|||||||
github.com/schollz/jsonstore v1.1.0 h1:WZBDjgezFS34CHI+myb4s8GGpir3UMpy7vWoCeO0n6E=
|
github.com/schollz/jsonstore v1.1.0 h1:WZBDjgezFS34CHI+myb4s8GGpir3UMpy7vWoCeO0n6E=
|
||||||
github.com/schollz/jsonstore v1.1.0/go.mod h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg=
|
github.com/schollz/jsonstore v1.1.0/go.mod h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg=
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM=
|
github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI=
|
||||||
github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE=
|
github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU=
|
||||||
github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ=
|
|
||||||
github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
|
||||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
||||||
@ -981,7 +936,6 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
|
|||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046 h1:8rUlviSVOEe7TMk7W0gIPrW8MqEzYfZHpsNWSf8s2vg=
|
github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046 h1:8rUlviSVOEe7TMk7W0gIPrW8MqEzYfZHpsNWSf8s2vg=
|
||||||
github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU=
|
github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU=
|
||||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
|
||||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||||
@ -1000,9 +954,7 @@ github.com/wneessen/go-mail v0.2.7/go.mod h1:m25lkU2GYQnlVr6tdwK533/UXxo57V0kLOj
|
|||||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||||
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
|
|
||||||
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
||||||
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
|
|
||||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
@ -1027,8 +979,6 @@ github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvv
|
|||||||
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||||
github.com/zitadel/oidc/v2 v2.7.0 h1:IGX4EDk6tegTjUSsZDWeTfLseFU0BdJ/Glf1tgys2lU=
|
|
||||||
github.com/zitadel/oidc/v2 v2.7.0/go.mod h1:zkUkVJS0sDVy9m0UA9RgO3f8i/C0rtjvXU36UJj7T+0=
|
|
||||||
github.com/zitadel/oidc/v2 v2.12.0 h1:4aMTAy99/4pqNwrawEyJqhRb3yY3PtcDxnoDSryhpn4=
|
github.com/zitadel/oidc/v2 v2.12.0 h1:4aMTAy99/4pqNwrawEyJqhRb3yY3PtcDxnoDSryhpn4=
|
||||||
github.com/zitadel/oidc/v2 v2.12.0/go.mod h1:LrRav74IiThHGapQgCHZOUNtnqJG0tcZKHro/91rtLw=
|
github.com/zitadel/oidc/v2 v2.12.0/go.mod h1:LrRav74IiThHGapQgCHZOUNtnqJG0tcZKHro/91rtLw=
|
||||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
@ -1038,7 +988,6 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI
|
|||||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||||
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
|
|
||||||
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
|
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
|
||||||
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
|
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
|
||||||
go.mozilla.org/pkcs7 v0.0.0-20210730143726-725912489c62/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
go.mozilla.org/pkcs7 v0.0.0-20210730143726-725912489c62/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||||
@ -1132,8 +1081,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw
|
|||||||
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||||
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
|
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
||||||
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
@ -1227,8 +1176,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
|||||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
|
||||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
@ -1241,10 +1190,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
|
|||||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
|
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
|
||||||
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
|
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
|
||||||
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
|
|
||||||
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -1258,8 +1205,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
|
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sys v0.0.0-20170728174421-0f826bdd13b5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20170728174421-0f826bdd13b5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -1341,8 +1288,9 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
|
||||||
|
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
@ -1351,8 +1299,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
|||||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
|
||||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
@ -1567,15 +1515,12 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
|||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
|
||||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
|
||||||
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
||||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
@ -1601,11 +1546,8 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
@ -28,7 +28,7 @@ type ShareRequest struct {
|
|||||||
AuthUsers []*AuthUser `json:"authUsers"`
|
AuthUsers []*AuthUser `json:"authUsers"`
|
||||||
|
|
||||||
// backend mode
|
// backend mode
|
||||||
// Enum: [proxy web tcpTunnel udpTunnel caddy drive]
|
// Enum: [proxy web tcpTunnel udpTunnel caddy drive socks]
|
||||||
BackendMode string `json:"backendMode,omitempty"`
|
BackendMode string `json:"backendMode,omitempty"`
|
||||||
|
|
||||||
// backend proxy endpoint
|
// backend proxy endpoint
|
||||||
@ -117,7 +117,7 @@ var shareRequestTypeBackendModePropEnum []interface{}
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
var res []string
|
var res []string
|
||||||
if err := json.Unmarshal([]byte(`["proxy","web","tcpTunnel","udpTunnel","caddy","drive"]`), &res); err != nil {
|
if err := json.Unmarshal([]byte(`["proxy","web","tcpTunnel","udpTunnel","caddy","drive","socks"]`), &res); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
for _, v := range res {
|
for _, v := range res {
|
||||||
@ -144,6 +144,9 @@ const (
|
|||||||
|
|
||||||
// ShareRequestBackendModeDrive captures enum value "drive"
|
// ShareRequestBackendModeDrive captures enum value "drive"
|
||||||
ShareRequestBackendModeDrive string = "drive"
|
ShareRequestBackendModeDrive string = "drive"
|
||||||
|
|
||||||
|
// ShareRequestBackendModeSocks captures enum value "socks"
|
||||||
|
ShareRequestBackendModeSocks string = "socks"
|
||||||
)
|
)
|
||||||
|
|
||||||
// prop value enum
|
// prop value enum
|
||||||
|
@ -1476,7 +1476,8 @@ func init() {
|
|||||||
"tcpTunnel",
|
"tcpTunnel",
|
||||||
"udpTunnel",
|
"udpTunnel",
|
||||||
"caddy",
|
"caddy",
|
||||||
"drive"
|
"drive",
|
||||||
|
"socks"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"backendProxyEndpoint": {
|
"backendProxyEndpoint": {
|
||||||
@ -3099,7 +3100,8 @@ func init() {
|
|||||||
"tcpTunnel",
|
"tcpTunnel",
|
||||||
"udpTunnel",
|
"udpTunnel",
|
||||||
"caddy",
|
"caddy",
|
||||||
"drive"
|
"drive",
|
||||||
|
"socks"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"backendProxyEndpoint": {
|
"backendProxyEndpoint": {
|
||||||
|
@ -184,7 +184,7 @@ class ShareRequest(object):
|
|||||||
:param backend_mode: The backend_mode of this ShareRequest. # noqa: E501
|
:param backend_mode: The backend_mode of this ShareRequest. # noqa: E501
|
||||||
:type: str
|
:type: str
|
||||||
"""
|
"""
|
||||||
allowed_values = ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive"] # noqa: E501
|
allowed_values = ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive", "socks"] # noqa: E501
|
||||||
if backend_mode not in allowed_values:
|
if backend_mode not in allowed_values:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
"Invalid value for `backend_mode` ({0}), must be one of {1}" # noqa: E501
|
"Invalid value for `backend_mode` ({0}), must be one of {1}" # noqa: E501
|
||||||
|
@ -973,7 +973,7 @@ definitions:
|
|||||||
type: string
|
type: string
|
||||||
backendMode:
|
backendMode:
|
||||||
type: string
|
type: string
|
||||||
enum: ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive"]
|
enum: ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive", "socks"]
|
||||||
backendProxyEndpoint:
|
backendProxyEndpoint:
|
||||||
type: string
|
type: string
|
||||||
authScheme:
|
authScheme:
|
||||||
|
@ -82,12 +82,8 @@ const config = {
|
|||||||
pages: {
|
pages: {
|
||||||
path: './src/pages'
|
path: './src/pages'
|
||||||
},
|
},
|
||||||
// googleAnalytics: {
|
googleTagManager: {
|
||||||
//
|
containerId: 'GTM-MDFLZPK8',
|
||||||
// },
|
|
||||||
gtag: {
|
|
||||||
trackingID: 'G-V2KMEXWJ10',
|
|
||||||
anonymizeIP: true,
|
|
||||||
},
|
},
|
||||||
sitemap: {
|
sitemap: {
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user