Merge pull request #473 from openziti/zrok_copy_p1

'zrok copy' (phase 1) (#438)
Michael Quigley 2024-01-22 10:48:01 -05:00 committed by GitHub
commit 8d88e32d91
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
44 changed files with 18277 additions and 4 deletions

ACKNOWLEDGEMENTS.md (new file, 59 lines)

@ -0,0 +1,59 @@
# ACKNOWLEDGEMENTS
## github.com/openziti/zrok/drives/davServer
The `davServer` package is based on code from `https://cs.opensource.google/go/go/`, which included the following license:
> Copyright (c) 2009 The Go Authors. All rights reserved.
>
> Redistribution and use in source and binary forms, with or without
> modification, are permitted provided that the following conditions are
> met:
>
> * Redistributions of source code must retain the above copyright
> notice, this list of conditions and the following disclaimer.
> * Redistributions in binary form must reproduce the above
> copyright notice, this list of conditions and the following disclaimer
> in the documentation and/or other materials provided with the
> distribution.
> * Neither the name of Google Inc. nor the names of its
> contributors may be used to endorse or promote products derived from
> this software without specific prior written permission.
>
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## github.com/openziti/zrok/drives/davClient
The `davClient` package is based on code from `github.com/emersion/go-webdav`, which included the following license:
> The MIT License (MIT)
>
> Copyright (c) 2020 Simon Ser
>
> Permission is hereby granted, free of charge, to any person obtaining a copy
> of this software and associated documentation files (the "Software"), to deal
> in the Software without restriction, including without limitation the rights
> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
> copies of the Software, and to permit persons to whom the Software is
> furnished to do so, subject to the following conditions:
>
> The above copyright notice and this permission notice shall be included in all
> copies or substantial portions of the Software.
>
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> SOFTWARE.

CHANGELOG.md (modified)

@ -2,6 +2,8 @@
## v0.4.23
FEATURE: New CLI commands have been implemented for working with the `drive` share backend mode (part of the "zrok Drives" functionality). These commands include `zrok cp`, `zrok mkdir`, `zrok mv`, `zrok ls`, and `zrok rm`. These are initial, minimal versions of these commands and very likely contain bugs and ergonomic annoyances. There is a guide available at `docs/guides/drives/cli.md` that explains how to work with these tools in detail (https://github.com/openziti/zrok/issues/438)
FEATURE: Python SDK now has a decorator for integrating with various server side frameworks. See the `http-server` example.
FEATURE: Python SDK share and access handling now supports context management.

cmd/zrok/copy.go (new file, 106 lines)

@ -0,0 +1,106 @@
package main
import (
"fmt"
"github.com/openziti/zrok/drives/sync"
"github.com/openziti/zrok/environment"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/tui"
"github.com/spf13/cobra"
"net/url"
"os"
)
func init() {
rootCmd.AddCommand(newCopyCommand().cmd)
}
type copyCommand struct {
cmd *cobra.Command
sync bool
basicAuth string
}
func newCopyCommand() *copyCommand {
cmd := &cobra.Command{
Use: "copy <source> [<target>] (<target> defaults to 'file://.`)",
Short: "Copy (unidirectional sync) zrok drive contents from <source> to <target> ('http://', 'file://', and 'zrok://' supported)",
Aliases: []string{"cp"},
Args: cobra.RangeArgs(1, 2),
}
command := &copyCommand{cmd: cmd}
cmd.Run = command.run
cmd.Flags().BoolVarP(&command.sync, "sync", "s", false, "Only copy modified files (one-way synchronize)")
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
return command
}
func (cmd *copyCommand) run(_ *cobra.Command, args []string) {
if cmd.basicAuth == "" {
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
}
sourceUrl, err := url.Parse(args[0])
if err != nil {
tui.Error(fmt.Sprintf("invalid source '%v'", args[0]), err)
}
if sourceUrl.Scheme == "" {
sourceUrl.Scheme = "file"
}
targetStr := "."
if len(args) == 2 {
targetStr = args[1]
}
targetUrl, err := url.Parse(targetStr)
if err != nil {
tui.Error(fmt.Sprintf("invalid target '%v'", targetStr), err)
}
if targetUrl.Scheme == "" {
targetUrl.Scheme = "file"
}
root, err := environment.LoadRoot()
if err != nil {
tui.Error("error loading root", err)
}
var allocatedAccesses []*sdk.Access
if sourceUrl.Scheme == "zrok" {
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: sourceUrl.Host})
if err != nil {
tui.Error("error creating access", err)
}
allocatedAccesses = append(allocatedAccesses, access)
}
if targetUrl.Scheme == "zrok" {
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
if err != nil {
tui.Error("error creating access", err)
}
allocatedAccesses = append(allocatedAccesses, access)
}
defer func() {
for _, access := range allocatedAccesses {
err := sdk.DeleteAccess(root, access)
if err != nil {
tui.Warning("error deleting target access", err)
}
}
}()
source, err := sync.TargetForURL(sourceUrl, root, cmd.basicAuth)
if err != nil {
tui.Error(fmt.Sprintf("error creating target for '%v'", sourceUrl), err)
}
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
if err != nil {
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
}
if err := sync.OneWay(source, target, cmd.sync); err != nil {
tui.Error("error copying", err)
}
fmt.Println("copy complete!")
}

cmd/zrok/ls.go (new file, 95 lines)

@ -0,0 +1,95 @@
package main
import (
"fmt"
"github.com/jedib0t/go-pretty/v6/table"
"github.com/openziti/zrok/drives/sync"
"github.com/openziti/zrok/environment"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/tui"
"github.com/openziti/zrok/util"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"net/url"
"os"
"sort"
)
func init() {
rootCmd.AddCommand(newLsCommand().cmd)
}
type lsCommand struct {
cmd *cobra.Command
basicAuth string
}
func newLsCommand() *lsCommand {
cmd := &cobra.Command{
Use: "ls <target>",
Short: "List the contents of drive <target> ('http://', 'zrok://','file://')",
Aliases: []string{"dir"},
Args: cobra.ExactArgs(1),
}
command := &lsCommand{cmd: cmd}
cmd.Run = command.run
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
return command
}
func (cmd *lsCommand) run(_ *cobra.Command, args []string) {
if cmd.basicAuth == "" {
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
}
targetUrl, err := url.Parse(args[0])
if err != nil {
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
}
if targetUrl.Scheme == "" {
targetUrl.Scheme = "file"
}
root, err := environment.LoadRoot()
if err != nil {
tui.Error("error loading root", err)
}
if targetUrl.Scheme == "zrok" {
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
if err != nil {
tui.Error("error creating access", err)
}
defer func() {
if err := sdk.DeleteAccess(root, access); err != nil {
logrus.Warningf("error freeing access: %v", err)
}
}()
}
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
if err != nil {
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
}
objects, err := target.Dir("/")
if err != nil {
tui.Error("error listing directory", err)
}
sort.Slice(objects, func(i, j int) bool {
return objects[i].Path < objects[j].Path
})
tw := table.NewWriter()
tw.SetOutputMirror(os.Stdout)
tw.SetStyle(table.StyleLight)
tw.AppendHeader(table.Row{"Type", "Name", "Size", "Modified"})
for _, object := range objects {
if object.IsDir {
tw.AppendRow(table.Row{"DIR", object.Path, "", ""})
} else {
tw.AppendRow(table.Row{"", object.Path, util.BytesToSize(object.Size), object.Modified.Local()})
}
}
tw.Render()
}

cmd/zrok/md.go (new file, 75 lines)

@ -0,0 +1,75 @@
package main
import (
"fmt"
"github.com/openziti/zrok/drives/sync"
"github.com/openziti/zrok/environment"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/tui"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"net/url"
"os"
)
func init() {
rootCmd.AddCommand(newMdCommand().cmd)
}
type mdCommand struct {
cmd *cobra.Command
basicAuth string
}
func newMdCommand() *mdCommand {
cmd := &cobra.Command{
Use: "md <target>",
Short: "Make directory at <target> ('http://', 'zrok://', 'file://')",
Aliases: []string{"mkdir"},
Args: cobra.ExactArgs(1),
}
command := &mdCommand{cmd: cmd}
cmd.Run = command.run
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
return command
}
func (cmd *mdCommand) run(_ *cobra.Command, args []string) {
if cmd.basicAuth == "" {
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
}
targetUrl, err := url.Parse(args[0])
if err != nil {
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
}
if targetUrl.Scheme == "" {
targetUrl.Scheme = "file"
}
root, err := environment.LoadRoot()
if err != nil {
tui.Error("error loading root", err)
}
if targetUrl.Scheme == "zrok" {
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
if err != nil {
tui.Error("error creating access", err)
}
defer func() {
if err := sdk.DeleteAccess(root, access); err != nil {
logrus.Warningf("error freeing access: %v", err)
}
}()
}
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
if err != nil {
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
}
if err := target.Mkdir("/"); err != nil {
tui.Error("error creating directory", err)
}
}

cmd/zrok/mv.go (new file, 75 lines)

@ -0,0 +1,75 @@
package main
import (
"fmt"
"github.com/openziti/zrok/drives/sync"
"github.com/openziti/zrok/environment"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/tui"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"net/url"
"os"
)
func init() {
rootCmd.AddCommand(newMvCommand().cmd)
}
type mvCommand struct {
cmd *cobra.Command
basicAuth string
}
func newMvCommand() *mvCommand {
cmd := &cobra.Command{
Use: "mv <target> <newPath>",
Short: "Move the drive <target> to <newPath> ('http://', 'zrok://', 'file://')",
Aliases: []string{"move"},
Args: cobra.ExactArgs(2),
}
command := &mvCommand{cmd: cmd}
cmd.Run = command.run
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
return command
}
func (cmd *mvCommand) run(_ *cobra.Command, args []string) {
if cmd.basicAuth == "" {
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
}
targetUrl, err := url.Parse(args[0])
if err != nil {
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
}
if targetUrl.Scheme == "" {
targetUrl.Scheme = "file"
}
root, err := environment.LoadRoot()
if err != nil {
tui.Error("error loading root", err)
}
if targetUrl.Scheme == "zrok" {
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
if err != nil {
tui.Error("error creating access", err)
}
defer func() {
if err := sdk.DeleteAccess(root, access); err != nil {
logrus.Warningf("error freeing access: %v", err)
}
}()
}
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
if err != nil {
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
}
if err := target.Move("/", args[1]); err != nil {
tui.Error("error moving", err)
}
}

cmd/zrok/rm.go (new file, 75 lines)

@ -0,0 +1,75 @@
package main
import (
"fmt"
"github.com/openziti/zrok/drives/sync"
"github.com/openziti/zrok/environment"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/tui"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"net/url"
"os"
)
func init() {
rootCmd.AddCommand(newRmCommand().cmd)
}
type rmCommand struct {
cmd *cobra.Command
basicAuth string
}
func newRmCommand() *rmCommand {
cmd := &cobra.Command{
Use: "rm <target>",
Short: "Remove (delete) the contents of drive <target> ('http://', 'zrok://', 'file://')",
Aliases: []string{"del"},
Args: cobra.ExactArgs(1),
}
command := &rmCommand{cmd: cmd}
cmd.Run = command.run
cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication <username:password>")
return command
}
func (cmd *rmCommand) run(_ *cobra.Command, args []string) {
if cmd.basicAuth == "" {
cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH")
}
targetUrl, err := url.Parse(args[0])
if err != nil {
tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err)
}
if targetUrl.Scheme == "" {
targetUrl.Scheme = "file"
}
root, err := environment.LoadRoot()
if err != nil {
tui.Error("error loading root", err)
}
if targetUrl.Scheme == "zrok" {
access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host})
if err != nil {
tui.Error("error creating access", err)
}
defer func() {
if err := sdk.DeleteAccess(root, access); err != nil {
logrus.Warningf("error freeing access: %v", err)
}
}()
}
target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth)
if err != nil {
tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err)
}
if err := target.Rm("/"); err != nil {
tui.Error("error removing", err)
}
}

docs/guides/drives/cli.md (new file, 314 lines)

@ -0,0 +1,314 @@
# The Drives CLI
The zrok drives CLI tools allow for simple, ergonomic management and synchronization of local and remote files.
## Sharing a Drive
Virtual drives are shared using the `zrok share` command with the `--backend-mode drive` flag, in either the `public` or `private` sharing mode. We'll use the `private` sharing mode for this example:
```
$ mkdir /tmp/junk
$ zrok share private --headless --backend-mode drive /tmp/junk
[ 0.124] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[cf640aac-2706-49ae-9cc9-9a497d67d9c5]} new service session
[ 0.145] INFO main.(*sharePrivateCommand).run: allow other to access your share with the following command:
zrok access private wkcfb58vj51l
```
The command shown above creates an ephemeral, `private` drive share pointed at the local `/tmp/junk` folder.
Notice that the share token allocated by `zrok` is `wkcfb58vj51l`. We'll use that share token to identify our virtual drive in the following operations.
## Working with a Private Drive Share
First, let's copy a file into our virtual drive using the `zrok copy` command:
```
$ zrok copy LICENSE zrok://wkcfb58vj51l
[ 0.119] INFO zrok/drives/sync.OneWay: => /LICENSE
copy complete!
```
We used the URL scheme `zrok://<shareToken>` to refer to the private virtual drive we allocated above using the `zrok share private` command. Use `zrok://` URLs with the drives CLI tools to refer to the contents of private virtual drives.
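Under the hood (see `cmd/zrok/copy.go` in this change), a `zrok://` URL carries the share token in the URL host, and the CLI wraps the operation in a temporary private access. Here is a minimal sketch of driving the same flow from Go with the SDK calls the CLI uses; the helper name and share token below are illustrative:
```
package main

import (
	"log"
	"net/url"

	"github.com/openziti/zrok/drives/sync"
	"github.com/openziti/zrok/environment"
	"github.com/openziti/zrok/sdk/golang/sdk"
)

// copyFromPrivateDrive is a hypothetical helper mirroring what
// `zrok copy zrok://<shareToken> <dest>` does: the share token travels in the
// URL host, a temporary private access wraps the operation, and a one-way
// sync moves the files.
func copyFromPrivateDrive(shareToken, dest string) error {
	root, err := environment.LoadRoot()
	if err != nil {
		return err
	}
	sourceUrl, err := url.Parse("zrok://" + shareToken)
	if err != nil {
		return err
	}
	targetUrl, err := url.Parse(dest)
	if err != nil {
		return err
	}
	if targetUrl.Scheme == "" {
		targetUrl.Scheme = "file"
	}
	// allocate a temporary private access for the duration of the copy
	access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: sourceUrl.Host})
	if err != nil {
		return err
	}
	defer func() { _ = sdk.DeleteAccess(root, access) }()
	source, err := sync.TargetForURL(sourceUrl, root, "")
	if err != nil {
		return err
	}
	target, err := sync.TargetForURL(targetUrl, root, "")
	if err != nil {
		return err
	}
	return sync.OneWay(source, target, false)
}

func main() {
	if err := copyFromPrivateDrive("wkcfb58vj51l", "/tmp/inbox"); err != nil {
		log.Fatal(err)
	}
}
```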
Next, let's get a directory listing of the virtual drive:
```
$ zrok ls zrok://wkcfb58vj51l
┌──────┬─────────┬─────────┬───────────────────────────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼─────────┼─────────┼───────────────────────────────┤
│ │ LICENSE │ 11.3 kB │ 2024-01-19 12:16:46 -0500 EST │
└──────┴─────────┴─────────┴───────────────────────────────┘
```
We can make directories on the virtual drive:
```
$ zrok mkdir zrok://wkcfb58vj51l/stuff
$ zrok ls zrok://wkcfb58vj51l
┌──────┬─────────┬─────────┬───────────────────────────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼─────────┼─────────┼───────────────────────────────┤
│ │ LICENSE │ 11.3 kB │ 2024-01-19 12:16:46 -0500 EST │
│ DIR │ stuff │ │ │
└──────┴─────────┴─────────┴───────────────────────────────┘
```
We can copy the contents of a local directory into the new directory on the virtual drive:
```
$ ls -l util/
total 20
-rw-rw-r-- 1 michael michael 329 Jul 21 13:17 email.go
-rw-rw-r-- 1 michael michael 456 Jul 21 13:17 headers.go
-rw-rw-r-- 1 michael michael 609 Jul 21 13:17 proxy.go
-rw-rw-r-- 1 michael michael 361 Jul 21 13:17 size.go
-rw-rw-r-- 1 michael michael 423 Jan 2 11:57 uniqueName.go
$ zrok copy util/ zrok://wkcfb58vj51l/stuff
[ 0.123] INFO zrok/drives/sync.OneWay: => /email.go
[ 0.194] INFO zrok/drives/sync.OneWay: => /headers.go
[ 0.267] INFO zrok/drives/sync.OneWay: => /proxy.go
[ 0.337] INFO zrok/drives/sync.OneWay: => /size.go
[ 0.408] INFO zrok/drives/sync.OneWay: => /uniqueName.go
copy complete!
$ zrok ls zrok://wkcfb58vj51l/stuff
┌──────┬───────────────┬───────┬───────────────────────────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼───────────────┼───────┼───────────────────────────────┤
│ │ email.go │ 329 B │ 2024-01-19 12:26:45 -0500 EST │
│ │ headers.go │ 456 B │ 2024-01-19 12:26:45 -0500 EST │
│ │ proxy.go │ 609 B │ 2024-01-19 12:26:45 -0500 EST │
│ │ size.go │ 361 B │ 2024-01-19 12:26:45 -0500 EST │
│ │ uniqueName.go │ 423 B │ 2024-01-19 12:26:45 -0500 EST │
└──────┴───────────────┴───────┴───────────────────────────────┘
```
And we can remove files and directories from the virtual drive:
```
$ zrok rm zrok://wkcfb58vj51l/LICENSE
$ zrok ls zrok://wkcfb58vj51l
┌──────┬───────┬──────┬──────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼───────┼──────┼──────────┤
│ DIR │ stuff │ │ │
└──────┴───────┴──────┴──────────┘
$ zrok rm zrok://wkcfb58vj51l/stuff
$ zrok ls zrok://wkcfb58vj51l
┌──────┬──────┬──────┬──────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼──────┼──────┼──────────┤
└──────┴──────┴──────┴──────────┘
```
## Working with Public Shares
Public shares work very similarly to private shares; they just use a different URL scheme:
```
$ zrok share public --headless --backend-mode drive /tmp/junk
[ 0.708] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[05e0f48b-242b-4fd9-8edb-259488535c47]} new service session
[ 0.878] INFO main.(*sharePublicCommand).run: access your zrok share at the following endpoints:
https://6kiww4bn7iok.share.zrok.io
```
The same commands work with the `zrok` drives CLI; only the URL scheme changes:
```
$ zrok copy util/ https://6kiww4bn7iok.share.zrok.io
[ 0.268] INFO zrok/drives/sync.OneWay: => /email.go
[ 0.406] INFO zrok/drives/sync.OneWay: => /headers.go
[ 0.530] INFO zrok/drives/sync.OneWay: => /proxy.go
[ 0.655] INFO zrok/drives/sync.OneWay: => /size.go
[ 0.714] INFO zrok/drives/sync.OneWay: => /uniqueName.go
copy complete!
michael@fourtyfour Fri Jan 19 12:42:52 ~/Repos/nf/zrok
$ zrok ls https://6kiww4bn7iok.share.zrok.io
┌──────┬───────────────┬───────┬───────────────────────────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼───────────────┼───────┼───────────────────────────────┤
│ │ email.go │ 329 B │ 2023-07-21 13:17:56 -0400 EDT │
│ │ headers.go │ 456 B │ 2023-07-21 13:17:56 -0400 EDT │
│ │ proxy.go │ 609 B │ 2023-07-21 13:17:56 -0400 EDT │
│ │ size.go │ 361 B │ 2023-07-21 13:17:56 -0400 EDT │
│ │ uniqueName.go │ 423 B │ 2024-01-02 11:57:14 -0500 EST │
└──────┴───────────────┴───────┴───────────────────────────────┘
```
For public shares protected by basic authentication, the `zrok` drives CLI offers the `--basic-auth` flag, which accepts a `<username>:<password>` parameter specifying the credentials for the public virtual drive.
Alternatively, the credentials can be set using the `ZROK_DRIVES_BASIC_AUTH` environment variable:
```
$ export ZROK_DRIVES_BASIC_AUTH=username:password
```
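Programmatically, the drives CLI applies the same precedence: an explicit `--basic-auth` value wins, and the environment variable is the fallback, with the resulting `<username>:<password>` string handed to the sync target. A minimal sketch, assuming the same `sync.TargetForURL` call the CLI uses (the share URL is illustrative):
```
package main

import (
	"log"
	"net/url"
	"os"

	"github.com/openziti/zrok/drives/sync"
	"github.com/openziti/zrok/environment"
)

// resolveBasicAuth applies the precedence the drives CLI uses: an explicit
// --basic-auth value wins, otherwise ZROK_DRIVES_BASIC_AUTH is consulted.
func resolveBasicAuth(flagValue string) string {
	if flagValue != "" {
		return flagValue
	}
	return os.Getenv("ZROK_DRIVES_BASIC_AUTH")
}

func main() {
	basicAuth := resolveBasicAuth("") // empty: no --basic-auth flag given
	root, err := environment.LoadRoot()
	if err != nil {
		log.Fatal(err)
	}
	targetUrl, err := url.Parse("https://6kiww4bn7iok.share.zrok.io")
	if err != nil {
		log.Fatal(err)
	}
	// the <username>:<password> string is handed to the target as-is
	if _, err := sync.TargetForURL(targetUrl, root, basicAuth); err != nil {
		log.Fatal(err)
	}
}
```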
## One-way Synchronization
The `zrok copy` command includes a `--sync` flag, which only copies files detected as _modified_. `zrok` considers a file with the same modification timestamp and size to be unchanged. Of course, this is not a strong guarantee that the files are equivalent. Future `zrok` drives versions will provide a cryptographically strong mechanism (à la `rsync` and friends) to guarantee that files and trees of files are synchronized.
For now, the `--sync` flag provides a convenient way to resume copies of large file trees and a reasonable guarantee that the trees are in sync.
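To make the heuristic concrete, here is a small, hypothetical sketch of that comparison; the real logic lives in the `drives/sync` package and may differ in detail (the `entry` type and `unchanged` helper are illustrative only):
```
package main

import (
	"fmt"
	"time"
)

// entry is an illustrative stand-in for a drive object (path, size, mtime).
type entry struct {
	Path     string
	Size     int64
	Modified time.Time
}

// unchanged sketches the --sync heuristic: same size and same modification
// timestamp means "skip"; anything else is copied again.
func unchanged(local, remote entry) bool {
	return local.Size == remote.Size && local.Modified.Equal(remote.Modified)
}

func main() {
	now := time.Now()
	a := entry{Path: "/LICENSE", Size: 11346, Modified: now}
	b := entry{Path: "/LICENSE", Size: 11346, Modified: now}
	fmt.Println(unchanged(a, b)) // true: nothing to copy
}
```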
Let's take a look at `zrok copy --sync` in action:
```
$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io
[ 0.636] INFO zrok/drives/sync.OneWay: => /_attic/
[ 0.760] INFO zrok/drives/sync.OneWay: => /_attic/network/
[ 0.816] INFO zrok/drives/sync.OneWay: => /_attic/network/_category_.json
[ 0.928] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/
[ 0.987] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-ctrl.service
[ 1.048] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-ctrl.yml
[ 1.107] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-router0.service
[ 1.167] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-router0.yml
[ 1.218] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-access-public.service
[ 1.273] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-ctrl.service
[ 1.328] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-ctrl.yml
[ 1.382] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok.io-network-skeleton.md
[ 1.447] INFO zrok/drives/sync.OneWay: => /_attic/overview.md
[ 1.572] INFO zrok/drives/sync.OneWay: => /_attic/sharing/
[ 1.622] INFO zrok/drives/sync.OneWay: => /_attic/sharing/_category_.json
[ 1.673] INFO zrok/drives/sync.OneWay: => /_attic/sharing/reserved_services.md
[ 1.737] INFO zrok/drives/sync.OneWay: => /_attic/sharing/sharing_modes.md
[ 1.793] INFO zrok/drives/sync.OneWay: => /_attic/v0.2_account_requests.md
[ 1.902] INFO zrok/drives/sync.OneWay: => /_attic/v0.4_limits.md
...
[ 9.691] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_empty_shares.png
[ 9.812] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_new_environment.png
[ 9.870] INFO zrok/drives/sync.OneWay: => /images/zrok_zoom_to_fit.png
copy complete!
```
Because the target drive was empty, `zrok copy --sync` copied the entire contents of the local `docs/` tree into the virtual drive. However, if we run that command again, we get:
```
$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io
copy complete!
```
The virtual drive contents are already in sync with the local filesystem tree, so there is nothing for it to copy.
Let's alter the contents of the drive and run the `--sync` again:
```
$ zrok rm https://glmv049c62p7.share.zrok.io/images
$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io
[ 0.364] INFO zrok/drives/sync.OneWay: => /images/
[ 0.456] INFO zrok/drives/sync.OneWay: => /images/zrok.png
[ 0.795] INFO zrok/drives/sync.OneWay: => /images/zrok_cover.png
[ 0.866] INFO zrok/drives/sync.OneWay: => /images/zrok_deployment.drawio
...
[ 2.254] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_empty_shares.png
[ 2.340] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_new_environment.png
[ 2.391] INFO zrok/drives/sync.OneWay: => /images/zrok_zoom_to_fit.png
copy complete!
```
Because we removed the `images/` tree from the virtual drive, `zrok copy --sync` detected this and copied the local `images/` tree back onto the virtual drive.
## Drive-to-Drive Copies and Synchronization
The `zrok copy` CLI can operate on pairs of virtual drives remotely, without ever having to store files locally. This allows for drive-to-drive copies and synchronization.
Here are a couple of examples:
```
$ zrok copy --sync https://glmv049c62p7.share.zrok.io https://glmv049c62p7.share.zrok.io
copy complete!
```
Specifying the same URL for both the source and the target of a `--sync` operation should always result in nothing being copied... they are the same drive with the same state.
We can copy files between two virtual drives with a single command:
```
$ zrok copy --sync https://glmv049c62p7.share.zrok.io zrok://hsml272j3xzf
[ 1.396] INFO zrok/drives/sync.OneWay: => /_attic/
[ 2.083] INFO zrok/drives/sync.OneWay: => /_attic/overview.md
[ 2.704] INFO zrok/drives/sync.OneWay: => /_attic/sharing/
...
[ 118.240] INFO zrok/drives/sync.OneWay: => /images/zrok_web_console_empty.png
[ 118.920] INFO zrok/drives/sync.OneWay: => /images/zrok_enable_modal.png
[ 119.589] INFO zrok/drives/sync.OneWay: => /images/zrok_cover.png
[ 120.214] INFO zrok/drives/sync.OneWay: => /getting-started.mdx
copy complete!
$ zrok copy --sync https://glmv049c62p7.share.zrok.io zrok://hsml272j3xzf
copy complete!
```
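Programmatically, a drive-to-drive copy is just two remote targets handed to the one-way sync. A minimal sketch, assuming the same `sync` package calls used by `zrok copy` (both URLs are illustrative):
```
package main

import (
	"log"
	"net/url"

	"github.com/openziti/zrok/drives/sync"
	"github.com/openziti/zrok/environment"
)

func main() {
	root, err := environment.LoadRoot()
	if err != nil {
		log.Fatal(err)
	}
	// both ends are remote drives; nothing is staged on the local filesystem
	sourceUrl, err := url.Parse("https://glmv049c62p7.share.zrok.io")
	if err != nil {
		log.Fatal(err)
	}
	targetUrl, err := url.Parse("https://6kiww4bn7iok.share.zrok.io")
	if err != nil {
		log.Fatal(err)
	}
	source, err := sync.TargetForURL(sourceUrl, root, "")
	if err != nil {
		log.Fatal(err)
	}
	target, err := sync.TargetForURL(targetUrl, root, "")
	if err != nil {
		log.Fatal(err)
	}
	// 'true' enables the --sync behavior: only modified files are copied
	if err := sync.OneWay(source, target, true); err != nil {
		log.Fatal(err)
	}
}
```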
## Copying from Drives to the Local Filesystem
In the current version of the drives CLI, `zrok copy` always assumes the destination is a directory. There is currently no way to do:
```
$ zrok copy somefile someotherfile
```
What you'll end up with on the local filesystem is:
```
somefile
someotherfile/somefile
```
It's in the backlog to support file destinations in a future release of `zrok`. So, when using `zrok copy`, always take note of the destination.
`zrok copy` supports a default destination of `file://.`, so you can do single parameter `zrok copy` commands like this:
```
$ zrok ls https://azc47r3cwjds.share.zrok.io
┌──────┬─────────┬─────────┬───────────────────────────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼─────────┼─────────┼───────────────────────────────┤
│ │ LICENSE │ 11.3 kB │ 2023-07-21 13:17:56 -0400 EDT │
└──────┴─────────┴─────────┴───────────────────────────────┘
$ zrok copy https://azc47r3cwjds.share.zrok.io/LICENSE
[ 0.260] INFO zrok/drives/sync.OneWay: => /LICENSE
copy complete!
$ ls -l
total 12
-rw-rw-r-- 1 michael michael 11346 Jan 19 13:29 LICENSE
```
You can also specify a local folder as the destination for your copy:
```
$ zrok copy https://azc47r3cwjds.share.zrok.io/LICENSE /tmp/inbox
[ 0.221] INFO zrok/drives/sync.OneWay: => /LICENSE
copy complete!
$ l /tmp/inbox
total 12
-rw-rw-r-- 1 michael michael 11346 Jan 19 13:30 LICENSE
```
## Unique Names and Reserved Shares
Private reserved shares with unique names can be particularly useful with the drives CLI:
```
$ zrok reserve private -b drive --unique-name mydrive /tmp/junk
[ 0.315] INFO main.(*reserveCommand).run: your reserved share token is 'mydrive'
$ zrok share reserved --headless mydrive
[ 0.289] INFO main.(*shareReservedCommand).run: sharing target: '/tmp/junk'
[ 0.289] INFO main.(*shareReservedCommand).run: using existing backend proxy endpoint: /tmp/junk
[ 0.767] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[d519a436-9fb5-4207-afd5-7cbc28fb779a]} new service session
[ 0.927] INFO main.(*shareReservedCommand).run: use this command to access your zrok share: 'zrok access private mydrive'
```
This makes working with `zrok://` URLs particularly convenient:
```
$ zrok ls zrok://mydrive
┌──────┬─────────┬─────────┬───────────────────────────────┐
│ TYPE │ NAME │ SIZE │ MODIFIED │
├──────┼─────────┼─────────┼───────────────────────────────┤
│ │ LICENSE │ 11.3 kB │ 2023-07-21 13:17:56 -0400 EDT │
└──────┴─────────┴─────────┴───────────────────────────────┘
```
## Future Enhancements
Coming in a future release of `zrok` drives are features like:
* two-way synchronization between multiple hosts... allowing for shared "dropbox-like" usage scenarios between multiple environments
* better ergonomics for single-file destinations

drives/davClient/client.go (new file, 305 lines)

@ -0,0 +1,305 @@
package davClient
import (
"context"
"fmt"
"github.com/openziti/zrok/drives/davClient/internal"
"io"
"net/http"
"time"
)
// HTTPClient performs HTTP requests. It's implemented by *http.Client.
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
type basicAuthHTTPClient struct {
c HTTPClient
username, password string
}
func (c *basicAuthHTTPClient) Do(req *http.Request) (*http.Response, error) {
req.SetBasicAuth(c.username, c.password)
return c.c.Do(req)
}
// HTTPClientWithBasicAuth returns an HTTP client that adds basic
// authentication to all outgoing requests. If c is nil, http.DefaultClient is
// used.
func HTTPClientWithBasicAuth(c HTTPClient, username, password string) HTTPClient {
if c == nil {
c = http.DefaultClient
}
return &basicAuthHTTPClient{c, username, password}
}
// Client provides access to a remote WebDAV filesystem.
type Client struct {
ic *internal.Client
}
func NewClient(c HTTPClient, endpoint string) (*Client, error) {
ic, err := internal.NewClient(c, endpoint)
if err != nil {
return nil, err
}
return &Client{ic}, nil
}
func (c *Client) FindCurrentUserPrincipal(ctx context.Context) (string, error) {
propfind := internal.NewPropNamePropFind(internal.CurrentUserPrincipalName)
// TODO: consider retrying on the root URI "/" if this fails, as suggested
// by the RFC?
resp, err := c.ic.PropFindFlat(ctx, "", propfind)
if err != nil {
return "", err
}
var prop internal.CurrentUserPrincipal
if err := resp.DecodeProp(&prop); err != nil {
return "", err
}
if prop.Unauthenticated != nil {
return "", fmt.Errorf("webdav: unauthenticated")
}
return prop.Href.Path, nil
}
var fileInfoPropFind = internal.NewPropNamePropFind(
internal.ResourceTypeName,
internal.GetContentLengthName,
internal.GetLastModifiedName,
internal.GetContentTypeName,
internal.GetETagName,
)
func fileInfoFromResponse(resp *internal.Response) (*FileInfo, error) {
path, err := resp.Path()
if err != nil {
return nil, err
}
fi := &FileInfo{Path: path}
var resType internal.ResourceType
if err := resp.DecodeProp(&resType); err != nil {
return nil, err
}
if resType.Is(internal.CollectionName) {
fi.IsDir = true
} else {
var getLen internal.GetContentLength
if err := resp.DecodeProp(&getLen); err != nil {
return nil, err
}
var getType internal.GetContentType
if err := resp.DecodeProp(&getType); err != nil && !internal.IsNotFound(err) {
return nil, err
}
var getETag internal.GetETag
if err := resp.DecodeProp(&getETag); err != nil && !internal.IsNotFound(err) {
return nil, err
}
fi.Size = getLen.Length
fi.MIMEType = getType.Type
fi.ETag = string(getETag.ETag)
}
var getMod internal.GetLastModified
if err := resp.DecodeProp(&getMod); err != nil && !internal.IsNotFound(err) {
return nil, err
}
fi.ModTime = time.Time(getMod.LastModified)
return fi, nil
}
func (c *Client) Stat(ctx context.Context, name string) (*FileInfo, error) {
resp, err := c.ic.PropFindFlat(ctx, name, fileInfoPropFind)
if err != nil {
return nil, err
}
return fileInfoFromResponse(resp)
}
func (c *Client) Open(ctx context.Context, name string) (io.ReadCloser, error) {
req, err := c.ic.NewRequest(http.MethodGet, name, nil)
if err != nil {
return nil, err
}
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
return resp.Body, nil
}
func (c *Client) Readdir(ctx context.Context, name string, recursive bool) ([]FileInfo, error) {
depth := internal.DepthOne
if recursive {
depth = internal.DepthInfinity
}
ms, err := c.ic.PropFind(ctx, name, depth, fileInfoPropFind)
if err != nil {
return nil, err
}
l := make([]FileInfo, 0, len(ms.Responses))
for _, resp := range ms.Responses {
fi, err := fileInfoFromResponse(&resp)
if err != nil {
return l, err
}
l = append(l, *fi)
}
return l, nil
}
type fileWriter struct {
pw *io.PipeWriter
done <-chan error
}
func (fw *fileWriter) Write(b []byte) (int, error) {
return fw.pw.Write(b)
}
func (fw *fileWriter) Close() error {
if err := fw.pw.Close(); err != nil {
return err
}
return <-fw.done
}
func (c *Client) Create(ctx context.Context, name string) (io.WriteCloser, error) {
pr, pw := io.Pipe()
req, err := c.ic.NewRequest(http.MethodPut, name, pr)
if err != nil {
pw.Close()
return nil, err
}
done := make(chan error, 1)
go func() {
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
done <- err
return
}
resp.Body.Close()
done <- nil
}()
return &fileWriter{pw, done}, nil
}
func (c *Client) CreateWithModTime(ctx context.Context, name string, modTime time.Time) (io.WriteCloser, error) {
pr, pw := io.Pipe()
req, err := c.ic.NewRequest(http.MethodPut, name, pr)
if err != nil {
pw.Close()
return nil, err
}
req.Header.Set("Zrok-Modtime", fmt.Sprintf("%d", modTime.Unix()))
done := make(chan error, 1)
go func() {
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
done <- err
return
}
resp.Body.Close()
done <- nil
}()
return &fileWriter{pw, done}, nil
}
func (c *Client) Touch(ctx context.Context, path string, mtime time.Time) error {
status, err := c.ic.Touch(ctx, path, mtime)
if err != nil {
return err
}
for _, resp := range status.Responses {
if resp.Err() != nil {
return resp.Err()
}
}
return nil
}
func (c *Client) RemoveAll(ctx context.Context, name string) error {
req, err := c.ic.NewRequest(http.MethodDelete, name, nil)
if err != nil {
return err
}
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
func (c *Client) Mkdir(ctx context.Context, name string) error {
req, err := c.ic.NewRequest("MKCOL", name, nil)
if err != nil {
return err
}
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
func (c *Client) CopyAll(ctx context.Context, name, dest string, overwrite bool) error {
req, err := c.ic.NewRequest("COPY", name, nil)
if err != nil {
return err
}
req.Header.Set("Destination", c.ic.ResolveHref(dest).String())
req.Header.Set("Overwrite", internal.FormatOverwrite(overwrite))
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
func (c *Client) MoveAll(ctx context.Context, name, dest string, overwrite bool) error {
req, err := c.ic.NewRequest("MOVE", name, nil)
if err != nil {
return err
}
req.Header.Set("Destination", c.ic.ResolveHref(dest).String())
req.Header.Set("Overwrite", internal.FormatOverwrite(overwrite))
resp, err := c.ic.Do(req.WithContext(ctx))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
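A minimal sketch of consuming this client package, assuming a zrok drive share reachable over HTTPS (the endpoint and credentials are illustrative):
```
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/openziti/zrok/drives/davClient"
)

func main() {
	// wrap the default HTTP client with basic auth (only needed when the
	// share is protected); the endpoint is an illustrative public drive URL
	httpClient := davClient.HTTPClientWithBasicAuth(nil, "username", "password")
	client, err := davClient.NewClient(httpClient, "https://6kiww4bn7iok.share.zrok.io")
	if err != nil {
		log.Fatal(err)
	}
	// non-recursive listing of the drive root
	infos, err := client.Readdir(context.Background(), "/", false)
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range infos {
		fmt.Printf("%v\t%v\t%v\n", fi.Path, fi.Size, fi.ModTime)
	}
}
```
When uploading, `CreateWithModTime` and `Touch` can be used to preserve modification timestamps on the server side, which is what the `--sync` heuristic described in the drives guide relies on.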

drives/davClient/internal/client.go (new file, 286 lines)

@ -0,0 +1,286 @@
package internal
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"io"
"mime"
"net"
"net/http"
"net/url"
"path"
"strings"
"time"
"unicode"
)
// DiscoverContextURL performs a DNS-based CardDAV/CalDAV service discovery as
// described in RFC 6352 section 11. It returns the URL to the CardDAV server.
func DiscoverContextURL(ctx context.Context, service, domain string) (string, error) {
var resolver net.Resolver
// Only lookup TLS records, plaintext connections are insecure
_, addrs, err := resolver.LookupSRV(ctx, service+"s", "tcp", domain)
if dnsErr, ok := err.(*net.DNSError); ok {
if dnsErr.IsTemporary {
return "", err
}
} else if err != nil {
return "", err
}
if len(addrs) == 0 {
return "", fmt.Errorf("webdav: domain doesn't have an SRV record")
}
addr := addrs[0]
target := strings.TrimSuffix(addr.Target, ".")
if target == "" {
return "", fmt.Errorf("webdav: empty target in SRV record")
}
// TODO: perform a TXT lookup, check for a "path" key in the response
u := url.URL{Scheme: "https"}
if addr.Port == 443 {
u.Host = target
} else {
u.Host = fmt.Sprintf("%v:%v", target, addr.Port)
}
u.Path = "/.well-known/" + service
return u.String(), nil
}
// HTTPClient performs HTTP requests. It's implemented by *http.Client.
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
type Client struct {
http HTTPClient
endpoint *url.URL
}
func NewClient(c HTTPClient, endpoint string) (*Client, error) {
if c == nil {
c = http.DefaultClient
}
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
if u.Path == "" {
// This is important to avoid issues with path.Join
u.Path = "/"
}
return &Client{http: c, endpoint: u}, nil
}
func (c *Client) ResolveHref(p string) *url.URL {
if !strings.HasPrefix(p, "/") {
p = path.Join(c.endpoint.Path, p)
}
return &url.URL{
Scheme: c.endpoint.Scheme,
User: c.endpoint.User,
Host: c.endpoint.Host,
Path: p,
}
}
func (c *Client) NewRequest(method string, path string, body io.Reader) (*http.Request, error) {
return http.NewRequest(method, c.ResolveHref(path).String(), body)
}
func (c *Client) NewXMLRequest(method string, path string, v interface{}) (*http.Request, error) {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(v); err != nil {
return nil, err
}
req, err := c.NewRequest(method, path, &buf)
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", "text/xml; charset=\"utf-8\"")
return req, nil
}
func (c *Client) Do(req *http.Request) (*http.Response, error) {
resp, err := c.http.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode/100 != 2 {
defer resp.Body.Close()
contentType := resp.Header.Get("Content-Type")
if contentType == "" {
contentType = "text/plain"
}
var wrappedErr error
t, _, _ := mime.ParseMediaType(contentType)
if t == "application/xml" || t == "text/xml" {
var davErr Error
if err := xml.NewDecoder(resp.Body).Decode(&davErr); err != nil {
wrappedErr = err
} else {
wrappedErr = &davErr
}
} else if strings.HasPrefix(t, "text/") {
lr := io.LimitedReader{R: resp.Body, N: 1024}
var buf bytes.Buffer
io.Copy(&buf, &lr)
resp.Body.Close()
if s := strings.TrimSpace(buf.String()); s != "" {
if lr.N == 0 {
s += " […]"
}
wrappedErr = fmt.Errorf("%v", s)
}
}
return nil, &HTTPError{Code: resp.StatusCode, Err: wrappedErr}
}
return resp, nil
}
func (c *Client) DoMultiStatus(req *http.Request) (*MultiStatus, error) {
resp, err := c.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusMultiStatus {
return nil, fmt.Errorf("HTTP multi-status request failed: %v", resp.Status)
}
// TODO: the response can be quite large, support streaming Response elements
var ms MultiStatus
if err := xml.NewDecoder(resp.Body).Decode(&ms); err != nil {
return nil, err
}
return &ms, nil
}
func (c *Client) PropFind(ctx context.Context, path string, depth Depth, propfind *PropFind) (*MultiStatus, error) {
req, err := c.NewXMLRequest("PROPFIND", path, propfind)
if err != nil {
return nil, err
}
req.Header.Add("Depth", depth.String())
return c.DoMultiStatus(req.WithContext(ctx))
}
func (c *Client) Touch(ctx context.Context, path string, mtime time.Time) (*MultiStatus, error) {
tstr := fmt.Sprintf("%d", mtime.Unix())
var v []RawXMLValue
for _, c := range tstr {
v = append(v, RawXMLValue{tok: xml.CharData{byte(c)}})
}
pup := &PropertyUpdate{
Set: []Set{
{
Prop: Prop{
Raw: []RawXMLValue{
*NewRawXMLElement(xml.Name{Space: "zrok:", Local: "lastmodified"}, nil, v),
},
},
},
},
}
status, err := c.PropUpdate(ctx, path, pup)
return status, err
}
func (c *Client) PropUpdate(ctx context.Context, path string, propupd *PropertyUpdate) (*MultiStatus, error) {
req, err := c.NewXMLRequest("PROPPATCH", path, propupd)
if err != nil {
return nil, err
}
return c.DoMultiStatus(req.WithContext(ctx))
}
// PropFindFlat performs a PROPFIND request with a zero depth.
func (c *Client) PropFindFlat(ctx context.Context, path string, propfind *PropFind) (*Response, error) {
ms, err := c.PropFind(ctx, path, DepthZero, propfind)
if err != nil {
return nil, err
}
// If the client followed a redirect, the Href might be different from the request path
if len(ms.Responses) != 1 {
return nil, fmt.Errorf("PROPFIND with Depth: 0 returned %d responses", len(ms.Responses))
}
return &ms.Responses[0], nil
}
func parseCommaSeparatedSet(values []string, upper bool) map[string]bool {
m := make(map[string]bool)
for _, v := range values {
fields := strings.FieldsFunc(v, func(r rune) bool {
return unicode.IsSpace(r) || r == ','
})
for _, f := range fields {
if upper {
f = strings.ToUpper(f)
} else {
f = strings.ToLower(f)
}
m[f] = true
}
}
return m
}
func (c *Client) Options(ctx context.Context, path string) (classes map[string]bool, methods map[string]bool, err error) {
req, err := c.NewRequest(http.MethodOptions, path, nil)
if err != nil {
return nil, nil, err
}
resp, err := c.Do(req.WithContext(ctx))
if err != nil {
return nil, nil, err
}
resp.Body.Close()
classes = parseCommaSeparatedSet(resp.Header["Dav"], false)
if !classes["1"] {
return nil, nil, fmt.Errorf("webdav: server doesn't support DAV class 1")
}
methods = parseCommaSeparatedSet(resp.Header["Allow"], true)
return classes, methods, nil
}
// SyncCollection performs a `sync-collection` REPORT operation on a resource
func (c *Client) SyncCollection(ctx context.Context, path, syncToken string, level Depth, limit *Limit, prop *Prop) (*MultiStatus, error) {
q := SyncCollectionQuery{
SyncToken: syncToken,
SyncLevel: level.String(),
Limit: limit,
Prop: prop,
}
req, err := c.NewXMLRequest("REPORT", path, &q)
if err != nil {
return nil, err
}
ms, err := c.DoMultiStatus(req.WithContext(ctx))
if err != nil {
return nil, err
}
return ms, nil
}

drives/davClient/internal/elements.go (new file, 452 lines)

@ -0,0 +1,452 @@
package internal
import (
"encoding/xml"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const Namespace = "DAV:"
var (
ResourceTypeName = xml.Name{Namespace, "resourcetype"}
DisplayNameName = xml.Name{Namespace, "displayname"}
GetContentLengthName = xml.Name{Namespace, "getcontentlength"}
GetContentTypeName = xml.Name{Namespace, "getcontenttype"}
GetLastModifiedName = xml.Name{Namespace, "getlastmodified"}
GetETagName = xml.Name{Namespace, "getetag"}
CurrentUserPrincipalName = xml.Name{Namespace, "current-user-principal"}
)
type Status struct {
Code int
Text string
}
func (s *Status) MarshalText() ([]byte, error) {
text := s.Text
if text == "" {
text = http.StatusText(s.Code)
}
return []byte(fmt.Sprintf("HTTP/1.1 %v %v", s.Code, text)), nil
}
func (s *Status) UnmarshalText(b []byte) error {
if len(b) == 0 {
return nil
}
parts := strings.SplitN(string(b), " ", 3)
if len(parts) != 3 {
return fmt.Errorf("webdav: invalid HTTP status %q: expected 3 fields", s)
}
code, err := strconv.Atoi(parts[1])
if err != nil {
return fmt.Errorf("webdav: invalid HTTP status %q: failed to parse code: %v", s, err)
}
s.Code = code
s.Text = parts[2]
return nil
}
func (s *Status) Err() error {
if s == nil {
return nil
}
// TODO: handle 2xx, 3xx
if s.Code != http.StatusOK {
return &HTTPError{Code: s.Code}
}
return nil
}
type Href url.URL
func (h *Href) String() string {
u := (*url.URL)(h)
return u.String()
}
func (h *Href) MarshalText() ([]byte, error) {
return []byte(h.String()), nil
}
func (h *Href) UnmarshalText(b []byte) error {
u, err := url.Parse(string(b))
if err != nil {
return err
}
*h = Href(*u)
return nil
}
// https://tools.ietf.org/html/rfc4918#section-14.16
type MultiStatus struct {
XMLName xml.Name `xml:"DAV: multistatus"`
Responses []Response `xml:"response"`
ResponseDescription string `xml:"responsedescription,omitempty"`
SyncToken string `xml:"sync-token,omitempty"`
}
func NewMultiStatus(resps ...Response) *MultiStatus {
return &MultiStatus{Responses: resps}
}
// https://tools.ietf.org/html/rfc4918#section-14.24
type Response struct {
XMLName xml.Name `xml:"DAV: response"`
Hrefs []Href `xml:"href"`
PropStats []PropStat `xml:"propstat,omitempty"`
ResponseDescription string `xml:"responsedescription,omitempty"`
Status *Status `xml:"status,omitempty"`
Error *Error `xml:"error,omitempty"`
Location *Location `xml:"location,omitempty"`
}
func NewOKResponse(path string) *Response {
href := Href{Path: path}
return &Response{
Hrefs: []Href{href},
Status: &Status{Code: http.StatusOK},
}
}
func NewErrorResponse(path string, err error) *Response {
code := http.StatusInternalServerError
var httpErr *HTTPError
if errors.As(err, &httpErr) {
code = httpErr.Code
}
var errElt *Error
errors.As(err, &errElt)
href := Href{Path: path}
return &Response{
Hrefs: []Href{href},
Status: &Status{Code: code},
ResponseDescription: err.Error(),
Error: errElt,
}
}
func (resp *Response) Err() error {
if resp.Status == nil || resp.Status.Code/100 == 2 {
return nil
}
var err error
if resp.Error != nil {
err = resp.Error
}
if resp.ResponseDescription != "" {
if err != nil {
err = fmt.Errorf("%v (%w)", resp.ResponseDescription, err)
} else {
err = fmt.Errorf("%v", resp.ResponseDescription)
}
}
return &HTTPError{
Code: resp.Status.Code,
Err: err,
}
}
func (resp *Response) Path() (string, error) {
err := resp.Err()
var path string
if len(resp.Hrefs) == 1 {
path = resp.Hrefs[0].Path
} else if err == nil {
err = fmt.Errorf("webdav: malformed response: expected exactly one href element, got %v", len(resp.Hrefs))
}
return path, err
}
func (resp *Response) DecodeProp(values ...interface{}) error {
for _, v := range values {
// TODO wrap errors with more context (XML name)
name, err := valueXMLName(v)
if err != nil {
return err
}
if err := resp.Err(); err != nil {
return newPropError(name, err)
}
for _, propstat := range resp.PropStats {
raw := propstat.Prop.Get(name)
if raw == nil {
continue
}
if err := propstat.Status.Err(); err != nil {
return newPropError(name, err)
}
if err := raw.Decode(v); err != nil {
return newPropError(name, err)
}
return nil
}
return newPropError(name, &HTTPError{
Code: http.StatusNotFound,
Err: fmt.Errorf("missing property"),
})
}
return nil
}
func newPropError(name xml.Name, err error) error {
return fmt.Errorf("property <%v %v>: %w", name.Space, name.Local, err)
}
func (resp *Response) EncodeProp(code int, v interface{}) error {
raw, err := EncodeRawXMLElement(v)
if err != nil {
return err
}
for i := range resp.PropStats {
propstat := &resp.PropStats[i]
if propstat.Status.Code == code {
propstat.Prop.Raw = append(propstat.Prop.Raw, *raw)
return nil
}
}
resp.PropStats = append(resp.PropStats, PropStat{
Status: Status{Code: code},
Prop: Prop{Raw: []RawXMLValue{*raw}},
})
return nil
}
// https://tools.ietf.org/html/rfc4918#section-14.9
type Location struct {
XMLName xml.Name `xml:"DAV: location"`
Href Href `xml:"href"`
}
// https://tools.ietf.org/html/rfc4918#section-14.22
type PropStat struct {
XMLName xml.Name `xml:"DAV: propstat"`
Prop Prop `xml:"prop"`
Status Status `xml:"status"`
ResponseDescription string `xml:"responsedescription,omitempty"`
Error *Error `xml:"error,omitempty"`
}
// https://tools.ietf.org/html/rfc4918#section-14.18
type Prop struct {
XMLName xml.Name `xml:"DAV: prop"`
Raw []RawXMLValue `xml:",any"`
}
func EncodeProp(values ...interface{}) (*Prop, error) {
l := make([]RawXMLValue, len(values))
for i, v := range values {
raw, err := EncodeRawXMLElement(v)
if err != nil {
return nil, err
}
l[i] = *raw
}
return &Prop{Raw: l}, nil
}
func (p *Prop) Get(name xml.Name) *RawXMLValue {
for i := range p.Raw {
raw := &p.Raw[i]
if n, ok := raw.XMLName(); ok && name == n {
return raw
}
}
return nil
}
func (p *Prop) Decode(v interface{}) error {
name, err := valueXMLName(v)
if err != nil {
return err
}
raw := p.Get(name)
if raw == nil {
return HTTPErrorf(http.StatusNotFound, "missing property %s", name)
}
return raw.Decode(v)
}
// https://tools.ietf.org/html/rfc4918#section-14.20
type PropFind struct {
XMLName xml.Name `xml:"DAV: propfind"`
Prop *Prop `xml:"prop,omitempty"`
AllProp *struct{} `xml:"allprop,omitempty"`
Include *Include `xml:"include,omitempty"`
PropName *struct{} `xml:"propname,omitempty"`
}
func xmlNamesToRaw(names []xml.Name) []RawXMLValue {
l := make([]RawXMLValue, len(names))
for i, name := range names {
l[i] = *NewRawXMLElement(name, nil, nil)
}
return l
}
func NewPropNamePropFind(names ...xml.Name) *PropFind {
return &PropFind{Prop: &Prop{Raw: xmlNamesToRaw(names)}}
}
// https://tools.ietf.org/html/rfc4918#section-14.8
type Include struct {
XMLName xml.Name `xml:"DAV: include"`
Raw []RawXMLValue `xml:",any"`
}
// https://tools.ietf.org/html/rfc4918#section-15.9
type ResourceType struct {
XMLName xml.Name `xml:"DAV: resourcetype"`
Raw []RawXMLValue `xml:",any"`
}
func NewResourceType(names ...xml.Name) *ResourceType {
return &ResourceType{Raw: xmlNamesToRaw(names)}
}
func (t *ResourceType) Is(name xml.Name) bool {
for _, raw := range t.Raw {
if n, ok := raw.XMLName(); ok && name == n {
return true
}
}
return false
}
var CollectionName = xml.Name{Namespace, "collection"}
// https://tools.ietf.org/html/rfc4918#section-15.4
type GetContentLength struct {
XMLName xml.Name `xml:"DAV: getcontentlength"`
Length int64 `xml:",chardata"`
}
// https://tools.ietf.org/html/rfc4918#section-15.5
type GetContentType struct {
XMLName xml.Name `xml:"DAV: getcontenttype"`
Type string `xml:",chardata"`
}
type Time time.Time
func (t *Time) UnmarshalText(b []byte) error {
tt, err := http.ParseTime(string(b))
if err != nil {
return err
}
*t = Time(tt)
return nil
}
func (t *Time) MarshalText() ([]byte, error) {
s := time.Time(*t).UTC().Format(http.TimeFormat)
return []byte(s), nil
}
// https://tools.ietf.org/html/rfc4918#section-15.7
type GetLastModified struct {
XMLName xml.Name `xml:"DAV: getlastmodified"`
LastModified Time `xml:",chardata"`
}
// https://tools.ietf.org/html/rfc4918#section-15.6
type GetETag struct {
XMLName xml.Name `xml:"DAV: getetag"`
ETag ETag `xml:",chardata"`
}
type ETag string
func (etag *ETag) UnmarshalText(b []byte) error {
s, err := strconv.Unquote(string(b))
if err != nil {
return fmt.Errorf("webdav: failed to unquote ETag: %v", err)
}
*etag = ETag(s)
return nil
}
func (etag ETag) MarshalText() ([]byte, error) {
return []byte(etag.String()), nil
}
func (etag ETag) String() string {
return fmt.Sprintf("%q", string(etag))
}
// https://tools.ietf.org/html/rfc4918#section-14.5
type Error struct {
XMLName xml.Name `xml:"DAV: error"`
Raw []RawXMLValue `xml:",any"`
}
func (err *Error) Error() string {
b, _ := xml.Marshal(err)
return string(b)
}
// https://tools.ietf.org/html/rfc4918#section-15.2
type DisplayName struct {
XMLName xml.Name `xml:"DAV: displayname"`
Name string `xml:",chardata"`
}
// https://tools.ietf.org/html/rfc5397#section-3
type CurrentUserPrincipal struct {
XMLName xml.Name `xml:"DAV: current-user-principal"`
Href Href `xml:"href,omitempty"`
Unauthenticated *struct{} `xml:"unauthenticated,omitempty"`
}
// https://tools.ietf.org/html/rfc4918#section-14.19
type PropertyUpdate struct {
XMLName xml.Name `xml:"DAV: propertyupdate"`
Remove []Remove `xml:"remove"`
Set []Set `xml:"set"`
}
// https://tools.ietf.org/html/rfc4918#section-14.23
type Remove struct {
XMLName xml.Name `xml:"DAV: remove"`
Prop Prop `xml:"prop"`
}
// https://tools.ietf.org/html/rfc4918#section-14.26
type Set struct {
XMLName xml.Name `xml:"DAV: set"`
Prop Prop `xml:"prop"`
}
// https://tools.ietf.org/html/rfc6578#section-6.1
type SyncCollectionQuery struct {
XMLName xml.Name `xml:"DAV: sync-collection"`
SyncToken string `xml:"sync-token"`
Limit *Limit `xml:"limit,omitempty"`
SyncLevel string `xml:"sync-level"`
Prop *Prop `xml:"prop"`
}
// https://tools.ietf.org/html/rfc5323#section-5.17
type Limit struct {
XMLName xml.Name `xml:"DAV: limit"`
NResults uint `xml:"nresults"`
}

drives/davClient/internal/internal.go (new file, 108 lines)

@ -0,0 +1,108 @@
// Package internal provides low-level helpers for WebDAV clients and servers.
package internal
import (
"errors"
"fmt"
"net/http"
)
// Depth indicates whether a request applies to the resource's members. It's
// defined in RFC 4918 section 10.2.
type Depth int
const (
// DepthZero indicates that the request applies only to the resource.
DepthZero Depth = 0
// DepthOne indicates that the request applies to the resource and its
// internal members only.
DepthOne Depth = 1
// DepthInfinity indicates that the request applies to the resource and all
// of its members.
DepthInfinity Depth = -1
)
// ParseDepth parses a Depth header.
func ParseDepth(s string) (Depth, error) {
switch s {
case "0":
return DepthZero, nil
case "1":
return DepthOne, nil
case "infinity":
return DepthInfinity, nil
}
return 0, fmt.Errorf("webdav: invalid Depth value")
}
// String formats the depth.
func (d Depth) String() string {
switch d {
case DepthZero:
return "0"
case DepthOne:
return "1"
case DepthInfinity:
return "infinity"
}
panic("webdav: invalid Depth value")
}
// ParseOverwrite parses an Overwrite header.
func ParseOverwrite(s string) (bool, error) {
switch s {
case "T":
return true, nil
case "F":
return false, nil
}
return false, fmt.Errorf("webdav: invalid Overwrite value")
}
// FormatOverwrite formats an Overwrite header.
func FormatOverwrite(overwrite bool) string {
if overwrite {
return "T"
} else {
return "F"
}
}
type HTTPError struct {
Code int
Err error
}
func HTTPErrorFromError(err error) *HTTPError {
if err == nil {
return nil
}
if httpErr, ok := err.(*HTTPError); ok {
return httpErr
} else {
return &HTTPError{http.StatusInternalServerError, err}
}
}
func IsNotFound(err error) bool {
var httpErr *HTTPError
if errors.As(err, &httpErr) {
return httpErr.Code == http.StatusNotFound
}
return false
}
func HTTPErrorf(code int, format string, a ...interface{}) *HTTPError {
return &HTTPError{code, fmt.Errorf(format, a...)}
}
func (err *HTTPError) Error() string {
s := fmt.Sprintf("%v %v", err.Code, http.StatusText(err.Code))
if err.Err != nil {
return fmt.Sprintf("%v: %v", s, err.Err)
} else {
return s
}
}
func (err *HTTPError) Unwrap() error {
return err.Err
}

drives/davClient/internal/xml.go (new file, 175 lines)

@ -0,0 +1,175 @@
package internal
import (
"encoding/xml"
"fmt"
"io"
"reflect"
"strings"
)
// RawXMLValue is a raw XML value. It implements xml.Unmarshaler and
// xml.Marshaler and can be used to delay XML decoding or precompute an XML
// encoding.
type RawXMLValue struct {
tok xml.Token // guaranteed not to be xml.EndElement
children []RawXMLValue
// Unfortunately encoding/xml doesn't offer TokenWriter, so we need to
// cache outgoing data.
out interface{}
}
// NewRawXMLElement creates a new RawXMLValue for an element.
func NewRawXMLElement(name xml.Name, attr []xml.Attr, children []RawXMLValue) *RawXMLValue {
return &RawXMLValue{tok: xml.StartElement{name, attr}, children: children}
}
// EncodeRawXMLElement encodes a value into a new RawXMLValue. The XML value
// can only be used for marshalling.
func EncodeRawXMLElement(v interface{}) (*RawXMLValue, error) {
return &RawXMLValue{out: v}, nil
}
// UnmarshalXML implements xml.Unmarshaler.
func (val *RawXMLValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
val.tok = start
val.children = nil
val.out = nil
for {
tok, err := d.Token()
if err != nil {
return err
}
switch tok := tok.(type) {
case xml.StartElement:
child := RawXMLValue{}
if err := child.UnmarshalXML(d, tok); err != nil {
return err
}
val.children = append(val.children, child)
case xml.EndElement:
return nil
default:
val.children = append(val.children, RawXMLValue{tok: xml.CopyToken(tok)})
}
}
}
// MarshalXML implements xml.Marshaler.
func (val *RawXMLValue) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if val.out != nil {
return e.Encode(val.out)
}
switch tok := val.tok.(type) {
case xml.StartElement:
if err := e.EncodeToken(tok); err != nil {
return err
}
for _, child := range val.children {
// TODO: find a sensible value for the start argument?
if err := child.MarshalXML(e, xml.StartElement{}); err != nil {
return err
}
}
return e.EncodeToken(tok.End())
case xml.EndElement:
panic("unexpected end element")
default:
return e.EncodeToken(tok)
}
}
var _ xml.Marshaler = (*RawXMLValue)(nil)
var _ xml.Unmarshaler = (*RawXMLValue)(nil)
func (val *RawXMLValue) Decode(v interface{}) error {
return xml.NewTokenDecoder(val.TokenReader()).Decode(&v)
}
func (val *RawXMLValue) XMLName() (name xml.Name, ok bool) {
if start, ok := val.tok.(xml.StartElement); ok {
return start.Name, true
}
return xml.Name{}, false
}
// TokenReader returns a stream of tokens for the XML value.
func (val *RawXMLValue) TokenReader() xml.TokenReader {
if val.out != nil {
panic("webdav: called RawXMLValue.TokenReader on a marshal-only XML value")
}
return &rawXMLValueReader{val: val}
}
type rawXMLValueReader struct {
val *RawXMLValue
start, end bool
child int
childReader xml.TokenReader
}
func (tr *rawXMLValueReader) Token() (xml.Token, error) {
if tr.end {
return nil, io.EOF
}
start, ok := tr.val.tok.(xml.StartElement)
if !ok {
tr.end = true
return tr.val.tok, nil
}
if !tr.start {
tr.start = true
return start, nil
}
for tr.child < len(tr.val.children) {
if tr.childReader == nil {
tr.childReader = tr.val.children[tr.child].TokenReader()
}
tok, err := tr.childReader.Token()
if err == io.EOF {
tr.childReader = nil
tr.child++
} else {
return tok, err
}
}
tr.end = true
return start.End(), nil
}
var _ xml.TokenReader = (*rawXMLValueReader)(nil)
func valueXMLName(v interface{}) (xml.Name, error) {
t := reflect.TypeOf(v)
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return xml.Name{}, fmt.Errorf("webdav: %T is not a struct", v)
}
nameField, ok := t.FieldByName("XMLName")
if !ok {
return xml.Name{}, fmt.Errorf("webdav: %T is missing an XMLName struct field", v)
}
if nameField.Type != reflect.TypeOf(xml.Name{}) {
return xml.Name{}, fmt.Errorf("webdav: %T.XMLName isn't an xml.Name", v)
}
tag := nameField.Tag.Get("xml")
if tag == "" {
return xml.Name{}, fmt.Errorf(`webdav: %T.XMLName is missing an "xml" tag`, v)
}
name := strings.Split(tag, ",")[0]
nameParts := strings.Split(name, " ")
if len(nameParts) != 2 {
return xml.Name{}, fmt.Errorf("webdav: expected a namespace and local name in %T.XMLName's xml tag", v)
}
return xml.Name{nameParts[0], nameParts[1]}, nil
}
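The RawXMLValue type above is easiest to see in a small round trip: capture an element as raw tokens, inspect its name, and decode it into a concrete type only when the caller knows what it wants. The following is a minimal sketch, not part of the change; it assumes it lives somewhere under drives/davClient (the package is internal, so it cannot be imported from outside that tree), and the displayName type and sample payload are purely illustrative.

package davClient

import (
    "encoding/xml"
    "fmt"

    "github.com/openziti/zrok/drives/davClient/internal"
)

// displayName is a hypothetical concrete type used only for this sketch.
type displayName struct {
    XMLName xml.Name `xml:"DAV: displayname"`
    Name    string   `xml:",chardata"`
}

func sketchRawXMLValue() error {
    const payload = `<displayname xmlns="DAV:">drive</displayname>`

    // Capture the element as raw tokens first, without committing to a type.
    var raw internal.RawXMLValue
    if err := xml.Unmarshal([]byte(payload), &raw); err != nil {
        return err
    }
    if name, ok := raw.XMLName(); ok {
        fmt.Println(name.Space, name.Local) // DAV: displayname
    }

    // Decode into a concrete type only once the caller knows what it wants.
    var dn displayName
    if err := raw.Decode(&dn); err != nil {
        return err
    }
    fmt.Println(dn.Name) // drive
    return nil
}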

119
drives/davClient/model.go Normal file

@ -0,0 +1,119 @@
package davClient
import (
"errors"
"fmt"
"net/http"
"time"
)
// Depth indicates whether a request applies to the resource's members. It's
// defined in RFC 4918 section 10.2.
type Depth int
const (
// DepthZero indicates that the request applies only to the resource.
DepthZero Depth = 0
// DepthOne indicates that the request applies to the resource and its
// internal members only.
DepthOne Depth = 1
// DepthInfinity indicates that the request applies to the resource and all
// of its members.
DepthInfinity Depth = -1
)
// ParseDepth parses a Depth header.
func ParseDepth(s string) (Depth, error) {
switch s {
case "0":
return DepthZero, nil
case "1":
return DepthOne, nil
case "infinity":
return DepthInfinity, nil
}
return 0, fmt.Errorf("webdav: invalid Depth value")
}
// String formats the depth.
func (d Depth) String() string {
switch d {
case DepthZero:
return "0"
case DepthOne:
return "1"
case DepthInfinity:
return "infinity"
}
panic("webdav: invalid Depth value")
}
// ParseOverwrite parses an Overwrite header.
func ParseOverwrite(s string) (bool, error) {
switch s {
case "T":
return true, nil
case "F":
return false, nil
}
return false, fmt.Errorf("webdav: invalid Overwrite value")
}
// FormatOverwrite formats an Overwrite header.
func FormatOverwrite(overwrite bool) string {
if overwrite {
return "T"
} else {
return "F"
}
}
type HTTPError struct {
Code int
Err error
}
func HTTPErrorFromError(err error) *HTTPError {
if err == nil {
return nil
}
if httpErr, ok := err.(*HTTPError); ok {
return httpErr
} else {
return &HTTPError{http.StatusInternalServerError, err}
}
}
func IsNotFound(err error) bool {
var httpErr *HTTPError
if errors.As(err, &httpErr) {
return httpErr.Code == http.StatusNotFound
}
return false
}
func HTTPErrorf(code int, format string, a ...interface{}) *HTTPError {
return &HTTPError{code, fmt.Errorf(format, a...)}
}
func (err *HTTPError) Error() string {
s := fmt.Sprintf("%v %v", err.Code, http.StatusText(err.Code))
if err.Err != nil {
return fmt.Sprintf("%v: %v", s, err.Err)
} else {
return s
}
}
func (err *HTTPError) Unwrap() error {
return err.Err
}
type FileInfo struct {
Path string
Size int64
ModTime time.Time
IsDir bool
MIMEType string
ETag string
}
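Taken together, the helpers in this file cover the Depth and Overwrite request headers and the error plumbing used by the client. A minimal sketch of how they compose, assuming the drives/davClient import path and with values chosen only for illustration:

package main

import (
    "fmt"
    "net/http"

    "github.com/openziti/zrok/drives/davClient"
)

func main() {
    // Depth round-trips between its wire form and the typed constant.
    d, err := davClient.ParseDepth("infinity")
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // infinity

    // Overwrite is carried as the single characters "T" or "F".
    fmt.Println(davClient.FormatOverwrite(true), davClient.FormatOverwrite(false)) // T F

    // HTTPError carries a status code so callers can test for "not found".
    notFound := davClient.HTTPErrorf(http.StatusNotFound, "no such resource")
    fmt.Println(notFound, davClient.IsNotFound(notFound)) // 404 Not Found: no such resource true
}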

855
drives/davServer/file.go Normal file

@ -0,0 +1,855 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"context"
"encoding/xml"
"io"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
// slashClean is equivalent to but slightly more efficient than
// path.Clean("/" + name).
func slashClean(name string) string {
if name == "" || name[0] != '/' {
name = "/" + name
}
return path.Clean(name)
}
// A FileSystem implements access to a collection of named files. The elements
// in a file path are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
//
// Each method has the same semantics as the os package's function of the same
// name.
//
// Note that the os.Rename documentation says that "OS-specific restrictions
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type FileSystem interface {
Mkdir(ctx context.Context, name string, perm os.FileMode) error
OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
RemoveAll(ctx context.Context, name string) error
Rename(ctx context.Context, oldName, newName string) error
Stat(ctx context.Context, name string) (os.FileInfo, error)
}
// A File is returned by a FileSystem's OpenFile method and can be served by a
// Handler.
//
// A File may optionally implement the DeadPropsHolder interface, if it can
// load and save dead properties.
type File interface {
http.File
io.Writer
}
type webdavFile struct {
File
name string
}
func (f *webdavFile) DeadProps() (map[xml.Name]Property, error) {
var (
xmlName xml.Name
property Property
properties = make(map[xml.Name]Property)
)
var stat fs.FileInfo
stat, err := f.Stat()
if err == nil {
xmlName.Space = "zrok:"
xmlName.Local = "lastmodified"
property.XMLName = xmlName
property.InnerXML = strconv.AppendInt(nil, stat.ModTime().Unix(), 10)
properties[xmlName] = property
}
return properties, nil
}
func (f *webdavFile) Patch(patches []Proppatch) ([]Propstat, error) {
var stat Propstat
stat.Status = http.StatusOK
for _, patch := range patches {
for _, prop := range patch.Props {
if prop.XMLName.Space == "zrok:" && prop.XMLName.Local == "lastmodified" {
modtimeUnix, err := strconv.ParseInt(string(prop.InnerXML), 10, 64)
if err != nil {
return nil, err
}
if err := f.updateModtime(f.name, time.Unix(modtimeUnix, 0)); err != nil {
return nil, err
}
}
}
}
return []Propstat{stat}, nil
}
func (f *webdavFile) updateModtime(path string, modtime time.Time) error {
if err := os.Chtimes(f.name, time.Now(), modtime); err != nil {
return err
}
return nil
}
// A Dir implements FileSystem using the native file system restricted to a
// specific directory tree.
//
// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
// string value is a filename on the native file system, not a URL, so it is
// separated by filepath.Separator, which isn't necessarily '/'.
//
// An empty Dir is treated as ".".
type Dir string
func (d Dir) resolve(name string) string {
// This implementation is based on Dir.Open's code in the standard net/http package.
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
strings.Contains(name, "\x00") {
return ""
}
dir := string(d)
if dir == "" {
dir = "."
}
return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
}
func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
if name = d.resolve(name); name == "" {
return os.ErrNotExist
}
return os.Mkdir(name, perm)
}
func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
if name = d.resolve(name); name == "" {
return nil, os.ErrNotExist
}
f, err := os.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
return &webdavFile{f, name}, nil
}
func (d Dir) RemoveAll(ctx context.Context, name string) error {
if name = d.resolve(name); name == "" {
return os.ErrNotExist
}
if name == filepath.Clean(string(d)) {
// Prohibit removing the virtual root directory.
return os.ErrInvalid
}
return os.RemoveAll(name)
}
func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
if oldName = d.resolve(oldName); oldName == "" {
return os.ErrNotExist
}
if newName = d.resolve(newName); newName == "" {
return os.ErrNotExist
}
if root := filepath.Clean(string(d)); root == oldName || root == newName {
// Prohibit renaming from or to the virtual root directory.
return os.ErrInvalid
}
return os.Rename(oldName, newName)
}
func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
if name = d.resolve(name); name == "" {
return nil, os.ErrNotExist
}
return os.Stat(name)
}
// NewMemFS returns a new in-memory FileSystem implementation.
func NewMemFS() FileSystem {
return &memFS{
root: memFSNode{
children: make(map[string]*memFSNode),
mode: 0660 | os.ModeDir,
modTime: time.Now(),
},
}
}
// A memFS implements FileSystem, storing all metadata and actual file data
// in-memory. No limits on filesystem size are used, so it is not recommended
// this be used where the clients are untrusted.
//
// Concurrent access is permitted. The tree structure is protected by a mutex,
// and each node's contents and metadata are protected by a per-node mutex.
//
// TODO: Enforce file permissions.
type memFS struct {
mu sync.Mutex
root memFSNode
}
// TODO: clean up and rationalize the walk/find code.
// walk walks the directory tree for the fullname, calling f at each step. If f
// returns an error, the walk will be aborted and return that same error.
//
// dir is the directory at that step, frag is the name fragment, and final is
// whether it is the final step. For example, walking "/foo/bar/x" will result
// in 3 calls to f:
// - "/", "foo", false
// - "/foo/", "bar", false
// - "/foo/bar/", "x", true
//
// The frag argument will be empty only if dir is the root node and the walk
// ends at that root node.
func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
original := fullname
fullname = slashClean(fullname)
// Strip any leading "/"s to make fullname a relative path, as the walk
// starts at fs.root.
if fullname[0] == '/' {
fullname = fullname[1:]
}
dir := &fs.root
for {
frag, remaining := fullname, ""
i := strings.IndexRune(fullname, '/')
final := i < 0
if !final {
frag, remaining = fullname[:i], fullname[i+1:]
}
if frag == "" && dir != &fs.root {
panic("webdav: empty path fragment for a clean path")
}
if err := f(dir, frag, final); err != nil {
return &os.PathError{
Op: op,
Path: original,
Err: err,
}
}
if final {
break
}
child := dir.children[frag]
if child == nil {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrNotExist,
}
}
if !child.mode.IsDir() {
return &os.PathError{
Op: op,
Path: original,
Err: os.ErrInvalid,
}
}
dir, fullname = child, remaining
}
return nil
}
// find returns the parent of the named node and the relative name fragment
// from the parent to the child. For example, if finding "/foo/bar/baz" then
// parent will be the node for "/foo/bar" and frag will be "baz".
//
// If the fullname names the root node, then parent, frag and err will be zero.
//
// find returns an error if the parent does not already exist or the parent
// isn't a directory, but it will not return an error per se if the child does
// not already exist. The error returned is either nil or an *os.PathError
// whose Op is op.
func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
if !final {
return nil
}
if frag0 != "" {
parent, frag = parent0, frag0
}
return nil
})
return parent, frag, err
}
func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("mkdir", name)
if err != nil {
return err
}
if dir == nil {
// We can't create the root.
return os.ErrInvalid
}
if _, ok := dir.children[frag]; ok {
return os.ErrExist
}
dir.children[frag] = &memFSNode{
children: make(map[string]*memFSNode),
mode: perm.Perm() | os.ModeDir,
modTime: time.Now(),
}
return nil
}
func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("open", name)
if err != nil {
return nil, err
}
var n *memFSNode
if dir == nil {
// We're opening the root.
if runtime.GOOS == "zos" {
if flag&os.O_WRONLY != 0 {
return nil, os.ErrPermission
}
} else {
if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
return nil, os.ErrPermission
}
}
n, frag = &fs.root, "/"
} else {
n = dir.children[frag]
if flag&(os.O_SYNC|os.O_APPEND) != 0 {
// memFile doesn't support these flags yet.
return nil, os.ErrInvalid
}
if flag&os.O_CREATE != 0 {
if flag&os.O_EXCL != 0 && n != nil {
return nil, os.ErrExist
}
if n == nil {
n = &memFSNode{
mode: perm.Perm(),
}
dir.children[frag] = n
}
}
if n == nil {
return nil, os.ErrNotExist
}
if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
n.mu.Lock()
n.data = nil
n.mu.Unlock()
}
}
children := make([]os.FileInfo, 0, len(n.children))
for cName, c := range n.children {
children = append(children, c.stat(cName))
}
return &memFile{
n: n,
nameSnapshot: frag,
childrenSnapshot: children,
}, nil
}
func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("remove", name)
if err != nil {
return err
}
if dir == nil {
// We can't remove the root.
return os.ErrInvalid
}
delete(dir.children, frag)
return nil
}
func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
fs.mu.Lock()
defer fs.mu.Unlock()
oldName = slashClean(oldName)
newName = slashClean(newName)
if oldName == newName {
return nil
}
if strings.HasPrefix(newName, oldName+"/") {
// We can't rename oldName to be a sub-directory of itself.
return os.ErrInvalid
}
oDir, oFrag, err := fs.find("rename", oldName)
if err != nil {
return err
}
if oDir == nil {
// We can't rename from the root.
return os.ErrInvalid
}
nDir, nFrag, err := fs.find("rename", newName)
if err != nil {
return err
}
if nDir == nil {
// We can't rename to the root.
return os.ErrInvalid
}
oNode, ok := oDir.children[oFrag]
if !ok {
return os.ErrNotExist
}
if oNode.children != nil {
if nNode, ok := nDir.children[nFrag]; ok {
if nNode.children == nil {
return errNotADirectory
}
if len(nNode.children) != 0 {
return errDirectoryNotEmpty
}
}
}
delete(oDir.children, oFrag)
nDir.children[nFrag] = oNode
return nil
}
func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
dir, frag, err := fs.find("stat", name)
if err != nil {
return nil, err
}
if dir == nil {
// We're stat'ting the root.
return fs.root.stat("/"), nil
}
if n, ok := dir.children[frag]; ok {
return n.stat(path.Base(name)), nil
}
return nil, os.ErrNotExist
}
// A memFSNode represents a single entry in the in-memory filesystem and also
// implements os.FileInfo.
type memFSNode struct {
// children is protected by memFS.mu.
children map[string]*memFSNode
mu sync.Mutex
data []byte
mode os.FileMode
modTime time.Time
deadProps map[xml.Name]Property
}
func (n *memFSNode) stat(name string) *memFileInfo {
n.mu.Lock()
defer n.mu.Unlock()
return &memFileInfo{
name: name,
size: int64(len(n.data)),
mode: n.mode,
modTime: n.modTime,
}
}
func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
n.mu.Lock()
defer n.mu.Unlock()
if len(n.deadProps) == 0 {
return nil, nil
}
ret := make(map[xml.Name]Property, len(n.deadProps))
for k, v := range n.deadProps {
ret[k] = v
}
return ret, nil
}
func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
n.mu.Lock()
defer n.mu.Unlock()
pstat := Propstat{Status: http.StatusOK}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
if patch.Remove {
delete(n.deadProps, p.XMLName)
continue
}
if n.deadProps == nil {
n.deadProps = map[xml.Name]Property{}
}
n.deadProps[p.XMLName] = p
}
}
return []Propstat{pstat}, nil
}
type memFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (f *memFileInfo) Name() string { return f.name }
func (f *memFileInfo) Size() int64 { return f.size }
func (f *memFileInfo) Mode() os.FileMode { return f.mode }
func (f *memFileInfo) ModTime() time.Time { return f.modTime }
func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
func (f *memFileInfo) Sys() interface{} { return nil }
// A memFile is a File implementation for a memFSNode. It holds a per-file (not
// per-node) read/write position, and a snapshot of the memFS' tree structure
// (a node's name and children) for that node.
type memFile struct {
n *memFSNode
nameSnapshot string
childrenSnapshot []os.FileInfo
// pos is protected by n.mu.
pos int
}
// A *memFile implements the optional DeadPropsHolder interface.
var _ DeadPropsHolder = (*memFile)(nil)
func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
func (f *memFile) Close() error {
return nil
}
func (f *memFile) Read(p []byte) (int, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos >= len(f.n.data) {
return 0, io.EOF
}
n := copy(p, f.n.data[f.pos:])
f.pos += n
return n, nil
}
func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
if !f.n.mode.IsDir() {
return nil, os.ErrInvalid
}
old := f.pos
if old >= len(f.childrenSnapshot) {
// The os.File Readdir docs say that at the end of a directory,
// the error is io.EOF if count > 0 and nil if count <= 0.
if count > 0 {
return nil, io.EOF
}
return nil, nil
}
if count > 0 {
f.pos += count
if f.pos > len(f.childrenSnapshot) {
f.pos = len(f.childrenSnapshot)
}
} else {
f.pos = len(f.childrenSnapshot)
old = 0
}
return f.childrenSnapshot[old:f.pos], nil
}
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
f.n.mu.Lock()
defer f.n.mu.Unlock()
npos := f.pos
// TODO: How to handle offsets greater than the size of system int?
switch whence {
case io.SeekStart:
npos = int(offset)
case io.SeekCurrent:
npos += int(offset)
case io.SeekEnd:
npos = len(f.n.data) + int(offset)
default:
npos = -1
}
if npos < 0 {
return 0, os.ErrInvalid
}
f.pos = npos
return int64(f.pos), nil
}
func (f *memFile) Stat() (os.FileInfo, error) {
return f.n.stat(f.nameSnapshot), nil
}
func (f *memFile) Write(p []byte) (int, error) {
lenp := len(p)
f.n.mu.Lock()
defer f.n.mu.Unlock()
if f.n.mode.IsDir() {
return 0, os.ErrInvalid
}
if f.pos < len(f.n.data) {
n := copy(f.n.data[f.pos:], p)
f.pos += n
p = p[n:]
} else if f.pos > len(f.n.data) {
// Write permits the creation of holes, if we've seek'ed past the
// existing end of file.
if f.pos <= cap(f.n.data) {
oldLen := len(f.n.data)
f.n.data = f.n.data[:f.pos]
hole := f.n.data[oldLen:]
for i := range hole {
hole[i] = 0
}
} else {
d := make([]byte, f.pos, f.pos+len(p))
copy(d, f.n.data)
f.n.data = d
}
}
if len(p) > 0 {
// We should only get here if f.pos == len(f.n.data).
f.n.data = append(f.n.data, p...)
f.pos = len(f.n.data)
}
f.n.modTime = time.Now()
return lenp, nil
}
// moveFiles moves files and/or directories from src to dst.
//
// See section 9.9.4 for when various HTTP status codes apply.
func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
created := false
if _, err := fs.Stat(ctx, dst); err != nil {
if !os.IsNotExist(err) {
return http.StatusForbidden, err
}
created = true
} else if overwrite {
// Section 9.9.3 says that "If a resource exists at the destination
// and the Overwrite header is "T", then prior to performing the move,
// the server must perform a DELETE with "Depth: infinity" on the
// destination resource."
if err := fs.RemoveAll(ctx, dst); err != nil {
return http.StatusForbidden, err
}
} else {
return http.StatusPreconditionFailed, os.ErrExist
}
if err := fs.Rename(ctx, src, dst); err != nil {
return http.StatusForbidden, err
}
if created {
return http.StatusCreated, nil
}
return http.StatusNoContent, nil
}
func copyProps(dst, src File) error {
d, ok := dst.(DeadPropsHolder)
if !ok {
return nil
}
s, ok := src.(DeadPropsHolder)
if !ok {
return nil
}
m, err := s.DeadProps()
if err != nil {
return err
}
props := make([]Property, 0, len(m))
for _, prop := range m {
props = append(props, prop)
}
_, err = d.Patch([]Proppatch{{Props: props}})
return err
}
// copyFiles copies files and/or directories from src to dst.
//
// See section 9.8.5 for when various HTTP status codes apply.
func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
if recursion == 1000 {
return http.StatusInternalServerError, errRecursionTooDeep
}
recursion++
// TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
// into /A/B/ could lead to infinite recursion if not handled correctly."
srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusInternalServerError, err
}
defer srcFile.Close()
srcStat, err := srcFile.Stat()
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusInternalServerError, err
}
srcPerm := srcStat.Mode() & os.ModePerm
created := false
if _, err := fs.Stat(ctx, dst); err != nil {
if os.IsNotExist(err) {
created = true
} else {
return http.StatusForbidden, err
}
} else {
if !overwrite {
return http.StatusPreconditionFailed, os.ErrExist
}
if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
return http.StatusForbidden, err
}
}
if srcStat.IsDir() {
if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
return http.StatusForbidden, err
}
if depth == infiniteDepth {
children, err := srcFile.Readdir(-1)
if err != nil {
return http.StatusForbidden, err
}
for _, c := range children {
name := c.Name()
s := path.Join(src, name)
d := path.Join(dst, name)
cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
if cErr != nil {
// TODO: MultiStatus.
return cStatus, cErr
}
}
}
} else {
dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
if err != nil {
if os.IsNotExist(err) {
return http.StatusConflict, err
}
return http.StatusForbidden, err
}
_, copyErr := io.Copy(dstFile, srcFile)
propsErr := copyProps(dstFile, srcFile)
closeErr := dstFile.Close()
if copyErr != nil {
return http.StatusInternalServerError, copyErr
}
if propsErr != nil {
return http.StatusInternalServerError, propsErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
}
if created {
return http.StatusCreated, nil
}
return http.StatusNoContent, nil
}
// walkFS traverses filesystem fs starting at name up to depth levels.
//
// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
// walkFS calls walkFn. If a visited file system node is a directory and
// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
// This implementation is based on Walk's code in the standard path/filepath package.
err := walkFn(name, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() || depth == 0 {
return nil
}
if depth == 1 {
depth = 0
}
// Read directory names.
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return walkFn(name, info, err)
}
fileInfos, err := f.Readdir(0)
f.Close()
if err != nil {
return walkFn(name, info, err)
}
for _, fileInfo := range fileInfos {
filename := path.Join(name, fileInfo.Name())
fileInfo, err := fs.Stat(ctx, filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
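Both FileSystem implementations above, Dir (backed by the host filesystem) and the in-memory filesystem returned by NewMemFS, satisfy the same interface, so handlers and tests can swap between them. A short sketch of that interchangeability, assuming the drives/davServer import path; the paths and file contents are illustrative only:

package main

import (
    "context"
    "fmt"
    "os"

    "github.com/openziti/zrok/drives/davServer"
)

// writeAndStat creates a directory and a file, writes to it, and stats it,
// using only the FileSystem interface.
func writeAndStat(ctx context.Context, fs davServer.FileSystem) error {
    if err := fs.Mkdir(ctx, "/docs", 0o755); err != nil {
        return err
    }
    f, err := fs.OpenFile(ctx, "/docs/hello.txt", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
    if err != nil {
        return err
    }
    if _, err := f.Write([]byte("hello zrok drives")); err != nil {
        f.Close()
        return err
    }
    if err := f.Close(); err != nil {
        return err
    }
    fi, err := fs.Stat(ctx, "/docs/hello.txt")
    if err != nil {
        return err
    }
    fmt.Println(fi.Name(), fi.Size()) // hello.txt 17
    return nil
}

func main() {
    ctx := context.Background()

    // In-memory filesystem: nothing touches the disk.
    if err := writeAndStat(ctx, davServer.NewMemFS()); err != nil {
        panic(err)
    }

    // On-disk filesystem rooted at a temporary directory.
    root, err := os.MkdirTemp("", "dav-example")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(root)
    if err := writeAndStat(ctx, davServer.Dir(root)); err != nil {
        panic(err)
    }
}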

File diff suppressed because it is too large

173
drives/davServer/if.go Normal file

@ -0,0 +1,173 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
// The If header is covered by Section 10.4.
// http://www.webdav.org/specs/rfc4918.html#HEADER_If
import (
"strings"
)
// ifHeader is a disjunction (OR) of ifLists.
type ifHeader struct {
lists []ifList
}
// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
type ifList struct {
resourceTag string
conditions []Condition
}
// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
// returned by req.Header.Get("If") for an http.Request req.
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
s := strings.TrimSpace(httpHeader)
switch tokenType, _, _ := lex(s); tokenType {
case '(':
return parseNoTagLists(s)
case angleTokenType:
return parseTaggedLists(s)
default:
return ifHeader{}, false
}
}
func parseNoTagLists(s string) (h ifHeader, ok bool) {
for {
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
}
}
func parseTaggedLists(s string) (h ifHeader, ok bool) {
resourceTag, n := "", 0
for first := true; ; first = false {
tokenType, tokenStr, remaining := lex(s)
switch tokenType {
case angleTokenType:
if !first && n == 0 {
return ifHeader{}, false
}
resourceTag, n = tokenStr, 0
s = remaining
case '(':
n++
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
l.resourceTag = resourceTag
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
default:
return ifHeader{}, false
}
}
}
func parseList(s string) (l ifList, remaining string, ok bool) {
tokenType, _, s := lex(s)
if tokenType != '(' {
return ifList{}, "", false
}
for {
tokenType, _, remaining = lex(s)
if tokenType == ')' {
if len(l.conditions) == 0 {
return ifList{}, "", false
}
return l, remaining, true
}
c, remaining, ok := parseCondition(s)
if !ok {
return ifList{}, "", false
}
l.conditions = append(l.conditions, c)
s = remaining
}
}
func parseCondition(s string) (c Condition, remaining string, ok bool) {
tokenType, tokenStr, s := lex(s)
if tokenType == notTokenType {
c.Not = true
tokenType, tokenStr, s = lex(s)
}
switch tokenType {
case strTokenType, angleTokenType:
c.Token = tokenStr
case squareTokenType:
c.ETag = tokenStr
default:
return Condition{}, "", false
}
return c, s, true
}
// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
errTokenType = rune(-1)
eofTokenType = rune(-2)
strTokenType = rune(-3)
notTokenType = rune(-4)
angleTokenType = rune(-5)
squareTokenType = rune(-6)
)
func lex(s string) (tokenType rune, tokenStr string, remaining string) {
// The net/textproto Reader that parses the HTTP header will collapse
// Linear White Space that spans multiple "\r\n" lines to a single " ",
// so we don't need to look for '\r' or '\n'.
for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
s = s[1:]
}
if len(s) == 0 {
return eofTokenType, "", ""
}
i := 0
loop:
for ; i < len(s); i++ {
switch s[i] {
case '\t', ' ', '(', ')', '<', '>', '[', ']':
break loop
}
}
if i != 0 {
tokenStr, remaining = s[:i], s[i:]
if tokenStr == "Not" {
return notTokenType, "", remaining
}
return strTokenType, tokenStr, remaining
}
j := 0
switch s[0] {
case '<':
j, tokenType = strings.IndexByte(s, '>'), angleTokenType
case '[':
j, tokenType = strings.IndexByte(s, ']'), squareTokenType
default:
return rune(s[0]), "", s[1:]
}
if j < 0 {
return errTokenType, "", ""
}
return tokenType, s[1:j], s[j+1:]
}
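A compact illustration of the lexer and parser above (the comprehensive table-driven cases live in if_test.go, which follows). Because parseIfHeader is unexported, a sketch like this would have to sit inside package davServer; the lock token and ETag are illustrative:

package davServer

import "fmt"

// sketchIfHeader parses a typical no-tag-list If header: the request may
// proceed if it holds lock token urn:uuid:a, or if the ETag is not "x".
func sketchIfHeader() {
    h, ok := parseIfHeader(`(<urn:uuid:a>) (Not ["x"])`)
    if !ok {
        fmt.Println("parse failed")
        return
    }
    for _, l := range h.lists {
        for _, c := range l.conditions {
            fmt.Printf("not=%v token=%q etag=%q\n", c.Not, c.Token, c.ETag)
        }
    }
    // Prints:
    // not=false token="urn:uuid:a" etag=""
    // not=true token="" etag="\"x\""
}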

322
drives/davServer/if_test.go Normal file

@ -0,0 +1,322 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"reflect"
"strings"
"testing"
)
func TestParseIfHeader(t *testing.T) {
// The "section x.y.z" test cases come from section x.y.z of the spec at
// http://www.webdav.org/specs/rfc4918.html
testCases := []struct {
desc string
input string
want ifHeader
}{{
"bad: empty",
``,
ifHeader{},
}, {
"bad: no parens",
`foobar`,
ifHeader{},
}, {
"bad: empty list #1",
`()`,
ifHeader{},
}, {
"bad: empty list #2",
`(a) (b c) () (d)`,
ifHeader{},
}, {
"bad: no list after resource #1",
`<foo>`,
ifHeader{},
}, {
"bad: no list after resource #2",
`<foo> <bar> (a)`,
ifHeader{},
}, {
"bad: no list after resource #3",
`<foo> (a) (b) <bar>`,
ifHeader{},
}, {
"bad: no-tag-list followed by tagged-list",
`(a) (b) <foo> (c)`,
ifHeader{},
}, {
"bad: unfinished list",
`(a`,
ifHeader{},
}, {
"bad: unfinished ETag",
`([b`,
ifHeader{},
}, {
"bad: unfinished Notted list",
`(Not a`,
ifHeader{},
}, {
"bad: double Not",
`(Not Not a)`,
ifHeader{},
}, {
"good: one list with a Token",
`(a)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `a`,
}},
}},
},
}, {
"good: one list with an ETag",
`([a])`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
ETag: `a`,
}},
}},
},
}, {
"good: one list with three Nots",
`(Not a Not b Not [d])`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Not: true,
Token: `a`,
}, {
Not: true,
Token: `b`,
}, {
Not: true,
ETag: `d`,
}},
}},
},
}, {
"good: two lists",
`(a) (b)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `a`,
}},
}, {
conditions: []Condition{{
Token: `b`,
}},
}},
},
}, {
"good: two Notted lists",
`(Not a) (Not b)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Not: true,
Token: `a`,
}},
}, {
conditions: []Condition{{
Not: true,
Token: `b`,
}},
}},
},
}, {
"section 7.5.1",
`<http://www.example.com/users/f/fielding/index.html>
(<urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://www.example.com/users/f/fielding/index.html`,
conditions: []Condition{{
Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`,
}},
}},
},
}, {
"section 7.5.2 #1",
`(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
}},
}},
},
}, {
"section 7.5.2 #2",
`<http://example.com/locked/>
(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://example.com/locked/`,
conditions: []Condition{{
Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
}},
}},
},
}, {
"section 7.5.2 #3",
`<http://example.com/locked/member>
(<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://example.com/locked/member`,
conditions: []Condition{{
Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`,
}},
}},
},
}, {
"section 9.9.6",
`(<urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4>)
(<urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`,
}},
}, {
conditions: []Condition{{
Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`,
}},
}},
},
}, {
"section 9.10.8",
`(<urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`,
}},
}},
},
}, {
"section 10.4.6",
`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
["I am an ETag"])
(["I am another ETag"])`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}, {
ETag: `"I am an ETag"`,
}},
}, {
conditions: []Condition{{
ETag: `"I am another ETag"`,
}},
}},
},
}, {
"section 10.4.7",
`(Not <urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
<urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Not: true,
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}, {
Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`,
}},
}},
},
}, {
"section 10.4.8",
`(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)
(Not <DAV:no-lock>)`,
ifHeader{
lists: []ifList{{
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}},
}, {
conditions: []Condition{{
Not: true,
Token: `DAV:no-lock`,
}},
}},
},
}, {
"section 10.4.9",
`</resource1>
(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>
[W/"A weak ETag"]) (["strong ETag"])`,
ifHeader{
lists: []ifList{{
resourceTag: `/resource1`,
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}, {
ETag: `W/"A weak ETag"`,
}},
}, {
resourceTag: `/resource1`,
conditions: []Condition{{
ETag: `"strong ETag"`,
}},
}},
},
}, {
"section 10.4.10",
`<http://www.example.com/specs/>
(<urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2>)`,
ifHeader{
lists: []ifList{{
resourceTag: `http://www.example.com/specs/`,
conditions: []Condition{{
Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`,
}},
}},
},
}, {
"section 10.4.11 #1",
`</specs/rfc2518.doc> (["4217"])`,
ifHeader{
lists: []ifList{{
resourceTag: `/specs/rfc2518.doc`,
conditions: []Condition{{
ETag: `"4217"`,
}},
}},
},
}, {
"section 10.4.11 #2",
`</specs/rfc2518.doc> (Not ["4217"])`,
ifHeader{
lists: []ifList{{
resourceTag: `/specs/rfc2518.doc`,
conditions: []Condition{{
Not: true,
ETag: `"4217"`,
}},
}},
},
}}
for _, tc := range testCases {
got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1))
if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok {
t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok)
continue
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want)
continue
}
}
}


@ -0,0 +1,11 @@
This is a fork of the encoding/xml package at ca1d6c4, the last commit before
https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
space behavior" made late in the lead-up to the Go 1.5 release.
The list of encoding/xml changes is at
https://go.googlesource.com/go/+log/master/src/encoding/xml
This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
released.
See http://golang.org/issue/11841


@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import "time"
var atomValue = &Feed{
XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
Title: "Example Feed",
Link: []Link{{Href: "http://example.org/"}},
Updated: ParseTime("2003-12-13T18:30:02Z"),
Author: Person{Name: "John Doe"},
Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6",
Entry: []Entry{
{
Title: "Atom-Powered Robots Run Amok",
Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}},
Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
Updated: ParseTime("2003-12-13T18:30:02Z"),
Summary: NewText("Some text."),
},
},
}
var atomXml = `` +
`<feed xmlns="http://www.w3.org/2005/Atom" updated="2003-12-13T18:30:02Z">` +
`<title>Example Feed</title>` +
`<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>` +
`<link href="http://example.org/"></link>` +
`<author><name>John Doe</name><uri></uri><email></email></author>` +
`<entry>` +
`<title>Atom-Powered Robots Run Amok</title>` +
`<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>` +
`<link href="http://example.org/2003/12/13/atom03"></link>` +
`<updated>2003-12-13T18:30:02Z</updated>` +
`<author><name></name><uri></uri><email></email></author>` +
`<summary>Some text.</summary>` +
`</entry>` +
`</feed>`
func ParseTime(str string) time.Time {
t, err := time.Parse(time.RFC3339, str)
if err != nil {
panic(err)
}
return t
}
func NewText(text string) Text {
return Text{
Body: text,
}
}


@ -0,0 +1,151 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml_test
import (
"encoding/xml"
"fmt"
"os"
)
func ExampleMarshalIndent() {
type Address struct {
City, State string
}
type Person struct {
XMLName xml.Name `xml:"person"`
Id int `xml:"id,attr"`
FirstName string `xml:"name>first"`
LastName string `xml:"name>last"`
Age int `xml:"age"`
Height float32 `xml:"height,omitempty"`
Married bool
Address
Comment string `xml:",comment"`
}
v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
v.Comment = " Need more details. "
v.Address = Address{"Hanga Roa", "Easter Island"}
output, err := xml.MarshalIndent(v, " ", " ")
if err != nil {
fmt.Printf("error: %v\n", err)
}
os.Stdout.Write(output)
// Output:
// <person id="13">
// <name>
// <first>John</first>
// <last>Doe</last>
// </name>
// <age>42</age>
// <Married>false</Married>
// <City>Hanga Roa</City>
// <State>Easter Island</State>
// <!-- Need more details. -->
// </person>
}
func ExampleEncoder() {
type Address struct {
City, State string
}
type Person struct {
XMLName xml.Name `xml:"person"`
Id int `xml:"id,attr"`
FirstName string `xml:"name>first"`
LastName string `xml:"name>last"`
Age int `xml:"age"`
Height float32 `xml:"height,omitempty"`
Married bool
Address
Comment string `xml:",comment"`
}
v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42}
v.Comment = " Need more details. "
v.Address = Address{"Hanga Roa", "Easter Island"}
enc := xml.NewEncoder(os.Stdout)
enc.Indent(" ", " ")
if err := enc.Encode(v); err != nil {
fmt.Printf("error: %v\n", err)
}
// Output:
// <person id="13">
// <name>
// <first>John</first>
// <last>Doe</last>
// </name>
// <age>42</age>
// <Married>false</Married>
// <City>Hanga Roa</City>
// <State>Easter Island</State>
// <!-- Need more details. -->
// </person>
}
// This example demonstrates unmarshaling an XML excerpt into a value with
// some preset fields. Note that the Phone field isn't modified and that
// the XML <Company> element is ignored. Also, the Groups field is assigned
// considering the element path provided in its tag.
func ExampleUnmarshal() {
type Email struct {
Where string `xml:"where,attr"`
Addr string
}
type Address struct {
City, State string
}
type Result struct {
XMLName xml.Name `xml:"Person"`
Name string `xml:"FullName"`
Phone string
Email []Email
Groups []string `xml:"Group>Value"`
Address
}
v := Result{Name: "none", Phone: "none"}
data := `
<Person>
<FullName>Grace R. Emlin</FullName>
<Company>Example Inc.</Company>
<Email where="home">
<Addr>gre@example.com</Addr>
</Email>
<Email where='work'>
<Addr>gre@work.com</Addr>
</Email>
<Group>
<Value>Friends</Value>
<Value>Squash</Value>
</Group>
<City>Hanga Roa</City>
<State>Easter Island</State>
</Person>
`
err := xml.Unmarshal([]byte(data), &v)
if err != nil {
fmt.Printf("error: %v", err)
return
}
fmt.Printf("XMLName: %#v\n", v.XMLName)
fmt.Printf("Name: %q\n", v.Name)
fmt.Printf("Phone: %q\n", v.Phone)
fmt.Printf("Email: %v\n", v.Email)
fmt.Printf("Groups: %v\n", v.Groups)
fmt.Printf("Address: %v\n", v.Address)
// Output:
// XMLName: xml.Name{Space:"", Local:"Person"}
// Name: "Grace R. Emlin"
// Phone: "none"
// Email: [{home gre@example.com} {work gre@work.com}]
// Groups: [Friends Squash]
// Address: {Hanga Roa Easter Island}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,691 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See package json for a textual representation more suitable
// to data structures.
// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
//
// Unmarshal maps an XML element to a struct using the following rules.
// In the rules, the tag of a field refers to the value associated with the
// key 'xml' in the struct field's tag (see the example above).
//
// - If the struct has a field of type []byte or string with tag
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
// element in that field. The rest of the rules still apply.
//
// - If the struct has a field named XMLName of type xml.Name,
// Unmarshal records the element name in that field.
//
// - If the XMLName field has an associated tag of the form
// "name" or "namespace-URL name", the XML element must have
// the given name (and, optionally, name space) or else Unmarshal
// returns an error.
//
// - If the XML element has an attribute whose name matches a
// struct field name with an associated tag containing ",attr" or
// the explicit name in a struct field tag of the form "name,attr",
// Unmarshal records the attribute value in that field.
//
// - If the XML element contains character data, that data is
// accumulated in the first struct field that has tag ",chardata".
// The struct field may have type []byte or string.
// If there is no such field, the character data is discarded.
//
// - If the XML element contains comments, they are accumulated in
// the first struct field that has tag ",comment". The struct
// field may have type []byte or string. If there is no such
// field, the comments are discarded.
//
// - If the XML element contains a sub-element whose name matches
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
// will descend into the XML structure looking for elements with the
// given names, and will map the innermost elements to that struct
// field. A tag starting with ">" is equivalent to one starting
// with the field name followed by ">".
//
// - If the XML element contains a sub-element whose name matches
// a struct field's XMLName tag and the struct field has no
// explicit name tag as per the previous rule, unmarshal maps
// the sub-element to that struct field.
//
// - If the XML element contains a sub-element whose name matches a
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
// maps the sub-element to that struct field.
//
// - If the XML element contains a sub-element that hasn't matched any
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// - An anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
//
// - A struct field with tag "-" is never unmarshalled into.
//
// Unmarshal maps an XML element to a string or []byte by saving the
// concatenation of that element's character data in the string or
// []byte. The saved []byte is never nil.
//
// Unmarshal maps an attribute value to a string or []byte by saving
// the value in the string or slice.
//
// Unmarshal maps an XML element to a slice by extending the length of
// the slice and mapping the element to the newly created value.
//
// Unmarshal maps an XML element or attribute value to a bool by
// setting it to the boolean value represented by the string.
//
// Unmarshal maps an XML element or attribute value to an integer or
// floating-point field by setting the field to the result of
// interpreting the string value in decimal. There is no check for
// overflow.
//
// Unmarshal maps an XML element to an xml.Name by recording the
// element name.
//
// Unmarshal maps an XML element to a pointer by setting the pointer
// to a freshly allocated value and then mapping the element to that value.
func Unmarshal(data []byte, v interface{}) error {
return NewDecoder(bytes.NewReader(data)).Decode(v)
}
// Decode works like xml.Unmarshal, except it reads the decoder
// stream to find the start element.
func (d *Decoder) Decode(v interface{}) error {
return d.DecodeElement(v, nil)
}
// DecodeElement works like xml.Unmarshal except that it takes
// a pointer to the start XML element to decode into v.
// It is useful when a client reads some raw XML tokens itself
// but also wants to defer to Unmarshal for some elements.
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
val := reflect.ValueOf(v)
if val.Kind() != reflect.Ptr {
return errors.New("non-pointer passed to Unmarshal")
}
return d.unmarshal(val.Elem(), start)
}
// An UnmarshalError represents an error in the unmarshalling process.
type UnmarshalError string
func (e UnmarshalError) Error() string { return string(e) }
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
UnmarshalXML(d *Decoder, start StartElement) error
}
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type UnmarshalerAttr interface {
UnmarshalXMLAttr(attr Attr) error
}
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
func receiverType(val interface{}) string {
t := reflect.TypeOf(val)
if t.Name() != "" {
return t.String()
}
return "(" + t.String() + ")"
}
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
// Record that decoder must stop at end tag corresponding to start.
p.pushEOF()
p.unmarshalDepth++
err := val.UnmarshalXML(p, *start)
p.unmarshalDepth--
if err != nil {
p.popEOF()
return err
}
if !p.popEOF() {
return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
}
return nil
}
// unmarshalTextInterface unmarshals a single XML element into val.
// The chardata contained in the element (but not its children)
// is passed to the text unmarshaler.
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
var buf []byte
depth := 1
for depth > 0 {
t, err := p.Token()
if err != nil {
return err
}
switch t := t.(type) {
case CharData:
if depth == 1 {
buf = append(buf, t...)
}
case StartElement:
depth++
case EndElement:
depth--
}
}
return val.UnmarshalText(buf)
}
// unmarshalAttr unmarshals a single XML attribute into val.
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
}
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
}
copyValue(val, []byte(attr.Value))
return nil
}
var (
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Unmarshal a single XML element into val.
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
// Find start element if we need it.
if start == nil {
for {
tok, err := p.Token()
if err != nil {
return err
}
if t, ok := tok.(StartElement); ok {
start = &t
break
}
}
}
// Load value from interface, but only if the result will be
// usefully addressable.
if val.Kind() == reflect.Interface && !val.IsNil() {
e := val.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() {
val = e
}
}
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
}
}
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
}
}
var (
data []byte
saveData reflect.Value
comment []byte
saveComment reflect.Value
saveXML reflect.Value
saveXMLIndex int
saveXMLData []byte
saveAny reflect.Value
sv reflect.Value
tinfo *typeInfo
err error
)
switch v := val; v.Kind() {
default:
return errors.New("unknown type " + v.Type().String())
case reflect.Interface:
// TODO: For now, simply ignore the field. In the near
// future we may choose to unmarshal the start
// element on it, if not nil.
return p.Skip()
case reflect.Slice:
typ := v.Type()
if typ.Elem().Kind() == reflect.Uint8 {
// []byte
saveData = v
break
}
// Slice of element values.
// Grow slice.
n := v.Len()
if n >= v.Cap() {
ncap := 2 * n
if ncap < 4 {
ncap = 4
}
new := reflect.MakeSlice(typ, n, ncap)
reflect.Copy(new, v)
v.Set(new)
}
v.SetLen(n + 1)
// Recur to read element into slice.
if err := p.unmarshal(v.Index(n), start); err != nil {
v.SetLen(n)
return err
}
return nil
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
saveData = v
case reflect.Struct:
typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
}
// Validate and assign element name.
if tinfo.xmlname != nil {
finfo := tinfo.xmlname
if finfo.name != "" && finfo.name != start.Name.Local {
return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
}
if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
if start.Name.Space == "" {
e += "no name space"
} else {
e += start.Name.Space
}
return UnmarshalError(e)
}
fv := finfo.value(sv)
if _, ok := fv.Interface().(Name); ok {
fv.Set(reflect.ValueOf(start.Name))
}
}
// Assign attributes.
// Also, determine whether we need to save character data or comments.
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
switch finfo.flags & fMode {
case fAttr:
strv := finfo.value(sv)
// Look for attribute.
for _, a := range start.Attr {
if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
if err := p.unmarshalAttr(strv, a); err != nil {
return err
}
break
}
}
case fCharData:
if !saveData.IsValid() {
saveData = finfo.value(sv)
}
case fComment:
if !saveComment.IsValid() {
saveComment = finfo.value(sv)
}
case fAny, fAny | fElement:
if !saveAny.IsValid() {
saveAny = finfo.value(sv)
}
case fInnerXml:
if !saveXML.IsValid() {
saveXML = finfo.value(sv)
if p.saved == nil {
saveXMLIndex = 0
p.saved = new(bytes.Buffer)
} else {
saveXMLIndex = p.savedOffset()
}
}
}
}
}
// Find end element.
// Process sub-elements along the way.
Loop:
for {
var savedOffset int
if saveXML.IsValid() {
savedOffset = p.savedOffset()
}
tok, err := p.Token()
if err != nil {
return err
}
switch t := tok.(type) {
case StartElement:
consumed := false
if sv.IsValid() {
consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
if err != nil {
return err
}
if !consumed && saveAny.IsValid() {
consumed = true
if err := p.unmarshal(saveAny, &t); err != nil {
return err
}
}
}
if !consumed {
if err := p.Skip(); err != nil {
return err
}
}
case EndElement:
if saveXML.IsValid() {
saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
if saveXMLIndex == 0 {
p.saved = nil
}
}
break Loop
case CharData:
if saveData.IsValid() {
data = append(data, t...)
}
case Comment:
if saveComment.IsValid() {
comment = append(comment, t...)
}
}
}
if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
if saveData.IsValid() && saveData.CanAddr() {
pv := saveData.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
}
if err := copyValue(saveData, data); err != nil {
return err
}
switch t := saveComment; t.Kind() {
case reflect.String:
t.SetString(string(comment))
case reflect.Slice:
t.Set(reflect.ValueOf(comment))
}
switch t := saveXML; t.Kind() {
case reflect.String:
t.SetString(string(saveXMLData))
case reflect.Slice:
t.Set(reflect.ValueOf(saveXMLData))
}
return nil
}
func copyValue(dst reflect.Value, src []byte) (err error) {
dst0 := dst
if dst.Kind() == reflect.Ptr {
if dst.IsNil() {
dst.Set(reflect.New(dst.Type().Elem()))
}
dst = dst.Elem()
}
// Save accumulated data.
switch dst.Kind() {
case reflect.Invalid:
// Probably a comment.
default:
return errors.New("cannot unmarshal into " + dst0.Type().String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetInt(itmp)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetUint(utmp)
case reflect.Float32, reflect.Float64:
ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
if err != nil {
return err
}
dst.SetFloat(ftmp)
case reflect.Bool:
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
if err != nil {
return err
}
dst.SetBool(value)
case reflect.String:
dst.SetString(string(src))
case reflect.Slice:
if len(src) == 0 {
// non-nil to flag presence
src = []byte{}
}
dst.SetBytes(src)
}
return nil
}
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
recurse := false
Loop:
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
continue
}
for j := range parents {
if parents[j] != finfo.parents[j] {
continue Loop
}
}
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field.
return true, p.unmarshal(finfo.value(sv), start)
}
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse
// since it's not ok for one field path to be itself
// the prefix for another field path.
recurse = true
// We can reuse the same slice as long as we
// don't try to append to it.
parents = finfo.parents[:len(parents)+1]
break
}
}
if !recurse {
// We have no business with this element.
return false, nil
}
// The element is not a perfect match for any field, but one
// or more fields have the path to this element as a parent
// prefix. Recurse and attempt to match these.
for {
var tok Token
tok, err = p.Token()
if err != nil {
return true, err
}
switch t := tok.(type) {
case StartElement:
consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
if err != nil {
return true, err
}
if !consumed2 {
if err := p.Skip(); err != nil {
return true, err
}
}
case EndElement:
return true, nil
}
}
}
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed.
// It recurs if it encounters a start element, so it can be used to
// skip nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
for {
tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
if err := d.Skip(); err != nil {
return err
}
case EndElement:
return nil
}
}
}
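// Illustrative sketch (hypothetical helper, not defined elsewhere in this
// package): Skip, documented above, is what callers use to discard whole
// sub-trees they do not care about. The loop below keeps the character data
// of <keep> elements and skips every other element wholesale; it assumes the
// exported Token, Skip and DecodeElement methods of this Decoder. Callers
// treat io.EOF from the returned error as a clean end of input.
func collectKeep(d *Decoder) ([]string, error) {
	var out []string
	for {
		tok, err := d.Token()
		if err != nil {
			return out, err
		}
		se, ok := tok.(StartElement)
		if !ok {
			continue
		}
		if se.Name.Local != "keep" {
			// Consume everything up to and including the matching end element.
			if err := d.Skip(); err != nil {
				return nil, err
			}
			continue
		}
		var s string
		if err := d.DecodeElement(&s, &se); err != nil {
			return nil, err
		}
		out = append(out, s)
	}
}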

View File

@ -0,0 +1,744 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
"testing"
"time"
)
// Stripped down Atom feed data structures.
func TestUnmarshalFeed(t *testing.T) {
var f Feed
if err := Unmarshal([]byte(atomFeedString), &f); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if !reflect.DeepEqual(f, atomFeed) {
t.Fatalf("have %#v\nwant %#v", f, atomFeed)
}
}
// hget http://codereview.appspot.com/rss/mine/rsc
const atomFeedString = `
<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-us" updated="2009-10-04T01:35:58+00:00"><title>Code Review - My issues</title><link href="http://codereview.appspot.com/" rel="alternate"></link><link href="http://codereview.appspot.com/rss/mine/rsc" rel="self"></link><id>http://codereview.appspot.com/</id><author><name>rietveld&lt;&gt;</name></author><entry><title>rietveld: an attempt at pubsubhubbub
</title><link href="http://codereview.appspot.com/126085" rel="alternate"></link><updated>2009-10-04T01:35:58+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:134d9179c41f806be79b3a5f7877d19a</id><summary type="html">
An attempt at adding pubsubhubbub support to Rietveld.
http://code.google.com/p/pubsubhubbub
http://code.google.com/p/rietveld/issues/detail?id=155
The server side of the protocol is trivial:
1. add a &amp;lt;link rel=&amp;quot;hub&amp;quot; href=&amp;quot;hub-server&amp;quot;&amp;gt; tag to all
feeds that will be pubsubhubbubbed.
2. every time one of those feeds changes, tell the hub
with a simple POST request.
I have tested this by adding debug prints to a local hub
server and checking that the server got the right publish
requests.
I can&amp;#39;t quite get the server to work, but I think the bug
is not in my code. I think that the server expects to be
able to grab the feed and see the feed&amp;#39;s actual URL in
the link rel=&amp;quot;self&amp;quot;, but the default value for that drops
the :port from the URL, and I cannot for the life of me
figure out how to get the Atom generator deep inside
django not to do that, or even where it is doing that,
or even what code is running to generate the Atom feed.
(I thought I knew but I added some assert False statements
and it kept running!)
Ignoring that particular problem, I would appreciate
feedback on the right way to get the two values at
the top of feeds.py marked NOTE(rsc).
</summary></entry><entry><title>rietveld: correct tab handling
</title><link href="http://codereview.appspot.com/124106" rel="alternate"></link><updated>2009-10-03T23:02:17+00:00</updated><author><name>email-address-removed</name></author><id>urn:md5:0a2a4f19bb815101f0ba2904aed7c35a</id><summary type="html">
This fixes the buggy tab rendering that can be seen at
http://codereview.appspot.com/116075/diff/1/2
The fundamental problem was that the tab code was
not being told what column the text began in, so it
didn&amp;#39;t know where to put the tab stops. Another problem
was that some of the code assumed that string byte
offsets were the same as column offsets, which is only
true if there are no tabs.
In the process of fixing this, I cleaned up the arguments
to Fold and ExpandTabs and renamed them Break and
_ExpandTabs so that I could be sure that I found all the
call sites. I also wanted to verify that ExpandTabs was
not being used from outside intra_region_diff.py.
</summary></entry></feed> `
type Feed struct {
XMLName Name `xml:"http://www.w3.org/2005/Atom feed"`
Title string `xml:"title"`
Id string `xml:"id"`
Link []Link `xml:"link"`
Updated time.Time `xml:"updated,attr"`
Author Person `xml:"author"`
Entry []Entry `xml:"entry"`
}
type Entry struct {
Title string `xml:"title"`
Id string `xml:"id"`
Link []Link `xml:"link"`
Updated time.Time `xml:"updated"`
Author Person `xml:"author"`
Summary Text `xml:"summary"`
}
type Link struct {
Rel string `xml:"rel,attr,omitempty"`
Href string `xml:"href,attr"`
}
type Person struct {
Name string `xml:"name"`
URI string `xml:"uri"`
Email string `xml:"email"`
InnerXML string `xml:",innerxml"`
}
type Text struct {
Type string `xml:"type,attr,omitempty"`
Body string `xml:",chardata"`
}
var atomFeed = Feed{
XMLName: Name{"http://www.w3.org/2005/Atom", "feed"},
Title: "Code Review - My issues",
Link: []Link{
{Rel: "alternate", Href: "http://codereview.appspot.com/"},
{Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"},
},
Id: "http://codereview.appspot.com/",
Updated: ParseTime("2009-10-04T01:35:58+00:00"),
Author: Person{
Name: "rietveld<>",
InnerXML: "<name>rietveld&lt;&gt;</name>",
},
Entry: []Entry{
{
Title: "rietveld: an attempt at pubsubhubbub\n",
Link: []Link{
{Rel: "alternate", Href: "http://codereview.appspot.com/126085"},
},
Updated: ParseTime("2009-10-04T01:35:58+00:00"),
Author: Person{
Name: "email-address-removed",
InnerXML: "<name>email-address-removed</name>",
},
Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a",
Summary: Text{
Type: "html",
Body: `
An attempt at adding pubsubhubbub support to Rietveld.
http://code.google.com/p/pubsubhubbub
http://code.google.com/p/rietveld/issues/detail?id=155
The server side of the protocol is trivial:
1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all
feeds that will be pubsubhubbubbed.
2. every time one of those feeds changes, tell the hub
with a simple POST request.
I have tested this by adding debug prints to a local hub
server and checking that the server got the right publish
requests.
I can&#39;t quite get the server to work, but I think the bug
is not in my code. I think that the server expects to be
able to grab the feed and see the feed&#39;s actual URL in
the link rel=&quot;self&quot;, but the default value for that drops
the :port from the URL, and I cannot for the life of me
figure out how to get the Atom generator deep inside
django not to do that, or even where it is doing that,
or even what code is running to generate the Atom feed.
(I thought I knew but I added some assert False statements
and it kept running!)
Ignoring that particular problem, I would appreciate
feedback on the right way to get the two values at
the top of feeds.py marked NOTE(rsc).
`,
},
},
{
Title: "rietveld: correct tab handling\n",
Link: []Link{
{Rel: "alternate", Href: "http://codereview.appspot.com/124106"},
},
Updated: ParseTime("2009-10-03T23:02:17+00:00"),
Author: Person{
Name: "email-address-removed",
InnerXML: "<name>email-address-removed</name>",
},
Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a",
Summary: Text{
Type: "html",
Body: `
This fixes the buggy tab rendering that can be seen at
http://codereview.appspot.com/116075/diff/1/2
The fundamental problem was that the tab code was
not being told what column the text began in, so it
didn&#39;t know where to put the tab stops. Another problem
was that some of the code assumed that string byte
offsets were the same as column offsets, which is only
true if there are no tabs.
In the process of fixing this, I cleaned up the arguments
to Fold and ExpandTabs and renamed them Break and
_ExpandTabs so that I could be sure that I found all the
call sites. I also wanted to verify that ExpandTabs was
not being used from outside intra_region_diff.py.
`,
},
},
},
}
const pathTestString = `
<Result>
<Before>1</Before>
<Items>
<Item1>
<Value>A</Value>
</Item1>
<Item2>
<Value>B</Value>
</Item2>
<Item1>
<Value>C</Value>
<Value>D</Value>
</Item1>
<_>
<Value>E</Value>
</_>
</Items>
<After>2</After>
</Result>
`
type PathTestItem struct {
Value string
}
type PathTestA struct {
Items []PathTestItem `xml:">Item1"`
Before, After string
}
type PathTestB struct {
Other []PathTestItem `xml:"Items>Item1"`
Before, After string
}
type PathTestC struct {
Values1 []string `xml:"Items>Item1>Value"`
Values2 []string `xml:"Items>Item2>Value"`
Before, After string
}
type PathTestSet struct {
Item1 []PathTestItem
}
type PathTestD struct {
Other PathTestSet `xml:"Items"`
Before, After string
}
type PathTestE struct {
Underline string `xml:"Items>_>Value"`
Before, After string
}
var pathTests = []interface{}{
&PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
&PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"},
&PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"},
&PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"},
&PathTestE{Underline: "E", Before: "1", After: "2"},
}
func TestUnmarshalPaths(t *testing.T) {
for _, pt := range pathTests {
v := reflect.New(reflect.TypeOf(pt).Elem()).Interface()
if err := Unmarshal([]byte(pathTestString), v); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if !reflect.DeepEqual(v, pt) {
t.Fatalf("have %#v\nwant %#v", v, pt)
}
}
}
type BadPathTestA struct {
First string `xml:"items>item1"`
Other string `xml:"items>item2"`
Second string `xml:"items"`
}
type BadPathTestB struct {
Other string `xml:"items>item2>value"`
First string `xml:"items>item1"`
Second string `xml:"items>item1>value"`
}
type BadPathTestC struct {
First string
Second string `xml:"First"`
}
type BadPathTestD struct {
BadPathEmbeddedA
BadPathEmbeddedB
}
type BadPathEmbeddedA struct {
First string
}
type BadPathEmbeddedB struct {
Second string `xml:"First"`
}
var badPathTests = []struct {
v, e interface{}
}{
{&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}},
{&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}},
{&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}},
{&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}},
}
func TestUnmarshalBadPaths(t *testing.T) {
for _, tt := range badPathTests {
err := Unmarshal([]byte(pathTestString), tt.v)
if !reflect.DeepEqual(err, tt.e) {
t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e)
}
}
}
const OK = "OK"
const withoutNameTypeData = `
<?xml version="1.0" charset="utf-8"?>
<Test3 Attr="OK" />`
type TestThree struct {
XMLName Name `xml:"Test3"`
Attr string `xml:",attr"`
}
func TestUnmarshalWithoutNameType(t *testing.T) {
var x TestThree
if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if x.Attr != OK {
t.Fatalf("have %v\nwant %v", x.Attr, OK)
}
}
func TestUnmarshalAttr(t *testing.T) {
type ParamVal struct {
Int int `xml:"int,attr"`
}
type ParamPtr struct {
Int *int `xml:"int,attr"`
}
type ParamStringPtr struct {
Int *string `xml:"int,attr"`
}
x := []byte(`<Param int="1" />`)
p1 := &ParamPtr{}
if err := Unmarshal(x, p1); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if p1.Int == nil {
t.Fatalf("Unmarshal failed in to *int field")
} else if *p1.Int != 1 {
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1)
}
p2 := &ParamVal{}
if err := Unmarshal(x, p2); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if p2.Int != 1 {
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1)
}
p3 := &ParamStringPtr{}
if err := Unmarshal(x, p3); err != nil {
t.Fatalf("Unmarshal: %s", err)
}
if p3.Int == nil {
t.Fatalf("Unmarshal failed in to *string field")
} else if *p3.Int != "1" {
t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1)
}
}
type Tables struct {
HTable string `xml:"http://www.w3.org/TR/html4/ table"`
FTable string `xml:"http://www.w3schools.com/furniture table"`
}
var tables = []struct {
xml string
tab Tables
ns string
}{
{
xml: `<Tables>` +
`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
`</Tables>`,
tab: Tables{"hello", "world"},
},
{
xml: `<Tables>` +
`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
`</Tables>`,
tab: Tables{"hello", "world"},
},
{
xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
`<f:table>world</f:table>` +
`<h:table>hello</h:table>` +
`</Tables>`,
tab: Tables{"hello", "world"},
},
{
xml: `<Tables>` +
`<table>bogus</table>` +
`</Tables>`,
tab: Tables{},
},
{
xml: `<Tables>` +
`<table>only</table>` +
`</Tables>`,
tab: Tables{HTable: "only"},
ns: "http://www.w3.org/TR/html4/",
},
{
xml: `<Tables>` +
`<table>only</table>` +
`</Tables>`,
tab: Tables{FTable: "only"},
ns: "http://www.w3schools.com/furniture",
},
{
xml: `<Tables>` +
`<table>only</table>` +
`</Tables>`,
tab: Tables{},
ns: "something else entirely",
},
}
func TestUnmarshalNS(t *testing.T) {
for i, tt := range tables {
var dst Tables
var err error
if tt.ns != "" {
d := NewDecoder(strings.NewReader(tt.xml))
d.DefaultSpace = tt.ns
err = d.Decode(&dst)
} else {
err = Unmarshal([]byte(tt.xml), &dst)
}
if err != nil {
t.Errorf("#%d: Unmarshal: %v", i, err)
continue
}
want := tt.tab
if dst != want {
t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
}
}
}
func TestRoundTrip(t *testing.T) {
// From issue 7535
const s = `<ex:element xmlns:ex="http://example.com/schema"></ex:element>`
in := bytes.NewBufferString(s)
for i := 0; i < 10; i++ {
out := &bytes.Buffer{}
d := NewDecoder(in)
e := NewEncoder(out)
for {
t, err := d.Token()
if err == io.EOF {
break
}
if err != nil {
fmt.Println("failed:", err)
return
}
e.EncodeToken(t)
}
e.Flush()
in = out
}
if got := in.String(); got != s {
t.Errorf("have: %q\nwant: %q\n", got, s)
}
}
func TestMarshalNS(t *testing.T) {
dst := Tables{"hello", "world"}
data, err := Marshal(&dst)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
want := `<Tables><table xmlns="http://www.w3.org/TR/html4/">hello</table><table xmlns="http://www.w3schools.com/furniture">world</table></Tables>`
str := string(data)
if str != want {
t.Errorf("have: %q\nwant: %q\n", str, want)
}
}
type TableAttrs struct {
TAttr TAttr
}
type TAttr struct {
HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"`
FTable string `xml:"http://www.w3schools.com/furniture table,attr"`
Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"`
Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"`
Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"`
Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"`
Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"`
}
var tableAttrs = []struct {
xml string
tab TableAttrs
ns string
}{
{
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
`h:table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
},
{
xml: `<TableAttrs><TAttr xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
`h:table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
},
{
xml: `<TableAttrs><TAttr ` +
`h:table="hello" f:table="world" xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}},
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
`h:table="hello" table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr xmlns="http://www.w3.org/TR/html4/" ` +
`table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
},
{
xml: `<TableAttrs><TAttr ` +
`table="bogus" ` +
`/></TableAttrs>`,
tab: TableAttrs{},
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns:h="http://www.w3.org/TR/html4/"><TAttr ` +
`h:table="hello" table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}},
ns: "http://www.w3schools.com/furniture",
},
{
// Default space does not apply to attribute names.
xml: `<TableAttrs xmlns:f="http://www.w3schools.com/furniture"><TAttr ` +
`table="hello" f:table="world" ` +
`/></TableAttrs>`,
tab: TableAttrs{TAttr{HTable: "", FTable: "world"}},
ns: "http://www.w3.org/TR/html4/",
},
{
xml: `<TableAttrs><TAttr ` +
`table="bogus" ` +
`/></TableAttrs>`,
tab: TableAttrs{},
ns: "something else entirely",
},
}
func TestUnmarshalNSAttr(t *testing.T) {
for i, tt := range tableAttrs {
var dst TableAttrs
var err error
if tt.ns != "" {
d := NewDecoder(strings.NewReader(tt.xml))
d.DefaultSpace = tt.ns
err = d.Decode(&dst)
} else {
err = Unmarshal([]byte(tt.xml), &dst)
}
if err != nil {
t.Errorf("#%d: Unmarshal: %v", i, err)
continue
}
want := tt.tab
if dst != want {
t.Errorf("#%d: dst=%+v, want %+v", i, dst, want)
}
}
}
func TestMarshalNSAttr(t *testing.T) {
src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}}
data, err := Marshal(&src)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
want := `<TableAttrs><TAttr xmlns:json_1="http://golang.org/2/json/" xmlns:json="http://golang.org/json/" xmlns:_xmlfoo="http://golang.org/xmlfoo/" xmlns:_xml="http://golang.org/xml/" xmlns:furniture="http://www.w3schools.com/furniture" xmlns:html4="http://www.w3.org/TR/html4/" html4:table="hello" furniture:table="world" xml:lang="en_US" _xml:other="other1" _xmlfoo:other="other2" json:other="other3" json_1:other="other4"></TAttr></TableAttrs>`
str := string(data)
if str != want {
t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want)
}
var dst TableAttrs
if err := Unmarshal(data, &dst); err != nil {
t.Errorf("Unmarshal: %v", err)
}
if dst != src {
t.Errorf("Unmarshal = %q, want %q", dst, src)
}
}
type MyCharData struct {
body string
}
func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error {
for {
t, err := d.Token()
if err == io.EOF { // found end of element
break
}
if err != nil {
return err
}
if char, ok := t.(CharData); ok {
m.body += string(char)
}
}
return nil
}
var _ Unmarshaler = (*MyCharData)(nil)
func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error {
panic("must not call")
}
type MyAttr struct {
attr string
}
func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error {
m.attr = attr.Value
return nil
}
var _ UnmarshalerAttr = (*MyAttr)(nil)
type MyStruct struct {
Data *MyCharData
Attr *MyAttr `xml:",attr"`
Data2 MyCharData
Attr2 MyAttr `xml:",attr"`
}
func TestUnmarshaler(t *testing.T) {
xml := `<?xml version="1.0" encoding="utf-8"?>
<MyStruct Attr="attr1" Attr2="attr2">
<Data>hello <!-- comment -->world</Data>
<Data2>howdy <!-- comment -->world</Data2>
</MyStruct>
`
var m MyStruct
if err := Unmarshal([]byte(xml), &m); err != nil {
t.Fatal(err)
}
if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" {
t.Errorf("m=%#+v\n", m)
}
}
type Pea struct {
Cotelydon string
}
type Pod struct {
Pea interface{} `xml:"Pea"`
}
// https://golang.org/issue/6836
func TestUnmarshalIntoInterface(t *testing.T) {
pod := new(Pod)
pod.Pea = new(Pea)
xml := `<Pod><Pea><Cotelydon>Green stuff</Cotelydon></Pea></Pod>`
err := Unmarshal([]byte(xml), pod)
if err != nil {
t.Fatalf("failed to unmarshal %q: %v", xml, err)
}
pea, ok := pod.Pea.(*Pea)
if !ok {
t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea)
}
have, want := pea.Cotelydon, "Green stuff"
if have != want {
t.Errorf("failed to unmarshal into interface, have %q want %q", have, want)
}
}

View File

@ -0,0 +1,371 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
xmlname *fieldInfo
fields []fieldInfo
}
// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
idx []int
name string
xmlns string
flags fieldFlags
parents []string
}
type fieldFlags int
const (
fElement fieldFlags = 1 << iota
fAttr
fCharData
fInnerXml
fComment
fAny
fOmitEmpty
fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
tinfoLock.RLock()
tinfo, ok := tinfoMap[typ]
tinfoLock.RUnlock()
if ok {
return tinfo, nil
}
tinfo = &typeInfo{}
if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField()
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
continue // Private field
}
// For embedded structs, embed its fields.
if f.Anonymous {
t := f.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() == reflect.Struct {
inner, err := getTypeInfo(t)
if err != nil {
return nil, err
}
if tinfo.xmlname == nil {
tinfo.xmlname = inner.xmlname
}
for _, finfo := range inner.fields {
finfo.idx = append([]int{i}, finfo.idx...)
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
return nil, err
}
}
continue
}
}
finfo, err := structFieldInfo(typ, &f)
if err != nil {
return nil, err
}
if f.Name == "XMLName" {
tinfo.xmlname = finfo
continue
}
// Add the field if it doesn't conflict with other fields.
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
return nil, err
}
}
}
tinfoLock.Lock()
tinfoMap[typ] = tinfo
tinfoLock.Unlock()
return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for f.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
finfo := &fieldInfo{idx: f.Index}
// Split the tag from the xml namespace if necessary.
tag := f.Tag.Get("xml")
if i := strings.Index(tag, " "); i >= 0 {
finfo.xmlns, tag = tag[:i], tag[i+1:]
}
// Parse flags.
tokens := strings.Split(tag, ",")
if len(tokens) == 1 {
finfo.flags = fElement
} else {
tag = tokens[0]
for _, flag := range tokens[1:] {
switch flag {
case "attr":
finfo.flags |= fAttr
case "chardata":
finfo.flags |= fCharData
case "innerxml":
finfo.flags |= fInnerXml
case "comment":
finfo.flags |= fComment
case "any":
finfo.flags |= fAny
case "omitempty":
finfo.flags |= fOmitEmpty
}
}
// Validate the flags used.
valid := true
switch mode := finfo.flags & fMode; mode {
case 0:
finfo.flags |= fElement
case fAttr, fCharData, fInnerXml, fComment, fAny:
if f.Name == "XMLName" || tag != "" && mode != fAttr {
valid = false
}
default:
// This will also catch multiple modes in a single field.
valid = false
}
if finfo.flags&fMode == fAny {
finfo.flags |= fElement
}
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
valid = false
}
if !valid {
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
}
// Use of xmlns without a name is not allowed.
if finfo.xmlns != "" && tag == "" {
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
if f.Name == "XMLName" {
// The XMLName field records the XML element name. Don't
// process it as usual because its name should default to
// empty rather than to the field name.
finfo.name = tag
return finfo, nil
}
if tag == "" {
// If the name part of the tag is completely empty, get
// default from XMLName of underlying struct if feasible,
// or field name otherwise.
if xmlname := lookupXMLName(f.Type); xmlname != nil {
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
} else {
finfo.name = f.Name
}
return finfo, nil
}
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
// If it's an element with no namespace specified, get the default
// from the XMLName of enclosing struct if possible.
if xmlname := lookupXMLName(typ); xmlname != nil {
finfo.xmlns = xmlname.xmlns
}
}
// Prepare field name and parents.
parents := strings.Split(tag, ">")
if parents[0] == "" {
parents[0] = f.Name
}
if parents[len(parents)-1] == "" {
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
}
finfo.name = parents[len(parents)-1]
if len(parents) > 1 {
if (finfo.flags & fElement) == 0 {
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
}
finfo.parents = parents[:len(parents)-1]
}
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
// is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
if xmlname != nil && xmlname.name != finfo.name {
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
finfo.name, typ, f.Name, xmlname.name, ftyp)
}
}
return finfo, nil
}
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
return nil
}
for i, n := 0, typ.NumField(); i < n; i++ {
f := typ.Field(i)
if f.Name != "XMLName" {
continue
}
finfo, err := structFieldInfo(typ, &f)
if finfo.name != "" && err == nil {
return finfo
}
// Also consider errors as a non-existent field tag
// and let getTypeInfo itself report the error.
break
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
var conflicts []int
Loop:
// First, figure all conflicts. Most working code will have none.
for i := range tinfo.fields {
oldf := &tinfo.fields[i]
if oldf.flags&fMode != newf.flags&fMode {
continue
}
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
continue
}
minl := min(len(newf.parents), len(oldf.parents))
for p := 0; p < minl; p++ {
if oldf.parents[p] != newf.parents[p] {
continue Loop
}
}
if len(oldf.parents) > len(newf.parents) {
if oldf.parents[len(newf.parents)] == newf.name {
conflicts = append(conflicts, i)
}
} else if len(oldf.parents) < len(newf.parents) {
if newf.parents[len(oldf.parents)] == oldf.name {
conflicts = append(conflicts, i)
}
} else {
if newf.name == oldf.name {
conflicts = append(conflicts, i)
}
}
}
// Without conflicts, add the new field and return.
if conflicts == nil {
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// If any conflict is shallower, ignore the new field.
// This matches the Go field resolution on embedding.
for _, i := range conflicts {
if len(tinfo.fields[i].idx) < len(newf.idx) {
return nil
}
}
// Otherwise, if any of them is at the same depth level, it's an error.
for _, i := range conflicts {
oldf := &tinfo.fields[i]
if len(oldf.idx) == len(newf.idx) {
f1 := typ.FieldByIndex(oldf.idx)
f2 := typ.FieldByIndex(newf.idx)
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
}
}
// Otherwise, the new field is shallower, and thus takes precedence,
// so drop the conflicting fields from tinfo and append the new one.
for c := len(conflicts) - 1; c >= 0; c-- {
i := conflicts[c]
copy(tinfo.fields[i:], tinfo.fields[i+1:])
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
}
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
for i, x := range finfo.idx {
if i > 0 {
t := v.Type()
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
}
v = v.Field(x)
}
return v
}
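// Illustrative sketch (hypothetical type and helper): the conflict rules in
// addFieldInfo above surface to callers as a *TagPathError, here because the
// tag path "items" is a prefix of "items>item1". This mirrors the
// BadPathTest cases in the read tests.
type conflictingPaths struct {
	First  string `xml:"items>item1"`
	Second string `xml:"items"`
}

func demoTagPathError(doc []byte) {
	var c conflictingPaths
	if err := Unmarshal(doc, &c); err != nil {
		if tpe, ok := err.(*TagPathError); ok {
			fmt.Printf("%s:%q conflicts with %s:%q\n", tpe.Field1, tpe.Tag1, tpe.Field2, tpe.Tag2)
		}
	}
}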

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,752 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"fmt"
"io"
"reflect"
"strings"
"testing"
"unicode/utf8"
)
const testInput = `
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<body xmlns:foo="ns1" xmlns="ns2" xmlns:tag="ns3" ` +
"\r\n\t" + ` >
<hello lang="en">World &lt;&gt;&apos;&quot; &#x767d;&#40300;</hello>
<query>&; &is-it;</query>
<goodbye />
<outer foo:attr="value" xmlns:tag="ns4">
<inner/>
</outer>
<tag:name>
<![CDATA[Some text here.]]>
</tag:name>
</body><!-- missing final newline -->`
var testEntity = map[string]string{"何": "What", "is-it": "is it?"}
var rawTokens = []Token{
CharData("\n"),
ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
CharData("\n"),
Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
CharData("\n"),
StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
CharData("\n "),
StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
CharData("World <>'\" 白鵬翔"),
EndElement{Name{"", "hello"}},
CharData("\n "),
StartElement{Name{"", "query"}, []Attr{}},
CharData("What is it?"),
EndElement{Name{"", "query"}},
CharData("\n "),
StartElement{Name{"", "goodbye"}, []Attr{}},
EndElement{Name{"", "goodbye"}},
CharData("\n "),
StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
CharData("\n "),
StartElement{Name{"", "inner"}, []Attr{}},
EndElement{Name{"", "inner"}},
CharData("\n "),
EndElement{Name{"", "outer"}},
CharData("\n "),
StartElement{Name{"tag", "name"}, []Attr{}},
CharData("\n "),
CharData("Some text here."),
CharData("\n "),
EndElement{Name{"tag", "name"}},
CharData("\n"),
EndElement{Name{"", "body"}},
Comment(" missing final newline "),
}
var cookedTokens = []Token{
CharData("\n"),
ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)},
CharData("\n"),
Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`),
CharData("\n"),
StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}},
CharData("\n "),
StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}},
CharData("World <>'\" 白鵬翔"),
EndElement{Name{"ns2", "hello"}},
CharData("\n "),
StartElement{Name{"ns2", "query"}, []Attr{}},
CharData("What is it?"),
EndElement{Name{"ns2", "query"}},
CharData("\n "),
StartElement{Name{"ns2", "goodbye"}, []Attr{}},
EndElement{Name{"ns2", "goodbye"}},
CharData("\n "),
StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}},
CharData("\n "),
StartElement{Name{"ns2", "inner"}, []Attr{}},
EndElement{Name{"ns2", "inner"}},
CharData("\n "),
EndElement{Name{"ns2", "outer"}},
CharData("\n "),
StartElement{Name{"ns3", "name"}, []Attr{}},
CharData("\n "),
CharData("Some text here."),
CharData("\n "),
EndElement{Name{"ns3", "name"}},
CharData("\n"),
EndElement{Name{"ns2", "body"}},
Comment(" missing final newline "),
}
const testInputAltEncoding = `
<?xml version="1.0" encoding="x-testing-uppercase"?>
<TAG>VALUE</TAG>`
var rawTokensAltEncoding = []Token{
CharData("\n"),
ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("value"),
EndElement{Name{"", "tag"}},
}
var xmlInput = []string{
// unexpected EOF cases
"<",
"<t",
"<t ",
"<t/",
"<!",
"<!-",
"<!--",
"<!--c-",
"<!--c--",
"<!d",
"<t></",
"<t></t",
"<?",
"<?p",
"<t a",
"<t a=",
"<t a='",
"<t a=''",
"<t/><![",
"<t/><![C",
"<t/><![CDATA[d",
"<t/><![CDATA[d]",
"<t/><![CDATA[d]]",
// other Syntax errors
"<>",
"<t/a",
"<0 />",
"<?0 >",
// "<!0 >", // let the Token() caller handle
"</0>",
"<t 0=''>",
"<t a='&'>",
"<t a='<'>",
"<t>&nbspc;</t>",
"<t a>",
"<t a=>",
"<t a=v>",
// "<![CDATA[d]]>", // let the Token() caller handle
"<t></e>",
"<t></>",
"<t></t!",
"<t>cdata]]></t>",
}
func TestRawToken(t *testing.T) {
d := NewDecoder(strings.NewReader(testInput))
d.Entity = testEntity
testRawToken(t, d, testInput, rawTokens)
}
const nonStrictInput = `
<tag>non&entity</tag>
<tag>&unknown;entity</tag>
<tag>&#123</tag>
<tag>&#zzz;</tag>
<tag>&なまえ3;</tag>
<tag>&lt-gt;</tag>
<tag>&;</tag>
<tag>&0a;</tag>
`
var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"}
var nonStrictTokens = []Token{
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("non&entity"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&unknown;entity"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&#123"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&#zzz;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&なまえ3;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&lt-gt;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
StartElement{Name{"", "tag"}, []Attr{}},
CharData("&0a;"),
EndElement{Name{"", "tag"}},
CharData("\n"),
}
func TestNonStrictRawToken(t *testing.T) {
d := NewDecoder(strings.NewReader(nonStrictInput))
d.Strict = false
testRawToken(t, d, nonStrictInput, nonStrictTokens)
}
type downCaser struct {
t *testing.T
r io.ByteReader
}
func (d *downCaser) ReadByte() (c byte, err error) {
c, err = d.r.ReadByte()
if c >= 'A' && c <= 'Z' {
c += 'a' - 'A'
}
return
}
func (d *downCaser) Read(p []byte) (int, error) {
d.t.Fatalf("unexpected Read call on downCaser reader")
panic("unreachable")
}
func TestRawTokenAltEncoding(t *testing.T) {
d := NewDecoder(strings.NewReader(testInputAltEncoding))
d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
if charset != "x-testing-uppercase" {
t.Fatalf("unexpected charset %q", charset)
}
return &downCaser{t, input.(io.ByteReader)}, nil
}
testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding)
}
func TestRawTokenAltEncodingNoConverter(t *testing.T) {
d := NewDecoder(strings.NewReader(testInputAltEncoding))
token, err := d.RawToken()
if token == nil {
t.Fatalf("expected a token on first RawToken call")
}
if err != nil {
t.Fatal(err)
}
token, err = d.RawToken()
if token != nil {
t.Errorf("expected a nil token; got %#v", token)
}
if err == nil {
t.Fatalf("expected an error on second RawToken call")
}
const encoding = "x-testing-uppercase"
if !strings.Contains(err.Error(), encoding) {
t.Errorf("expected error to contain %q; got error: %v",
encoding, err)
}
}
func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) {
lastEnd := int64(0)
for i, want := range rawTokens {
start := d.InputOffset()
have, err := d.RawToken()
end := d.InputOffset()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
var shave, swant string
if _, ok := have.(CharData); ok {
shave = fmt.Sprintf("CharData(%q)", have)
} else {
shave = fmt.Sprintf("%#v", have)
}
if _, ok := want.(CharData); ok {
swant = fmt.Sprintf("CharData(%q)", want)
} else {
swant = fmt.Sprintf("%#v", want)
}
t.Errorf("token %d = %s, want %s", i, shave, swant)
}
// Check that InputOffset returned actual token.
switch {
case start < lastEnd:
t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have)
case start >= end:
// Special case: EndElement can be synthesized.
if start == end && end == lastEnd {
break
}
t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have)
case end > int64(len(raw)):
t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have)
default:
text := raw[start:end]
if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) {
t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have)
}
}
lastEnd = end
}
}
// Ensure that directives (specifically !DOCTYPE) include the complete
// text of any nested directives, noting that < and > do not change
// nesting depth if they are in single or double quotes.
var nestedDirectivesInput = `
<!DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
<!DOCTYPE [<!ENTITY xlt ">">]>
<!DOCTYPE [<!ENTITY xlt "<">]>
<!DOCTYPE [<!ENTITY xlt '>'>]>
<!DOCTYPE [<!ENTITY xlt '<'>]>
<!DOCTYPE [<!ENTITY xlt '">'>]>
<!DOCTYPE [<!ENTITY xlt "'<">]>
`
var nestedDirectivesTokens = []Token{
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt ">">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt "<">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt '>'>]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt '<'>]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt '">'>]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY xlt "'<">]`),
CharData("\n"),
}
func TestNestedDirectives(t *testing.T) {
d := NewDecoder(strings.NewReader(nestedDirectivesInput))
for i, want := range nestedDirectivesTokens {
have, err := d.Token()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("token %d = %#v want %#v", i, have, want)
}
}
}
func TestToken(t *testing.T) {
d := NewDecoder(strings.NewReader(testInput))
d.Entity = testEntity
for i, want := range cookedTokens {
have, err := d.Token()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("token %d = %#v want %#v", i, have, want)
}
}
}
func TestSyntax(t *testing.T) {
for i := range xmlInput {
d := NewDecoder(strings.NewReader(xmlInput[i]))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
if _, ok := err.(*SyntaxError); !ok {
t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i])
}
}
}
type allScalars struct {
True1 bool
True2 bool
False1 bool
False2 bool
Int int
Int8 int8
Int16 int16
Int32 int32
Int64 int64
Uint int
Uint8 uint8
Uint16 uint16
Uint32 uint32
Uint64 uint64
Uintptr uintptr
Float32 float32
Float64 float64
String string
PtrString *string
}
var all = allScalars{
True1: true,
True2: true,
False1: false,
False2: false,
Int: 1,
Int8: -2,
Int16: 3,
Int32: -4,
Int64: 5,
Uint: 6,
Uint8: 7,
Uint16: 8,
Uint32: 9,
Uint64: 10,
Uintptr: 11,
Float32: 13.0,
Float64: 14.0,
String: "15",
PtrString: &sixteen,
}
var sixteen = "16"
const testScalarsInput = `<allscalars>
<True1>true</True1>
<True2>1</True2>
<False1>false</False1>
<False2>0</False2>
<Int>1</Int>
<Int8>-2</Int8>
<Int16>3</Int16>
<Int32>-4</Int32>
<Int64>5</Int64>
<Uint>6</Uint>
<Uint8>7</Uint8>
<Uint16>8</Uint16>
<Uint32>9</Uint32>
<Uint64>10</Uint64>
<Uintptr>11</Uintptr>
<Float>12.0</Float>
<Float32>13.0</Float32>
<Float64>14.0</Float64>
<String>15</String>
<PtrString>16</PtrString>
</allscalars>`
func TestAllScalars(t *testing.T) {
var a allScalars
err := Unmarshal([]byte(testScalarsInput), &a)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(a, all) {
t.Errorf("have %+v want %+v", a, all)
}
}
type item struct {
Field_a string
}
func TestIssue569(t *testing.T) {
data := `<item><Field_a>abcd</Field_a></item>`
var i item
err := Unmarshal([]byte(data), &i)
if err != nil || i.Field_a != "abcd" {
t.Fatal("Expecting abcd")
}
}
func TestUnquotedAttrs(t *testing.T) {
data := "<tag attr=azAZ09:-_\t>"
d := NewDecoder(strings.NewReader(data))
d.Strict = false
token, err := d.Token()
if _, ok := err.(*SyntaxError); ok {
t.Errorf("Unexpected error: %v", err)
}
if token.(StartElement).Name.Local != "tag" {
t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
}
attr := token.(StartElement).Attr[0]
if attr.Value != "azAZ09:-_" {
t.Errorf("Unexpected attribute value: %v", attr.Value)
}
if attr.Name.Local != "attr" {
t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
}
}
func TestValuelessAttrs(t *testing.T) {
tests := [][3]string{
{"<p nowrap>", "p", "nowrap"},
{"<p nowrap >", "p", "nowrap"},
{"<input checked/>", "input", "checked"},
{"<input checked />", "input", "checked"},
}
for _, test := range tests {
d := NewDecoder(strings.NewReader(test[0]))
d.Strict = false
token, err := d.Token()
if _, ok := err.(*SyntaxError); ok {
t.Errorf("Unexpected error: %v", err)
}
if token.(StartElement).Name.Local != test[1] {
t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
}
attr := token.(StartElement).Attr[0]
if attr.Value != test[2] {
t.Errorf("Unexpected attribute value: %v", attr.Value)
}
if attr.Name.Local != test[2] {
t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
}
}
}
func TestCopyTokenCharData(t *testing.T) {
data := []byte("same data")
var tok1 Token = CharData(data)
tok2 := CopyToken(tok1)
if !reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(CharData) != CharData")
}
data[1] = 'o'
if reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(CharData) uses same buffer.")
}
}
func TestCopyTokenStartElement(t *testing.T) {
elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
var tok1 Token = elt
tok2 := CopyToken(tok1)
if tok1.(StartElement).Attr[0].Value != "en" {
t.Error("CopyToken overwrote Attr[0]")
}
if !reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(StartElement) != StartElement")
}
tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
if reflect.DeepEqual(tok1, tok2) {
t.Error("CopyToken(CharData) uses same buffer.")
}
}
func TestSyntaxErrorLineNum(t *testing.T) {
testInput := "<P>Foo<P>\n\n<P>Bar</>\n"
d := NewDecoder(strings.NewReader(testInput))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
synerr, ok := err.(*SyntaxError)
if !ok {
t.Error("Expected SyntaxError.")
}
if synerr.Line != 3 {
t.Error("SyntaxError didn't have correct line number.")
}
}
func TestTrailingRawToken(t *testing.T) {
input := `<FOO></FOO> `
d := NewDecoder(strings.NewReader(input))
var err error
for _, err = d.RawToken(); err == nil; _, err = d.RawToken() {
}
if err != io.EOF {
t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err)
}
}
func TestTrailingToken(t *testing.T) {
input := `<FOO></FOO> `
d := NewDecoder(strings.NewReader(input))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
if err != io.EOF {
t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
}
}
func TestEntityInsideCDATA(t *testing.T) {
input := `<test><![CDATA[ &val=foo ]]></test>`
d := NewDecoder(strings.NewReader(input))
var err error
for _, err = d.Token(); err == nil; _, err = d.Token() {
}
if err != io.EOF {
t.Fatalf("d.Token() = _, %v, want _, io.EOF", err)
}
}
var characterTests = []struct {
in string
err string
}{
{"\x12<doc/>", "illegal character code U+0012"},
{"<?xml version=\"1.0\"?>\x0b<doc/>", "illegal character code U+000B"},
{"\xef\xbf\xbe<doc/>", "illegal character code U+FFFE"},
{"<?xml version=\"1.0\"?><doc>\r\n<hiya/>\x07<toots/></doc>", "illegal character code U+0007"},
{"<?xml version=\"1.0\"?><doc \x12='value'>what's up</doc>", "expected attribute name in element"},
{"<doc>&abc\x01;</doc>", "invalid character entity &abc (no semicolon)"},
{"<doc>&\x01;</doc>", "invalid character entity & (no semicolon)"},
{"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &\uFFFE;"},
{"<doc>&hello;</doc>", "invalid character entity &hello;"},
}
func TestDisallowedCharacters(t *testing.T) {
for i, tt := range characterTests {
d := NewDecoder(strings.NewReader(tt.in))
var err error
for err == nil {
_, err = d.Token()
}
synerr, ok := err.(*SyntaxError)
if !ok {
t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err)
}
if synerr.Msg != tt.err {
t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg)
}
}
}
type procInstEncodingTest struct {
expect, got string
}
var procInstTests = []struct {
input string
expect [2]string
}{
{`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}},
{`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}},
{`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}},
{`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}},
{`encoding="FOO" `, [2]string{"", "FOO"}},
}
func TestProcInstEncoding(t *testing.T) {
for _, test := range procInstTests {
if got := procInst("version", test.input); got != test.expect[0] {
t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0])
}
if got := procInst("encoding", test.input); got != test.expect[1] {
t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1])
}
}
}
// Ensure that directives with comments include the complete
// text of any nested directives.
var directivesWithCommentsInput = `
<!DOCTYPE [<!-- a comment --><!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]>
<!DOCTYPE [<!ENTITY go "Golang"><!-- a comment-->]>
<!DOCTYPE <!-> <!> <!----> <!-->--> <!--->--> [<!ENTITY go "Golang"><!-- a comment-->]>
`
var directivesWithCommentsTokens = []Token{
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY rdf "http://www.w3.org/1999/02/22-rdf-syntax-ns#">]`),
CharData("\n"),
Directive(`DOCTYPE [<!ENTITY go "Golang">]`),
CharData("\n"),
Directive(`DOCTYPE <!-> <!> [<!ENTITY go "Golang">]`),
CharData("\n"),
}
func TestDirectivesWithComments(t *testing.T) {
d := NewDecoder(strings.NewReader(directivesWithCommentsInput))
for i, want := range directivesWithCommentsTokens {
have, err := d.Token()
if err != nil {
t.Fatalf("token %d: unexpected error: %s", i, err)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("token %d = %#v want %#v", i, have, want)
}
}
}
// Writer whose Write method always returns an error.
type errWriter struct{}
func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") }
func TestEscapeTextIOErrors(t *testing.T) {
expectErr := "unwritable"
err := EscapeText(errWriter{}, []byte{'A'})
if err == nil || err.Error() != expectErr {
t.Errorf("have %v, want %v", err, expectErr)
}
}
func TestEscapeTextInvalidChar(t *testing.T) {
input := []byte("A \x00 terminated string.")
expected := "A \uFFFD terminated string."
buff := new(bytes.Buffer)
if err := EscapeText(buff, input); err != nil {
t.Fatalf("have %v, want nil", err)
}
text := buff.String()
if text != expected {
t.Errorf("have %v, want %v", text, expected)
}
}
func TestIssue5880(t *testing.T) {
type T []byte
data, err := Marshal(T{192, 168, 0, 1})
if err != nil {
t.Errorf("Marshal error: %v", err)
}
if !utf8.Valid(data) {
t.Errorf("Marshal generated invalid UTF-8: %x", data)
}
}
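// Illustrative sketch (hypothetical helper): the rawTokens/cookedTokens
// pairs above capture the difference between RawToken and Token. Token
// resolves namespace prefixes into Name.Space, as printed here, while
// RawToken reports the prefixes verbatim.
func dumpResolvedStarts(src string) {
	d := NewDecoder(strings.NewReader(src))
	for {
		tok, err := d.Token()
		if err != nil {
			return // io.EOF ends the stream; a syntax error ends it early
		}
		if se, ok := tok.(StartElement); ok {
			fmt.Printf("%s (xmlns=%q)\n", se.Name.Local, se.Name.Space)
		}
	}
}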

View File

@ -0,0 +1,94 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// +build ignore
/*
This program is a server for the WebDAV 'litmus' compliance test at
http://www.webdav.org/neon/litmus/
To run the test:
go run litmus_test_server.go
and separately, from the downloaded litmus-xxx directory:
make URL=http://localhost:9999/ check
*/
package main
import (
"flag"
"fmt"
"github.com/openziti/zrok/endpoints/drive/driveServer"
"log"
"net/http"
"net/url"
)
var port = flag.Int("port", 9999, "server port")
func main() {
flag.Parse()
log.SetFlags(0)
h := &driveServer.Handler{
FileSystem: driveServer.NewMemFS(),
LockSystem: driveServer.NewMemLS(),
Logger: func(r *http.Request, err error) {
litmus := r.Header.Get("X-Litmus")
if len(litmus) > 19 {
litmus = litmus[:16] + "..."
}
switch r.Method {
case "COPY", "MOVE":
dst := ""
if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
dst = u.Path
}
o := r.Header.Get("Overwrite")
log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
default:
log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
}
},
}
// The next line would normally be:
// http.Handle("/", h)
// but we wrap that HTTP handler h to cater for a special case.
//
// The propfind_invalid2 litmus test case expects an empty namespace prefix
// declaration to be an error. The FAQ in the webdav litmus test says:
//
// "What does the "propfind_invalid2" test check for?...
//
// If a request was sent with an XML body which included an empty namespace
// prefix declaration (xmlns:ns1=""), then the server must reject that with
// a "400 Bad Request" response, as it is invalid according to the XML
// Namespace specification."
//
// On the other hand, the Go standard library's encoding/xml package
// accepts an empty xmlns namespace, as per the discussion at
// https://github.com/golang/go/issues/8068
//
// Empty namespaces seem disallowed in the second (2006) edition of the XML
// standard, but allowed in a later edition. The grammar differs between
// http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
// http://www.w3.org/TR/REC-xml-names/#dt-prefix
//
// Thus, we assume that the propfind_invalid2 test is obsolete, and
// hard-code the 400 Bad Request response that the test expects.
http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
http.Error(w, "400 Bad Request", http.StatusBadRequest)
return
}
h.ServeHTTP(w, r)
}))
addr := fmt.Sprintf(":%d", *port)
log.Printf("Serving %v", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}
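// Illustrative sketch (hypothetical helper): a quick manual smoke test for
// the server above without running the full litmus suite. It issues a
// "Depth: 1" PROPFIND against the configured port and logs the HTTP status.
func probe() error {
	req, err := http.NewRequest("PROPFIND", fmt.Sprintf("http://localhost:%d/", *port), nil)
	if err != nil {
		return err
	}
	req.Header.Set("Depth", "1")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	log.Printf("PROPFIND / -> %s", resp.Status)
	return nil
}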

445
drives/davServer/lock.go Normal file
View File

@ -0,0 +1,445 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"container/heap"
"errors"
"strconv"
"strings"
"sync"
"time"
)
var (
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
// ErrForbidden is returned by a LockSystem's Unlock method.
ErrForbidden = errors.New("webdav: forbidden")
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
ErrLocked = errors.New("webdav: locked")
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
ErrNoSuchLock = errors.New("webdav: no such lock")
)
// Condition can match a WebDAV resource, based on a token or ETag.
// Exactly one of Token and ETag should be non-empty.
type Condition struct {
Not bool
Token string
ETag string
}
// LockSystem manages access to a collection of named resources. The elements
// in a lock name are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
type LockSystem interface {
// Confirm confirms that the caller can claim all of the locks specified by
// the given conditions, and that holding the union of all of those locks
// gives exclusive access to all of the named resources. Up to two resources
// can be named. Empty names are ignored.
//
// Exactly one of release and err will be non-nil. If release is non-nil,
// all of the requested locks are held until release is called. Calling
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
// Confirm has confirmed that a lock claim is valid, that lock cannot be
// Confirmed again until it has been released.
//
// If Confirm returns ErrConfirmationFailed then the Handler will continue
// to try any other set of locks presented (a WebDAV HTTP request can
// present more than one set of locks). If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
// Create creates a lock with the given depth, duration, owner and root
// (name). The depth will either be negative (meaning infinite) or zero.
//
// If Create returns ErrLocked then the Handler will write a "423 Locked"
// HTTP status. If it returns any other non-nil error, the Handler will
// write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
//
// The token returned identifies the created lock. It should be an absolute
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
// contain whitespace.
Create(now time.Time, details LockDetails) (token string, err error)
// Refresh refreshes the lock with the given token.
//
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
// Unlock unlocks the lock with the given token.
//
// If Unlock returns ErrForbidden then the Handler will write a "403
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
// any other non-nil error, the Handler will write a "500 Internal Server
// Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
// when to use each error.
Unlock(now time.Time, token string) error
}
// LockDetails are a lock's metadata.
type LockDetails struct {
// Root is the root resource name being locked. For a zero-depth lock, the
// root is the only resource being locked.
Root string
// Duration is the lock timeout. A negative duration means infinite.
Duration time.Duration
// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
//
// TODO: does the "verbatim" nature play well with XML namespaces?
// Does the OwnerXML field need to have more structure? See
// https://codereview.appspot.com/175140043/#msg2
OwnerXML string
// ZeroDepth is whether the lock has zero depth. If it does not have zero
// depth, it has infinite depth.
ZeroDepth bool
}
// NewMemLS returns a new in-memory LockSystem.
func NewMemLS() LockSystem {
return &memLS{
byName: make(map[string]*memLSNode),
byToken: make(map[string]*memLSNode),
gen: uint64(time.Now().Unix()),
}
}
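// Illustrative sketch (hypothetical helper): typical LockSystem usage as
// described by the interface comments above. Create takes a zero-depth lock
// on a resource, Confirm claims it for the duration of an operation, and
// Unlock removes it; the resource name is made up for the example.
func exampleLockRoundTrip(ls LockSystem) error {
	now := time.Now()
	token, err := ls.Create(now, LockDetails{
		Root:      "/demo.txt",
		Duration:  time.Minute,
		ZeroDepth: true,
	})
	if err != nil {
		return err
	}
	release, err := ls.Confirm(now, "/demo.txt", "", Condition{Token: token})
	if err != nil {
		return err
	}
	release() // releases the claim; the lock itself remains until Unlock
	return ls.Unlock(now, token)
}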
type memLS struct {
mu sync.Mutex
byName map[string]*memLSNode
byToken map[string]*memLSNode
gen uint64
// byExpiry only contains those nodes whose LockDetails have a finite
// Duration and are yet to expire.
byExpiry byExpiry
}
func (m *memLS) nextToken() string {
m.gen++
return strconv.FormatUint(m.gen, 10)
}
func (m *memLS) collectExpiredNodes(now time.Time) {
for len(m.byExpiry) > 0 {
if now.Before(m.byExpiry[0].expiry) {
break
}
m.remove(m.byExpiry[0])
}
}
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
var n0, n1 *memLSNode
if name0 != "" {
if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
return nil, ErrConfirmationFailed
}
}
if name1 != "" {
if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
return nil, ErrConfirmationFailed
}
}
// Don't hold the same node twice.
if n1 == n0 {
n1 = nil
}
if n0 != nil {
m.hold(n0)
}
if n1 != nil {
m.hold(n1)
}
return func() {
m.mu.Lock()
defer m.mu.Unlock()
if n1 != nil {
m.unhold(n1)
}
if n0 != nil {
m.unhold(n0)
}
}, nil
}
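// Confirmation sketch (illustrative only): before mutating a locked resource,
// a caller presents the lock token via a Condition and must invoke the
// returned release function once the operation completes. The path and the
// ls/token variables are example placeholders.
//
//	release, err := ls.Confirm(time.Now(), "/docs/report.txt", "", Condition{Token: token})
//	switch err {
//	case nil:
//		defer release()
//		// ... perform the write while the lock is held ...
//	case ErrConfirmationFailed:
//		// Wrong or missing token; the Handler answers 412 (Precondition Failed).
//	}
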
// lookup returns the node n that locks the named resource, provided that n
// matches at least one of the given conditions and that lock isn't held by
// another party. Otherwise, it returns nil.
//
// n may be a parent of the named resource, if n is an infinite depth lock.
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
// TODO: support Condition.Not and Condition.ETag.
for _, c := range conditions {
n = m.byToken[c.Token]
if n == nil || n.held {
continue
}
if name == n.details.Root {
return n
}
if n.details.ZeroDepth {
continue
}
if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
return n
}
}
return nil
}
func (m *memLS) hold(n *memLSNode) {
if n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = true
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func (m *memLS) unhold(n *memLSNode) {
if !n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = false
if n.details.Duration >= 0 {
heap.Push(&m.byExpiry, n)
}
}
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
details.Root = slashClean(details.Root)
if !m.canCreate(details.Root, details.ZeroDepth) {
return "", ErrLocked
}
n := m.create(details.Root)
n.token = m.nextToken()
m.byToken[n.token] = n
n.details = details
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.token, nil
}
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return LockDetails{}, ErrNoSuchLock
}
if n.held {
return LockDetails{}, ErrLocked
}
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
n.details.Duration = duration
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.details, nil
}
func (m *memLS) Unlock(now time.Time, token string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return ErrNoSuchLock
}
if n.held {
return ErrLocked
}
m.remove(n)
return nil
}
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
return walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
return true
}
if first {
if n.token != "" {
// The target node is already locked.
return false
}
if !zeroDepth {
// The requested lock depth is infinite, and the fact that n exists
// (n != nil) means that a descendent of the target node is locked.
return false
}
} else if n.token != "" && !n.details.ZeroDepth {
// An ancestor of the target node is locked with infinite depth.
return false
}
return true
})
}
func (m *memLS) create(name string) (ret *memLSNode) {
walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
n = &memLSNode{
details: LockDetails{
Root: name0,
},
byExpiryIndex: -1,
}
m.byName[name0] = n
}
n.refCount++
if first {
ret = n
}
return true
})
return ret
}
func (m *memLS) remove(n *memLSNode) {
delete(m.byToken, n.token)
n.token = ""
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
x := m.byName[name0]
x.refCount--
if x.refCount == 0 {
delete(m.byName, name0)
}
return true
})
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
for first := true; ; first = false {
if !f(name, first) {
return false
}
if name == "/" {
break
}
name = name[:strings.LastIndex(name, "/")]
if name == "" {
name = "/"
}
}
return true
}
type memLSNode struct {
// details are the lock metadata. Even if this node's name is not explicitly locked,
// details.Root will still equal the node's name.
details LockDetails
// token is the unique identifier for this node's lock. An empty token means that
// this node is not explicitly locked.
token string
// refCount is the number of self-or-descendent nodes that are explicitly locked.
refCount int
// expiry is when this node's lock expires.
expiry time.Time
// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
// if this node does not expire, or has expired.
byExpiryIndex int
// held is whether this node's lock is actively held by a Confirm call.
held bool
}
type byExpiry []*memLSNode
func (b *byExpiry) Len() int {
return len(*b)
}
func (b *byExpiry) Less(i, j int) bool {
return (*b)[i].expiry.Before((*b)[j].expiry)
}
func (b *byExpiry) Swap(i, j int) {
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
(*b)[i].byExpiryIndex = i
(*b)[j].byExpiryIndex = j
}
func (b *byExpiry) Push(x interface{}) {
n := x.(*memLSNode)
n.byExpiryIndex = len(*b)
*b = append(*b, n)
}
func (b *byExpiry) Pop() interface{} {
i := len(*b) - 1
n := (*b)[i]
(*b)[i] = nil
n.byExpiryIndex = -1
*b = (*b)[:i]
return n
}
const infiniteTimeout = -1
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
// empty, an infiniteTimeout is returned.
func parseTimeout(s string) (time.Duration, error) {
if s == "" {
return infiniteTimeout, nil
}
if i := strings.IndexByte(s, ','); i >= 0 {
s = s[:i]
}
s = strings.TrimSpace(s)
if s == "Infinite" {
return infiniteTimeout, nil
}
const pre = "Second-"
if !strings.HasPrefix(s, pre) {
return 0, errInvalidTimeout
}
s = s[len(pre):]
if s == "" || s[0] < '0' || '9' < s[0] {
return 0, errInvalidTimeout
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || 1<<32-1 < n {
return 0, errInvalidTimeout
}
return time.Duration(n) * time.Second, nil
}
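// Parsing sketch (illustrative only): typical Timeout header values and the
// durations they map to.
//
//	parseTimeout("Second-3600")                 // 3600 * time.Second, nil
//	parseTimeout("Infinite, Second-4100000000") // infiniteTimeout, nil (fallback after the comma is ignored)
//	parseTimeout("second-3600")                 // 0, errInvalidTimeout (the TimeType is case-sensitive)
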

View File

@ -0,0 +1,735 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"fmt"
"math/rand"
"path"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
)
func TestWalkToRoot(t *testing.T) {
testCases := []struct {
name string
want []string
}{{
"/a/b/c/d",
[]string{
"/a/b/c/d",
"/a/b/c",
"/a/b",
"/a",
"/",
},
}, {
"/a",
[]string{
"/a",
"/",
},
}, {
"/",
[]string{
"/",
},
}}
for _, tc := range testCases {
var got []string
if !walkToRoot(tc.name, func(name0 string, first bool) bool {
if first != (len(got) == 0) {
t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got))
return false
}
got = append(got, name0)
return true
}) {
continue
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want)
}
}
}
var lockTestDurations = []time.Duration{
infiniteTimeout, // infiniteTimeout means to never expire.
0, // A zero duration means to expire immediately.
100 * time.Hour, // A very large duration will not expire in these tests.
}
// lockTestNames are the names of a set of mutually compatible locks. For each
// name fragment:
// - _ means no explicit lock.
// - i means an infinite-depth lock.
// - z means a zero-depth lock.
var lockTestNames = []string{
"/_/_/_/_/z",
"/_/_/i",
"/_/z",
"/_/z/i",
"/_/z/z",
"/_/z/_/i",
"/_/z/_/z",
"/i",
"/z",
"/z/_/i",
"/z/_/z",
}
func lockTestZeroDepth(name string) bool {
switch name[len(name)-1] {
case 'i':
return false
case 'z':
return true
}
panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name))
}
func TestMemLSCanCreate(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
for _, name := range lockTestNames {
_, err := m.Create(now, LockDetails{
Root: name,
Duration: infiniteTimeout,
ZeroDepth: lockTestZeroDepth(name),
})
if err != nil {
t.Fatalf("creating lock for %q: %v", name, err)
}
}
wantCanCreate := func(name string, zeroDepth bool) bool {
for _, n := range lockTestNames {
switch {
case n == name:
// An existing lock has the same name as the proposed lock.
return false
case strings.HasPrefix(n, name):
// An existing lock would be a child of the proposed lock,
// which conflicts if the proposed lock has infinite depth.
if !zeroDepth {
return false
}
case strings.HasPrefix(name, n):
// An existing lock would be an ancestor of the proposed lock,
// which conflicts if the ancestor has infinite depth.
if n[len(n)-1] == 'i' {
return false
}
}
}
return true
}
var check func(int, string)
check = func(recursion int, name string) {
for _, zeroDepth := range []bool{false, true} {
got := m.canCreate(name, zeroDepth)
want := wantCanCreate(name, zeroDepth)
if got != want {
t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want)
}
}
if recursion == 6 {
return
}
if name != "/" {
name += "/"
}
for _, c := range "_iz" {
check(recursion+1, name+string(c))
}
}
check(0, "/")
}
func TestMemLSLookup(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
badToken := m.nextToken()
t.Logf("badToken=%q", badToken)
for _, name := range lockTestNames {
token, err := m.Create(now, LockDetails{
Root: name,
Duration: infiniteTimeout,
ZeroDepth: lockTestZeroDepth(name),
})
if err != nil {
t.Fatalf("creating lock for %q: %v", name, err)
}
t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token)
}
baseNames := append([]string{"/a", "/b/c"}, lockTestNames...)
for _, baseName := range baseNames {
for _, suffix := range []string{"", "/0", "/1/2/3"} {
name := baseName + suffix
goodToken := ""
base := m.byName[baseName]
if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) {
goodToken = base.token
}
for _, token := range []string{badToken, goodToken} {
if token == "" {
continue
}
got := m.lookup(name, Condition{Token: token})
want := base
if token == badToken {
want = nil
}
if got != want {
t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p",
name, token, token == badToken, got, want)
}
}
}
}
}
func TestMemLSConfirm(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
alice, err := m.Create(now, LockDetails{
Root: "/alice",
Duration: infiniteTimeout,
ZeroDepth: false,
})
if err != nil {
t.Fatalf("Create: %v", err)
}
tweedle, err := m.Create(now, LockDetails{
Root: "/tweedle",
Duration: infiniteTimeout,
ZeroDepth: false,
})
if err != nil {
t.Fatalf("Create: %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Create: inconsistent state: %v", err)
}
// Test a mismatch between name and condition.
_, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice})
if err != ErrConfirmationFailed {
t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (mismatch): inconsistent state: %v", err)
}
// Test two names (that fall under the same lock) in the one Confirm call.
release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle})
if err != nil {
t.Fatalf("Confirm (twins): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (twins): inconsistent state: %v", err)
}
release()
if err := m.consistent(); err != nil {
t.Fatalf("release (twins): inconsistent state: %v", err)
}
// Test the same two names in overlapping Confirm / release calls.
releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle})
if err != nil {
t.Fatalf("Confirm (sequence #0): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err)
}
_, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
if err != ErrConfirmationFailed {
t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err)
}
releaseDee()
if err := m.consistent(); err != nil {
t.Fatalf("release (sequence #2): inconsistent state: %v", err)
}
releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle})
if err != nil {
t.Fatalf("Confirm (sequence #3): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err)
}
// Test that you can't unlock a held lock.
err = m.Unlock(now, tweedle)
if err != ErrLocked {
t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err)
}
releaseDum()
if err := m.consistent(); err != nil {
t.Fatalf("release (sequence #5): inconsistent state: %v", err)
}
err = m.Unlock(now, tweedle)
if err != nil {
t.Fatalf("Unlock (sequence #6): %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err)
}
}
func TestMemLSNonCanonicalRoot(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
token, err := m.Create(now, LockDetails{
Root: "/foo/./bar//",
Duration: 1 * time.Second,
})
if err != nil {
t.Fatalf("Create: %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Create: inconsistent state: %v", err)
}
if err := m.Unlock(now, token); err != nil {
t.Fatalf("Unlock: %v", err)
}
if err := m.consistent(); err != nil {
t.Fatalf("Unlock: inconsistent state: %v", err)
}
}
func TestMemLSExpiry(t *testing.T) {
m := NewMemLS().(*memLS)
testCases := []string{
"setNow 0",
"create /a.5",
"want /a.5",
"create /c.6",
"want /a.5 /c.6",
"create /a/b.7",
"want /a.5 /a/b.7 /c.6",
"setNow 4",
"want /a.5 /a/b.7 /c.6",
"setNow 5",
"want /a/b.7 /c.6",
"setNow 6",
"want /a/b.7",
"setNow 7",
"want ",
"setNow 8",
"want ",
"create /a.12",
"create /b.13",
"create /c.15",
"create /a/d.16",
"want /a.12 /a/d.16 /b.13 /c.15",
"refresh /a.14",
"want /a.14 /a/d.16 /b.13 /c.15",
"setNow 12",
"want /a.14 /a/d.16 /b.13 /c.15",
"setNow 13",
"want /a.14 /a/d.16 /c.15",
"setNow 14",
"want /a/d.16 /c.15",
"refresh /a/d.20",
"refresh /c.20",
"want /a/d.20 /c.20",
"setNow 20",
"want ",
}
tokens := map[string]string{}
zTime := time.Unix(0, 0)
now := zTime
for i, tc := range testCases {
j := strings.IndexByte(tc, ' ')
if j < 0 {
t.Fatalf("test case #%d %q: invalid command", i, tc)
}
op, arg := tc[:j], tc[j+1:]
switch op {
default:
t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op)
case "create", "refresh":
parts := strings.Split(arg, ".")
if len(parts) != 2 {
t.Fatalf("test case #%d %q: invalid create", i, tc)
}
root := parts[0]
d, err := strconv.Atoi(parts[1])
if err != nil {
t.Fatalf("test case #%d %q: invalid duration", i, tc)
}
dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now)
switch op {
case "create":
token, err := m.Create(now, LockDetails{
Root: root,
Duration: dur,
ZeroDepth: true,
})
if err != nil {
t.Fatalf("test case #%d %q: Create: %v", i, tc, err)
}
tokens[root] = token
case "refresh":
token := tokens[root]
if token == "" {
t.Fatalf("test case #%d %q: no token for %q", i, tc, root)
}
got, err := m.Refresh(now, token, dur)
if err != nil {
t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err)
}
want := LockDetails{
Root: root,
Duration: dur,
ZeroDepth: true,
}
if got != want {
t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want)
}
}
case "setNow":
d, err := strconv.Atoi(arg)
if err != nil {
t.Fatalf("test case #%d %q: invalid duration", i, tc)
}
now = time.Unix(0, 0).Add(time.Duration(d) * time.Second)
case "want":
m.mu.Lock()
m.collectExpiredNodes(now)
got := make([]string, 0, len(m.byToken))
for _, n := range m.byToken {
got = append(got, fmt.Sprintf("%s.%d",
n.details.Root, n.expiry.Sub(zTime)/time.Second))
}
m.mu.Unlock()
sort.Strings(got)
want := []string{}
if arg != "" {
want = strings.Split(arg, " ")
}
if !reflect.DeepEqual(got, want) {
t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want)
}
}
if err := m.consistent(); err != nil {
t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err)
}
}
}
func TestMemLS(t *testing.T) {
now := time.Unix(0, 0)
m := NewMemLS().(*memLS)
rng := rand.New(rand.NewSource(0))
tokens := map[string]string{}
nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0
const N = 2000
for i := 0; i < N; i++ {
name := lockTestNames[rng.Intn(len(lockTestNames))]
duration := lockTestDurations[rng.Intn(len(lockTestDurations))]
confirmed, unlocked := false, false
// If the name was already locked, we randomly confirm/release, refresh
// or unlock it. Otherwise, we create a lock.
token := tokens[name]
if token != "" {
switch rng.Intn(3) {
case 0:
confirmed = true
nConfirm++
release, err := m.Confirm(now, name, "", Condition{Token: token})
if err != nil {
t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err)
}
if err := m.consistent(); err != nil {
t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
}
release()
case 1:
nRefresh++
if _, err := m.Refresh(now, token, duration); err != nil {
t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err)
}
case 2:
unlocked = true
nUnlock++
if err := m.Unlock(now, token); err != nil {
t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err)
}
}
} else {
nCreate++
var err error
token, err = m.Create(now, LockDetails{
Root: name,
Duration: duration,
ZeroDepth: lockTestZeroDepth(name),
})
if err != nil {
t.Fatalf("iteration #%d: Create %q: %v", i, name, err)
}
}
if !confirmed {
if duration == 0 || unlocked {
// A zero-duration lock should expire immediately and is
// effectively equivalent to being unlocked.
tokens[name] = ""
} else {
tokens[name] = token
}
}
if err := m.consistent(); err != nil {
t.Fatalf("iteration #%d: inconsistent state: %v", i, err)
}
}
if nConfirm < N/10 {
t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10)
}
if nCreate < N/10 {
t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10)
}
if nRefresh < N/10 {
t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10)
}
if nUnlock < N/10 {
t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10)
}
}
func (m *memLS) consistent() error {
m.mu.Lock()
defer m.mu.Unlock()
// If m.byName is non-empty, then it must contain an entry for the root "/",
// and its refCount should equal the number of locked nodes.
if len(m.byName) > 0 {
n := m.byName["/"]
if n == nil {
return fmt.Errorf(`non-empty m.byName does not contain the root "/"`)
}
if n.refCount != len(m.byToken) {
return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken))
}
}
for name, n := range m.byName {
// The map keys should be consistent with the node's copy of the key.
if n.details.Root != name {
return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name)
}
// A name must be clean, and start with a "/".
if len(name) == 0 || name[0] != '/' {
return fmt.Errorf(`node name %q does not start with "/"`, name)
}
if name != path.Clean(name) {
return fmt.Errorf(`node name %q is not clean`, name)
}
// A node's refCount should be positive.
if n.refCount <= 0 {
return fmt.Errorf("non-positive refCount for node at name %q", name)
}
// A node's refCount should be the number of self-or-descendents that
// are locked (i.e. have a non-empty token).
var list []string
for name0, n0 := range m.byName {
// All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z',
// so strings.HasPrefix is equivalent to self-or-descendent name match.
// We don't have to worry about "/foo/bar" being a false positive match
// for "/foo/b".
if strings.HasPrefix(name0, name) && n0.token != "" {
list = append(list, name0)
}
}
if n.refCount != len(list) {
sort.Strings(list)
return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)",
name, n.refCount, list, len(list))
}
// A node n is in m.byToken if it has a non-empty token.
if n.token != "" {
if _, ok := m.byToken[n.token]; !ok {
return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token)
}
}
// A node n is in m.byExpiry if it has a non-negative byExpiryIndex.
if n.byExpiryIndex >= 0 {
if n.byExpiryIndex >= len(m.byExpiry) {
return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry))
}
if n != m.byExpiry[n.byExpiryIndex] {
return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex)
}
}
}
for token, n := range m.byToken {
// The map keys should be consistent with the node's copy of the key.
if n.token != token {
return fmt.Errorf("node token %q != byToken map key %q", n.token, token)
}
// Every node in m.byToken is in m.byName.
if _, ok := m.byName[n.details.Root]; !ok {
return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root)
}
}
for i, n := range m.byExpiry {
// The slice indices should be consistent with the node's copy of the index.
if n.byExpiryIndex != i {
return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i)
}
// Every node in m.byExpiry is in m.byName.
if _, ok := m.byName[n.details.Root]; !ok {
return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root)
}
// No node in m.byExpiry should be held.
if n.held {
return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root)
}
}
return nil
}
func TestParseTimeout(t *testing.T) {
testCases := []struct {
s string
want time.Duration
wantErr error
}{{
"",
infiniteTimeout,
nil,
}, {
"Infinite",
infiniteTimeout,
nil,
}, {
"Infinitesimal",
0,
errInvalidTimeout,
}, {
"infinite",
0,
errInvalidTimeout,
}, {
"Second-0",
0 * time.Second,
nil,
}, {
"Second-123",
123 * time.Second,
nil,
}, {
" Second-456 ",
456 * time.Second,
nil,
}, {
"Second-4100000000",
4100000000 * time.Second,
nil,
}, {
"junk",
0,
errInvalidTimeout,
}, {
"Second-",
0,
errInvalidTimeout,
}, {
"Second--1",
0,
errInvalidTimeout,
}, {
"Second--123",
0,
errInvalidTimeout,
}, {
"Second-+123",
0,
errInvalidTimeout,
}, {
"Second-0x123",
0,
errInvalidTimeout,
}, {
"second-123",
0,
errInvalidTimeout,
}, {
"Second-4294967295",
4294967295 * time.Second,
nil,
}, {
// Section 10.7 says that "The timeout value for TimeType "Second"
// must not be greater than 2^32-1."
"Second-4294967296",
0,
errInvalidTimeout,
}, {
// This test case comes from section 9.10.9 of the spec. It says,
//
// "In this request, the client has specified that it desires an
// infinite-length lock, if available, otherwise a timeout of 4.1
// billion seconds, if available."
//
// The Go WebDAV package always supports infinite length locks,
// and ignores the fallback after the comma.
"Infinite, Second-4100000000",
infiniteTimeout,
nil,
}}
for _, tc := range testCases {
got, gotErr := parseTimeout(tc.s)
if got != tc.want || gotErr != tc.wantErr {
t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr)
}
}
}

469
drives/davServer/prop.go Normal file
View File

@ -0,0 +1,469 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"mime"
"net/http"
"os"
"path/filepath"
"strconv"
)
// Proppatch describes a property update instruction as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
type Proppatch struct {
// Remove specifies whether this patch removes properties. If it does not
// remove them, it sets them.
Remove bool
// Props contains the properties to be set or removed.
Props []Property
}
// Propstat describes an XML propstat element as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
type Propstat struct {
// Props contains the properties for which Status applies.
Props []Property
// Status defines the HTTP status code of the properties in Props.
// Allowed values include, but are not limited to, the WebDAV status
// code extensions for HTTP/1.1.
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
Status int
// XMLError contains the XML representation of the optional error element.
// XML content within this field must not rely on any predefined
// namespace declarations or prefixes. If empty, the XML error element
// is omitted.
XMLError string
// ResponseDescription contains the contents of the optional
// responsedescription field. If empty, the XML element is omitted.
ResponseDescription string
}
// makePropstats returns a slice containing those of x and y whose Props slice
// is non-empty. If both are empty, it returns a slice containing an otherwise
// zero Propstat whose HTTP status code is 200 OK.
func makePropstats(x, y Propstat) []Propstat {
pstats := make([]Propstat, 0, 2)
if len(x.Props) != 0 {
pstats = append(pstats, x)
}
if len(y.Props) != 0 {
pstats = append(pstats, y)
}
if len(pstats) == 0 {
pstats = append(pstats, Propstat{
Status: http.StatusOK,
})
}
return pstats
}
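// Shape sketch (illustrative only): a PROPFIND that resolves one known and one
// unknown property typically produces two Propstat elements, one per status.
// The property names below are example values.
//
//	found := Propstat{
//		Status: http.StatusOK,
//		Props: []Property{{
//			XMLName:  xml.Name{Space: "DAV:", Local: "getcontentlength"},
//			InnerXML: []byte("9"),
//		}},
//	}
//	missing := Propstat{
//		Status: http.StatusNotFound,
//		Props:  []Property{{XMLName: xml.Name{Space: "foo", Local: "bar"}}},
//	}
//	pstats := makePropstats(found, missing) // len(pstats) == 2
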
// DeadPropsHolder holds the dead properties of a resource.
//
// Dead properties are those properties that are explicitly defined. In
// comparison, live properties, such as DAV:getcontentlength, are implicitly
// defined by the underlying resource, and cannot be explicitly overridden or
// removed. See the Terminology section of
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
//
// There is a whitelist of the names of live properties. This package handles
// all live properties, and will only pass non-whitelisted names to the Patch
// method of DeadPropsHolder implementations.
type DeadPropsHolder interface {
// DeadProps returns a copy of the dead properties held.
DeadProps() (map[xml.Name]Property, error)
// Patch patches the dead properties held.
//
// Patching is atomic; either all or no patches succeed. It returns (nil,
// non-nil) if an internal server error occurred, otherwise the Propstats
// collectively contain one Property for each proposed patch Property. If
// all patches succeed, Patch returns a slice of length one and a Propstat
// element with a 200 OK HTTP status code. If none succeed, for reasons
// other than an internal server error, no Propstat has status 200 OK.
//
// For more details on when various HTTP status codes apply, see
// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
Patch([]Proppatch) ([]Propstat, error)
}
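// Implementation sketch (illustrative only): a File backed by an external
// property store could satisfy DeadPropsHolder roughly as below. The
// deadPropsFile type and its props map are hypothetical and not part of this
// package.
//
//	type deadPropsFile struct {
//		File
//		props map[xml.Name]Property
//	}
//
//	func (f *deadPropsFile) DeadProps() (map[xml.Name]Property, error) {
//		out := make(map[xml.Name]Property, len(f.props))
//		for k, v := range f.props {
//			out[k] = v
//		}
//		return out, nil
//	}
//
//	func (f *deadPropsFile) Patch(patches []Proppatch) ([]Propstat, error) {
//		// All patches succeed, so report a single 200 OK Propstat that lists
//		// only the property names, per the contract above.
//		pstat := Propstat{Status: http.StatusOK}
//		for _, patch := range patches {
//			for _, p := range patch.Props {
//				if patch.Remove {
//					delete(f.props, p.XMLName)
//				} else {
//					f.props[p.XMLName] = p
//				}
//				pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
//			}
//		}
//		return []Propstat{pstat}, nil
//	}
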
// liveProps contains all supported, protected DAV: properties.
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
// dir is true if the property applies to directories.
dir bool
}{
{Space: "DAV:", Local: "resourcetype"}: {
findFn: findResourceType,
dir: true,
},
{Space: "DAV:", Local: "displayname"}: {
findFn: findDisplayName,
dir: true,
},
{Space: "DAV:", Local: "getcontentlength"}: {
findFn: findContentLength,
dir: false,
},
{Space: "DAV:", Local: "getlastmodified"}: {
findFn: findLastModified,
// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
// suggests that getlastmodified should only apply to GETable
// resources, and this package does not support GET on directories.
//
// Nonetheless, some WebDAV clients expect child directories to be
// sortable by getlastmodified date, so this value is true, not false.
// See golang.org/issue/15334.
dir: true,
},
{Space: "DAV:", Local: "creationdate"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontentlanguage"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontenttype"}: {
findFn: findContentType,
dir: false,
},
{Space: "DAV:", Local: "getetag"}: {
findFn: findETag,
// findETag implements ETag as the concatenated hex values of a file's
// modification time and size. This is not a reliable synchronization
// mechanism for directories, so we do not advertise getetag for DAV
// collections.
dir: false,
},
// TODO: The lockdiscovery property requires LockSystem to list the
// active locks on a resource.
{Space: "DAV:", Local: "lockdiscovery"}: {},
{Space: "DAV:", Local: "supportedlock"}: {
findFn: findSupportedLock,
dir: true,
},
}
// TODO(nigeltao) merge props and allprop?
// props returns the status of the properties named pnames for resource name.
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element.
func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pstatOK := Propstat{Status: http.StatusOK}
pstatNotFound := Propstat{Status: http.StatusNotFound}
for _, pn := range pnames {
// If this file has dead properties, check if they contain pn.
if dp, ok := deadProps[pn]; ok {
pstatOK.Props = append(pstatOK.Props, dp)
continue
}
// Otherwise, it must either be a live property or we don't know it.
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
if err != nil {
return nil, err
}
pstatOK.Props = append(pstatOK.Props, Property{
XMLName: pn,
InnerXML: []byte(innerXML),
})
} else {
pstatNotFound.Props = append(pstatNotFound.Props, Property{
XMLName: pn,
})
}
}
return makePropstats(pstatOK, pstatNotFound), nil
}
// propnames returns the property names defined for resource name.
func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
if dph, ok := f.(DeadPropsHolder); ok {
deadProps, err = dph.DeadProps()
if err != nil {
return nil, err
}
}
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
for pn, prop := range liveProps {
if prop.findFn != nil && (prop.dir || !isDir) {
pnames = append(pnames, pn)
}
}
for pn := range deadProps {
pnames = append(pnames, pn)
}
return pnames, nil
}
// allprop returns the properties defined for resource name and the properties
// named in include.
//
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
// within the RFC plus dead properties. Other live properties should only be
// returned if they are named in 'include'.
//
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, ls, name)
if err != nil {
return nil, err
}
// Add names from include if they are not already covered in pnames.
nameset := make(map[xml.Name]bool)
for _, pn := range pnames {
nameset[pn] = true
}
for _, pn := range include {
if !nameset[pn] {
pnames = append(pnames, pn)
}
}
return props(ctx, fs, ls, name, pnames)
}
// patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
conflict := false
loop:
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
conflict = true
break loop
}
}
}
if conflict {
pstatForbidden := Propstat{
Status: http.StatusForbidden,
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
}
pstatFailedDep := Propstat{
Status: StatusFailedDependency,
}
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
} else {
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
}
}
}
return makePropstats(pstatForbidden, pstatFailedDep), nil
}
f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
if err != nil {
return nil, err
}
defer f.Close()
if dph, ok := f.(DeadPropsHolder); ok {
ret, err := dph.Patch(patches)
if err != nil {
return nil, err
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
// "The contents of the prop XML element must only list the names of
// properties to which the result in the status element applies."
for _, pstat := range ret {
for i, p := range pstat.Props {
pstat.Props[i] = Property{XMLName: p.XMLName}
}
}
return ret, nil
}
// The file doesn't implement the optional DeadPropsHolder interface, so
// all patches are forbidden.
pstat := Propstat{Status: http.StatusForbidden}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
}
}
return []Propstat{pstat}, nil
}
func escapeXML(s string) string {
for i := 0; i < len(s); i++ {
// As an optimization, if s contains only ASCII letters, digits or a
// few special characters, the escaped value is s itself and we don't
// need to allocate a buffer and convert between string and []byte.
switch c := s[i]; {
case c == ' ' || c == '_' ||
('+' <= c && c <= '9') || // Digits as well as + , - . and /
('A' <= c && c <= 'Z') ||
('a' <= c && c <= 'z'):
continue
}
// Otherwise, go through the full escaping process.
var buf bytes.Buffer
xml.EscapeText(&buf, []byte(s))
return buf.String()
}
return s
}
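// Behaviour sketch (illustrative only):
//
//	escapeXML("lockentry") // "lockentry" (fast path, no allocation)
//	escapeXML(`a<b&"c"`)   // "a&lt;b&amp;&#34;c&#34;"
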
func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
return `<D:collection xmlns:D="DAV:"/>`, nil
}
return "", nil
}
func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if slashClean(name) == "/" {
// Hide the real name of a possibly prefixed root directory.
return "", nil
}
return escapeXML(fi.Name()), nil
}
func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return strconv.FormatInt(fi.Size(), 10), nil
}
func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return fi.ModTime().UTC().Format(http.TimeFormat), nil
}
// ErrNotImplemented should be returned by optional interfaces if they
// want the original implementation to be used.
var ErrNotImplemented = errors.New("not implemented")
// ContentTyper is an optional interface for the os.FileInfo
// objects returned by the FileSystem.
//
// If this interface is defined then it will be used to read the
// content type from the object.
//
// If this interface is not defined the file will be opened and the
// content type will be guessed from the initial contents of the file.
type ContentTyper interface {
// ContentType returns the content type for the file.
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ContentType(ctx context.Context) (string, error)
}
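// Implementation sketch (illustrative only): a FileSystem whose Stat results
// already know their MIME type could expose it through ContentTyper. The
// typedFileInfo wrapper is hypothetical.
//
//	type typedFileInfo struct {
//		os.FileInfo
//		ctype string
//	}
//
//	func (fi typedFileInfo) ContentType(ctx context.Context) (string, error) {
//		if fi.ctype == "" {
//			return "", ErrNotImplemented // fall back to extension/content sniffing
//		}
//		return fi.ctype, nil
//	}
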
func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ContentTyper); ok {
ctype, err := do.ContentType(ctx)
if err != ErrNotImplemented {
return ctype, err
}
}
f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
if err != nil {
return "", err
}
defer f.Close()
// This implementation is based on serveContent's code in the standard net/http package.
ctype := mime.TypeByExtension(filepath.Ext(name))
if ctype != "" {
return ctype, nil
}
// Read a chunk to decide between utf-8 text and binary.
var buf [512]byte
n, err := io.ReadFull(f, buf[:])
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return "", err
}
ctype = http.DetectContentType(buf[:n])
// Rewind file.
_, err = f.Seek(0, io.SeekStart)
return ctype, err
}
// ETager is an optional interface for the os.FileInfo objects
// returned by the FileSystem.
//
// If this interface is defined then it will be used to read the ETag
// for the object.
//
// If this interface is not defined an ETag will be computed using the
// ModTime() and the Size() methods of the os.FileInfo object.
type ETager interface {
// ETag returns an ETag for the file. This should be of the
// form "value" or W/"value"
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ETag(ctx context.Context) (string, error)
}
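// Implementation sketch (illustrative only): the same wrapper idea works when
// a backing store already tracks a revision identifier. The revisionedFileInfo
// type is hypothetical.
//
//	type revisionedFileInfo struct {
//		os.FileInfo
//		revision string
//	}
//
//	func (fi revisionedFileInfo) ETag(ctx context.Context) (string, error) {
//		if fi.revision == "" {
//			return "", ErrNotImplemented // fall back to the modtime/size ETag
//		}
//		return `"` + fi.revision + `"`, nil
//	}
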
func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
if do, ok := fi.(ETager); ok {
etag, err := do.ETag(ctx)
if err != ErrNotImplemented {
return etag, err
}
}
// The Apache http 2.4 web server by default concatenates the
// modification time and size of a file. We replicate the heuristic
// with nanosecond granularity.
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
}
func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
return `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`, nil
}

View File

@ -0,0 +1,716 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"context"
"encoding/xml"
"fmt"
"net/http"
"os"
"reflect"
"regexp"
"sort"
"testing"
)
func TestMemPS(t *testing.T) {
ctx := context.Background()
// calcProps calculates the getlastmodified and getetag DAV: property
// values in pstats for resource name in file-system fs.
calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error {
fi, err := fs.Stat(ctx, name)
if err != nil {
return err
}
for _, pst := range pstats {
for i, p := range pst.Props {
switch p.XMLName {
case xml.Name{Space: "DAV:", Local: "getlastmodified"}:
p.InnerXML = []byte(fi.ModTime().UTC().Format(http.TimeFormat))
pst.Props[i] = p
case xml.Name{Space: "DAV:", Local: "getetag"}:
if fi.IsDir() {
continue
}
etag, err := findETag(ctx, fs, ls, name, fi)
if err != nil {
return err
}
p.InnerXML = []byte(etag)
pst.Props[i] = p
}
}
}
return nil
}
const (
lockEntry = `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`
statForbiddenError = `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`
)
type propOp struct {
op string
name string
pnames []xml.Name
patches []Proppatch
wantPnames []xml.Name
wantPropstats []Propstat
}
testCases := []struct {
desc string
noDeadProps bool
buildfs []string
propOp []propOp
}{{
desc: "propname",
buildfs: []string{"mkdir /dir", "touch /file"},
propOp: []propOp{{
op: "propname",
name: "/dir",
wantPnames: []xml.Name{
{Space: "DAV:", Local: "resourcetype"},
{Space: "DAV:", Local: "displayname"},
{Space: "DAV:", Local: "supportedlock"},
{Space: "DAV:", Local: "getlastmodified"},
},
}, {
op: "propname",
name: "/file",
wantPnames: []xml.Name{
{Space: "DAV:", Local: "resourcetype"},
{Space: "DAV:", Local: "displayname"},
{Space: "DAV:", Local: "getcontentlength"},
{Space: "DAV:", Local: "getlastmodified"},
{Space: "DAV:", Local: "getcontenttype"},
{Space: "DAV:", Local: "getetag"},
{Space: "DAV:", Local: "supportedlock"},
},
}},
}, {
desc: "allprop dir and file",
buildfs: []string{"mkdir /dir", "write /file foobarbaz"},
propOp: []propOp{{
op: "allprop",
name: "/dir",
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
InnerXML: []byte("dir"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
InnerXML: nil, // Calculated during test.
}, {
XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
InnerXML: []byte(lockEntry),
}},
}},
}, {
op: "allprop",
name: "/file",
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
InnerXML: []byte(""),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
InnerXML: []byte("file"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
InnerXML: []byte("9"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
InnerXML: nil, // Calculated during test.
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
InnerXML: []byte("text/plain; charset=utf-8"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
InnerXML: nil, // Calculated during test.
}, {
XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
InnerXML: []byte(lockEntry),
}},
}},
}, {
op: "allprop",
name: "/file",
pnames: []xml.Name{
{Space: "DAV:", Local: "resourcetype"},
{Space: "foo", Local: "bar"},
},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
InnerXML: []byte(""),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
InnerXML: []byte("file"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"},
InnerXML: []byte("9"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"},
InnerXML: nil, // Calculated during test.
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"},
InnerXML: []byte("text/plain; charset=utf-8"),
}, {
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
InnerXML: nil, // Calculated during test.
}, {
XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"},
InnerXML: []byte(lockEntry),
}}}, {
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}}},
},
}},
}, {
desc: "propfind DAV:resourcetype",
buildfs: []string{"mkdir /dir", "touch /file"},
propOp: []propOp{{
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "DAV:", Local: "resourcetype"}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
InnerXML: []byte(`<D:collection xmlns:D="DAV:"/>`),
}},
}},
}, {
op: "propfind",
name: "/file",
pnames: []xml.Name{{Space: "DAV:", Local: "resourcetype"}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"},
InnerXML: []byte(""),
}},
}},
}},
}, {
desc: "propfind unsupported DAV properties",
buildfs: []string{"mkdir /dir"},
propOp: []propOp{{
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "DAV:", Local: "getcontentlanguage"}},
wantPropstats: []Propstat{{
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"},
}},
}},
}, {
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "DAV:", Local: "creationdate"}},
wantPropstats: []Propstat{{
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "creationdate"},
}},
}},
}},
}, {
desc: "propfind getetag for files but not for directories",
buildfs: []string{"mkdir /dir", "touch /file"},
propOp: []propOp{{
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "DAV:", Local: "getetag"}},
wantPropstats: []Propstat{{
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
}},
}},
}, {
op: "propfind",
name: "/file",
pnames: []xml.Name{{Space: "DAV:", Local: "getetag"}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
InnerXML: nil, // Calculated during test.
}},
}},
}},
}, {
desc: "proppatch property on no-dead-properties file system",
buildfs: []string{"mkdir /dir"},
noDeadProps: true,
propOp: []propOp{{
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusForbidden,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}, {
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusForbidden,
XMLError: statForbiddenError,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "getetag"},
}},
}},
}},
}, {
desc: "proppatch dead property",
buildfs: []string{"mkdir /dir"},
propOp: []propOp{{
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
InnerXML: []byte("baz"),
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}, {
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "foo", Local: "bar"}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
InnerXML: []byte("baz"),
}},
}},
}},
}, {
desc: "proppatch dead property with failed dependency",
buildfs: []string{"mkdir /dir"},
propOp: []propOp{{
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
InnerXML: []byte("baz"),
}},
}, {
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
InnerXML: []byte("xxx"),
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusForbidden,
XMLError: statForbiddenError,
Props: []Property{{
XMLName: xml.Name{Space: "DAV:", Local: "displayname"},
}},
}, {
Status: StatusFailedDependency,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}, {
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "foo", Local: "bar"}},
wantPropstats: []Propstat{{
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}},
}, {
desc: "proppatch remove dead property",
buildfs: []string{"mkdir /dir"},
propOp: []propOp{{
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
InnerXML: []byte("baz"),
}, {
XMLName: xml.Name{Space: "spam", Local: "ham"},
InnerXML: []byte("eggs"),
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}, {
XMLName: xml.Name{Space: "spam", Local: "ham"},
}},
}},
}, {
op: "propfind",
name: "/dir",
pnames: []xml.Name{
{Space: "foo", Local: "bar"},
{Space: "spam", Local: "ham"},
},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
InnerXML: []byte("baz"),
}, {
XMLName: xml.Name{Space: "spam", Local: "ham"},
InnerXML: []byte("eggs"),
}},
}},
}, {
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Remove: true,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}, {
op: "propfind",
name: "/dir",
pnames: []xml.Name{
{Space: "foo", Local: "bar"},
{Space: "spam", Local: "ham"},
},
wantPropstats: []Propstat{{
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}, {
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "spam", Local: "ham"},
InnerXML: []byte("eggs"),
}},
}},
}},
}, {
desc: "propname with dead property",
buildfs: []string{"touch /file"},
propOp: []propOp{{
op: "proppatch",
name: "/file",
patches: []Proppatch{{
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
InnerXML: []byte("baz"),
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}, {
op: "propname",
name: "/file",
wantPnames: []xml.Name{
{Space: "DAV:", Local: "resourcetype"},
{Space: "DAV:", Local: "displayname"},
{Space: "DAV:", Local: "getcontentlength"},
{Space: "DAV:", Local: "getlastmodified"},
{Space: "DAV:", Local: "getcontenttype"},
{Space: "DAV:", Local: "getetag"},
{Space: "DAV:", Local: "supportedlock"},
{Space: "foo", Local: "bar"},
},
}},
}, {
desc: "proppatch remove unknown dead property",
buildfs: []string{"mkdir /dir"},
propOp: []propOp{{
op: "proppatch",
name: "/dir",
patches: []Proppatch{{
Remove: true,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
wantPropstats: []Propstat{{
Status: http.StatusOK,
Props: []Property{{
XMLName: xml.Name{Space: "foo", Local: "bar"},
}},
}},
}},
}, {
desc: "bad: propfind unknown property",
buildfs: []string{"mkdir /dir"},
propOp: []propOp{{
op: "propfind",
name: "/dir",
pnames: []xml.Name{{Space: "foo:", Local: "bar"}},
wantPropstats: []Propstat{{
Status: http.StatusNotFound,
Props: []Property{{
XMLName: xml.Name{Space: "foo:", Local: "bar"},
}},
}},
}},
}}
for _, tc := range testCases {
fs, err := buildTestFS(tc.buildfs)
if err != nil {
t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err)
}
if tc.noDeadProps {
fs = noDeadPropsFS{fs}
}
ls := NewMemLS()
for _, op := range tc.propOp {
desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name)
if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil {
t.Fatalf("%s: calcProps: %v", desc, err)
}
// Call property system.
var propstats []Propstat
switch op.op {
case "propname":
pnames, err := propnames(ctx, fs, ls, op.name)
if err != nil {
t.Errorf("%s: got error %v, want nil", desc, err)
continue
}
sort.Sort(byXMLName(pnames))
sort.Sort(byXMLName(op.wantPnames))
if !reflect.DeepEqual(pnames, op.wantPnames) {
t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames)
}
continue
case "allprop":
propstats, err = allprop(ctx, fs, ls, op.name, op.pnames)
case "propfind":
propstats, err = props(ctx, fs, ls, op.name, op.pnames)
case "proppatch":
propstats, err = patch(ctx, fs, ls, op.name, op.patches)
default:
t.Fatalf("%s: %s not implemented", desc, op.op)
}
if err != nil {
t.Errorf("%s: got error %v, want nil", desc, err)
continue
}
// Compare return values from allprop, propfind or proppatch.
for _, pst := range propstats {
sort.Sort(byPropname(pst.Props))
}
for _, pst := range op.wantPropstats {
sort.Sort(byPropname(pst.Props))
}
sort.Sort(byStatus(propstats))
sort.Sort(byStatus(op.wantPropstats))
if !reflect.DeepEqual(propstats, op.wantPropstats) {
t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats)
}
}
}
}
func cmpXMLName(a, b xml.Name) bool {
if a.Space != b.Space {
return a.Space < b.Space
}
return a.Local < b.Local
}
type byXMLName []xml.Name
func (b byXMLName) Len() int { return len(b) }
func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) }
type byPropname []Property
func (b byPropname) Len() int { return len(b) }
func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) }
type byStatus []Propstat
func (b byStatus) Len() int { return len(b) }
func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status }
type noDeadPropsFS struct {
FileSystem
}
func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm)
if err != nil {
return nil, err
}
return noDeadPropsFile{f}, nil
}
// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods
// provided by the underlying File implementation.
type noDeadPropsFile struct {
f File
}
func (f noDeadPropsFile) Close() error { return f.f.Close() }
func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) }
func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) }
func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) }
func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() }
func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) }
type overrideContentType struct {
os.FileInfo
contentType string
err error
}
func (o *overrideContentType) ContentType(ctx context.Context) (string, error) {
return o.contentType, o.err
}
func TestFindContentTypeOverride(t *testing.T) {
fs, err := buildTestFS([]string{"touch /file"})
if err != nil {
t.Fatalf("cannot create test filesystem: %v", err)
}
ctx := context.Background()
fi, err := fs.Stat(ctx, "/file")
if err != nil {
t.Fatalf("cannot Stat /file: %v", err)
}
// Check non overridden case
originalContentType, err := findContentType(ctx, fs, nil, "/file", fi)
if err != nil {
t.Fatalf("findContentType /file failed: %v", err)
}
if originalContentType != "text/plain; charset=utf-8" {
t.Fatalf("ContentType wrong want %q got %q", "text/plain; charset=utf-8", originalContentType)
}
// Now try overriding the ContentType
o := &overrideContentType{fi, "OverriddenContentType", nil}
ContentType, err := findContentType(ctx, fs, nil, "/file", o)
if err != nil {
t.Fatalf("findContentType /file failed: %v", err)
}
if ContentType != o.contentType {
t.Fatalf("ContentType wrong want %q got %q", o.contentType, ContentType)
}
// Now return ErrNotImplemented and check we get the original content type
o = &overrideContentType{fi, "OverriddenContentType", ErrNotImplemented}
ContentType, err = findContentType(ctx, fs, nil, "/file", o)
if err != nil {
t.Fatalf("findContentType /file failed: %v", err)
}
if ContentType != originalContentType {
t.Fatalf("ContentType wrong want %q got %q", originalContentType, ContentType)
}
}
type overrideETag struct {
os.FileInfo
eTag string
err error
}
func (o *overrideETag) ETag(ctx context.Context) (string, error) {
return o.eTag, o.err
}
func TestFindETagOverride(t *testing.T) {
fs, err := buildTestFS([]string{"touch /file"})
if err != nil {
t.Fatalf("cannot create test filesystem: %v", err)
}
ctx := context.Background()
fi, err := fs.Stat(ctx, "/file")
if err != nil {
t.Fatalf("cannot Stat /file: %v", err)
}
// Check non overridden case
originalETag, err := findETag(ctx, fs, nil, "/file", fi)
if err != nil {
t.Fatalf("findETag /file failed: %v", err)
}
matchETag := regexp.MustCompile(`^"-?[0-9a-f]{6,}"$`)
if !matchETag.MatchString(originalETag) {
t.Fatalf("ETag wrong, wanted something matching %v got %q", matchETag, originalETag)
}
// Now try overriding the ETag
o := &overrideETag{fi, `"OverriddenETag"`, nil}
ETag, err := findETag(ctx, fs, nil, "/file", o)
if err != nil {
t.Fatalf("findETag /file failed: %v", err)
}
if ETag != o.eTag {
t.Fatalf("ETag wrong want %q got %q", o.eTag, ETag)
}
// Now return ErrNotImplemented and check we get the original Etag
o = &overrideETag{fi, `"OverriddenETag"`, ErrNotImplemented}
ETag, err = findETag(ctx, fs, nil, "/file", o)
if err != nil {
t.Fatalf("findETag /file failed: %v", err)
}
if ETag != originalETag {
t.Fatalf("ETag wrong want %q got %q", originalETag, ETag)
}
}

754
drives/davServer/webdav.go Normal file
View File

@ -0,0 +1,754 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package davServer provides a WebDAV server implementation.
package davServer
import (
"errors"
"fmt"
"github.com/sirupsen/logrus"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
)
type Handler struct {
// Prefix is the URL path prefix to strip from WebDAV resource paths.
Prefix string
// FileSystem is the virtual file system.
FileSystem FileSystem
// LockSystem is the lock management system.
LockSystem LockSystem
// Logger is an optional error logger. If non-nil, it will be called
// for all HTTP requests.
Logger func(*http.Request, error)
}
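// Wiring sketch (illustrative only): mounting the handler under a URL prefix.
// The myFS value is a placeholder for any implementation of this package's
// FileSystem interface.
//
//	var myFS FileSystem // provided by the caller
//	dav := &Handler{
//		Prefix:     "/dav",
//		FileSystem: myFS,
//		LockSystem: NewMemLS(),
//		Logger: func(r *http.Request, err error) {
//			if err != nil {
//				logrus.Errorf("%v %v: %v", r.Method, r.URL.Path, err)
//			}
//		},
//	}
//	http.Handle("/dav/", dav)
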
func (h *Handler) stripPrefix(p string) (string, int, error) {
if h.Prefix == "" {
return p, http.StatusOK, nil
}
if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
return r, http.StatusOK, nil
}
return p, http.StatusNotFound, errPrefixMismatch
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
status, err := http.StatusBadRequest, errUnsupportedMethod
if h.FileSystem == nil {
status, err = http.StatusInternalServerError, errNoFileSystem
} else if h.LockSystem == nil {
status, err = http.StatusInternalServerError, errNoLockSystem
} else {
switch r.Method {
case "OPTIONS":
status, err = h.handleOptions(w, r)
case "GET", "HEAD", "POST":
status, err = h.handleGetHeadPost(w, r)
case "DELETE":
status, err = h.handleDelete(w, r)
case "PUT":
status, err = h.handlePut(w, r)
case "MKCOL":
status, err = h.handleMkcol(w, r)
case "COPY", "MOVE":
status, err = h.handleCopyMove(w, r)
case "LOCK":
status, err = h.handleLock(w, r)
case "UNLOCK":
status, err = h.handleUnlock(w, r)
case "PROPFIND":
status, err = h.handlePropfind(w, r)
case "PROPPATCH":
status, err = h.handleProppatch(w, r)
}
}
if status != 0 {
w.WriteHeader(status)
if status != http.StatusNoContent {
w.Write([]byte(StatusText(status)))
}
}
if h.Logger != nil {
h.Logger(r, err)
}
}
func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
token, err = h.LockSystem.Create(now, LockDetails{
Root: root,
Duration: infiniteTimeout,
ZeroDepth: true,
})
if err != nil {
if err == ErrLocked {
return "", StatusLocked, err
}
return "", http.StatusInternalServerError, err
}
return token, 0, nil
}
func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
hdr := r.Header.Get("If")
if hdr == "" {
// An empty If header means that the client hasn't previously created locks.
// Even if this client doesn't care about locks, we still need to check that
// the resources aren't locked by another client, so we create temporary
// locks that would conflict with another client's locks. These temporary
// locks are unlocked at the end of the HTTP request.
now, srcToken, dstToken := time.Now(), "", ""
if src != "" {
srcToken, status, err = h.lock(now, src)
if err != nil {
return nil, status, err
}
}
if dst != "" {
dstToken, status, err = h.lock(now, dst)
if err != nil {
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
return nil, status, err
}
}
return func() {
if dstToken != "" {
h.LockSystem.Unlock(now, dstToken)
}
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
}, 0, nil
}
ih, ok := parseIfHeader(hdr)
if !ok {
return nil, http.StatusBadRequest, errInvalidIfHeader
}
// ih is a disjunction (OR) of ifLists, so any ifList will do.
for _, l := range ih.lists {
lsrc := l.resourceTag
if lsrc == "" {
lsrc = src
} else {
u, err := url.Parse(lsrc)
if err != nil {
continue
}
if u.Host != r.Host {
continue
}
lsrc, status, err = h.stripPrefix(u.Path)
if err != nil {
return nil, status, err
}
}
release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
if err == ErrConfirmationFailed {
continue
}
if err != nil {
return nil, http.StatusInternalServerError, err
}
return release, 0, nil
}
// Section 10.4.1 says that "If this header is evaluated and all state lists
// fail, then the request must fail with a 412 (Precondition Failed) status."
// We follow the spec even though the cond_put_corrupt_token test case from
// the litmus test warns on seeing a 412 instead of a 423 (Locked).
return nil, http.StatusPreconditionFailed, ErrLocked
}
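As the comments above describe, a request with no If header gets short-lived internal locks, while a client that already holds a lock must present its token so Confirm can match it. A client-side sketch of that second path, mirroring the If header built in the tests later in this commit; the URL is a placeholder and only net/http, strings and fmt are assumed.

// Illustrative client-side sketch: write to a locked resource by presenting
// the lock token. lockToken is the raw Lock-Token response header value from
// LOCK, angle brackets included.
func putWithLock(url, lockToken, body string) error {
	req, err := http.NewRequest("PUT", url, strings.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("If", fmt.Sprintf("<%s> (%s)", url, lockToken))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status %s", resp.Status)
	}
	return nil
}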
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := r.Context()
allow := "OPTIONS, LOCK, PUT, MKCOL"
if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
if fi.IsDir() {
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
} else {
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
}
}
w.Header().Set("Allow", allow)
// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
w.Header().Set("DAV", "1, 2")
// http://msdn.microsoft.com/en-au/library/cc250217.aspx
w.Header().Set("MS-Author-Via", "DAV")
return 0, nil
}
func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
// TODO: check locks for read-only access??
ctx := r.Context()
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
if err != nil {
return http.StatusNotFound, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return http.StatusNotFound, err
}
if fi.IsDir() {
return http.StatusMethodNotAllowed, nil
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
// Let ServeContent determine the Content-Type header.
http.ServeContent(w, r, reqPath, fi.ModTime(), f)
return 0, nil
}
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
// TODO: return MultiStatus where appropriate.
// "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
// returns nil (no error)." WebDAV semantics are that it should return a
// "404 Not Found". We therefore have to Stat before we RemoveAll.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
return http.StatusMethodNotAllowed, err
}
return http.StatusNoContent, nil
}
func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
// comments in http.checkEtag.
ctx := r.Context()
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return http.StatusNotFound, err
}
_, copyErr := io.Copy(f, r.Body)
fi, statErr := f.Stat()
modTimes := r.Header["Zrok-Modtime"]
if len(modTimes) > 0 {
if modTimeV, err := strconv.ParseInt(modTimes[0], 10, 64); err == nil {
if v, ok := f.(*webdavFile); ok {
if err := v.updateModtime(reqPath, time.Unix(modTimeV, 0)); err != nil {
logrus.Warn(err)
}
} else {
logrus.Error("!ok")
}
} else {
logrus.Error(err)
}
}
closeErr := f.Close()
// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
if copyErr != nil {
return http.StatusMethodNotAllowed, copyErr
}
if statErr != nil {
return http.StatusMethodNotAllowed, statErr
}
if closeErr != nil {
return http.StatusMethodNotAllowed, closeErr
}
etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
return http.StatusCreated, nil
}
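Beyond the stock PUT handling, this handler honors a zrok-specific Zrok-Modtime header: a Unix-seconds value applied to the stored file's modification time via updateModtime (webdavFile only). A hypothetical client-side sketch that preserves a local file's mtime on upload; the target URL is a placeholder and net/http, os and strconv are assumed.

// Illustrative sketch: send the local mtime as Unix seconds in Zrok-Modtime,
// matching the ParseInt/time.Unix handling above.
func uploadPreservingModtime(url, localPath string) error {
	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	req, err := http.NewRequest("PUT", url, f)
	if err != nil {
		return err
	}
	req.Header.Set("Zrok-Modtime", strconv.FormatInt(info.ModTime().Unix(), 10))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}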
func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
if r.ContentLength > 0 {
return http.StatusUnsupportedMediaType, nil
}
if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
if os.IsNotExist(err) {
return http.StatusConflict, err
}
return http.StatusMethodNotAllowed, err
}
return http.StatusCreated, nil
}
func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
hdr := r.Header.Get("Destination")
if hdr == "" {
return http.StatusBadRequest, errInvalidDestination
}
u, err := url.Parse(hdr)
if err != nil {
return http.StatusBadRequest, errInvalidDestination
}
if u.Host != "" && u.Host != r.Host {
return http.StatusBadGateway, errInvalidDestination
}
src, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
dst, status, err := h.stripPrefix(u.Path)
if err != nil {
return status, err
}
if dst == "" {
return http.StatusBadGateway, errInvalidDestination
}
if dst == src {
return http.StatusForbidden, errDestinationEqualsSource
}
ctx := r.Context()
if r.Method == "COPY" {
// Section 7.5.1 says that a COPY only needs to lock the destination,
// not both destination and source. Strictly speaking, this is racy,
// even though a COPY doesn't modify the source, if a concurrent
// operation modifies the source. However, the litmus test explicitly
// checks that COPYing a locked-by-another source is OK.
release, status, err := h.confirmLocks(r, "", dst)
if err != nil {
return status, err
}
defer release()
// Section 9.8.3 says that "The COPY method on a collection without a Depth
// header must act as if a Depth header with value "infinity" was included".
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.8.3 says that "A client may submit a Depth header on a
// COPY on a collection with a value of "0" or "infinity"."
return http.StatusBadRequest, errInvalidDepth
}
}
return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
}
release, status, err := h.confirmLocks(r, src, dst)
if err != nil {
return status, err
}
defer release()
// Section 9.9.2 says that "The MOVE method on a collection must act as if
// a "Depth: infinity" header was used on it. A client must not submit a
// Depth header on a MOVE on a collection with any value but "infinity"."
if hdr := r.Header.Get("Depth"); hdr != "" {
if parseDepth(hdr) != infiniteDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
}
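COPY and MOVE take their target from the Destination header, honor Overwrite ("T"/"F") and constrain Depth as noted above. A client-side sketch of a server-side copy; the URLs are placeholders and only net/http and fmt are assumed.

// Illustrative sketch: copy a resource server-side with the COPY method.
func webdavCopy(srcURL, dstURL string, overwrite bool) error {
	req, err := http.NewRequest("COPY", srcURL, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Destination", dstURL)
	if overwrite {
		req.Header.Set("Overwrite", "T")
	} else {
		req.Header.Set("Overwrite", "F")
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("COPY failed: %s", resp.Status)
	}
	return nil
}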
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
duration, err := parseTimeout(r.Header.Get("Timeout"))
if err != nil {
return http.StatusBadRequest, err
}
li, status, err := readLockInfo(r.Body)
if err != nil {
return status, err
}
ctx := r.Context()
token, ld, now, created := "", LockDetails{}, time.Now(), false
if li == (lockInfo{}) {
// An empty lockInfo means to refresh the lock.
ih, ok := parseIfHeader(r.Header.Get("If"))
if !ok {
return http.StatusBadRequest, errInvalidIfHeader
}
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
token = ih.lists[0].conditions[0].Token
}
if token == "" {
return http.StatusBadRequest, errInvalidLockToken
}
ld, err = h.LockSystem.Refresh(now, token, duration)
if err != nil {
if err == ErrNoSuchLock {
return http.StatusPreconditionFailed, err
}
return http.StatusInternalServerError, err
}
} else {
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
// then the request MUST act as if a "Depth:infinity" had been submitted."
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.10.3 says that "Values other than 0 or infinity must not be
// used with the Depth header on a LOCK method".
return http.StatusBadRequest, errInvalidDepth
}
}
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ld = LockDetails{
Root: reqPath,
Duration: duration,
OwnerXML: li.Owner.InnerXML,
ZeroDepth: depth == 0,
}
token, err = h.LockSystem.Create(now, ld)
if err != nil {
if err == ErrLocked {
return StatusLocked, err
}
return http.StatusInternalServerError, err
}
defer func() {
if retErr != nil {
h.LockSystem.Unlock(now, token)
}
}()
// Create the resource if it didn't previously exist.
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
// TODO: detect missing intermediate dirs and return http.StatusConflict?
return http.StatusInternalServerError, err
}
f.Close()
created = true
}
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We add angle brackets.
w.Header().Set("Lock-Token", "<"+token+">")
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
if created {
// This is "w.WriteHeader(http.StatusCreated)" and not "return
// http.StatusCreated, nil" because we write our own (XML) response to w
// and Handler.ServeHTTP would otherwise write "Created".
w.WriteHeader(http.StatusCreated)
}
writeLockInfo(w, token, ld)
return 0, nil
}
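A LOCK with an empty body is treated as a refresh: the token comes from the If header and the new duration from the Timeout header. A hypothetical client-side refresh, assuming the RFC 4918 Second-N timeout form (the same form writeLockInfo emits) and only net/http and fmt.

// Illustrative sketch: refresh an existing lock. lockToken is the Lock-Token
// header value from the original LOCK, angle brackets included.
func refreshLock(url, lockToken string) error {
	req, err := http.NewRequest("LOCK", url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("If", fmt.Sprintf("(%s)", lockToken))
	req.Header.Set("Timeout", "Second-600")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}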
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We strip its angle brackets.
t := r.Header.Get("Lock-Token")
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
return http.StatusBadRequest, errInvalidLockToken
}
t = t[1 : len(t)-1]
switch err = h.LockSystem.Unlock(time.Now(), t); err {
case nil:
return http.StatusNoContent, err
case ErrForbidden:
return http.StatusForbidden, err
case ErrLocked:
return StatusLocked, err
case ErrNoSuchLock:
return http.StatusConflict, err
default:
return http.StatusInternalServerError, err
}
}
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := r.Context()
fi, err := h.FileSystem.Stat(ctx, reqPath)
if err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth == invalidDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
pf, status, err := readPropfind(r.Body)
if err != nil {
return status, err
}
mw := multistatusWriter{w: w}
walkFn := func(reqPath string, info os.FileInfo, err error) error {
if err != nil {
return handlePropfindError(err, info)
}
var pstats []Propstat
if pf.Propname != nil {
pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
if err != nil {
return handlePropfindError(err, info)
}
pstat := Propstat{Status: http.StatusOK}
for _, xmlname := range pnames {
pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
}
pstats = append(pstats, pstat)
} else if pf.Allprop != nil {
pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
} else {
pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
}
if err != nil {
return handlePropfindError(err, info)
}
href := path.Join(h.Prefix, reqPath)
if href != "/" && info.IsDir() {
href += "/"
}
return mw.write(makePropstatResponse(href, pstats))
}
walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
closeErr := mw.close()
if walkErr != nil {
return http.StatusInternalServerError, walkErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
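handlePropfind walks the tree to the requested Depth and streams one response element per visited file through the multistatus writer; an empty request body is read as allprop (see readPropfind in xml.go). A client-side sketch that lists a collection one level deep; the URL is a placeholder and net/http plus io are assumed.

// Illustrative sketch: PROPFIND with Depth: 1 and an empty body (allprop)
// returns 207 Multi-Status with an XML multistatus body.
func listCollection(url string) (string, error) {
	req, err := http.NewRequest("PROPFIND", url, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Depth", "1")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}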
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "")
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
if os.IsNotExist(err) {
return http.StatusNotFound, err
}
return http.StatusMethodNotAllowed, err
}
patches, status, err := readProppatch(r.Body)
if err != nil {
return status, err
}
pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
if err != nil {
return http.StatusInternalServerError, err
}
mw := multistatusWriter{w: w}
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
closeErr := mw.close()
if writeErr != nil {
return http.StatusInternalServerError, writeErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func makePropstatResponse(href string, pstats []Propstat) *response {
resp := response{
Href: []string{(&url.URL{Path: href}).EscapedPath()},
Propstat: make([]propstat, 0, len(pstats)),
}
for _, p := range pstats {
var xmlErr *xmlError
if p.XMLError != "" {
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
}
resp.Propstat = append(resp.Propstat, propstat{
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
Prop: p.Props,
ResponseDescription: p.ResponseDescription,
Error: xmlErr,
})
}
return &resp
}
func handlePropfindError(err error, info os.FileInfo) error {
var skipResp error = nil
if info != nil && info.IsDir() {
skipResp = filepath.SkipDir
}
if errors.Is(err, os.ErrPermission) {
// If the server cannot recurse into a directory because it is not allowed,
// then there is nothing more to say about it. Just skip sending anything.
return skipResp
}
if _, ok := err.(*os.PathError); ok {
// If the file is just bad, it couldn't be a proper WebDAV resource. Skip it.
return skipResp
}
// We need to be careful with other errors: there is no way to abort the xml stream
// part way through while returning a valid PROPFIND response. Returning only half
// the data would be misleading, but so would be returning results tainted by errors.
// The current behaviour by returning an error here leads to the stream being aborted,
// and the parent http server complaining about writing a spurious header. We should
// consider further enhancing this error handling to more gracefully fail, or perhaps
// buffer the entire response until we've walked the tree.
return err
}
const (
infiniteDepth = -1
invalidDepth = -2
)
// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
// infiniteDepth. Parsing any other string returns invalidDepth.
//
// Different WebDAV methods have further constraints on valid depths:
// - PROPFIND has no further restrictions, as per section 9.1.
// - COPY accepts only "0" or "infinity", as per section 9.8.3.
// - MOVE accepts only "infinity", as per section 9.9.2.
// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
//
// These constraints are enforced by the handleXxx methods.
func parseDepth(s string) int {
switch s {
case "0":
return 0
case "1":
return 1
case "infinity":
return infiniteDepth
}
return invalidDepth
}
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
const (
StatusMulti = 207
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusInsufficientStorage = 507
)
func StatusText(code int) string {
switch code {
case StatusMulti:
return "Multi-Status"
case StatusUnprocessableEntity:
return "Unprocessable Entity"
case StatusLocked:
return "Locked"
case StatusFailedDependency:
return "Failed Dependency"
case StatusInsufficientStorage:
return "Insufficient Storage"
}
return http.StatusText(code)
}
var (
errDestinationEqualsSource = errors.New("webdav: destination equals source")
errDirectoryNotEmpty = errors.New("webdav: directory not empty")
errInvalidDepth = errors.New("webdav: invalid depth")
errInvalidDestination = errors.New("webdav: invalid destination")
errInvalidIfHeader = errors.New("webdav: invalid If header")
errInvalidLockInfo = errors.New("webdav: invalid lock info")
errInvalidLockToken = errors.New("webdav: invalid lock token")
errInvalidPropfind = errors.New("webdav: invalid propfind")
errInvalidProppatch = errors.New("webdav: invalid proppatch")
errInvalidResponse = errors.New("webdav: invalid response")
errInvalidTimeout = errors.New("webdav: invalid timeout")
errNoFileSystem = errors.New("webdav: no file system")
errNoLockSystem = errors.New("webdav: no lock system")
errNotADirectory = errors.New("webdav: not a directory")
errPrefixMismatch = errors.New("webdav: prefix mismatch")
errRecursionTooDeep = errors.New("webdav: recursion too deep")
errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
errUnsupportedMethod = errors.New("webdav: unsupported method")
)

View File

@ -0,0 +1,349 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"regexp"
"sort"
"strings"
"testing"
)
// TODO: add tests to check XML responses with the expected prefix path
func TestPrefix(t *testing.T) {
const dst, blah = "Destination", "blah blah blah"
// createLockBody comes from the example in Section 9.10.7.
const createLockBody = `<?xml version="1.0" encoding="utf-8" ?>
<D:lockinfo xmlns:D='DAV:'>
<D:lockscope><D:exclusive/></D:lockscope>
<D:locktype><D:write/></D:locktype>
<D:owner>
<D:href>http://example.org/~ejw/contact.html</D:href>
</D:owner>
</D:lockinfo>
`
do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) {
var bodyReader io.Reader
if body != "" {
bodyReader = strings.NewReader(body)
}
req, err := http.NewRequest(method, urlStr, bodyReader)
if err != nil {
return nil, err
}
for len(headers) >= 2 {
req.Header.Add(headers[0], headers[1])
headers = headers[2:]
}
res, err := http.DefaultTransport.RoundTrip(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
if res.StatusCode != wantStatusCode {
return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode)
}
return res.Header, nil
}
prefixes := []string{
"/",
"/a/",
"/a/b/",
"/a/b/c/",
}
ctx := context.Background()
for _, prefix := range prefixes {
fs := NewMemFS()
h := &Handler{
FileSystem: fs,
LockSystem: NewMemLS(),
}
mux := http.NewServeMux()
if prefix != "/" {
h.Prefix = prefix
}
mux.Handle(prefix, h)
srv := httptest.NewServer(mux)
defer srv.Close()
// The script is:
// MKCOL /a
// MKCOL /a/b
// PUT /a/b/c
// COPY /a/b/c /a/b/d
// MKCOL /a/b/e
// MOVE /a/b/d /a/b/e/f
// LOCK /a/b/e/g
// PUT /a/b/e/g
// which should yield the (possibly stripped) filenames /a/b/c,
// /a/b/e/f and /a/b/e/g, plus their parent directories.
wantA := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusMovedPermanently,
"/a/b/": http.StatusNotFound,
"/a/b/c/": http.StatusNotFound,
}[prefix]
if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil {
t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err)
continue
}
wantB := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusMovedPermanently,
"/a/b/c/": http.StatusNotFound,
}[prefix]
if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil {
t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err)
continue
}
wantC := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusCreated,
"/a/b/c/": http.StatusMovedPermanently,
}[prefix]
if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil {
t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err)
continue
}
wantD := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusCreated,
"/a/b/c/": http.StatusMovedPermanently,
}[prefix]
if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil {
t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err)
continue
}
wantE := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusCreated,
"/a/b/c/": http.StatusNotFound,
}[prefix]
if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil {
t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err)
continue
}
wantF := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusCreated,
"/a/b/c/": http.StatusNotFound,
}[prefix]
if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil {
t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err)
continue
}
var lockToken string
wantG := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusCreated,
"/a/b/c/": http.StatusNotFound,
}[prefix]
if h, err := do("LOCK", srv.URL+"/a/b/e/g", createLockBody, wantG); err != nil {
t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err)
continue
} else {
lockToken = h.Get("Lock-Token")
}
ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken)
wantH := map[string]int{
"/": http.StatusCreated,
"/a/": http.StatusCreated,
"/a/b/": http.StatusCreated,
"/a/b/c/": http.StatusNotFound,
}[prefix]
if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil {
t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err)
continue
}
got, err := find(ctx, nil, fs, "/")
if err != nil {
t.Errorf("prefix=%-9q find: %v", prefix, err)
continue
}
sort.Strings(got)
want := map[string][]string{
"/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"},
"/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"},
"/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"},
"/a/b/c/": {"/"},
}[prefix]
if !reflect.DeepEqual(got, want) {
t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want)
continue
}
}
}
func TestEscapeXML(t *testing.T) {
// These test cases aren't exhaustive, and there is more than one way to
// escape e.g. a quot (as "&#34;" or "&quot;") or an apos. We presume that
// the encoding/xml package tests xml.EscapeText more thoroughly. This test
// here is just a sanity check for this package's escapeXML function, and
// its attempt to provide a fast path (and avoid a bytes.Buffer allocation)
// when escaping filenames is obviously a no-op.
testCases := map[string]string{
"": "",
" ": " ",
"&": "&amp;",
"*": "*",
"+": "+",
",": ",",
"-": "-",
".": ".",
"/": "/",
"0": "0",
"9": "9",
":": ":",
"<": "&lt;",
">": "&gt;",
"A": "A",
"_": "_",
"a": "a",
"~": "~",
"\u0201": "\u0201",
"&amp;": "&amp;amp;",
"foo&<b/ar>baz": "foo&amp;&lt;b/ar&gt;baz",
}
for in, want := range testCases {
if got := escapeXML(in); got != want {
t.Errorf("in=%q: got %q, want %q", in, got, want)
}
}
}
func TestFilenameEscape(t *testing.T) {
hrefRe := regexp.MustCompile(`<D:href>([^<]*)</D:href>`)
displayNameRe := regexp.MustCompile(`<D:displayname>([^<]*)</D:displayname>`)
do := func(method, urlStr string) (string, string, error) {
req, err := http.NewRequest(method, urlStr, nil)
if err != nil {
return "", "", err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
b, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
hrefMatch := hrefRe.FindStringSubmatch(string(b))
if len(hrefMatch) != 2 {
return "", "", errors.New("D:href not found")
}
displayNameMatch := displayNameRe.FindStringSubmatch(string(b))
if len(displayNameMatch) != 2 {
return "", "", errors.New("D:displayname not found")
}
return hrefMatch[1], displayNameMatch[1], nil
}
testCases := []struct {
name, wantHref, wantDisplayName string
}{{
name: `/foo%bar`,
wantHref: `/foo%25bar`,
wantDisplayName: `foo%bar`,
}, {
name: `/こんにちわ世界`,
wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`,
wantDisplayName: `こんにちわ世界`,
}, {
name: `/Program Files/`,
wantHref: `/Program%20Files/`,
wantDisplayName: `Program Files`,
}, {
name: `/go+lang`,
wantHref: `/go+lang`,
wantDisplayName: `go+lang`,
}, {
name: `/go&lang`,
wantHref: `/go&amp;lang`,
wantDisplayName: `go&amp;lang`,
}, {
name: `/go<lang`,
wantHref: `/go%3Clang`,
wantDisplayName: `go&lt;lang`,
}, {
name: `/`,
wantHref: `/`,
wantDisplayName: ``,
}}
ctx := context.Background()
fs := NewMemFS()
for _, tc := range testCases {
if tc.name != "/" {
if strings.HasSuffix(tc.name, "/") {
if err := fs.Mkdir(ctx, tc.name, 0755); err != nil {
t.Fatalf("name=%q: Mkdir: %v", tc.name, err)
}
} else {
f, err := fs.OpenFile(ctx, tc.name, os.O_CREATE, 0644)
if err != nil {
t.Fatalf("name=%q: OpenFile: %v", tc.name, err)
}
f.Close()
}
}
}
srv := httptest.NewServer(&Handler{
FileSystem: fs,
LockSystem: NewMemLS(),
})
defer srv.Close()
u, err := url.Parse(srv.URL)
if err != nil {
t.Fatal(err)
}
for _, tc := range testCases {
u.Path = tc.name
gotHref, gotDisplayName, err := do("PROPFIND", u.String())
if err != nil {
t.Errorf("name=%q: PROPFIND: %v", tc.name, err)
continue
}
if gotHref != tc.wantHref {
t.Errorf("name=%q: got href %q, want %q", tc.name, gotHref, tc.wantHref)
}
if gotDisplayName != tc.wantDisplayName {
t.Errorf("name=%q: got dispayname %q, want %q", tc.name, gotDisplayName, tc.wantDisplayName)
}
}
}

519
drives/davServer/xml.go Normal file
View File

@ -0,0 +1,519 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
// The XML encoding is covered by Section 14.
// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"time"
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
// in July 2015, this package uses an internal fork of the standard
// library's encoding/xml package, due to changes in the way namespaces
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
// rolled back in response to https://github.com/golang/go/issues/11841
//
// However, this package's exported API, specifically the Property and
// DeadPropsHolder types, need to refer to the standard library's version
// of the xml.Name type, as code that imports this package cannot refer to
// the internal version.
//
// This file therefore imports both the internal and external versions, as
// ixml and xml, and converts between them.
//
// In the long term, this package should use the standard library's version
// only, and the internal fork deleted, once
// https://github.com/golang/go/issues/13400 is resolved.
ixml "github.com/openziti/zrok/drives/davServer/internal/xml"
)
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
type lockInfo struct {
XMLName ixml.Name `xml:"lockinfo"`
Exclusive *struct{} `xml:"lockscope>exclusive"`
Shared *struct{} `xml:"lockscope>shared"`
Write *struct{} `xml:"locktype>write"`
Owner owner `xml:"owner"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
type owner struct {
InnerXML string `xml:",innerxml"`
}
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
c := &countingReader{r: r}
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to refresh the lock.
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
return lockInfo{}, 0, nil
}
err = errInvalidLockInfo
}
return lockInfo{}, http.StatusBadRequest, err
}
// We only support exclusive (non-shared) write locks. In practice, these are
// the only types of locks that seem to matter.
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
}
return li, 0, nil
}
type countingReader struct {
n int
r io.Reader
}
func (c *countingReader) Read(p []byte) (int, error) {
n, err := c.r.Read(p)
c.n += n
return n, err
}
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
depth := "infinity"
if ld.ZeroDepth {
depth = "0"
}
timeout := ld.Duration / time.Second
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
" <D:locktype><D:write/></D:locktype>\n"+
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
" <D:depth>%s</D:depth>\n"+
" <D:owner>%s</D:owner>\n"+
" <D:timeout>Second-%d</D:timeout>\n"+
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
"</D:activelock></D:lockdiscovery></D:prop>",
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
)
}
func escape(s string) string {
for i := 0; i < len(s); i++ {
switch s[i] {
case '"', '&', '\'', '<', '>':
b := bytes.NewBuffer(nil)
ixml.EscapeText(b, []byte(s))
return b.String()
}
}
return s
}
// next returns the next token, if any, in the XML stream of d.
// RFC 4918 requires comments, processing instructions
// and directives to be ignored.
// http://www.webdav.org/specs/rfc4918.html#property_values
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
func next(d *ixml.Decoder) (ixml.Token, error) {
for {
t, err := d.Token()
if err != nil {
return t, err
}
switch t.(type) {
case ixml.Comment, ixml.Directive, ixml.ProcInst:
continue
default:
return t, nil
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
type propfindProps []xml.Name
// UnmarshalXML appends the property names enclosed within start to pn.
//
// It returns an error if start does not contain any properties or if
// properties contain values. Character data between properties is ignored.
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
for {
t, err := next(d)
if err != nil {
return err
}
switch t.(type) {
case ixml.EndElement:
if len(*pn) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
name := t.(ixml.StartElement).Name
t, err = next(d)
if err != nil {
return err
}
if _, ok := t.(ixml.EndElement); !ok {
return fmt.Errorf("unexpected token %T", t)
}
*pn = append(*pn, xml.Name(name))
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
type propfind struct {
XMLName ixml.Name `xml:"DAV: propfind"`
Allprop *struct{} `xml:"DAV: allprop"`
Propname *struct{} `xml:"DAV: propname"`
Prop propfindProps `xml:"DAV: prop"`
Include propfindProps `xml:"DAV: include"`
}
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
c := countingReader{r: r}
if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to propfind allprop.
// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
return propfind{Allprop: new(struct{})}, 0, nil
}
err = errInvalidPropfind
}
return propfind{}, http.StatusBadRequest, err
}
if pf.Allprop == nil && pf.Include != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Prop != nil && pf.Propname != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
return pf, 0, nil
}
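readPropfind enforces the mutual exclusion between propname, allprop and prop, with an empty body as shorthand for allprop. A small in-package sketch, assuming a strings import, that parses a body naming just DAV:displayname (the same shape the tests later in this commit use):

// Illustrative in-package sketch: parse a PROPFIND body naming one property.
func parseExamplePropfind() (propfind, error) {
	const body = `<A:propfind xmlns:A='DAV:'>` +
		`  <A:prop><A:displayname/></A:prop>` +
		`</A:propfind>`
	pf, _, err := readPropfind(strings.NewReader(body))
	return pf, err
}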
// Property represents a single DAV resource property as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
type Property struct {
// XMLName is the fully qualified name that identifies this property.
XMLName xml.Name
// Lang is an optional xml:lang attribute.
Lang string `xml:"xml:lang,attr,omitempty"`
// InnerXML contains the XML representation of the property value.
// See http://www.webdav.org/specs/rfc4918.html#property_values
//
// Property values of complex type or mixed-content must have fully
// expanded XML namespaces or be self-contained with according
// XML namespace declarations. They must not rely on any XML
// namespace declarations within the scope of the XML document,
// even including the DAV: namespace.
InnerXML []byte `xml:",innerxml"`
}
// ixmlProperty is the same as the Property type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlProperty struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
// See multistatusWriter for the "D:" namespace prefix.
type xmlError struct {
XMLName ixml.Name `xml:"D:error"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
// See multistatusWriter for the "D:" namespace prefix.
type propstat struct {
Prop []Property `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlPropstat struct {
Prop []ixmlProperty `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
// before encoding. See multistatusWriter.
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
// Convert from a propstat to an ixmlPropstat.
ixmlPs := ixmlPropstat{
Prop: make([]ixmlProperty, len(ps.Prop)),
Status: ps.Status,
Error: ps.Error,
ResponseDescription: ps.ResponseDescription,
}
for k, prop := range ps.Prop {
ixmlPs.Prop[k] = ixmlProperty{
XMLName: ixml.Name(prop.XMLName),
Lang: prop.Lang,
InnerXML: prop.InnerXML,
}
}
for k, prop := range ixmlPs.Prop {
if prop.XMLName.Space == "DAV:" {
prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
ixmlPs.Prop[k] = prop
}
}
// Distinct type to avoid infinite recursion of MarshalXML.
type newpropstat ixmlPropstat
return e.EncodeElement(newpropstat(ixmlPs), start)
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
// See multistatusWriter for the "D:" namespace prefix.
type response struct {
XMLName ixml.Name `xml:"D:response"`
Href []string `xml:"D:href"`
Propstat []propstat `xml:"D:propstat"`
Status string `xml:"D:status,omitempty"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MultistatusWriter marshals one or more Responses into an XML
// multistatus response.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
// "DAV:" on this element, is prepended on the nested response, as well as on all
// its nested elements. All property names in the DAV: namespace are prefixed as
// well. This is because some versions of Mini-Redirector (on Windows 7) ignore
// elements with a default namespace (no prefixed namespace). A less intrusive fix
// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
type multistatusWriter struct {
// ResponseDescription contains the optional responsedescription
// of the multistatus XML element. Only the latest content before
// close will be emitted. Empty response descriptions are not
// written.
responseDescription string
w http.ResponseWriter
enc *ixml.Encoder
}
// Write validates and emits a DAV response as part of a multistatus response
// element.
//
// It sets the HTTP status code of its underlying http.ResponseWriter to 207
// (Multi-Status) and populates the Content-Type header. If r is the
// first, valid response to be written, Write prepends the XML representation
// of r with a multistatus tag. Callers must call close after the last response
// has been written.
func (w *multistatusWriter) write(r *response) error {
switch len(r.Href) {
case 0:
return errInvalidResponse
case 1:
if len(r.Propstat) > 0 != (r.Status == "") {
return errInvalidResponse
}
default:
if len(r.Propstat) > 0 || r.Status == "" {
return errInvalidResponse
}
}
err := w.writeHeader()
if err != nil {
return err
}
return w.enc.Encode(r)
}
// writeHeader writes an XML multistatus start element on w's underlying
// http.ResponseWriter and returns the result of the write operation.
// After the first write attempt, writeHeader becomes a no-op.
func (w *multistatusWriter) writeHeader() error {
if w.enc != nil {
return nil
}
w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
w.w.WriteHeader(StatusMulti)
_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
if err != nil {
return err
}
w.enc = ixml.NewEncoder(w.w)
return w.enc.EncodeToken(ixml.StartElement{
Name: ixml.Name{
Space: "DAV:",
Local: "multistatus",
},
Attr: []ixml.Attr{{
Name: ixml.Name{Space: "xmlns", Local: "D"},
Value: "DAV:",
}},
})
}
// Close completes the marshalling of the multistatus response. It returns
// an error if the multistatus response could not be completed. If both the
// return value and field enc of w are nil, then no multistatus response has
// been written.
func (w *multistatusWriter) close() error {
if w.enc == nil {
return nil
}
var end []ixml.Token
if w.responseDescription != "" {
name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
end = append(end,
ixml.StartElement{Name: name},
ixml.CharData(w.responseDescription),
ixml.EndElement{Name: name},
)
}
end = append(end, ixml.EndElement{
Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
})
for _, t := range end {
err := w.enc.EncodeToken(t)
if err != nil {
return err
}
}
return w.enc.Flush()
}
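The write-then-close contract above is exactly what handlePropfind and handleProppatch in webdav.go follow. A minimal in-package sketch, assuming net/http and encoding/xml imports; the href and property are placeholders.

// Illustrative in-package sketch of the write-then-close contract.
func writeSingleResponse(w http.ResponseWriter) error {
	mw := multistatusWriter{w: w}
	resp := makePropstatResponse("/demo.txt", []Propstat{{
		Status: http.StatusOK,
		Props: []Property{{
			XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
			InnerXML: []byte("demo.txt"),
		}},
	}})
	if err := mw.write(resp); err != nil {
		return err
	}
	return mw.close()
}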
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
func xmlLang(s ixml.StartElement, d string) string {
for _, attr := range s.Attr {
if attr.Name == xmlLangName {
return attr.Value
}
}
return d
}
type xmlValue []byte
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
// The XML value of a property can be arbitrary, mixed-content XML.
// To make sure that the unmarshalled value contains all required
// namespaces, we encode all the property value XML tokens into a
// buffer. This forces the encoder to redeclare any used namespaces.
var b bytes.Buffer
e := ixml.NewEncoder(&b)
for {
t, err := next(d)
if err != nil {
return err
}
if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
break
}
if err = e.EncodeToken(t); err != nil {
return err
}
}
err := e.Flush()
if err != nil {
return err
}
*v = b.Bytes()
return nil
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
type proppatchProps []Property
// UnmarshalXML appends the property names and values enclosed within start
// to ps.
//
// An xml:lang attribute that is defined either on the DAV:prop or property
// name XML element is propagated to the property's Lang field.
//
// UnmarshalXML returns an error if start does not contain any properties or if
// property values contain syntactically incorrect XML.
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
lang := xmlLang(start, "")
for {
t, err := next(d)
if err != nil {
return err
}
switch elem := t.(type) {
case ixml.EndElement:
if len(*ps) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
p := Property{
XMLName: xml.Name(t.(ixml.StartElement).Name),
Lang: xmlLang(t.(ixml.StartElement), lang),
}
err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
if err != nil {
return err
}
*ps = append(*ps, p)
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
type setRemove struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
Prop proppatchProps `xml:"DAV: prop"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
type propertyupdate struct {
XMLName ixml.Name `xml:"DAV: propertyupdate"`
Lang string `xml:"xml:lang,attr,omitempty"`
SetRemove []setRemove `xml:",any"`
}
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
var pu propertyupdate
if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
return nil, http.StatusBadRequest, err
}
for _, op := range pu.SetRemove {
remove := false
switch op.XMLName {
case ixml.Name{Space: "DAV:", Local: "set"}:
// No-op.
case ixml.Name{Space: "DAV:", Local: "remove"}:
for _, p := range op.Prop {
if len(p.InnerXML) > 0 {
return nil, http.StatusBadRequest, errInvalidProppatch
}
}
remove = true
default:
return nil, http.StatusBadRequest, errInvalidProppatch
}
patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
}
return patches, 0, nil
}
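readProppatch flattens a DAV:propertyupdate body into Proppatch values, marking the remove operations. An in-package sketch, assuming a strings import, that parses a set/remove document shaped like the section 9.2 example used by the tests later in this commit:

// Illustrative in-package sketch: parse a PROPPATCH body.
func parseExampleProppatch() ([]Proppatch, error) {
	const body = `<?xml version="1.0" encoding="utf-8" ?>` +
		`<D:propertyupdate xmlns:D="DAV:" xmlns:Z="http://ns.example.com/z/">` +
		`  <D:set><D:prop><Z:Authors>somevalue</Z:Authors></D:prop></D:set>` +
		`  <D:remove><D:prop><Z:Copyright-Owner/></D:prop></D:remove>` +
		`</D:propertyupdate>`
	patches, _, err := readProppatch(strings.NewReader(body))
	return patches, err
}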

View File

@ -0,0 +1,905 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package davServer
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/http/httptest"
"reflect"
"sort"
"strings"
"testing"
ixml "github.com/openziti/zrok/drives/davServer/internal/xml"
)
func TestReadLockInfo(t *testing.T) {
// The "section x.y.z" test cases come from section x.y.z of the spec at
// http://www.webdav.org/specs/rfc4918.html
testCases := []struct {
desc string
input string
wantLI lockInfo
wantStatus int
}{{
"bad: junk",
"xxx",
lockInfo{},
http.StatusBadRequest,
}, {
"bad: invalid owner XML",
"" +
"<D:lockinfo xmlns:D='DAV:'>\n" +
" <D:lockscope><D:exclusive/></D:lockscope>\n" +
" <D:locktype><D:write/></D:locktype>\n" +
" <D:owner>\n" +
" <D:href> no end tag \n" +
" </D:owner>\n" +
"</D:lockinfo>",
lockInfo{},
http.StatusBadRequest,
}, {
"bad: invalid UTF-8",
"" +
"<D:lockinfo xmlns:D='DAV:'>\n" +
" <D:lockscope><D:exclusive/></D:lockscope>\n" +
" <D:locktype><D:write/></D:locktype>\n" +
" <D:owner>\n" +
" <D:href> \xff </D:href>\n" +
" </D:owner>\n" +
"</D:lockinfo>",
lockInfo{},
http.StatusBadRequest,
}, {
"bad: unfinished XML #1",
"" +
"<D:lockinfo xmlns:D='DAV:'>\n" +
" <D:lockscope><D:exclusive/></D:lockscope>\n" +
" <D:locktype><D:write/></D:locktype>\n",
lockInfo{},
http.StatusBadRequest,
}, {
"bad: unfinished XML #2",
"" +
"<D:lockinfo xmlns:D='DAV:'>\n" +
" <D:lockscope><D:exclusive/></D:lockscope>\n" +
" <D:locktype><D:write/></D:locktype>\n" +
" <D:owner>\n",
lockInfo{},
http.StatusBadRequest,
}, {
"good: empty",
"",
lockInfo{},
0,
}, {
"good: plain-text owner",
"" +
"<D:lockinfo xmlns:D='DAV:'>\n" +
" <D:lockscope><D:exclusive/></D:lockscope>\n" +
" <D:locktype><D:write/></D:locktype>\n" +
" <D:owner>gopher</D:owner>\n" +
"</D:lockinfo>",
lockInfo{
XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"},
Exclusive: new(struct{}),
Write: new(struct{}),
Owner: owner{
InnerXML: "gopher",
},
},
0,
}, {
"section 9.10.7",
"" +
"<D:lockinfo xmlns:D='DAV:'>\n" +
" <D:lockscope><D:exclusive/></D:lockscope>\n" +
" <D:locktype><D:write/></D:locktype>\n" +
" <D:owner>\n" +
" <D:href>http://example.org/~ejw/contact.html</D:href>\n" +
" </D:owner>\n" +
"</D:lockinfo>",
lockInfo{
XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"},
Exclusive: new(struct{}),
Write: new(struct{}),
Owner: owner{
InnerXML: "\n <D:href>http://example.org/~ejw/contact.html</D:href>\n ",
},
},
0,
}}
for _, tc := range testCases {
li, status, err := readLockInfo(strings.NewReader(tc.input))
if tc.wantStatus != 0 {
if err == nil {
t.Errorf("%s: got nil error, want non-nil", tc.desc)
continue
}
} else if err != nil {
t.Errorf("%s: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus {
t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v",
tc.desc, li, status, tc.wantLI, tc.wantStatus)
continue
}
}
}
func TestReadPropfind(t *testing.T) {
testCases := []struct {
desc string
input string
wantPF propfind
wantStatus int
}{{
desc: "propfind: propname",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:propname/>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Propname: new(struct{}),
},
}, {
desc: "propfind: empty body means allprop",
input: "",
wantPF: propfind{
Allprop: new(struct{}),
},
}, {
desc: "propfind: allprop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:allprop/>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Allprop: new(struct{}),
},
}, {
desc: "propfind: allprop followed by include",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:allprop/>\n" +
" <A:include><A:displayname/></A:include>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Allprop: new(struct{}),
Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
},
}, {
desc: "propfind: include followed by allprop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:include><A:displayname/></A:include>\n" +
" <A:allprop/>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Allprop: new(struct{}),
Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
},
}, {
desc: "propfind: propfind",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop><A:displayname/></A:prop>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
},
}, {
desc: "propfind: prop with ignored comments",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop>\n" +
" <!-- ignore -->\n" +
" <A:displayname><!-- ignore --></A:displayname>\n" +
" </A:prop>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
},
}, {
desc: "propfind: propfind with ignored whitespace",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop> <A:displayname/></A:prop>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
},
}, {
desc: "propfind: propfind with ignored mixed-content",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop>foo<A:displayname/>bar</A:prop>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}},
},
}, {
desc: "propfind: propname with ignored element (section A.4)",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:propname/>\n" +
" <E:leave-out xmlns:E='E:'>*boss*</E:leave-out>\n" +
"</A:propfind>",
wantPF: propfind{
XMLName: ixml.Name{Space: "DAV:", Local: "propfind"},
Propname: new(struct{}),
},
}, {
desc: "propfind: bad: junk",
input: "xxx",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: propname and allprop (section A.3)",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:propname/>" +
" <A:allprop/>" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: propname and prop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop><A:displayname/></A:prop>\n" +
" <A:propname/>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: allprop and prop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:allprop/>\n" +
" <A:prop><A:foo/><A:/prop>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: empty propfind with ignored element (section A.4)",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <E:expired-props/>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: empty prop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop/>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: prop with just chardata",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop>foo</A:prop>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "bad: interrupted prop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop><A:foo></A:prop>\n",
wantStatus: http.StatusBadRequest,
}, {
desc: "bad: malformed end element prop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop><A:foo/></A:bar></A:prop>\n",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: property with chardata value",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop><A:foo>bar</A:foo></A:prop>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: property with whitespace value",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:prop><A:foo> </A:foo></A:prop>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}, {
desc: "propfind: bad: include without allprop",
input: "" +
"<A:propfind xmlns:A='DAV:'>\n" +
" <A:include><A:foo/></A:include>\n" +
"</A:propfind>",
wantStatus: http.StatusBadRequest,
}}
for _, tc := range testCases {
pf, status, err := readPropfind(strings.NewReader(tc.input))
if tc.wantStatus != 0 {
if err == nil {
t.Errorf("%s: got nil error, want non-nil", tc.desc)
continue
}
} else if err != nil {
t.Errorf("%s: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus {
t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v",
tc.desc, pf, status, tc.wantPF, tc.wantStatus)
continue
}
}
}
func TestMultistatusWriter(t *testing.T) {
// The "section x.y.z" test cases come from section x.y.z of the spec at
// http://www.webdav.org/specs/rfc4918.html
testCases := []struct {
desc string
responses []response
respdesc string
writeHeader bool
wantXML string
wantCode int
wantErr error
}{{
desc: "section 9.2.2 (failed dependency)",
responses: []response{{
Href: []string{"http://example.com/foo"},
Propstat: []propstat{{
Prop: []Property{{
XMLName: xml.Name{
Space: "http://ns.example.com/",
Local: "Authors",
},
}},
Status: "HTTP/1.1 424 Failed Dependency",
}, {
Prop: []Property{{
XMLName: xml.Name{
Space: "http://ns.example.com/",
Local: "Copyright-Owner",
},
}},
Status: "HTTP/1.1 409 Conflict",
}},
ResponseDescription: "Copyright Owner cannot be deleted or altered.",
}},
wantXML: `` +
`<?xml version="1.0" encoding="UTF-8"?>` +
`<multistatus xmlns="DAV:">` +
` <response>` +
` <href>http://example.com/foo</href>` +
` <propstat>` +
` <prop>` +
` <Authors xmlns="http://ns.example.com/"></Authors>` +
` </prop>` +
` <status>HTTP/1.1 424 Failed Dependency</status>` +
` </propstat>` +
` <propstat xmlns="DAV:">` +
` <prop>` +
` <Copyright-Owner xmlns="http://ns.example.com/"></Copyright-Owner>` +
` </prop>` +
` <status>HTTP/1.1 409 Conflict</status>` +
` </propstat>` +
` <responsedescription>Copyright Owner cannot be deleted or altered.</responsedescription>` +
`</response>` +
`</multistatus>`,
wantCode: StatusMulti,
}, {
desc: "section 9.6.2 (lock-token-submitted)",
responses: []response{{
Href: []string{"http://example.com/foo"},
Status: "HTTP/1.1 423 Locked",
Error: &xmlError{
InnerXML: []byte(`<lock-token-submitted xmlns="DAV:"/>`),
},
}},
wantXML: `` +
`<?xml version="1.0" encoding="UTF-8"?>` +
`<multistatus xmlns="DAV:">` +
` <response>` +
` <href>http://example.com/foo</href>` +
` <status>HTTP/1.1 423 Locked</status>` +
` <error><lock-token-submitted xmlns="DAV:"/></error>` +
` </response>` +
`</multistatus>`,
wantCode: StatusMulti,
}, {
desc: "section 9.1.3",
responses: []response{{
Href: []string{"http://example.com/foo"},
Propstat: []propstat{{
Prop: []Property{{
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"},
InnerXML: []byte(`` +
`<BoxType xmlns="http://ns.example.com/boxschema/">` +
`Box type A` +
`</BoxType>`),
}, {
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"},
InnerXML: []byte(`` +
`<Name xmlns="http://ns.example.com/boxschema/">` +
`J.J. Johnson` +
`</Name>`),
}},
Status: "HTTP/1.1 200 OK",
}, {
Prop: []Property{{
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"},
}, {
XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"},
}},
Status: "HTTP/1.1 403 Forbidden",
ResponseDescription: "The user does not have access to the DingALing property.",
}},
}},
respdesc: "There has been an access violation error.",
wantXML: `` +
`<?xml version="1.0" encoding="UTF-8"?>` +
`<multistatus xmlns="DAV:" xmlns:B="http://ns.example.com/boxschema/">` +
` <response>` +
` <href>http://example.com/foo</href>` +
` <propstat>` +
` <prop>` +
` <B:bigbox><B:BoxType>Box type A</B:BoxType></B:bigbox>` +
` <B:author><B:Name>J.J. Johnson</B:Name></B:author>` +
` </prop>` +
` <status>HTTP/1.1 200 OK</status>` +
` </propstat>` +
` <propstat>` +
` <prop>` +
` <B:DingALing/>` +
` <B:Random/>` +
` </prop>` +
` <status>HTTP/1.1 403 Forbidden</status>` +
` <responsedescription>The user does not have access to the DingALing property.</responsedescription>` +
` </propstat>` +
` </response>` +
` <responsedescription>There has been an access violation error.</responsedescription>` +
`</multistatus>`,
wantCode: StatusMulti,
}, {
desc: "no response written",
// default of http.responseWriter
wantCode: http.StatusOK,
}, {
desc: "no response written (with description)",
respdesc: "too bad",
// default of http.responseWriter
wantCode: http.StatusOK,
}, {
desc: "empty multistatus with header",
writeHeader: true,
wantXML: `<multistatus xmlns="DAV:"></multistatus>`,
wantCode: StatusMulti,
}, {
desc: "bad: no href",
responses: []response{{
Propstat: []propstat{{
Prop: []Property{{
XMLName: xml.Name{
Space: "http://example.com/",
Local: "foo",
},
}},
Status: "HTTP/1.1 200 OK",
}},
}},
wantErr: errInvalidResponse,
// default of http.responseWriter
wantCode: http.StatusOK,
}, {
desc: "bad: multiple hrefs and no status",
responses: []response{{
Href: []string{"http://example.com/foo", "http://example.com/bar"},
}},
wantErr: errInvalidResponse,
// default of http.responseWriter
wantCode: http.StatusOK,
}, {
desc: "bad: one href and no propstat",
responses: []response{{
Href: []string{"http://example.com/foo"},
}},
wantErr: errInvalidResponse,
// default of http.responseWriter
wantCode: http.StatusOK,
}, {
desc: "bad: status with one href and propstat",
responses: []response{{
Href: []string{"http://example.com/foo"},
Propstat: []propstat{{
Prop: []Property{{
XMLName: xml.Name{
Space: "http://example.com/",
Local: "foo",
},
}},
Status: "HTTP/1.1 200 OK",
}},
Status: "HTTP/1.1 200 OK",
}},
wantErr: errInvalidResponse,
// default of http.responseWriter
wantCode: http.StatusOK,
}, {
desc: "bad: multiple hrefs and propstat",
responses: []response{{
Href: []string{
"http://example.com/foo",
"http://example.com/bar",
},
Propstat: []propstat{{
Prop: []Property{{
XMLName: xml.Name{
Space: "http://example.com/",
Local: "foo",
},
}},
Status: "HTTP/1.1 200 OK",
}},
}},
wantErr: errInvalidResponse,
// default of http.responseWriter
wantCode: http.StatusOK,
}}
n := xmlNormalizer{omitWhitespace: true}
loop:
for _, tc := range testCases {
rec := httptest.NewRecorder()
w := multistatusWriter{w: rec, responseDescription: tc.respdesc}
if tc.writeHeader {
if err := w.writeHeader(); err != nil {
t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err)
continue
}
}
for _, r := range tc.responses {
if err := w.write(&r); err != nil {
if err != tc.wantErr {
t.Errorf("%s: got write error %v, want %v",
tc.desc, err, tc.wantErr)
}
continue loop
}
}
if err := w.close(); err != tc.wantErr {
t.Errorf("%s: got close error %v, want %v",
tc.desc, err, tc.wantErr)
continue
}
if rec.Code != tc.wantCode {
t.Errorf("%s: got HTTP status code %d, want %d\n",
tc.desc, rec.Code, tc.wantCode)
continue
}
gotXML := rec.Body.String()
eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML))
if err != nil {
t.Errorf("%s: equalXML: %v", tc.desc, err)
continue
}
if !eq {
t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML)
}
}
}
func TestReadProppatch(t *testing.T) {
ppStr := func(pps []Proppatch) string {
var outer []string
for _, pp := range pps {
var inner []string
for _, p := range pp.Props {
inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}",
p.XMLName, p.Lang, p.InnerXML))
}
outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}",
pp.Remove, strings.Join(inner, ", ")))
}
return "[" + strings.Join(outer, ", ") + "]"
}
testCases := []struct {
desc string
input string
wantPP []Proppatch
wantStatus int
}{{
desc: "proppatch: section 9.2 (with simple property value)",
input: `` +
`<?xml version="1.0" encoding="utf-8" ?>` +
`<D:propertyupdate xmlns:D="DAV:"` +
` xmlns:Z="http://ns.example.com/z/">` +
` <D:set>` +
` <D:prop><Z:Authors>somevalue</Z:Authors></D:prop>` +
` </D:set>` +
` <D:remove>` +
` <D:prop><Z:Copyright-Owner/></D:prop>` +
` </D:remove>` +
`</D:propertyupdate>`,
wantPP: []Proppatch{{
Props: []Property{{
xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"},
"",
[]byte(`somevalue`),
}},
}, {
Remove: true,
Props: []Property{{
xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"},
"",
nil,
}},
}},
}, {
desc: "proppatch: lang attribute on prop",
input: `` +
`<?xml version="1.0" encoding="utf-8" ?>` +
`<D:propertyupdate xmlns:D="DAV:">` +
` <D:set>` +
` <D:prop xml:lang="en">` +
` <foo xmlns="http://example.com/ns"/>` +
` </D:prop>` +
` </D:set>` +
`</D:propertyupdate>`,
wantPP: []Proppatch{{
Props: []Property{{
xml.Name{Space: "http://example.com/ns", Local: "foo"},
"en",
nil,
}},
}},
}, {
desc: "bad: remove with value",
input: `` +
`<?xml version="1.0" encoding="utf-8" ?>` +
`<D:propertyupdate xmlns:D="DAV:"` +
` xmlns:Z="http://ns.example.com/z/">` +
` <D:remove>` +
` <D:prop>` +
` <Z:Authors>` +
` <Z:Author>Jim Whitehead</Z:Author>` +
` </Z:Authors>` +
` </D:prop>` +
` </D:remove>` +
`</D:propertyupdate>`,
wantStatus: http.StatusBadRequest,
}, {
desc: "bad: empty propertyupdate",
input: `` +
`<?xml version="1.0" encoding="utf-8" ?>` +
`<D:propertyupdate xmlns:D="DAV:"` +
`</D:propertyupdate>`,
wantStatus: http.StatusBadRequest,
}, {
desc: "bad: empty prop",
input: `` +
`<?xml version="1.0" encoding="utf-8" ?>` +
`<D:propertyupdate xmlns:D="DAV:"` +
` xmlns:Z="http://ns.example.com/z/">` +
` <D:remove>` +
` <D:prop/>` +
` </D:remove>` +
`</D:propertyupdate>`,
wantStatus: http.StatusBadRequest,
}}
for _, tc := range testCases {
pp, status, err := readProppatch(strings.NewReader(tc.input))
if tc.wantStatus != 0 {
if err == nil {
t.Errorf("%s: got nil error, want non-nil", tc.desc)
continue
}
} else if err != nil {
t.Errorf("%s: %v", tc.desc, err)
continue
}
if status != tc.wantStatus {
t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus)
continue
}
if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus {
t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP))
}
}
}
func TestUnmarshalXMLValue(t *testing.T) {
testCases := []struct {
desc string
input string
wantVal string
}{{
desc: "simple char data",
input: "<root>foo</root>",
wantVal: "foo",
}, {
desc: "empty element",
input: "<root><foo/></root>",
wantVal: "<foo/>",
}, {
desc: "preserve namespace",
input: `<root><foo xmlns="bar"/></root>`,
wantVal: `<foo xmlns="bar"/>`,
}, {
desc: "preserve root element namespace",
input: `<root xmlns:bar="bar"><bar:foo/></root>`,
wantVal: `<foo xmlns="bar"/>`,
}, {
desc: "preserve whitespace",
input: "<root> \t </root>",
wantVal: " \t ",
}, {
desc: "preserve mixed content",
input: `<root xmlns="bar"> <foo>a<bam xmlns="baz"/> </foo> </root>`,
wantVal: ` <foo xmlns="bar">a<bam xmlns="baz"/> </foo> `,
}, {
desc: "section 9.2",
input: `` +
`<Z:Authors xmlns:Z="http://ns.example.com/z/">` +
` <Z:Author>Jim Whitehead</Z:Author>` +
` <Z:Author>Roy Fielding</Z:Author>` +
`</Z:Authors>`,
wantVal: `` +
` <Author xmlns="http://ns.example.com/z/">Jim Whitehead</Author>` +
` <Author xmlns="http://ns.example.com/z/">Roy Fielding</Author>`,
}, {
desc: "section 4.3.1 (mixed content)",
input: `` +
`<x:author ` +
` xmlns:x='http://example.com/ns' ` +
` xmlns:D="DAV:">` +
` <x:name>Jane Doe</x:name>` +
` <!-- Jane's contact info -->` +
` <x:uri type='email'` +
` added='2005-11-26'>mailto:jane.doe@example.com</x:uri>` +
` <x:uri type='web'` +
` added='2005-11-27'>http://www.example.com</x:uri>` +
` <x:notes xmlns:h='http://www.w3.org/1999/xhtml'>` +
` Jane has been working way <h:em>too</h:em> long on the` +
` long-awaited revision of <![CDATA[<RFC2518>]]>.` +
` </x:notes>` +
`</x:author>`,
wantVal: `` +
` <name xmlns="http://example.com/ns">Jane Doe</name>` +
` ` +
` <uri type='email'` +
` xmlns="http://example.com/ns" ` +
` added='2005-11-26'>mailto:jane.doe@example.com</uri>` +
` <uri added='2005-11-27'` +
` type='web'` +
` xmlns="http://example.com/ns">http://www.example.com</uri>` +
` <notes xmlns="http://example.com/ns" ` +
` xmlns:h="http://www.w3.org/1999/xhtml">` +
` Jane has been working way <h:em>too</h:em> long on the` +
` long-awaited revision of &lt;RFC2518&gt;.` +
` </notes>`,
}}
var n xmlNormalizer
for _, tc := range testCases {
d := ixml.NewDecoder(strings.NewReader(tc.input))
var v xmlValue
if err := d.Decode(&v); err != nil {
t.Errorf("%s: got error %v, want nil", tc.desc, err)
continue
}
eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal))
if err != nil {
t.Errorf("%s: equalXML: %v", tc.desc, err)
continue
}
if !eq {
t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal)
}
}
}
// xmlNormalizer normalizes XML.
type xmlNormalizer struct {
// omitWhitespace instructs to ignore whitespace between element tags.
omitWhitespace bool
// omitComments instructs to ignore XML comments.
omitComments bool
}
// normalize writes the normalized XML content of r to w. It applies the
// following rules
//
// - Rename namespace prefixes according to an internal heuristic.
// - Remove unnecessary namespace declarations.
// - Sort attributes in XML start elements in lexical order of their
// fully qualified name.
// - Remove XML directives and processing instructions.
// - Remove CDATA between XML tags that only contains whitespace, if
// instructed to do so.
// - Remove comments, if instructed to do so.
func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error {
d := ixml.NewDecoder(r)
e := ixml.NewEncoder(w)
for {
t, err := d.Token()
if err != nil {
if t == nil && err == io.EOF {
break
}
return err
}
switch val := t.(type) {
case ixml.Directive, ixml.ProcInst:
continue
case ixml.Comment:
if n.omitComments {
continue
}
case ixml.CharData:
if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 {
continue
}
case ixml.StartElement:
start, _ := ixml.CopyToken(val).(ixml.StartElement)
attr := start.Attr[:0]
for _, a := range start.Attr {
if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" {
continue
}
attr = append(attr, a)
}
sort.Sort(byName(attr))
start.Attr = attr
t = start
}
err = e.EncodeToken(t)
if err != nil {
return err
}
}
return e.Flush()
}
// equalXML tests for equality of the normalized XML contents of a and b.
func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) {
var buf bytes.Buffer
if err := n.normalize(&buf, a); err != nil {
return false, err
}
normA := buf.String()
buf.Reset()
if err := n.normalize(&buf, b); err != nil {
return false, err
}
normB := buf.String()
return normA == normB, nil
}
type byName []ixml.Attr
func (a byName) Len() int { return len(a) }
func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byName) Less(i, j int) bool {
if a[i].Name.Space != a[j].Name.Space {
return a[i].Name.Space < a[j].Name.Space
}
return a[i].Name.Local < a[j].Name.Local
}

152
drives/sync/filesystem.go Normal file
View File

@@ -0,0 +1,152 @@
package sync
import (
"context"
"fmt"
"github.com/openziti/zrok/drives/davServer"
"io"
"io/fs"
"os"
"path/filepath"
"time"
)
type FilesystemTargetConfig struct {
Root string
}
type FilesystemTarget struct {
cfg *FilesystemTargetConfig
root fs.FS
tree []*Object
}
func NewFilesystemTarget(cfg *FilesystemTargetConfig) *FilesystemTarget {
root := os.DirFS(cfg.Root)
return &FilesystemTarget{cfg: cfg, root: root}
}
func (t *FilesystemTarget) Inventory() ([]*Object, error) {
fi, err := os.Stat(t.cfg.Root)
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
if !fi.IsDir() {
t.cfg.Root = filepath.Dir(t.cfg.Root)
return []*Object{{
Path: "/" + fi.Name(),
IsDir: false,
Size: fi.Size(),
Modified: fi.ModTime(),
}}, nil
}
t.tree = nil
if err := fs.WalkDir(t.root, ".", t.recurse); err != nil {
return nil, err
}
return t.tree, nil
}
func (t *FilesystemTarget) Dir(path string) ([]*Object, error) {
des, err := os.ReadDir(t.cfg.Root)
if err != nil {
return nil, err
}
var objects []*Object
for _, de := range des {
fi, err := de.Info()
if err != nil {
return nil, err
}
objects = append(objects, &Object{
Path: de.Name(),
IsDir: de.IsDir(),
Size: fi.Size(),
Modified: fi.ModTime(),
})
}
return objects, nil
}
func (t *FilesystemTarget) Mkdir(path string) error {
return os.MkdirAll(filepath.Join(t.cfg.Root, path), os.ModePerm)
}
func (t *FilesystemTarget) recurse(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
fi, err := d.Info()
if err != nil {
return err
}
etag := ""
if v, ok := fi.(davServer.ETager); ok {
etag, err = v.ETag(context.Background())
if err != nil {
return err
}
} else {
etag = fmt.Sprintf(`"%x%x"`, fi.ModTime().UTC().UnixNano(), fi.Size())
}
if path != "." {
outPath := "/" + path
if fi.IsDir() {
outPath = outPath + "/"
}
t.tree = append(t.tree, &Object{
Path: outPath,
IsDir: fi.IsDir(),
Size: fi.Size(),
Modified: fi.ModTime(),
ETag: etag,
})
}
return nil
}
func (t *FilesystemTarget) ReadStream(path string) (io.ReadCloser, error) {
return os.Open(filepath.Join(t.cfg.Root, path))
}
func (t *FilesystemTarget) WriteStream(path string, stream io.Reader, mode os.FileMode) error {
targetPath := filepath.Join(t.cfg.Root, path)
if err := os.MkdirAll(filepath.Dir(targetPath), mode); err != nil {
return err
}
f, err := os.Create(targetPath)
if err != nil {
return err
}
defer func() { _ = f.Close() }()
_, err = io.Copy(f, stream)
if err != nil {
return err
}
return nil
}
func (t *FilesystemTarget) WriteStreamWithModTime(path string, stream io.Reader, mode os.FileMode, modTime time.Time) error {
return t.WriteStream(path, stream, mode)
}
func (t *FilesystemTarget) Move(src, dest string) error {
return os.Rename(filepath.Join(t.cfg.Root, src), filepath.Join(filepath.Dir(t.cfg.Root), dest))
}
func (t *FilesystemTarget) Rm(path string) error {
return os.RemoveAll(filepath.Join(t.cfg.Root, path))
}
func (t *FilesystemTarget) SetModificationTime(path string, mtime time.Time) error {
targetPath := filepath.Join(t.cfg.Root, path)
if err := os.Chtimes(targetPath, time.Now(), mtime); err != nil {
return err
}
return nil
}
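As an illustrative sketch (hypothetical paths, not from this diff), a FilesystemTarget rooted at a local directory reports its recursive inventory with paths made relative to the root and prefixed with '/':

package main

import (
	"fmt"
	"log"

	"github.com/openziti/zrok/drives/sync"
)

func main() {
	// Hypothetical local directory.
	ft := sync.NewFilesystemTarget(&sync.FilesystemTargetConfig{Root: "/tmp/data"})
	objects, err := ft.Inventory()
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range objects {
		fmt.Printf("%v dir=%v size=%d modified=%v\n", obj.Path, obj.IsDir, obj.Size, obj.Modified)
	}
}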

27
drives/sync/model.go Normal file
View File

@@ -0,0 +1,27 @@
package sync
import (
"io"
"os"
"time"
)
type Object struct {
Path string
IsDir bool
Size int64
Modified time.Time
ETag string
}
type Target interface {
Inventory() ([]*Object, error)
Dir(path string) ([]*Object, error)
Mkdir(path string) error
ReadStream(path string) (io.ReadCloser, error)
WriteStream(path string, stream io.Reader, mode os.FileMode) error
WriteStreamWithModTime(path string, stream io.Reader, mode os.FileMode, modTime time.Time) error
Move(src, dest string) error
Rm(path string) error
SetModificationTime(path string, mtime time.Time) error
}
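Every drives backend (filesystem, WebDAV, zrok) implements this interface, so either side of a transfer can be swapped freely. A minimal sketch of a helper built only against the interface (hypothetical, for illustration):

// copyOne streams a single non-directory object from src to dst,
// preserving the source's modification time.
func copyOne(src, dst Target, obj *Object) error {
	rs, err := src.ReadStream(obj.Path)
	if err != nil {
		return err
	}
	defer func() { _ = rs.Close() }()
	return dst.WriteStreamWithModTime(obj.Path, rs, os.ModePerm, obj.Modified)
}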

View File

@@ -0,0 +1,59 @@
package sync
import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"os"
)
func OneWay(src, dst Target, sync bool) error {
srcTree, err := src.Inventory()
if err != nil {
return errors.Wrap(err, "error creating source inventory")
}
var dstTree []*Object
if sync {
dstTree, err = dst.Inventory()
if err != nil {
return errors.Wrap(err, "error creating destination inventory")
}
}
dstIndex := make(map[string]*Object)
for _, f := range dstTree {
dstIndex[f.Path] = f
}
var copyList []*Object
for _, srcF := range srcTree {
if dstF, found := dstIndex[srcF.Path]; found {
if !srcF.IsDir && (dstF.Size != srcF.Size || dstF.Modified.Unix() != srcF.Modified.Unix()) {
logrus.Debugf("%v <- dstF.Size = '%d', srcF.Size = '%d', dstF.Modified.UTC = '%d', srcF.Modified.UTC = '%d'", srcF.Path, dstF.Size, srcF.Size, dstF.Modified.Unix(), srcF.Modified.Unix())
copyList = append(copyList, srcF)
}
} else {
logrus.Debugf("%v <- !found", srcF.Path)
copyList = append(copyList, srcF)
}
}
for _, copyPath := range copyList {
if copyPath.IsDir {
if err := dst.Mkdir(copyPath.Path); err != nil {
return err
}
} else {
ss, err := src.ReadStream(copyPath.Path)
if err != nil {
return err
}
if err := dst.WriteStreamWithModTime(copyPath.Path, ss, os.ModePerm, copyPath.Modified); err != nil {
return err
}
}
logrus.Infof("=> %v", copyPath.Path)
}
return nil
}
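OneWay copies everything when sync is false; when sync is true it first inventories the destination and only copies objects that are missing or that differ in size or modification time. A rough usage sketch with hypothetical local paths:

package main

import (
	"log"

	"github.com/openziti/zrok/drives/sync"
)

func main() {
	// Hypothetical source and destination directories.
	src := sync.NewFilesystemTarget(&sync.FilesystemTargetConfig{Root: "/tmp/src"})
	dst := sync.NewFilesystemTarget(&sync.FilesystemTargetConfig{Root: "/tmp/dst"})
	// With the third argument true, only missing or changed objects are copied;
	// with false, everything in the source inventory is copied.
	if err := sync.OneWay(src, dst, true); err != nil {
		log.Fatal(err)
	}
}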

34
drives/sync/target.go Normal file
View File

@@ -0,0 +1,34 @@
package sync
import (
"github.com/openziti/zrok/environment/env_core"
"github.com/pkg/errors"
"net/url"
"strings"
)
func TargetForURL(url *url.URL, root env_core.Root, basicAuth string) (Target, error) {
switch url.Scheme {
case "file":
return NewFilesystemTarget(&FilesystemTargetConfig{Root: url.Path}), nil
case "zrok":
return NewZrokTarget(&ZrokTargetConfig{URL: url, Root: root})
case "http", "https":
var username string
var password string
if basicAuth != "" {
authTokens := strings.Split(basicAuth, ":")
if len(authTokens) != 2 {
return nil, errors.Errorf("invalid basic authentication (expect 'username:password')")
}
username = authTokens[0]
password = authTokens[1]
}
return NewWebDAVTarget(&WebDAVTargetConfig{URL: url, Username: username, Password: password})
default:
return nil, errors.Errorf("unknown URL scheme '%v'", url.Scheme)
}
}
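Callers normally obtain both ends of a transfer from URLs and let the scheme select the backend. A sketch (hypothetical URLs and credentials; nil is passed for the environment root because only zrok:// URLs need it):

package main

import (
	"log"
	"net/url"

	"github.com/openziti/zrok/drives/sync"
)

func main() {
	// Hypothetical endpoints.
	srcURL, _ := url.Parse("file:///tmp/src")
	dstURL, _ := url.Parse("https://drive.example.com/dav/")

	src, err := sync.TargetForURL(srcURL, nil, "")
	if err != nil {
		log.Fatal(err)
	}
	dst, err := sync.TargetForURL(dstURL, nil, "user:password")
	if err != nil {
		log.Fatal(err)
	}
	if err := sync.OneWay(src, dst, true); err != nil {
		log.Fatal(err)
	}
}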

144
drives/sync/webdav.go Normal file
View File

@@ -0,0 +1,144 @@
package sync
import (
"context"
"github.com/openziti/zrok/drives/davClient"
"github.com/pkg/errors"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"time"
)
type WebDAVTargetConfig struct {
URL *url.URL
Username string
Password string
}
type WebDAVTarget struct {
cfg *WebDAVTargetConfig
dc *davClient.Client
}
func NewWebDAVTarget(cfg *WebDAVTargetConfig) (*WebDAVTarget, error) {
var httpClient davClient.HTTPClient
httpClient = http.DefaultClient
if cfg.Username != "" || cfg.Password != "" {
httpClient = davClient.HTTPClientWithBasicAuth(httpClient, cfg.Username, cfg.Password)
}
dc, err := davClient.NewClient(httpClient, cfg.URL.String())
if err != nil {
return nil, err
}
return &WebDAVTarget{cfg: cfg, dc: dc}, nil
}
func (t *WebDAVTarget) Inventory() ([]*Object, error) {
rootFi, err := t.dc.Stat(context.Background(), t.cfg.URL.Path)
if err != nil {
return nil, err
}
if !rootFi.IsDir {
base := filepath.Base(t.cfg.URL.Path)
t.cfg.URL.Path = filepath.Dir(t.cfg.URL.Path)
return []*Object{{
Path: "/" + base,
IsDir: false,
Size: rootFi.Size,
Modified: rootFi.ModTime,
}}, nil
}
fis, err := t.dc.Readdir(context.Background(), "", true)
if err != nil {
return nil, err
}
var objects []*Object
for _, fi := range fis {
if fi.Path != "/" {
objects = append(objects, &Object{
Path: fi.Path,
IsDir: fi.IsDir,
Size: fi.Size,
Modified: fi.ModTime,
})
}
}
return objects, nil
}
func (t *WebDAVTarget) Dir(path string) ([]*Object, error) {
fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, false)
if err != nil {
return nil, err
}
var objects []*Object
for _, fi := range fis {
if fi.Path != "/" && fi.Path != t.cfg.URL.Path+"/" {
objects = append(objects, &Object{
Path: filepath.Base(fi.Path),
IsDir: fi.IsDir,
Size: fi.Size,
Modified: fi.ModTime,
})
}
}
return objects, nil
}
func (t *WebDAVTarget) Mkdir(path string) error {
fi, err := t.dc.Stat(context.Background(), filepath.Join(t.cfg.URL.Path, path))
if err == nil {
if fi.IsDir {
return nil
}
return errors.Errorf("'%v' already exists; not directory", path)
}
return t.dc.Mkdir(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
func (t *WebDAVTarget) ReadStream(path string) (io.ReadCloser, error) {
return t.dc.Open(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
func (t *WebDAVTarget) WriteStream(path string, rs io.Reader, _ os.FileMode) error {
ws, err := t.dc.Create(context.Background(), filepath.Join(t.cfg.URL.Path, path))
if err != nil {
return err
}
defer func() { _ = ws.Close() }()
_, err = io.Copy(ws, rs)
if err != nil {
return err
}
return nil
}
func (t *WebDAVTarget) WriteStreamWithModTime(path string, rs io.Reader, _ os.FileMode, modTime time.Time) error {
ws, err := t.dc.CreateWithModTime(context.Background(), filepath.Join(t.cfg.URL.Path, path), modTime)
if err != nil {
return err
}
defer func() { _ = ws.Close() }()
_, err = io.Copy(ws, rs)
if err != nil {
return err
}
return nil
}
func (t *WebDAVTarget) Move(src, dest string) error {
return t.dc.MoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, src), dest, true)
}
func (t *WebDAVTarget) Rm(path string) error {
return t.dc.RemoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
func (t *WebDAVTarget) SetModificationTime(path string, mtime time.Time) error {
return t.dc.Touch(context.Background(), filepath.Join(t.cfg.URL.Path, path), mtime)
}
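Constructed directly, a WebDAV endpoint takes its base URL plus optional basic-auth credentials. A sketch with hypothetical values:

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/openziti/zrok/drives/sync"
)

func main() {
	// Hypothetical server and credentials.
	davURL, _ := url.Parse("https://files.example.com/dav/")
	wt, err := sync.NewWebDAVTarget(&sync.WebDAVTargetConfig{
		URL:      davURL,
		Username: "alice",
		Password: "secret",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Dir lists the configured base path non-recursively.
	objects, err := wt.Dir("")
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range objects {
		fmt.Println(obj.Path, obj.Size)
	}
}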

156
drives/sync/zrok.go Normal file
View File

@@ -0,0 +1,156 @@
package sync
import (
"context"
"github.com/openziti/zrok/drives/davClient"
"github.com/openziti/zrok/environment/env_core"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/pkg/errors"
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
)
type ZrokTargetConfig struct {
URL *url.URL
Root env_core.Root
}
type ZrokTarget struct {
cfg *ZrokTargetConfig
dc *davClient.Client
}
type zrokDialContext struct {
root env_core.Root
}
func (zdc *zrokDialContext) Dial(_ context.Context, _, addr string) (net.Conn, error) {
share := strings.Split(addr, ":")[0]
return sdk.NewDialer(share, zdc.root)
}
func NewZrokTarget(cfg *ZrokTargetConfig) (*ZrokTarget, error) {
transport := http.DefaultTransport.(*http.Transport).Clone()
transport.DialContext = (&zrokDialContext{cfg.Root}).Dial
transport.TLSClientConfig.InsecureSkipVerify = true
httpUrl := strings.Replace(cfg.URL.String(), "zrok:", "http:", 1)
dc, err := davClient.NewClient(&http.Client{Transport: transport}, httpUrl)
if err != nil {
return nil, err
}
return &ZrokTarget{cfg: cfg, dc: dc}, nil
}
func (t *ZrokTarget) Inventory() ([]*Object, error) {
rootFi, err := t.dc.Stat(context.Background(), t.cfg.URL.Path)
if err != nil {
return nil, err
}
if !rootFi.IsDir {
base := filepath.Base(t.cfg.URL.Path)
t.cfg.URL.Path = filepath.Dir(t.cfg.URL.Path)
return []*Object{{
Path: "/" + base,
IsDir: false,
Size: rootFi.Size,
Modified: rootFi.ModTime,
}}, nil
}
fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, true)
if err != nil {
return nil, err
}
var objects []*Object
for _, fi := range fis {
if fi.Path != "/" {
objects = append(objects, &Object{
Path: fi.Path,
IsDir: fi.IsDir,
Size: fi.Size,
Modified: fi.ModTime,
ETag: fi.ETag,
})
}
}
return objects, nil
}
func (t *ZrokTarget) Dir(path string) ([]*Object, error) {
fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, false)
if err != nil {
return nil, err
}
var objects []*Object
for _, fi := range fis {
if fi.Path != "/" && fi.Path != t.cfg.URL.Path+"/" {
objects = append(objects, &Object{
Path: filepath.Base(fi.Path),
IsDir: fi.IsDir,
Size: fi.Size,
Modified: fi.ModTime,
})
}
}
return objects, nil
}
func (t *ZrokTarget) Mkdir(path string) error {
fi, err := t.dc.Stat(context.Background(), filepath.Join(t.cfg.URL.Path, path))
if err == nil {
if fi.IsDir {
return nil
}
return errors.Errorf("'%v' already exists; not directory", path)
}
return t.dc.Mkdir(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
func (t *ZrokTarget) ReadStream(path string) (io.ReadCloser, error) {
return t.dc.Open(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
func (t *ZrokTarget) WriteStream(path string, rs io.Reader, _ os.FileMode) error {
ws, err := t.dc.Create(context.Background(), filepath.Join(t.cfg.URL.Path, path))
if err != nil {
return err
}
defer func() { _ = ws.Close() }()
_, err = io.Copy(ws, rs)
if err != nil {
return err
}
return nil
}
func (t *ZrokTarget) WriteStreamWithModTime(path string, rs io.Reader, _ os.FileMode, modTime time.Time) error {
ws, err := t.dc.CreateWithModTime(context.Background(), filepath.Join(t.cfg.URL.Path, path), modTime)
if err != nil {
return err
}
defer func() { _ = ws.Close() }()
_, err = io.Copy(ws, rs)
if err != nil {
return err
}
return nil
}
func (t *ZrokTarget) Move(src, dest string) error {
return t.dc.MoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, src), dest, true)
}
func (t *ZrokTarget) Rm(path string) error {
return t.dc.RemoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, path))
}
func (t *ZrokTarget) SetModificationTime(path string, mtime time.Time) error {
return t.dc.Touch(context.Background(), filepath.Join(t.cfg.URL.Path, path), mtime)
}
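A zrok target is addressed with a zrok:// URL whose host is the drive share token; the custom dialer above resolves that token over the zrok overlay rather than DNS. A sketch, assuming an enabled local environment loaded via the environment package's LoadRoot helper and a placeholder share token:

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/openziti/zrok/drives/sync"
	"github.com/openziti/zrok/environment"
)

func main() {
	// Assumes an enabled local zrok environment and an existing drive share
	// reachable at the (hypothetical) share token below.
	root, err := environment.LoadRoot()
	if err != nil {
		log.Fatal(err)
	}
	shareURL, _ := url.Parse("zrok://mytoken123/")
	zt, err := sync.NewZrokTarget(&sync.ZrokTargetConfig{URL: shareURL, Root: root})
	if err != nil {
		log.Fatal(err)
	}
	objects, err := zt.Inventory()
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range objects {
		fmt.Println(obj.Path, obj.Size, obj.ETag)
	}
}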

View File

@@ -4,9 +4,9 @@ import (
"fmt"
"github.com/openziti/sdk-golang/ziti"
"github.com/openziti/sdk-golang/ziti/edge"
"github.com/openziti/zrok/drives/davServer"
"github.com/openziti/zrok/endpoints"
"github.com/pkg/errors"
"golang.org/x/net/webdav"
"net/http"
"time"
)
@@ -42,9 +42,9 @@ func NewBackend(cfg *BackendConfig) (*Backend, error) {
return nil, err
}
handler := &webdav.Handler{
FileSystem: webdav.Dir(cfg.DriveRoot),
LockSystem: webdav.NewMemLS(),
handler := &davServer.Handler{
FileSystem: davServer.Dir(cfg.DriveRoot),
LockSystem: davServer.NewMemLS(),
Logger: func(r *http.Request, err error) {
if cfg.Requests != nil {
cfg.Requests <- &endpoints.Request{