path: root/vendor/golang.org/x/net/webdav
Diffstat (limited to 'vendor/golang.org/x/net/webdav')
-rw-r--r--  vendor/golang.org/x/net/webdav/file.go                     796
-rw-r--r--  vendor/golang.org/x/net/webdav/file_go1.6.go                17
-rw-r--r--  vendor/golang.org/x/net/webdav/file_go1.7.go                16
-rw-r--r--  vendor/golang.org/x/net/webdav/if.go                       173
-rw-r--r--  vendor/golang.org/x/net/webdav/internal/xml/README          11
-rw-r--r--  vendor/golang.org/x/net/webdav/internal/xml/marshal.go    1223
-rw-r--r--  vendor/golang.org/x/net/webdav/internal/xml/read.go        692
-rw-r--r--  vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go    371
-rw-r--r--  vendor/golang.org/x/net/webdav/internal/xml/xml.go        1998
-rw-r--r--  vendor/golang.org/x/net/webdav/litmus_test_server.go        94
-rw-r--r--  vendor/golang.org/x/net/webdav/lock.go                     445
-rw-r--r--  vendor/golang.org/x/net/webdav/prop.go                     418
-rw-r--r--  vendor/golang.org/x/net/webdav/webdav.go                   702
-rw-r--r--  vendor/golang.org/x/net/webdav/xml.go                      519
14 files changed, 7475 insertions, 0 deletions
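
The diffstat covers the whole vendored package: the file system abstraction (file.go), the If header parser (if.go), locking (lock.go), property handling (prop.go), the WebDAV handler itself (webdav.go) and a forked encoding/xml. As rough orientation, a server embedding this vendored copy would typically be wired up as below. This is a hedged sketch using the exported Handler, Dir and NewMemLS from webdav.go, file.go and lock.go; only file.go, the context shims, if.go and part of the internal xml fork appear in this excerpt.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/webdav" // the vendored package shown in this diff
)

func main() {
	h := &webdav.Handler{
		Prefix:     "/dav/",
		FileSystem: webdav.Dir("/srv/dav"), // or webdav.NewMemFS() for an in-memory tree
		LockSystem: webdav.NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				log.Printf("webdav: %s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	http.Handle("/dav/", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
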
diff --git a/vendor/golang.org/x/net/webdav/file.go b/vendor/golang.org/x/net/webdav/file.go
new file mode 100644
index 0000000..748118d
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/file.go
@@ -0,0 +1,796 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// slashClean is equivalent to but slightly more efficient than
+// path.Clean("/" + name).
+func slashClean(name string) string {
+ if name == "" || name[0] != '/' {
+ name = "/" + name
+ }
+ return path.Clean(name)
+}
+
+// A FileSystem implements access to a collection of named files. The elements
+// in a file path are separated by slash ('/', U+002F) characters, regardless
+// of host operating system convention.
+//
+// Each method has the same semantics as the os package's function of the same
+// name.
+//
+// Note that the os.Rename documentation says that "OS-specific restrictions
+// might apply". In particular, whether or not renaming a file or directory
+// overwriting another existing file or directory is an error is OS-dependent.
+type FileSystem interface {
+ Mkdir(ctx context.Context, name string, perm os.FileMode) error
+ OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error)
+ RemoveAll(ctx context.Context, name string) error
+ Rename(ctx context.Context, oldName, newName string) error
+ Stat(ctx context.Context, name string) (os.FileInfo, error)
+}
+
+// A File is returned by a FileSystem's OpenFile method and can be served by a
+// Handler.
+//
+// A File may optionally implement the DeadPropsHolder interface, if it can
+// load and save dead properties.
+type File interface {
+ http.File
+ io.Writer
+}
+
+// A Dir implements FileSystem using the native file system restricted to a
+// specific directory tree.
+//
+// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's
+// string value is a filename on the native file system, not a URL, so it is
+// separated by filepath.Separator, which isn't necessarily '/'.
+//
+// An empty Dir is treated as ".".
+type Dir string
+
+func (d Dir) resolve(name string) string {
+ // This implementation is based on Dir.Open's code in the standard net/http package.
+ if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+ strings.Contains(name, "\x00") {
+ return ""
+ }
+ dir := string(d)
+ if dir == "" {
+ dir = "."
+ }
+ return filepath.Join(dir, filepath.FromSlash(slashClean(name)))
+}
+
+func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+ if name = d.resolve(name); name == "" {
+ return os.ErrNotExist
+ }
+ return os.Mkdir(name, perm)
+}
+
+func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
+ if name = d.resolve(name); name == "" {
+ return nil, os.ErrNotExist
+ }
+ f, err := os.OpenFile(name, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+func (d Dir) RemoveAll(ctx context.Context, name string) error {
+ if name = d.resolve(name); name == "" {
+ return os.ErrNotExist
+ }
+ if name == filepath.Clean(string(d)) {
+ // Prohibit removing the virtual root directory.
+ return os.ErrInvalid
+ }
+ return os.RemoveAll(name)
+}
+
+func (d Dir) Rename(ctx context.Context, oldName, newName string) error {
+ if oldName = d.resolve(oldName); oldName == "" {
+ return os.ErrNotExist
+ }
+ if newName = d.resolve(newName); newName == "" {
+ return os.ErrNotExist
+ }
+ if root := filepath.Clean(string(d)); root == oldName || root == newName {
+ // Prohibit renaming from or to the virtual root directory.
+ return os.ErrInvalid
+ }
+ return os.Rename(oldName, newName)
+}
+
+func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+ if name = d.resolve(name); name == "" {
+ return nil, os.ErrNotExist
+ }
+ return os.Stat(name)
+}
+
+// NewMemFS returns a new in-memory FileSystem implementation.
+func NewMemFS() FileSystem {
+ return &memFS{
+ root: memFSNode{
+ children: make(map[string]*memFSNode),
+ mode: 0660 | os.ModeDir,
+ modTime: time.Now(),
+ },
+ }
+}
+
+// A memFS implements FileSystem, storing all metadata and actual file data
+// in-memory. No limits on filesystem size are used, so it is not recommended
+// this be used where the clients are untrusted.
+//
+// Concurrent access is permitted. The tree structure is protected by a mutex,
+// and each node's contents and metadata are protected by a per-node mutex.
+//
+// TODO: Enforce file permissions.
+type memFS struct {
+ mu sync.Mutex
+ root memFSNode
+}
+
+// TODO: clean up and rationalize the walk/find code.
+
+// walk walks the directory tree for the fullname, calling f at each step. If f
+// returns an error, the walk will be aborted and return that same error.
+//
+// dir is the directory at that step, frag is the name fragment, and final is
+// whether it is the final step. For example, walking "/foo/bar/x" will result
+// in 3 calls to f:
+// - "/", "foo", false
+// - "/foo/", "bar", false
+// - "/foo/bar/", "x", true
+// The frag argument will be empty only if dir is the root node and the walk
+// ends at that root node.
+func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error {
+ original := fullname
+ fullname = slashClean(fullname)
+
+ // Strip any leading "/"s to make fullname a relative path, as the walk
+ // starts at fs.root.
+ if fullname[0] == '/' {
+ fullname = fullname[1:]
+ }
+ dir := &fs.root
+
+ for {
+ frag, remaining := fullname, ""
+ i := strings.IndexRune(fullname, '/')
+ final := i < 0
+ if !final {
+ frag, remaining = fullname[:i], fullname[i+1:]
+ }
+ if frag == "" && dir != &fs.root {
+ panic("webdav: empty path fragment for a clean path")
+ }
+ if err := f(dir, frag, final); err != nil {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: err,
+ }
+ }
+ if final {
+ break
+ }
+ child := dir.children[frag]
+ if child == nil {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: os.ErrNotExist,
+ }
+ }
+ if !child.mode.IsDir() {
+ return &os.PathError{
+ Op: op,
+ Path: original,
+ Err: os.ErrInvalid,
+ }
+ }
+ dir, fullname = child, remaining
+ }
+ return nil
+}
+
+// find returns the parent of the named node and the relative name fragment
+// from the parent to the child. For example, if finding "/foo/bar/baz" then
+// parent will be the node for "/foo/bar" and frag will be "baz".
+//
+// If the fullname names the root node, then parent, frag and err will be zero.
+//
+// find returns an error if the parent does not already exist or the parent
+// isn't a directory, but it will not return an error per se if the child does
+// not already exist. The error returned is either nil or an *os.PathError
+// whose Op is op.
+func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) {
+ err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error {
+ if !final {
+ return nil
+ }
+ if frag0 != "" {
+ parent, frag = parent0, frag0
+ }
+ return nil
+ })
+ return parent, frag, err
+}
+
+func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("mkdir", name)
+ if err != nil {
+ return err
+ }
+ if dir == nil {
+ // We can't create the root.
+ return os.ErrInvalid
+ }
+ if _, ok := dir.children[frag]; ok {
+ return os.ErrExist
+ }
+ dir.children[frag] = &memFSNode{
+ children: make(map[string]*memFSNode),
+ mode: perm.Perm() | os.ModeDir,
+ modTime: time.Now(),
+ }
+ return nil
+}
+
+func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("open", name)
+ if err != nil {
+ return nil, err
+ }
+ var n *memFSNode
+ if dir == nil {
+ // We're opening the root.
+ if flag&(os.O_WRONLY|os.O_RDWR) != 0 {
+ return nil, os.ErrPermission
+ }
+ n, frag = &fs.root, "/"
+
+ } else {
+ n = dir.children[frag]
+ if flag&(os.O_SYNC|os.O_APPEND) != 0 {
+ // memFile doesn't support these flags yet.
+ return nil, os.ErrInvalid
+ }
+ if flag&os.O_CREATE != 0 {
+ if flag&os.O_EXCL != 0 && n != nil {
+ return nil, os.ErrExist
+ }
+ if n == nil {
+ n = &memFSNode{
+ mode: perm.Perm(),
+ }
+ dir.children[frag] = n
+ }
+ }
+ if n == nil {
+ return nil, os.ErrNotExist
+ }
+ if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 {
+ n.mu.Lock()
+ n.data = nil
+ n.mu.Unlock()
+ }
+ }
+
+ children := make([]os.FileInfo, 0, len(n.children))
+ for cName, c := range n.children {
+ children = append(children, c.stat(cName))
+ }
+ return &memFile{
+ n: n,
+ nameSnapshot: frag,
+ childrenSnapshot: children,
+ }, nil
+}
+
+func (fs *memFS) RemoveAll(ctx context.Context, name string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("remove", name)
+ if err != nil {
+ return err
+ }
+ if dir == nil {
+ // We can't remove the root.
+ return os.ErrInvalid
+ }
+ delete(dir.children, frag)
+ return nil
+}
+
+func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ oldName = slashClean(oldName)
+ newName = slashClean(newName)
+ if oldName == newName {
+ return nil
+ }
+ if strings.HasPrefix(newName, oldName+"/") {
+ // We can't rename oldName to be a sub-directory of itself.
+ return os.ErrInvalid
+ }
+
+ oDir, oFrag, err := fs.find("rename", oldName)
+ if err != nil {
+ return err
+ }
+ if oDir == nil {
+ // We can't rename from the root.
+ return os.ErrInvalid
+ }
+
+ nDir, nFrag, err := fs.find("rename", newName)
+ if err != nil {
+ return err
+ }
+ if nDir == nil {
+ // We can't rename to the root.
+ return os.ErrInvalid
+ }
+
+ oNode, ok := oDir.children[oFrag]
+ if !ok {
+ return os.ErrNotExist
+ }
+ if oNode.children != nil {
+ if nNode, ok := nDir.children[nFrag]; ok {
+ if nNode.children == nil {
+ return errNotADirectory
+ }
+ if len(nNode.children) != 0 {
+ return errDirectoryNotEmpty
+ }
+ }
+ }
+ delete(oDir.children, oFrag)
+ nDir.children[nFrag] = oNode
+ return nil
+}
+
+func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ dir, frag, err := fs.find("stat", name)
+ if err != nil {
+ return nil, err
+ }
+ if dir == nil {
+ // We're stat'ting the root.
+ return fs.root.stat("/"), nil
+ }
+ if n, ok := dir.children[frag]; ok {
+ return n.stat(path.Base(name)), nil
+ }
+ return nil, os.ErrNotExist
+}
+
+// A memFSNode represents a single entry in the in-memory filesystem and also
+// implements os.FileInfo.
+type memFSNode struct {
+ // children is protected by memFS.mu.
+ children map[string]*memFSNode
+
+ mu sync.Mutex
+ data []byte
+ mode os.FileMode
+ modTime time.Time
+ deadProps map[xml.Name]Property
+}
+
+func (n *memFSNode) stat(name string) *memFileInfo {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ return &memFileInfo{
+ name: name,
+ size: int64(len(n.data)),
+ mode: n.mode,
+ modTime: n.modTime,
+ }
+}
+
+func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ if len(n.deadProps) == 0 {
+ return nil, nil
+ }
+ ret := make(map[xml.Name]Property, len(n.deadProps))
+ for k, v := range n.deadProps {
+ ret[k] = v
+ }
+ return ret, nil
+}
+
+func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ pstat := Propstat{Status: http.StatusOK}
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+ if patch.Remove {
+ delete(n.deadProps, p.XMLName)
+ continue
+ }
+ if n.deadProps == nil {
+ n.deadProps = map[xml.Name]Property{}
+ }
+ n.deadProps[p.XMLName] = p
+ }
+ }
+ return []Propstat{pstat}, nil
+}
+
+type memFileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+}
+
+func (f *memFileInfo) Name() string { return f.name }
+func (f *memFileInfo) Size() int64 { return f.size }
+func (f *memFileInfo) Mode() os.FileMode { return f.mode }
+func (f *memFileInfo) ModTime() time.Time { return f.modTime }
+func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() }
+func (f *memFileInfo) Sys() interface{} { return nil }
+
+// A memFile is a File implementation for a memFSNode. It is a per-file (not
+// per-node) read/write position, and a snapshot of the memFS' tree structure
+// (a node's name and children) for that node.
+type memFile struct {
+ n *memFSNode
+ nameSnapshot string
+ childrenSnapshot []os.FileInfo
+ // pos is protected by n.mu.
+ pos int
+}
+
+// A *memFile implements the optional DeadPropsHolder interface.
+var _ DeadPropsHolder = (*memFile)(nil)
+
+func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() }
+func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) }
+
+func (f *memFile) Close() error {
+ return nil
+}
+
+func (f *memFile) Read(p []byte) (int, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ if f.n.mode.IsDir() {
+ return 0, os.ErrInvalid
+ }
+ if f.pos >= len(f.n.data) {
+ return 0, io.EOF
+ }
+ n := copy(p, f.n.data[f.pos:])
+ f.pos += n
+ return n, nil
+}
+
+func (f *memFile) Readdir(count int) ([]os.FileInfo, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ if !f.n.mode.IsDir() {
+ return nil, os.ErrInvalid
+ }
+ old := f.pos
+ if old >= len(f.childrenSnapshot) {
+ // The os.File Readdir docs say that at the end of a directory,
+ // the error is io.EOF if count > 0 and nil if count <= 0.
+ if count > 0 {
+ return nil, io.EOF
+ }
+ return nil, nil
+ }
+ if count > 0 {
+ f.pos += count
+ if f.pos > len(f.childrenSnapshot) {
+ f.pos = len(f.childrenSnapshot)
+ }
+ } else {
+ f.pos = len(f.childrenSnapshot)
+ old = 0
+ }
+ return f.childrenSnapshot[old:f.pos], nil
+}
+
+func (f *memFile) Seek(offset int64, whence int) (int64, error) {
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+ npos := f.pos
+ // TODO: How to handle offsets greater than the size of system int?
+ switch whence {
+ case os.SEEK_SET:
+ npos = int(offset)
+ case os.SEEK_CUR:
+ npos += int(offset)
+ case os.SEEK_END:
+ npos = len(f.n.data) + int(offset)
+ default:
+ npos = -1
+ }
+ if npos < 0 {
+ return 0, os.ErrInvalid
+ }
+ f.pos = npos
+ return int64(f.pos), nil
+}
+
+func (f *memFile) Stat() (os.FileInfo, error) {
+ return f.n.stat(f.nameSnapshot), nil
+}
+
+func (f *memFile) Write(p []byte) (int, error) {
+ lenp := len(p)
+ f.n.mu.Lock()
+ defer f.n.mu.Unlock()
+
+ if f.n.mode.IsDir() {
+ return 0, os.ErrInvalid
+ }
+ if f.pos < len(f.n.data) {
+ n := copy(f.n.data[f.pos:], p)
+ f.pos += n
+ p = p[n:]
+ } else if f.pos > len(f.n.data) {
+ // Write permits the creation of holes, if we've seek'ed past the
+ // existing end of file.
+ if f.pos <= cap(f.n.data) {
+ oldLen := len(f.n.data)
+ f.n.data = f.n.data[:f.pos]
+ hole := f.n.data[oldLen:]
+ for i := range hole {
+ hole[i] = 0
+ }
+ } else {
+ d := make([]byte, f.pos, f.pos+len(p))
+ copy(d, f.n.data)
+ f.n.data = d
+ }
+ }
+
+ if len(p) > 0 {
+ // We should only get here if f.pos == len(f.n.data).
+ f.n.data = append(f.n.data, p...)
+ f.pos = len(f.n.data)
+ }
+ f.n.modTime = time.Now()
+ return lenp, nil
+}
+
+// moveFiles moves files and/or directories from src to dst.
+//
+// See section 9.9.4 for when various HTTP status codes apply.
+func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) {
+ created := false
+ if _, err := fs.Stat(ctx, dst); err != nil {
+ if !os.IsNotExist(err) {
+ return http.StatusForbidden, err
+ }
+ created = true
+ } else if overwrite {
+ // Section 9.9.3 says that "If a resource exists at the destination
+ // and the Overwrite header is "T", then prior to performing the move,
+ // the server must perform a DELETE with "Depth: infinity" on the
+ // destination resource.
+ if err := fs.RemoveAll(ctx, dst); err != nil {
+ return http.StatusForbidden, err
+ }
+ } else {
+ return http.StatusPreconditionFailed, os.ErrExist
+ }
+ if err := fs.Rename(ctx, src, dst); err != nil {
+ return http.StatusForbidden, err
+ }
+ if created {
+ return http.StatusCreated, nil
+ }
+ return http.StatusNoContent, nil
+}
+
+func copyProps(dst, src File) error {
+ d, ok := dst.(DeadPropsHolder)
+ if !ok {
+ return nil
+ }
+ s, ok := src.(DeadPropsHolder)
+ if !ok {
+ return nil
+ }
+ m, err := s.DeadProps()
+ if err != nil {
+ return err
+ }
+ props := make([]Property, 0, len(m))
+ for _, prop := range m {
+ props = append(props, prop)
+ }
+ _, err = d.Patch([]Proppatch{{Props: props}})
+ return err
+}
+
+// copyFiles copies files and/or directories from src to dst.
+//
+// See section 9.8.5 for when various HTTP status codes apply.
+func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
+ if recursion == 1000 {
+ return http.StatusInternalServerError, errRecursionTooDeep
+ }
+ recursion++
+
+ // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/
+ // into /A/B/ could lead to infinite recursion if not handled correctly."
+
+ srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ defer srcFile.Close()
+ srcStat, err := srcFile.Stat()
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ srcPerm := srcStat.Mode() & os.ModePerm
+
+ created := false
+ if _, err := fs.Stat(ctx, dst); err != nil {
+ if os.IsNotExist(err) {
+ created = true
+ } else {
+ return http.StatusForbidden, err
+ }
+ } else {
+ if !overwrite {
+ return http.StatusPreconditionFailed, os.ErrExist
+ }
+ if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) {
+ return http.StatusForbidden, err
+ }
+ }
+
+ if srcStat.IsDir() {
+ if err := fs.Mkdir(ctx, dst, srcPerm); err != nil {
+ return http.StatusForbidden, err
+ }
+ if depth == infiniteDepth {
+ children, err := srcFile.Readdir(-1)
+ if err != nil {
+ return http.StatusForbidden, err
+ }
+ for _, c := range children {
+ name := c.Name()
+ s := path.Join(src, name)
+ d := path.Join(dst, name)
+ cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion)
+ if cErr != nil {
+ // TODO: MultiStatus.
+ return cStatus, cErr
+ }
+ }
+ }
+
+ } else {
+ dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
+ return http.StatusForbidden, err
+
+ }
+ _, copyErr := io.Copy(dstFile, srcFile)
+ propsErr := copyProps(dstFile, srcFile)
+ closeErr := dstFile.Close()
+ if copyErr != nil {
+ return http.StatusInternalServerError, copyErr
+ }
+ if propsErr != nil {
+ return http.StatusInternalServerError, propsErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ }
+
+ if created {
+ return http.StatusCreated, nil
+ }
+ return http.StatusNoContent, nil
+}
+
+// walkFS traverses filesystem fs starting at name up to depth levels.
+//
+// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
+// walkFS calls walkFn. If a visited file system node is a directory and
+// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
+func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+ // This implementation is based on Walk's code in the standard path/filepath package.
+ err := walkFn(name, info, nil)
+ if err != nil {
+ if info.IsDir() && err == filepath.SkipDir {
+ return nil
+ }
+ return err
+ }
+ if !info.IsDir() || depth == 0 {
+ return nil
+ }
+ if depth == 1 {
+ depth = 0
+ }
+
+ // Read directory names.
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return walkFn(name, info, err)
+ }
+ fileInfos, err := f.Readdir(0)
+ f.Close()
+ if err != nil {
+ return walkFn(name, info, err)
+ }
+
+ for _, fileInfo := range fileInfos {
+ filename := path.Join(name, fileInfo.Name())
+ fileInfo, err := fs.Stat(ctx, filename)
+ if err != nil {
+ if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+ return err
+ }
+ } else {
+ err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn)
+ if err != nil {
+ if !fileInfo.IsDir() || err != filepath.SkipDir {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
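
file.go above defines the FileSystem and File interfaces, the OS-backed Dir, and the in-memory memFS returned by NewMemFS. A minimal sketch of exercising that API directly, assuming the vendored package is importable as golang.org/x/net/webdav and using the same golang.org/x/net/context import that file.go itself uses:

package main

import (
	"fmt"
	"os"

	"golang.org/x/net/context"
	"golang.org/x/net/webdav"
)

func main() {
	ctx := context.Background()
	fs := webdav.NewMemFS() // in-memory FileSystem from file.go

	if err := fs.Mkdir(ctx, "/docs", 0755); err != nil {
		panic(err)
	}
	f, err := fs.OpenFile(ctx, "/docs/hello.txt", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	f.Write([]byte("hello, webdav")) // File embeds io.Writer as well as http.File
	f.Close()

	fi, err := fs.Stat(ctx, "/docs/hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.Name(), fi.Size(), fi.IsDir()) // hello.txt 13 false
}
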
diff --git a/vendor/golang.org/x/net/webdav/file_go1.6.go b/vendor/golang.org/x/net/webdav/file_go1.6.go
new file mode 100644
index 0000000..fa38770
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/file_go1.6.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package webdav
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+func getContext(r *http.Request) context.Context {
+ return context.Background()
+}
diff --git a/vendor/golang.org/x/net/webdav/file_go1.7.go b/vendor/golang.org/x/net/webdav/file_go1.7.go
new file mode 100644
index 0000000..d1c3de8
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/file_go1.7.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package webdav
+
+import (
+ "context"
+ "net/http"
+)
+
+func getContext(r *http.Request) context.Context {
+ return r.Context()
+}
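
file_go1.6.go and file_go1.7.go are a build-tag shim: before Go 1.7 the standard library's http.Request has no Context method, so the package falls back to golang.org/x/net/context, while on go1.7+ it uses r.Context(). The rest of the package can then call getContext(r) unconditionally. A hypothetical, simplified handler body illustrating that use; the real handlers live in webdav.go, which the diffstat lists but this excerpt does not include:

package webdav

import (
	"net/http"
	"os"
)

// demoMkcol is a hypothetical, simplified MKCOL-style handler body; it only
// shows why the getContext shim exists, and is not part of the diff.
func demoMkcol(fs FileSystem, w http.ResponseWriter, r *http.Request) {
	ctx := getContext(r) // r.Context() on Go 1.7+, context.Background() before
	if err := fs.Mkdir(ctx, r.URL.Path, 0777); err != nil {
		if os.IsExist(err) {
			http.Error(w, "already exists", http.StatusMethodNotAllowed)
			return
		}
		http.Error(w, err.Error(), http.StatusConflict)
		return
	}
	w.WriteHeader(http.StatusCreated)
}
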
diff --git a/vendor/golang.org/x/net/webdav/if.go b/vendor/golang.org/x/net/webdav/if.go
new file mode 100644
index 0000000..416e81c
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/if.go
@@ -0,0 +1,173 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The If header is covered by Section 10.4.
+// http://www.webdav.org/specs/rfc4918.html#HEADER_If
+
+import (
+ "strings"
+)
+
+// ifHeader is a disjunction (OR) of ifLists.
+type ifHeader struct {
+ lists []ifList
+}
+
+// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
+type ifList struct {
+ resourceTag string
+ conditions []Condition
+}
+
+// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
+// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
+// returned by req.Header.Get("If") for a http.Request req.
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
+ s := strings.TrimSpace(httpHeader)
+ switch tokenType, _, _ := lex(s); tokenType {
+ case '(':
+ return parseNoTagLists(s)
+ case angleTokenType:
+ return parseTaggedLists(s)
+ default:
+ return ifHeader{}, false
+ }
+}
+
+func parseNoTagLists(s string) (h ifHeader, ok bool) {
+ for {
+ l, remaining, ok := parseList(s)
+ if !ok {
+ return ifHeader{}, false
+ }
+ h.lists = append(h.lists, l)
+ if remaining == "" {
+ return h, true
+ }
+ s = remaining
+ }
+}
+
+func parseTaggedLists(s string) (h ifHeader, ok bool) {
+ resourceTag, n := "", 0
+ for first := true; ; first = false {
+ tokenType, tokenStr, remaining := lex(s)
+ switch tokenType {
+ case angleTokenType:
+ if !first && n == 0 {
+ return ifHeader{}, false
+ }
+ resourceTag, n = tokenStr, 0
+ s = remaining
+ case '(':
+ n++
+ l, remaining, ok := parseList(s)
+ if !ok {
+ return ifHeader{}, false
+ }
+ l.resourceTag = resourceTag
+ h.lists = append(h.lists, l)
+ if remaining == "" {
+ return h, true
+ }
+ s = remaining
+ default:
+ return ifHeader{}, false
+ }
+ }
+}
+
+func parseList(s string) (l ifList, remaining string, ok bool) {
+ tokenType, _, s := lex(s)
+ if tokenType != '(' {
+ return ifList{}, "", false
+ }
+ for {
+ tokenType, _, remaining = lex(s)
+ if tokenType == ')' {
+ if len(l.conditions) == 0 {
+ return ifList{}, "", false
+ }
+ return l, remaining, true
+ }
+ c, remaining, ok := parseCondition(s)
+ if !ok {
+ return ifList{}, "", false
+ }
+ l.conditions = append(l.conditions, c)
+ s = remaining
+ }
+}
+
+func parseCondition(s string) (c Condition, remaining string, ok bool) {
+ tokenType, tokenStr, s := lex(s)
+ if tokenType == notTokenType {
+ c.Not = true
+ tokenType, tokenStr, s = lex(s)
+ }
+ switch tokenType {
+ case strTokenType, angleTokenType:
+ c.Token = tokenStr
+ case squareTokenType:
+ c.ETag = tokenStr
+ default:
+ return Condition{}, "", false
+ }
+ return c, s, true
+}
+
+// Single-rune tokens like '(' or ')' have a token type equal to their rune.
+// All other tokens have a negative token type.
+const (
+ errTokenType = rune(-1)
+ eofTokenType = rune(-2)
+ strTokenType = rune(-3)
+ notTokenType = rune(-4)
+ angleTokenType = rune(-5)
+ squareTokenType = rune(-6)
+)
+
+func lex(s string) (tokenType rune, tokenStr string, remaining string) {
+ // The net/textproto Reader that parses the HTTP header will collapse
+ // Linear White Space that spans multiple "\r\n" lines to a single " ",
+ // so we don't need to look for '\r' or '\n'.
+ for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ return eofTokenType, "", ""
+ }
+ i := 0
+loop:
+ for ; i < len(s); i++ {
+ switch s[i] {
+ case '\t', ' ', '(', ')', '<', '>', '[', ']':
+ break loop
+ }
+ }
+
+ if i != 0 {
+ tokenStr, remaining = s[:i], s[i:]
+ if tokenStr == "Not" {
+ return notTokenType, "", remaining
+ }
+ return strTokenType, tokenStr, remaining
+ }
+
+ j := 0
+ switch s[0] {
+ case '<':
+ j, tokenType = strings.IndexByte(s, '>'), angleTokenType
+ case '[':
+ j, tokenType = strings.IndexByte(s, ']'), squareTokenType
+ default:
+ return rune(s[0]), "", s[1:]
+ }
+ if j < 0 {
+ return errTokenType, "", ""
+ }
+ return tokenType, s[1:j], s[j+1:]
+}
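
if.go parses RFC 4918 If headers into a disjunction (ifHeader) of conjunctions (ifList) of Conditions. parseIfHeader and lex are unexported, so the sketch below only illustrates, from inside the package, what the parser yields for a typical tagged list; the header value, the demo function and the comments are illustrative, not part of the diff (the Condition type itself is declared in lock.go).

package webdav

import "fmt"

// demoIfHeader is a hypothetical helper showing the parse result for a
// tagged If header of the form described in RFC 4918 section 10.4.
func demoIfHeader() {
	h, ok := parseIfHeader(`<http://example.com/locked/> (<urn:uuid:deadbeef> ["etag-1"])`)
	if !ok {
		panic("malformed If header")
	}
	l := h.lists[0]
	fmt.Println(l.resourceTag)         // http://example.com/locked/ (angle brackets stripped)
	fmt.Println(l.conditions[0].Token) // urn:uuid:deadbeef (the submitted lock token)
	fmt.Println(l.conditions[1].ETag)  // "etag-1" (square brackets stripped, quotes kept)
}
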
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/README b/vendor/golang.org/x/net/webdav/internal/xml/README
new file mode 100644
index 0000000..89656f4
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/README
@@ -0,0 +1,11 @@
+This is a fork of the encoding/xml package at ca1d6c4, the last commit before
+https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
+space behavior" made late in the lead-up to the Go 1.5 release.
+
+The list of encoding/xml changes is at
+https://go.googlesource.com/go/+log/master/src/encoding/xml
+
+This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
+released.
+
+See http://golang.org/issue/11841
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/marshal.go b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
new file mode 100644
index 0000000..cb82ec2
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/marshal.go
@@ -0,0 +1,1223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ // A generic XML header suitable for use with the output of Marshal.
+ // This is not automatically added to any output of this package,
+ // it is provided as a convenience.
+ Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+)
+
+// Marshal returns the XML encoding of v.
+//
+// Marshal handles an array or slice by marshalling each of the elements.
+// Marshal handles a pointer by marshalling the value it points at or, if the
+// pointer is nil, by writing nothing. Marshal handles an interface value by
+// marshalling the value it contains or, if the interface value is nil, by
+// writing nothing. Marshal handles all other data by writing one or more XML
+// elements containing the data.
+//
+// The name for the XML elements is taken from, in order of preference:
+// - the tag on the XMLName field, if the data is a struct
+// - the value of the XMLName field of type xml.Name
+// - the tag of the struct field used to obtain the data
+// - the name of the struct field used to obtain the data
+// - the name of the marshalled type
+//
+// The XML element for a struct contains marshalled elements for each of the
+// exported fields of the struct, with these exceptions:
+// - the XMLName field, described above, is omitted.
+// - a field with tag "-" is omitted.
+// - a field with tag "name,attr" becomes an attribute with
+// the given name in the XML element.
+// - a field with tag ",attr" becomes an attribute with the
+// field name in the XML element.
+// - a field with tag ",chardata" is written as character data,
+// not as an XML element.
+// - a field with tag ",innerxml" is written verbatim, not subject
+// to the usual marshalling procedure.
+// - a field with tag ",comment" is written as an XML comment, not
+// subject to the usual marshalling procedure. It must not contain
+// the "--" string within it.
+// - a field with a tag including the "omitempty" option is omitted
+// if the field value is empty. The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or
+// string of length zero.
+// - an anonymous struct field is handled as if the fields of its
+// value were part of the outer struct.
+//
+// If a field uses a tag "a>b>c", then the element c will be nested inside
+// parent elements a and b. Fields that appear next to each other that name
+// the same parent will be enclosed in one XML element.
+//
+// See MarshalIndent for an example.
+//
+// Marshal will return an error if asked to marshal a channel, function, or map.
+func Marshal(v interface{}) ([]byte, error) {
+ var b bytes.Buffer
+ if err := NewEncoder(&b).Encode(v); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// Marshaler is the interface implemented by objects that can marshal
+// themselves into valid XML elements.
+//
+// MarshalXML encodes the receiver as zero or more XML elements.
+// By convention, arrays or slices are typically encoded as a sequence
+// of elements, one per entry.
+// Using start as the element tag is not required, but doing so
+// will enable Unmarshal to match the XML elements to the correct
+// struct field.
+// One common implementation strategy is to construct a separate
+// value with a layout corresponding to the desired XML and then
+// to encode it using e.EncodeElement.
+// Another common strategy is to use repeated calls to e.EncodeToken
+// to generate the XML output one token at a time.
+// The sequence of encoded tokens must make up zero or more valid
+// XML elements.
+type Marshaler interface {
+ MarshalXML(e *Encoder, start StartElement) error
+}
+
+// MarshalerAttr is the interface implemented by objects that can marshal
+// themselves into valid XML attributes.
+//
+// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver.
+// Using name as the attribute name is not required, but doing so
+// will enable Unmarshal to match the attribute to the correct
+// struct field.
+// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute
+// will be generated in the output.
+// MarshalXMLAttr is used only for struct fields with the
+// "attr" option in the field tag.
+type MarshalerAttr interface {
+ MarshalXMLAttr(name Name) (Attr, error)
+}
+
+// MarshalIndent works like Marshal, but each XML element begins on a new
+// indented line that starts with prefix and is followed by one or more
+// copies of indent according to the nesting depth.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ var b bytes.Buffer
+ enc := NewEncoder(&b)
+ enc.Indent(prefix, indent)
+ if err := enc.Encode(v); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// An Encoder writes XML data to an output stream.
+type Encoder struct {
+ p printer
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{printer{Writer: bufio.NewWriter(w)}}
+ e.p.encoder = e
+ return e
+}
+
+// Indent sets the encoder to generate XML in which each element
+// begins on a new indented line that starts with prefix and is followed by
+// one or more copies of indent according to the nesting depth.
+func (enc *Encoder) Indent(prefix, indent string) {
+ enc.p.prefix = prefix
+ enc.p.indent = indent
+}
+
+// Encode writes the XML encoding of v to the stream.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// Encode calls Flush before returning.
+func (enc *Encoder) Encode(v interface{}) error {
+ err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil)
+ if err != nil {
+ return err
+ }
+ return enc.p.Flush()
+}
+
+// EncodeElement writes the XML encoding of v to the stream,
+// using start as the outermost tag in the encoding.
+//
+// See the documentation for Marshal for details about the conversion
+// of Go values to XML.
+//
+// EncodeElement calls Flush before returning.
+func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error {
+ err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start)
+ if err != nil {
+ return err
+ }
+ return enc.p.Flush()
+}
+
+var (
+ begComment = []byte("<!--")
+ endComment = []byte("-->")
+ endProcInst = []byte("?>")
+ endDirective = []byte(">")
+)
+
+// EncodeToken writes the given XML token to the stream.
+// It returns an error if StartElement and EndElement tokens are not
+// properly matched.
+//
+// EncodeToken does not call Flush, because usually it is part of a
+// larger operation such as Encode or EncodeElement (or a custom
+// Marshaler's MarshalXML invoked during those), and those will call
+// Flush when finished. Callers that create an Encoder and then invoke
+// EncodeToken directly, without using Encode or EncodeElement, need to
+// call Flush when finished to ensure that the XML is written to the
+// underlying writer.
+//
+// EncodeToken allows writing a ProcInst with Target set to "xml" only
+// as the first token in the stream.
+//
+// When encoding a StartElement holding an XML namespace prefix
+// declaration for a prefix that is not already declared, contained
+// elements (including the StartElement itself) will use the declared
+// prefix when encoding names with matching namespace URIs.
+func (enc *Encoder) EncodeToken(t Token) error {
+
+ p := &enc.p
+ switch t := t.(type) {
+ case StartElement:
+ if err := p.writeStart(&t); err != nil {
+ return err
+ }
+ case EndElement:
+ if err := p.writeEnd(t.Name); err != nil {
+ return err
+ }
+ case CharData:
+ escapeText(p, t, false)
+ case Comment:
+ if bytes.Contains(t, endComment) {
+ return fmt.Errorf("xml: EncodeToken of Comment containing --> marker")
+ }
+ p.WriteString("<!--")
+ p.Write(t)
+ p.WriteString("-->")
+ return p.cachedWriteError()
+ case ProcInst:
+ // First token to be encoded which is also a ProcInst with target of xml
+ // is the xml declaration. The only ProcInst where target of xml is allowed.
+ if t.Target == "xml" && p.Buffered() != 0 {
+ return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded")
+ }
+ if !isNameString(t.Target) {
+ return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target")
+ }
+ if bytes.Contains(t.Inst, endProcInst) {
+ return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker")
+ }
+ p.WriteString("<?")
+ p.WriteString(t.Target)
+ if len(t.Inst) > 0 {
+ p.WriteByte(' ')
+ p.Write(t.Inst)
+ }
+ p.WriteString("?>")
+ case Directive:
+ if !isValidDirective(t) {
+ return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers")
+ }
+ p.WriteString("<!")
+ p.Write(t)
+ p.WriteString(">")
+ default:
+ return fmt.Errorf("xml: EncodeToken of invalid token type")
+
+ }
+ return p.cachedWriteError()
+}
+
+// isValidDirective reports whether dir is a valid directive text,
+// meaning angle brackets are matched, ignoring comments and strings.
+func isValidDirective(dir Directive) bool {
+ var (
+ depth int
+ inquote uint8
+ incomment bool
+ )
+ for i, c := range dir {
+ switch {
+ case incomment:
+ if c == '>' {
+ if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
+ incomment = false
+ }
+ }
+ // Just ignore anything in comment
+ case inquote != 0:
+ if c == inquote {
+ inquote = 0
+ }
+ // Just ignore anything within quotes
+ case c == '\'' || c == '"':
+ inquote = c
+ case c == '<':
+ if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
+ incomment = true
+ } else {
+ depth++
+ }
+ case c == '>':
+ if depth == 0 {
+ return false
+ }
+ depth--
+ }
+ }
+ return depth == 0 && inquote == 0 && !incomment
+}
+
+// Flush flushes any buffered XML to the underlying writer.
+// See the EncodeToken documentation for details about when it is necessary.
+func (enc *Encoder) Flush() error {
+ return enc.p.Flush()
+}
+
+type printer struct {
+ *bufio.Writer
+ encoder *Encoder
+ seq int
+ indent string
+ prefix string
+ depth int
+ indentedIn bool
+ putNewline bool
+ defaultNS string
+ attrNS map[string]string // map prefix -> name space
+ attrPrefix map[string]string // map name space -> prefix
+ prefixes []printerPrefix
+ tags []Name
+}
+
+// printerPrefix holds a namespace undo record.
+// When an element is popped, the prefix record
+// is set back to the recorded URL. The empty
+// prefix records the URL for the default name space.
+//
+// The start of an element is recorded with an element
+// that has mark=true.
+type printerPrefix struct {
+ prefix string
+ url string
+ mark bool
+}
+
+func (p *printer) prefixForNS(url string, isAttr bool) string {
+ // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
+ // and must be referred to that way.
+ // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
+ // but users should not be trying to use that one directly - that's our job.)
+ if url == xmlURL {
+ return "xml"
+ }
+ if !isAttr && url == p.defaultNS {
+ // We can use the default name space.
+ return ""
+ }
+ return p.attrPrefix[url]
+}
+
+// defineNS pushes any namespace definition found in the given attribute.
+// If ignoreNonEmptyDefault is true, an xmlns="nonempty"
+// attribute will be ignored.
+func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error {
+ var prefix string
+ if attr.Name.Local == "xmlns" {
+ if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL {
+ return fmt.Errorf("xml: cannot redefine xmlns attribute prefix")
+ }
+ } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" {
+ prefix = attr.Name.Local
+ if attr.Value == "" {
+ // Technically, an empty XML namespace is allowed for an attribute.
+ // From http://www.w3.org/TR/xml-names11/#scoping-defaulting:
+ //
+ // The attribute value in a namespace declaration for a prefix may be
+ // empty. This has the effect, within the scope of the declaration, of removing
+ // any association of the prefix with a namespace name.
+ //
+ // However our namespace prefixes here are used only as hints. There's
+ // no need to respect the removal of a namespace prefix, so we ignore it.
+ return nil
+ }
+ } else {
+ // Ignore: it's not a namespace definition
+ return nil
+ }
+ if prefix == "" {
+ if attr.Value == p.defaultNS {
+ // No need for redefinition.
+ return nil
+ }
+ if attr.Value != "" && ignoreNonEmptyDefault {
+ // We have an xmlns="..." value but
+ // it can't define a name space in this context,
+ // probably because the element has an empty
+ // name space. In this case, we just ignore
+ // the name space declaration.
+ return nil
+ }
+ } else if _, ok := p.attrPrefix[attr.Value]; ok {
+ // There's already a prefix for the given name space,
+ // so use that. This prevents us from
+ // having two prefixes for the same name space
+ // so attrNS and attrPrefix can remain bijective.
+ return nil
+ }
+ p.pushPrefix(prefix, attr.Value)
+ return nil
+}
+
+// createNSPrefix creates a name space prefix attribute
+// to use for the given name space, defining a new prefix
+// if necessary.
+// If isAttr is true, the prefix is to be created for an attribute
+// prefix, which means that the default name space cannot
+// be used.
+func (p *printer) createNSPrefix(url string, isAttr bool) {
+ if _, ok := p.attrPrefix[url]; ok {
+ // We already have a prefix for the given URL.
+ return
+ }
+ switch {
+ case !isAttr && url == p.defaultNS:
+ // We can use the default name space.
+ return
+ case url == "":
+ // The only way we can encode names in the empty
+ // name space is by using the default name space,
+ // so we must use that.
+ if p.defaultNS != "" {
+ // The default namespace is non-empty, so we
+ // need to set it to empty.
+ p.pushPrefix("", "")
+ }
+ return
+ case url == xmlURL:
+ return
+ }
+ // TODO If the URL is an existing prefix, we could
+ // use it as is. That would enable the
+ // marshaling of elements that had been unmarshaled
+ // and with a name space prefix that was not found.
+ // although technically it would be incorrect.
+
+ // Pick a name. We try to use the final element of the path
+ // but fall back to _.
+ prefix := strings.TrimRight(url, "/")
+ if i := strings.LastIndex(prefix, "/"); i >= 0 {
+ prefix = prefix[i+1:]
+ }
+ if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") {
+ prefix = "_"
+ }
+ if strings.HasPrefix(prefix, "xml") {
+ // xmlanything is reserved.
+ prefix = "_" + prefix
+ }
+ if p.attrNS[prefix] != "" {
+ // Name is taken. Find a better one.
+ for p.seq++; ; p.seq++ {
+ if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" {
+ prefix = id
+ break
+ }
+ }
+ }
+
+ p.pushPrefix(prefix, url)
+}
+
+// writeNamespaces writes xmlns attributes for all the
+// namespace prefixes that have been defined in
+// the current element.
+func (p *printer) writeNamespaces() {
+ for i := len(p.prefixes) - 1; i >= 0; i-- {
+ prefix := p.prefixes[i]
+ if prefix.mark {
+ return
+ }
+ p.WriteString(" ")
+ if prefix.prefix == "" {
+ // Default name space.
+ p.WriteString(`xmlns="`)
+ } else {
+ p.WriteString("xmlns:")
+ p.WriteString(prefix.prefix)
+ p.WriteString(`="`)
+ }
+ EscapeText(p, []byte(p.nsForPrefix(prefix.prefix)))
+ p.WriteString(`"`)
+ }
+}
+
+// pushPrefix pushes a new prefix on the prefix stack
+// without checking to see if it is already defined.
+func (p *printer) pushPrefix(prefix, url string) {
+ p.prefixes = append(p.prefixes, printerPrefix{
+ prefix: prefix,
+ url: p.nsForPrefix(prefix),
+ })
+ p.setAttrPrefix(prefix, url)
+}
+
+// nsForPrefix returns the name space for the given
+// prefix. Note that this is not valid for the
+// empty attribute prefix, which always has an empty
+// name space.
+func (p *printer) nsForPrefix(prefix string) string {
+ if prefix == "" {
+ return p.defaultNS
+ }
+ return p.attrNS[prefix]
+}
+
+// markPrefix marks the start of an element on the prefix
+// stack.
+func (p *printer) markPrefix() {
+ p.prefixes = append(p.prefixes, printerPrefix{
+ mark: true,
+ })
+}
+
+// popPrefix pops all defined prefixes for the current
+// element.
+func (p *printer) popPrefix() {
+ for len(p.prefixes) > 0 {
+ prefix := p.prefixes[len(p.prefixes)-1]
+ p.prefixes = p.prefixes[:len(p.prefixes)-1]
+ if prefix.mark {
+ break
+ }
+ p.setAttrPrefix(prefix.prefix, prefix.url)
+ }
+}
+
+// setAttrPrefix sets an attribute name space prefix.
+// If url is empty, the attribute is removed.
+// If prefix is empty, the default name space is set.
+func (p *printer) setAttrPrefix(prefix, url string) {
+ if prefix == "" {
+ p.defaultNS = url
+ return
+ }
+ if url == "" {
+ delete(p.attrPrefix, p.attrNS[prefix])
+ delete(p.attrNS, prefix)
+ return
+ }
+ if p.attrPrefix == nil {
+ // Need to define a new name space.
+ p.attrPrefix = make(map[string]string)
+ p.attrNS = make(map[string]string)
+ }
+ // Remove any old prefix value. This is OK because we maintain a
+ // strict one-to-one mapping between prefix and URL (see
+ // defineNS)
+ delete(p.attrPrefix, p.attrNS[prefix])
+ p.attrPrefix[url] = prefix
+ p.attrNS[prefix] = url
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+)
+
+// marshalValue writes one or more XML elements representing val.
+// If val was obtained from a struct field, finfo must have its details.
+func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {
+ if startTemplate != nil && startTemplate.Name.Local == "" {
+ return fmt.Errorf("xml: EncodeElement of StartElement with missing name")
+ }
+
+ if !val.IsValid() {
+ return nil
+ }
+ if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {
+ return nil
+ }
+
+ // Drill into interfaces and pointers.
+ // This can turn into an infinite loop given a cyclic chain,
+ // but it matches the Go 1 behavior.
+ for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ return nil
+ }
+ val = val.Elem()
+ }
+
+ kind := val.Kind()
+ typ := val.Type()
+
+ // Check for marshaler.
+ if val.CanInterface() && typ.Implements(marshalerType) {
+ return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(marshalerType) {
+ return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
+ }
+ }
+
+ // Check for text marshaler.
+ if val.CanInterface() && typ.Implements(textMarshalerType) {
+ return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
+ }
+ }
+
+ // Slices and arrays iterate over the elements. They do not have an enclosing tag.
+ if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
+ for i, n := 0, val.Len(); i < n; i++ {
+ if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ tinfo, err := getTypeInfo(typ)
+ if err != nil {
+ return err
+ }
+
+ // Create start element.
+ // Precedence for the XML element name is:
+ // 0. startTemplate
+ // 1. XMLName field in underlying struct;
+ // 2. field name/tag in the struct field; and
+ // 3. type name
+ var start StartElement
+
+ // explicitNS records whether the element's name space has been
+ // explicitly set (for example an XMLName field).
+ explicitNS := false
+
+ if startTemplate != nil {
+ start.Name = startTemplate.Name
+ explicitNS = true
+ start.Attr = append(start.Attr, startTemplate.Attr...)
+ } else if tinfo.xmlname != nil {
+ xmlname := tinfo.xmlname
+ if xmlname.name != "" {
+ start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name
+ } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" {
+ start.Name = v
+ }
+ explicitNS = true
+ }
+ if start.Name.Local == "" && finfo != nil {
+ start.Name.Local = finfo.name
+ if finfo.xmlns != "" {
+ start.Name.Space = finfo.xmlns
+ explicitNS = true
+ }
+ }
+ if start.Name.Local == "" {
+ name := typ.Name()
+ if name == "" {
+ return &UnsupportedTypeError{typ}
+ }
+ start.Name.Local = name
+ }
+
+ // defaultNS records the default name space as set by a xmlns="..."
+ // attribute. We don't set p.defaultNS because we want to let
+ // the attribute writing code (in p.defineNS) be solely responsible
+ // for maintaining that.
+ defaultNS := p.defaultNS
+
+ // Attributes
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr == 0 {
+ continue
+ }
+ attr, err := p.fieldAttr(finfo, val)
+ if err != nil {
+ return err
+ }
+ if attr.Name.Local == "" {
+ continue
+ }
+ start.Attr = append(start.Attr, attr)
+ if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+ defaultNS = attr.Value
+ }
+ }
+ if !explicitNS {
+ // Historic behavior: elements use the default name space
+ // they are contained in by default.
+ start.Name.Space = defaultNS
+ }
+ // Historic behaviour: an element that's in a namespace sets
+ // the default namespace for all elements contained within it.
+ start.setDefaultNamespace()
+
+ if err := p.writeStart(&start); err != nil {
+ return err
+ }
+
+ if val.Kind() == reflect.Struct {
+ err = p.marshalStruct(tinfo, val)
+ } else {
+ s, b, err1 := p.marshalSimple(typ, val)
+ if err1 != nil {
+ err = err1
+ } else if b != nil {
+ EscapeText(p, b)
+ } else {
+ p.EscapeString(s)
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := p.writeEnd(start.Name); err != nil {
+ return err
+ }
+
+ return p.cachedWriteError()
+}
+
+// fieldAttr returns the attribute of the given field.
+// If the returned attribute has an empty Name.Local,
+// it should not be used.
+// The given value holds the value containing the field.
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) {
+ fv := finfo.value(val)
+ name := Name{Space: finfo.xmlns, Local: finfo.name}
+ if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {
+ return Attr{}, nil
+ }
+ if fv.Kind() == reflect.Interface && fv.IsNil() {
+ return Attr{}, nil
+ }
+ if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) {
+ attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
+ return attr, err
+ }
+ if fv.CanAddr() {
+ pv := fv.Addr()
+ if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) {
+ attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
+ return attr, err
+ }
+ }
+ if fv.CanInterface() && fv.Type().Implements(textMarshalerType) {
+ text, err := fv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return Attr{}, err
+ }
+ return Attr{name, string(text)}, nil
+ }
+ if fv.CanAddr() {
+ pv := fv.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ text, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return Attr{}, err
+ }
+ return Attr{name, string(text)}, nil
+ }
+ }
+ // Dereference or skip nil pointer, interface values.
+ switch fv.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if fv.IsNil() {
+ return Attr{}, nil
+ }
+ fv = fv.Elem()
+ }
+ s, b, err := p.marshalSimple(fv.Type(), fv)
+ if err != nil {
+ return Attr{}, err
+ }
+ if b != nil {
+ s = string(b)
+ }
+ return Attr{name, s}, nil
+}
+
+// defaultStart returns the default start element to use,
+// given the reflect type, field info, and start template.
+func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {
+ var start StartElement
+ // Precedence for the XML element name is as above,
+ // except that we do not look inside structs for the first field.
+ if startTemplate != nil {
+ start.Name = startTemplate.Name
+ start.Attr = append(start.Attr, startTemplate.Attr...)
+ } else if finfo != nil && finfo.name != "" {
+ start.Name.Local = finfo.name
+ start.Name.Space = finfo.xmlns
+ } else if typ.Name() != "" {
+ start.Name.Local = typ.Name()
+ } else {
+ // Must be a pointer to a named type,
+ // since it has the Marshaler methods.
+ start.Name.Local = typ.Elem().Name()
+ }
+ // Historic behaviour: elements use the name space of
+ // the element they are contained in by default.
+ if start.Name.Space == "" {
+ start.Name.Space = p.defaultNS
+ }
+ start.setDefaultNamespace()
+ return start
+}
+
+// marshalInterface marshals a Marshaler interface value.
+func (p *printer) marshalInterface(val Marshaler, start StartElement) error {
+ // Push a marker onto the tag stack so that MarshalXML
+ // cannot close the XML tags that it did not open.
+ p.tags = append(p.tags, Name{})
+ n := len(p.tags)
+
+ err := val.MarshalXML(p.encoder, start)
+ if err != nil {
+ return err
+ }
+
+ // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark.
+ if len(p.tags) > n {
+ return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local)
+ }
+ p.tags = p.tags[:n-1]
+ return nil
+}
+
+// marshalTextInterface marshals a TextMarshaler interface value.
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {
+ if err := p.writeStart(&start); err != nil {
+ return err
+ }
+ text, err := val.MarshalText()
+ if err != nil {
+ return err
+ }
+ EscapeText(p, text)
+ return p.writeEnd(start.Name)
+}
+
+// writeStart writes the given start element.
+func (p *printer) writeStart(start *StartElement) error {
+ if start.Name.Local == "" {
+ return fmt.Errorf("xml: start tag with no name")
+ }
+
+ p.tags = append(p.tags, start.Name)
+ p.markPrefix()
+ // Define any name spaces explicitly declared in the attributes.
+ // We do this as a separate pass so that explicitly declared prefixes
+ // will take precedence over implicitly declared prefixes
+ // regardless of the order of the attributes.
+ ignoreNonEmptyDefault := start.Name.Space == ""
+ for _, attr := range start.Attr {
+ if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil {
+ return err
+ }
+ }
+ // Define any new name spaces implied by the attributes.
+ for _, attr := range start.Attr {
+ name := attr.Name
+ // From http://www.w3.org/TR/xml-names11/#defaulting
+ // "Default namespace declarations do not apply directly
+ // to attribute names; the interpretation of unprefixed
+ // attributes is determined by the element on which they
+ // appear."
+ // This means we don't need to create a new namespace
+ // when an attribute name space is empty.
+ if name.Space != "" && !name.isNamespace() {
+ p.createNSPrefix(name.Space, true)
+ }
+ }
+ p.createNSPrefix(start.Name.Space, false)
+
+ p.writeIndent(1)
+ p.WriteByte('<')
+ p.writeName(start.Name, false)
+ p.writeNamespaces()
+ for _, attr := range start.Attr {
+ name := attr.Name
+ if name.Local == "" || name.isNamespace() {
+ // Namespaces have already been written by writeNamespaces above.
+ continue
+ }
+ p.WriteByte(' ')
+ p.writeName(name, true)
+ p.WriteString(`="`)
+ p.EscapeString(attr.Value)
+ p.WriteByte('"')
+ }
+ p.WriteByte('>')
+ return nil
+}
+
+// writeName writes the given name. It assumes
+// that p.createNSPrefix(name) has already been called.
+func (p *printer) writeName(name Name, isAttr bool) {
+ if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+ p.WriteString(prefix)
+ p.WriteByte(':')
+ }
+ p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+ if name.Local == "" {
+ return fmt.Errorf("xml: end tag with no name")
+ }
+ if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+ return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+ }
+ if top := p.tags[len(p.tags)-1]; top != name {
+ if top.Local != name.Local {
+ return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+ }
+ return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+ }
+ p.tags = p.tags[:len(p.tags)-1]
+
+ p.writeIndent(-1)
+ p.WriteByte('<')
+ p.WriteByte('/')
+ p.writeName(name, false)
+ p.WriteByte('>')
+ p.popPrefix()
+ return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(val.Int(), 10), nil, nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(val.Uint(), 10), nil, nil
+ case reflect.Float32, reflect.Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+ case reflect.String:
+ return val.String(), nil, nil
+ case reflect.Bool:
+ return strconv.FormatBool(val.Bool()), nil, nil
+ case reflect.Array:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // [...]byte
+ var bytes []byte
+ if val.CanAddr() {
+ bytes = val.Slice(0, val.Len()).Bytes()
+ } else {
+ bytes = make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(bytes), val)
+ }
+ return "", bytes, nil
+ case reflect.Slice:
+ if typ.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ // []byte
+ return "", val.Bytes(), nil
+ }
+ return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+ s := parentStack{p: p}
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fAttr != 0 {
+ continue
+ }
+ vf := finfo.value(val)
+
+ // Dereference pointer and interface values; nil ones are left as-is.
+ switch vf.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if !vf.IsNil() {
+ vf = vf.Elem()
+ }
+ }
+
+ switch finfo.flags & fMode {
+ case fCharData:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+ data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ if vf.CanAddr() {
+ pv := vf.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+ data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ Escape(p, data)
+ continue
+ }
+ }
+ var scratch [64]byte
+ switch vf.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+ case reflect.Float32, reflect.Float64:
+ Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+ case reflect.Bool:
+ Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+ case reflect.String:
+ if err := EscapeText(p, []byte(vf.String())); err != nil {
+ return err
+ }
+ case reflect.Slice:
+ if elem, ok := vf.Interface().([]byte); ok {
+ if err := EscapeText(p, elem); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+
+ case fComment:
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ k := vf.Kind()
+ if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+ return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+ }
+ if vf.Len() == 0 {
+ continue
+ }
+ p.writeIndent(0)
+ p.WriteString("<!--")
+ dashDash := false
+ dashLast := false
+ switch k {
+ case reflect.String:
+ s := vf.String()
+ dashDash = strings.Index(s, "--") >= 0
+ dashLast = s[len(s)-1] == '-'
+ if !dashDash {
+ p.WriteString(s)
+ }
+ case reflect.Slice:
+ b := vf.Bytes()
+ dashDash = bytes.Index(b, ddBytes) >= 0
+ dashLast = b[len(b)-1] == '-'
+ if !dashDash {
+ p.Write(b)
+ }
+ default:
+ panic("can't happen")
+ }
+ if dashDash {
+ return fmt.Errorf(`xml: comments must not contain "--"`)
+ }
+ if dashLast {
+ // "--->" is invalid grammar. Make it "- -->"
+ p.WriteByte(' ')
+ }
+ p.WriteString("-->")
+ continue
+
+ case fInnerXml:
+ iface := vf.Interface()
+ switch raw := iface.(type) {
+ case []byte:
+ p.Write(raw)
+ continue
+ case string:
+ p.WriteString(raw)
+ continue
+ }
+
+ case fElement, fElement | fAny:
+ if err := s.setParents(finfo, vf); err != nil {
+ return err
+ }
+ }
+ if err := p.marshalValue(vf, finfo, nil); err != nil {
+ return err
+ }
+ }
+ if err := s.setParents(&noField, reflect.Value{}); err != nil {
+ return err
+ }
+ return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+ _, err := p.Write(nil)
+ return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+ if len(p.prefix) == 0 && len(p.indent) == 0 {
+ return
+ }
+ if depthDelta < 0 {
+ p.depth--
+ if p.indentedIn {
+ p.indentedIn = false
+ return
+ }
+ p.indentedIn = false
+ }
+ if p.putNewline {
+ p.WriteByte('\n')
+ } else {
+ p.putNewline = true
+ }
+ if len(p.prefix) > 0 {
+ p.WriteString(p.prefix)
+ }
+ if len(p.indent) > 0 {
+ for i := 0; i < p.depth; i++ {
+ p.WriteString(p.indent)
+ }
+ }
+ if depthDelta > 0 {
+ p.depth++
+ p.indentedIn = true
+ }
+}
+
+type parentStack struct {
+ p *printer
+ xmlns string
+ parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
+ xmlns := s.p.defaultNS
+ if finfo.xmlns != "" {
+ xmlns = finfo.xmlns
+ }
+ commonParents := 0
+ if xmlns == s.xmlns {
+ for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
+ if finfo.parents[commonParents] != s.parents[commonParents] {
+ break
+ }
+ }
+ }
+ // Pop off any parents that aren't in common with the previous field.
+ for i := len(s.parents) - 1; i >= commonParents; i-- {
+ if err := s.p.writeEnd(Name{
+ Space: s.xmlns,
+ Local: s.parents[i],
+ }); err != nil {
+ return err
+ }
+ }
+ s.parents = finfo.parents
+ s.xmlns = xmlns
+ if commonParents >= len(s.parents) {
+ // No new elements to push.
+ return nil
+ }
+ if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
+ // The element is nil, so no need for the start elements.
+ s.parents = s.parents[:commonParents]
+ return nil
+ }
+ // Push any new parents required.
+ for _, name := range s.parents[commonParents:] {
+ start := &StartElement{
+ Name: Name{
+ Space: s.xmlns,
+ Local: name,
+ },
+ }
+ // Set the default name space for parent elements
+ // to match what we do with other elements.
+ if s.xmlns != s.p.defaultNS {
+ start.setDefaultNamespace()
+ }
+ if err := s.p.writeStart(start); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// An UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "xml: unsupported type: " + e.Type.String()
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
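+
+// Sketch for illustration only (not part of the upstream file): isEmptyValue
+// is what the ",omitempty" tag option consults. With a hypothetical struct
+// such as
+//
+//	type Link struct {
+//		Href string `xml:"href,attr,omitempty"`
+//	}
+//
+// marshalling Link{} writes the element with no href attribute, because the
+// empty string counts as empty above, while Link{Href: "x"} writes href="x".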
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/read.go b/vendor/golang.org/x/net/webdav/internal/xml/read.go
new file mode 100644
index 0000000..4089056
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/read.go
@@ -0,0 +1,692 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
+// an XML element is an order-dependent collection of anonymous
+// values, while a data structure is an order-independent collection
+// of named values.
+// See package json for a textual representation more suitable
+// to data structures.
+
+// Unmarshal parses the XML-encoded data and stores the result in
+// the value pointed to by v, which must be an arbitrary struct,
+// slice, or string. Well-formed data that does not fit into v is
+// discarded.
+//
+// Because Unmarshal uses the reflect package, it can only assign
+// to exported (upper case) fields. Unmarshal uses a case-sensitive
+// comparison to match XML element names to tag values and struct
+// field names.
+//
+// Unmarshal maps an XML element to a struct using the following rules.
+// In the rules, the tag of a field refers to the value associated with the
+// key 'xml' in the struct field's tag (see the example above).
+//
+// * If the struct has a field of type []byte or string with tag
+// ",innerxml", Unmarshal accumulates the raw XML nested inside the
+// element in that field. The rest of the rules still apply.
+//
+// * If the struct has a field named XMLName of type xml.Name,
+// Unmarshal records the element name in that field.
+//
+// * If the XMLName field has an associated tag of the form
+// "name" or "namespace-URL name", the XML element must have
+// the given name (and, optionally, name space) or else Unmarshal
+// returns an error.
+//
+// * If the XML element has an attribute whose name matches a
+// struct field name with an associated tag containing ",attr" or
+// the explicit name in a struct field tag of the form "name,attr",
+// Unmarshal records the attribute value in that field.
+//
+// * If the XML element contains character data, that data is
+// accumulated in the first struct field that has tag ",chardata".
+// The struct field may have type []byte or string.
+// If there is no such field, the character data is discarded.
+//
+// * If the XML element contains comments, they are accumulated in
+// the first struct field that has tag ",comment". The struct
+// field may have type []byte or string. If there is no such
+// field, the comments are discarded.
+//
+// * If the XML element contains a sub-element whose name matches
+// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
+// will descend into the XML structure looking for elements with the
+// given names, and will map the innermost elements to that struct
+// field. A tag starting with ">" is equivalent to one starting
+// with the field name followed by ">".
+//
+// * If the XML element contains a sub-element whose name matches
+// a struct field's XMLName tag and the struct field has no
+// explicit name tag as per the previous rule, unmarshal maps
+// the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element whose name matches a
+// field without any mode flags (",attr", ",chardata", etc), Unmarshal
+// maps the sub-element to that struct field.
+//
+// * If the XML element contains a sub-element that hasn't matched any
+// of the above rules and the struct has a field with tag ",any",
+// unmarshal maps the sub-element to that struct field.
+//
+// * An anonymous struct field is handled as if the fields of its
+// value were part of the outer struct.
+//
+// * A struct field with tag "-" is never unmarshalled into.
+//
+// Unmarshal maps an XML element to a string or []byte by saving the
+// concatenation of that element's character data in the string or
+// []byte. The saved []byte is never nil.
+//
+// Unmarshal maps an attribute value to a string or []byte by saving
+// the value in the string or slice.
+//
+// Unmarshal maps an XML element to a slice by extending the length of
+// the slice and mapping the element to the newly created value.
+//
+// Unmarshal maps an XML element or attribute value to a bool by
+// setting it to the boolean value represented by the string.
+//
+// Unmarshal maps an XML element or attribute value to an integer or
+// floating-point field by setting the field to the result of
+// interpreting the string value in decimal. There is no check for
+// overflow.
+//
+// Unmarshal maps an XML element to an xml.Name by recording the
+// element name.
+//
+// Unmarshal maps an XML element to a pointer by setting the pointer
+// to a freshly allocated value and then mapping the element to that value.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ return NewDecoder(bytes.NewReader(data)).Decode(v)
+}
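+
+// Illustrative sketch, not part of the upstream file: a minimal use of
+// Unmarshal exercising the XMLName, ",attr" and element rules above, written
+// as a client of this package would. The Email type and its fields are
+// hypothetical.
+//
+//	type Email struct {
+//		XMLName xml.Name `xml:"email"`
+//		Where   string   `xml:"where,attr"`
+//		Addr    string   `xml:"addr"`
+//	}
+//
+//	var e Email
+//	err := xml.Unmarshal([]byte(`<email where="home"><addr>gopher@example.com</addr></email>`), &e)
+//	// On success, e.Where == "home" and e.Addr == "gopher@example.com".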
+
+// Decode works like xml.Unmarshal, except it reads the decoder
+// stream to find the start element.
+func (d *Decoder) Decode(v interface{}) error {
+ return d.DecodeElement(v, nil)
+}
+
+// DecodeElement works like xml.Unmarshal except that it takes
+// a pointer to the start XML element to decode into v.
+// It is useful when a client reads some raw XML tokens itself
+// but also wants to defer to Unmarshal for some elements.
+func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
+ val := reflect.ValueOf(v)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("non-pointer passed to Unmarshal")
+ }
+ return d.unmarshal(val.Elem(), start)
+}
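+
+// Illustrative sketch, not part of the upstream file: a caller that walks
+// tokens itself and defers selected elements to DecodeElement. Item is a
+// hypothetical struct with xml field tags; r is any io.Reader.
+//
+//	d := NewDecoder(r)
+//	for {
+//		tok, err := d.Token()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		if se, ok := tok.(StartElement); ok && se.Name.Local == "item" {
+//			var it Item
+//			if err := d.DecodeElement(&it, &se); err != nil {
+//				return err
+//			}
+//		}
+//	}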
+
+// An UnmarshalError represents an error in the unmarshalling process.
+type UnmarshalError string
+
+func (e UnmarshalError) Error() string { return string(e) }
+
+// Unmarshaler is the interface implemented by objects that can unmarshal
+// an XML element description of themselves.
+//
+// UnmarshalXML decodes a single XML element
+// beginning with the given start element.
+// If it returns an error, the outer call to Unmarshal stops and
+// returns that error.
+// UnmarshalXML must consume exactly one XML element.
+// One common implementation strategy is to unmarshal into
+// a separate value with a layout matching the expected XML
+// using d.DecodeElement, and then to copy the data from
+// that value into the receiver.
+// Another common strategy is to use d.Token to process the
+// XML object one token at a time.
+// UnmarshalXML may not use d.RawToken.
+type Unmarshaler interface {
+ UnmarshalXML(d *Decoder, start StartElement) error
+}
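+
+// A hedged example (not from the upstream source) of the first strategy
+// described above: decode into an auxiliary value, then copy into the
+// receiver. Temperature and its fields are hypothetical.
+//
+//	func (t *Temperature) UnmarshalXML(d *Decoder, start StartElement) error {
+//		var raw struct {
+//			Celsius float64 `xml:"celsius"`
+//		}
+//		if err := d.DecodeElement(&raw, &start); err != nil {
+//			return err
+//		}
+//		t.Kelvin = raw.Celsius + 273.15
+//		return nil
+//	}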
+
+// UnmarshalerAttr is the interface implemented by objects that can unmarshal
+// an XML attribute description of themselves.
+//
+// UnmarshalXMLAttr decodes a single XML attribute.
+// If it returns an error, the outer call to Unmarshal stops and
+// returns that error.
+// UnmarshalXMLAttr is used only for struct fields with the
+// "attr" option in the field tag.
+type UnmarshalerAttr interface {
+ UnmarshalXMLAttr(attr Attr) error
+}
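+
+// Sketch for illustration only (not part of the upstream file): an
+// UnmarshalerAttr for a hypothetical comma-separated list type used with a
+// ",attr" field tag.
+//
+//	type CSV []string
+//
+//	func (c *CSV) UnmarshalXMLAttr(attr Attr) error {
+//		*c = strings.Split(attr.Value, ",")
+//		return nil
+//	}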
+
+// receiverType returns the receiver type to use in an expression like "%s.MethodName".
+func receiverType(val interface{}) string {
+ t := reflect.TypeOf(val)
+ if t.Name() != "" {
+ return t.String()
+ }
+ return "(" + t.String() + ")"
+}
+
+// unmarshalInterface unmarshals a single XML element into val.
+// start is the opening tag of the element.
+func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
+ // Record that decoder must stop at end tag corresponding to start.
+ p.pushEOF()
+
+ p.unmarshalDepth++
+ err := val.UnmarshalXML(p, *start)
+ p.unmarshalDepth--
+ if err != nil {
+ p.popEOF()
+ return err
+ }
+
+ if !p.popEOF() {
+ return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
+ }
+
+ return nil
+}
+
+// unmarshalTextInterface unmarshals a single XML element into val.
+// The chardata contained in the element (but not its children)
+// is passed to the text unmarshaler.
+func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
+ var buf []byte
+ depth := 1
+ for depth > 0 {
+ t, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := t.(type) {
+ case CharData:
+ if depth == 1 {
+ buf = append(buf, t...)
+ }
+ case StartElement:
+ depth++
+ case EndElement:
+ depth--
+ }
+ }
+ return val.UnmarshalText(buf)
+}
+
+// unmarshalAttr unmarshals a single XML attribute into val.
+func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
+ return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
+ }
+ }
+
+ // Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
+ }
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
+ }
+ }
+
+ copyValue(val, []byte(attr.Value))
+ return nil
+}
+
+var (
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+// Unmarshal a single XML element into val.
+func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
+ // Find start element if we need it.
+ if start == nil {
+ for {
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ if t, ok := tok.(StartElement); ok {
+ start = &t
+ break
+ }
+ }
+ }
+
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ e := val.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() {
+ val = e
+ }
+ }
+
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if val.CanInterface() && val.Type().Implements(unmarshalerType) {
+ // This is an unmarshaler with a non-pointer receiver,
+ // so it's likely to be incorrect, but we do what we're told.
+ return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
+ return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
+ }
+ }
+
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
+ return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
+ }
+ }
+
+ var (
+ data []byte
+ saveData reflect.Value
+ comment []byte
+ saveComment reflect.Value
+ saveXML reflect.Value
+ saveXMLIndex int
+ saveXMLData []byte
+ saveAny reflect.Value
+ sv reflect.Value
+ tinfo *typeInfo
+ err error
+ )
+
+ switch v := val; v.Kind() {
+ default:
+ return errors.New("unknown type " + v.Type().String())
+
+ case reflect.Interface:
+ // TODO: For now, simply ignore the field. In the near
+ // future we may choose to unmarshal the start
+ // element on it, if not nil.
+ return p.Skip()
+
+ case reflect.Slice:
+ typ := v.Type()
+ if typ.Elem().Kind() == reflect.Uint8 {
+ // []byte
+ saveData = v
+ break
+ }
+
+ // Slice of element values.
+ // Grow slice.
+ n := v.Len()
+ if n >= v.Cap() {
+ ncap := 2 * n
+ if ncap < 4 {
+ ncap = 4
+ }
+ new := reflect.MakeSlice(typ, n, ncap)
+ reflect.Copy(new, v)
+ v.Set(new)
+ }
+ v.SetLen(n + 1)
+
+ // Recur to read element into slice.
+ if err := p.unmarshal(v.Index(n), start); err != nil {
+ v.SetLen(n)
+ return err
+ }
+ return nil
+
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
+ saveData = v
+
+ case reflect.Struct:
+ typ := v.Type()
+ if typ == nameType {
+ v.Set(reflect.ValueOf(start.Name))
+ break
+ }
+
+ sv = v
+ tinfo, err = getTypeInfo(typ)
+ if err != nil {
+ return err
+ }
+
+ // Validate and assign element name.
+ if tinfo.xmlname != nil {
+ finfo := tinfo.xmlname
+ if finfo.name != "" && finfo.name != start.Name.Local {
+ return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
+ }
+ if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
+ e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
+ if start.Name.Space == "" {
+ e += "no name space"
+ } else {
+ e += start.Name.Space
+ }
+ return UnmarshalError(e)
+ }
+ fv := finfo.value(sv)
+ if _, ok := fv.Interface().(Name); ok {
+ fv.Set(reflect.ValueOf(start.Name))
+ }
+ }
+
+ // Assign attributes.
+ // Also, determine whether we need to save character data or comments.
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ switch finfo.flags & fMode {
+ case fAttr:
+ strv := finfo.value(sv)
+ // Look for attribute.
+ for _, a := range start.Attr {
+ if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
+ if err := p.unmarshalAttr(strv, a); err != nil {
+ return err
+ }
+ break
+ }
+ }
+
+ case fCharData:
+ if !saveData.IsValid() {
+ saveData = finfo.value(sv)
+ }
+
+ case fComment:
+ if !saveComment.IsValid() {
+ saveComment = finfo.value(sv)
+ }
+
+ case fAny, fAny | fElement:
+ if !saveAny.IsValid() {
+ saveAny = finfo.value(sv)
+ }
+
+ case fInnerXml:
+ if !saveXML.IsValid() {
+ saveXML = finfo.value(sv)
+ if p.saved == nil {
+ saveXMLIndex = 0
+ p.saved = new(bytes.Buffer)
+ } else {
+ saveXMLIndex = p.savedOffset()
+ }
+ }
+ }
+ }
+ }
+
+ // Find end element.
+ // Process sub-elements along the way.
+Loop:
+ for {
+ var savedOffset int
+ if saveXML.IsValid() {
+ savedOffset = p.savedOffset()
+ }
+ tok, err := p.Token()
+ if err != nil {
+ return err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ consumed := false
+ if sv.IsValid() {
+ consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
+ if err != nil {
+ return err
+ }
+ if !consumed && saveAny.IsValid() {
+ consumed = true
+ if err := p.unmarshal(saveAny, &t); err != nil {
+ return err
+ }
+ }
+ }
+ if !consumed {
+ if err := p.Skip(); err != nil {
+ return err
+ }
+ }
+
+ case EndElement:
+ if saveXML.IsValid() {
+ saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
+ if saveXMLIndex == 0 {
+ p.saved = nil
+ }
+ }
+ break Loop
+
+ case CharData:
+ if saveData.IsValid() {
+ data = append(data, t...)
+ }
+
+ case Comment:
+ if saveComment.IsValid() {
+ comment = append(comment, t...)
+ }
+ }
+ }
+
+ if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
+ if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return err
+ }
+ saveData = reflect.Value{}
+ }
+
+ if saveData.IsValid() && saveData.CanAddr() {
+ pv := saveData.Addr()
+ if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
+ if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
+ return err
+ }
+ saveData = reflect.Value{}
+ }
+ }
+
+ if err := copyValue(saveData, data); err != nil {
+ return err
+ }
+
+ switch t := saveComment; t.Kind() {
+ case reflect.String:
+ t.SetString(string(comment))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(comment))
+ }
+
+ switch t := saveXML; t.Kind() {
+ case reflect.String:
+ t.SetString(string(saveXMLData))
+ case reflect.Slice:
+ t.Set(reflect.ValueOf(saveXMLData))
+ }
+
+ return nil
+}
+
+func copyValue(dst reflect.Value, src []byte) (err error) {
+ dst0 := dst
+
+ if dst.Kind() == reflect.Ptr {
+ if dst.IsNil() {
+ dst.Set(reflect.New(dst.Type().Elem()))
+ }
+ dst = dst.Elem()
+ }
+
+ // Save accumulated data.
+ switch dst.Kind() {
+ case reflect.Invalid:
+ // Probably a comment.
+ default:
+ return errors.New("cannot unmarshal into " + dst0.Type().String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetInt(itmp)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetUint(utmp)
+ case reflect.Float32, reflect.Float64:
+ ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
+ if err != nil {
+ return err
+ }
+ dst.SetFloat(ftmp)
+ case reflect.Bool:
+ value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
+ if err != nil {
+ return err
+ }
+ dst.SetBool(value)
+ case reflect.String:
+ dst.SetString(string(src))
+ case reflect.Slice:
+ if len(src) == 0 {
+ // non-nil to flag presence
+ src = []byte{}
+ }
+ dst.SetBytes(src)
+ }
+ return nil
+}
+
+// unmarshalPath walks down an XML structure looking for wanted
+// paths, and calls unmarshal on them.
+// The consumed result tells whether XML elements have been consumed
+// from the Decoder until start's matching end element, or if it's
+// still untouched because start is uninteresting for sv's fields.
+func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
+ recurse := false
+Loop:
+ for i := range tinfo.fields {
+ finfo := &tinfo.fields[i]
+ if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
+ continue
+ }
+ for j := range parents {
+ if parents[j] != finfo.parents[j] {
+ continue Loop
+ }
+ }
+ if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
+ // It's a perfect match, unmarshal the field.
+ return true, p.unmarshal(finfo.value(sv), start)
+ }
+ if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
+ // It's a prefix for the field. Break and recurse
+ // since it's not ok for one field path to be itself
+ // the prefix for another field path.
+ recurse = true
+
+ // We can reuse the same slice as long as we
+ // don't try to append to it.
+ parents = finfo.parents[:len(parents)+1]
+ break
+ }
+ }
+ if !recurse {
+ // We have no business with this element.
+ return false, nil
+ }
+ // The element is not a perfect match for any field, but one
+ // or more fields have the path to this element as a parent
+ // prefix. Recurse and attempt to match these.
+ for {
+ var tok Token
+ tok, err = p.Token()
+ if err != nil {
+ return true, err
+ }
+ switch t := tok.(type) {
+ case StartElement:
+ consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
+ if err != nil {
+ return true, err
+ }
+ if !consumed2 {
+ if err := p.Skip(); err != nil {
+ return true, err
+ }
+ }
+ case EndElement:
+ return true, nil
+ }
+ }
+}
+
+// Skip reads tokens until it has consumed the end element
+// matching the most recent start element already consumed.
+// It recurs if it encounters a start element, so it can be used to
+// skip nested structures.
+// It returns nil if it finds an end element matching the start
+// element; otherwise it returns an error describing the problem.
+func (d *Decoder) Skip() error {
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ return err
+ }
+ switch tok.(type) {
+ case StartElement:
+ if err := d.Skip(); err != nil {
+ return err
+ }
+ case EndElement:
+ return nil
+ }
+ }
+}
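+
+// Illustrative sketch, not part of the upstream file: skipping an
+// uninteresting subtree from a hand-written token loop. The element name
+// "wanted" is hypothetical.
+//
+//	tok, err := d.Token()
+//	if err != nil {
+//		return err
+//	}
+//	if se, ok := tok.(StartElement); ok && se.Name.Local != "wanted" {
+//		if err := d.Skip(); err != nil { // consume everything up to the matching end tag
+//			return err
+//		}
+//	}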
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
new file mode 100644
index 0000000..fdde288
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/typeinfo.go
@@ -0,0 +1,371 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xml
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// typeInfo holds details for the xml representation of a type.
+type typeInfo struct {
+ xmlname *fieldInfo
+ fields []fieldInfo
+}
+
+// fieldInfo holds details for the xml representation of a single field.
+type fieldInfo struct {
+ idx []int
+ name string
+ xmlns string
+ flags fieldFlags
+ parents []string
+}
+
+type fieldFlags int
+
+const (
+ fElement fieldFlags = 1 << iota
+ fAttr
+ fCharData
+ fInnerXml
+ fComment
+ fAny
+
+ fOmitEmpty
+
+ fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
+)
+
+var tinfoMap = make(map[reflect.Type]*typeInfo)
+var tinfoLock sync.RWMutex
+
+var nameType = reflect.TypeOf(Name{})
+
+// getTypeInfo returns the typeInfo structure with details necessary
+// for marshalling and unmarshalling typ.
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
+ tinfoLock.RLock()
+ tinfo, ok := tinfoMap[typ]
+ tinfoLock.RUnlock()
+ if ok {
+ return tinfo, nil
+ }
+ tinfo = &typeInfo{}
+ if typ.Kind() == reflect.Struct && typ != nameType {
+ n := typ.NumField()
+ for i := 0; i < n; i++ {
+ f := typ.Field(i)
+ if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
+ continue // Private field
+ }
+
+ // For embedded structs, embed its fields.
+ if f.Anonymous {
+ t := f.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Struct {
+ inner, err := getTypeInfo(t)
+ if err != nil {
+ return nil, err
+ }
+ if tinfo.xmlname == nil {
+ tinfo.xmlname = inner.xmlname
+ }
+ for _, finfo := range inner.fields {
+ finfo.idx = append([]int{i}, finfo.idx...)
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ }
+
+ finfo, err := structFieldInfo(typ, &f)
+ if err != nil {
+ return nil, err
+ }
+
+ if f.Name == "XMLName" {
+ tinfo.xmlname = finfo
+ continue
+ }
+
+ // Add the field if it doesn't conflict with other fields.
+ if err := addFieldInfo(typ, tinfo, finfo); err != nil {
+ return nil, err
+ }
+ }
+ }
+ tinfoLock.Lock()
+ tinfoMap[typ] = tinfo
+ tinfoLock.Unlock()
+ return tinfo, nil
+}
+
+// structFieldInfo builds and returns a fieldInfo for f.
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
+ finfo := &fieldInfo{idx: f.Index}
+
+ // Split the tag from the xml namespace if necessary.
+ tag := f.Tag.Get("xml")
+ if i := strings.Index(tag, " "); i >= 0 {
+ finfo.xmlns, tag = tag[:i], tag[i+1:]
+ }
+
+ // Parse flags.
+ tokens := strings.Split(tag, ",")
+ if len(tokens) == 1 {
+ finfo.flags = fElement
+ } else {
+ tag = tokens[0]
+ for _, flag := range tokens[1:] {
+ switch flag {
+ case "attr":
+ finfo.flags |= fAttr
+ case "chardata":
+ finfo.flags |= fCharData
+ case "innerxml":
+ finfo.flags |= fInnerXml
+ case "comment":
+ finfo.flags |= fComment
+ case "any":
+ finfo.flags |= fAny
+ case "omitempty":
+ finfo.flags |= fOmitEmpty
+ }
+ }
+
+ // Validate the flags used.
+ valid := true
+ switch mode := finfo.flags & fMode; mode {
+ case 0:
+ finfo.flags |= fElement
+ case fAttr, fCharData, fInnerXml, fComment, fAny:
+ if f.Name == "XMLName" || tag != "" && mode != fAttr {
+ valid = false
+ }
+ default:
+ // This will also catch multiple modes in a single field.
+ valid = false
+ }
+ if finfo.flags&fMode == fAny {
+ finfo.flags |= fElement
+ }
+ if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
+ valid = false
+ }
+ if !valid {
+ return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
+ f.Name, typ, f.Tag.Get("xml"))
+ }
+ }
+
+ // Use of xmlns without a name is not allowed.
+ if finfo.xmlns != "" && tag == "" {
+ return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
+ f.Name, typ, f.Tag.Get("xml"))
+ }
+
+ if f.Name == "XMLName" {
+ // The XMLName field records the XML element name. Don't
+ // process it as usual because its name should default to
+ // empty rather than to the field name.
+ finfo.name = tag
+ return finfo, nil
+ }
+
+ if tag == "" {
+ // If the name part of the tag is completely empty, get
+ // default from XMLName of underlying struct if feasible,
+ // or field name otherwise.
+ if xmlname := lookupXMLName(f.Type); xmlname != nil {
+ finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
+ } else {
+ finfo.name = f.Name
+ }
+ return finfo, nil
+ }
+
+ if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
+ // If it's an element with no namespace specified, get the default
+ // from the XMLName of the enclosing struct if possible.
+ if xmlname := lookupXMLName(typ); xmlname != nil {
+ finfo.xmlns = xmlname.xmlns
+ }
+ }
+
+ // Prepare field name and parents.
+ parents := strings.Split(tag, ">")
+ if parents[0] == "" {
+ parents[0] = f.Name
+ }
+ if parents[len(parents)-1] == "" {
+ return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
+ }
+ finfo.name = parents[len(parents)-1]
+ if len(parents) > 1 {
+ if (finfo.flags & fElement) == 0 {
+ return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
+ }
+ finfo.parents = parents[:len(parents)-1]
+ }
+
+ // If the field type has an XMLName field, the names must match
+ // so that the behavior of both marshalling and unmarshalling
+ // is straightforward and unambiguous.
+ if finfo.flags&fElement != 0 {
+ ftyp := f.Type
+ xmlname := lookupXMLName(ftyp)
+ if xmlname != nil && xmlname.name != finfo.name {
+ return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
+ finfo.name, typ, f.Name, xmlname.name, ftyp)
+ }
+ }
+ return finfo, nil
+}
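+
+// A hedged example (not from the upstream source) of the tag forms parsed
+// above, shown on a hypothetical client struct.
+//
+//	type Entry struct {
+//		XMLName xml.Name `xml:"http://example.com/ns entry"` // "namespace-URL name"
+//		ID      string   `xml:"id,attr"`                     // attribute
+//		Title   string   `xml:"meta>title"`                  // parent chain a>b
+//		Body    string   `xml:",chardata"`                   // character data
+//		Skip    string   `xml:"-"`                           // never (un)marshalled
+//	}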
+
+// lookupXMLName returns the fieldInfo for typ's XMLName field
+// in case it exists and has a valid xml field tag, otherwise
+// it returns nil.
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
+ for typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+ if typ.Kind() != reflect.Struct {
+ return nil
+ }
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ f := typ.Field(i)
+ if f.Name != "XMLName" {
+ continue
+ }
+ finfo, err := structFieldInfo(typ, &f)
+ if finfo.name != "" && err == nil {
+ return finfo
+ }
+ // Also consider errors as a non-existent field tag
+ // and let getTypeInfo itself report the error.
+ break
+ }
+ return nil
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+// addFieldInfo adds finfo to tinfo.fields if there are no
+// conflicts, or if conflicts arise from previous fields that were
+// obtained from deeper embedded structures than finfo. In the latter
+// case, the conflicting entries are dropped.
+// A conflict occurs when the path (parent + name) to a field is
+// itself a prefix of another path, or when two paths match exactly.
+// It is okay for field paths to share a common, shorter prefix.
+func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
+ var conflicts []int
+Loop:
+ // First, figure all conflicts. Most working code will have none.
+ for i := range tinfo.fields {
+ oldf := &tinfo.fields[i]
+ if oldf.flags&fMode != newf.flags&fMode {
+ continue
+ }
+ if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
+ continue
+ }
+ minl := min(len(newf.parents), len(oldf.parents))
+ for p := 0; p < minl; p++ {
+ if oldf.parents[p] != newf.parents[p] {
+ continue Loop
+ }
+ }
+ if len(oldf.parents) > len(newf.parents) {
+ if oldf.parents[len(newf.parents)] == newf.name {
+ conflicts = append(conflicts, i)
+ }
+ } else if len(oldf.parents) < len(newf.parents) {
+ if newf.parents[len(oldf.parents)] == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ } else {
+ if newf.name == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ }
+ }
+ // Without conflicts, add the new field and return.
+ if conflicts == nil {
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+ }
+
+ // If any conflict is shallower, ignore the new field.
+ // This matches the Go field resolution on embedding.
+ for _, i := range conflicts {
+ if len(tinfo.fields[i].idx) < len(newf.idx) {
+ return nil
+ }
+ }
+
+ // Otherwise, if any of them is at the same depth level, it's an error.
+ for _, i := range conflicts {
+ oldf := &tinfo.fields[i]
+ if len(oldf.idx) == len(newf.idx) {
+ f1 := typ.FieldByIndex(oldf.idx)
+ f2 := typ.FieldByIndex(newf.idx)
+ return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
+ }
+ }
+
+ // Otherwise, the new field is shallower, and thus takes precedence,
+ // so drop the conflicting fields from tinfo and append the new one.
+ for c := len(conflicts) - 1; c >= 0; c-- {
+ i := conflicts[c]
+ copy(tinfo.fields[i:], tinfo.fields[i+1:])
+ tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
+ }
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+}
+
+// A TagPathError represents an error in the unmarshalling process
+// caused by the use of field tags with conflicting paths.
+type TagPathError struct {
+ Struct reflect.Type
+ Field1, Tag1 string
+ Field2, Tag2 string
+}
+
+func (e *TagPathError) Error() string {
+ return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
+}
+
+// value returns v's field value corresponding to finfo.
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
+// and dereferences pointers as necessary.
+func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
+ for i, x := range finfo.idx {
+ if i > 0 {
+ t := v.Type()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
diff --git a/vendor/golang.org/x/net/webdav/internal/xml/xml.go b/vendor/golang.org/x/net/webdav/internal/xml/xml.go
new file mode 100644
index 0000000..5b79cbe
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/internal/xml/xml.go
@@ -0,0 +1,1998 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xml implements a simple XML 1.0 parser that
+// understands XML name spaces.
+package xml
+
+// References:
+// Annotated XML spec: http://www.xml.com/axml/testaxml.htm
+// XML name spaces: http://www.w3.org/TR/REC-xml-names/
+
+// TODO(rsc):
+// Test error handling.
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A SyntaxError represents a syntax error in the XML input stream.
+type SyntaxError struct {
+ Msg string
+ Line int
+}
+
+func (e *SyntaxError) Error() string {
+ return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg
+}
+
+// A Name represents an XML name (Local) annotated with a name space
+// identifier (Space). In tokens returned by Decoder.Token, the Space
+// identifier is given as a canonical URL, not the short prefix used in
+// the document being parsed.
+//
+// As a special case, XML namespace declarations will use the literal
+// string "xmlns" for the Space field instead of the fully resolved URL.
+// See Encoder.EncodeToken for more information on namespace encoding
+// behaviour.
+type Name struct {
+ Space, Local string
+}
+
+// isNamespace reports whether the name is a namespace-defining name.
+func (name Name) isNamespace() bool {
+ return name.Local == "xmlns" || name.Space == "xmlns"
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+ Name Name
+ Value string
+}
+
+// A Token is an interface holding one of the token types:
+// StartElement, EndElement, CharData, Comment, ProcInst, or Directive.
+type Token interface{}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+ Name Name
+ Attr []Attr
+}
+
+func (e StartElement) Copy() StartElement {
+ attrs := make([]Attr, len(e.Attr))
+ copy(attrs, e.Attr)
+ e.Attr = attrs
+ return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+ return EndElement{e.Name}
+}
+
+// setDefaultNamespace sets the namespace of the element
+// as the default for all elements contained within it.
+func (e *StartElement) setDefaultNamespace() {
+ if e.Name.Space == "" {
+ // If there's no namespace on the element, don't
+ // set the default. Strictly speaking this might be wrong, as
+ // we can't tell if the element had no namespace set
+ // or was just using the default namespace.
+ return
+ }
+ // Don't add a default name space if there's already one set.
+ for _, attr := range e.Attr {
+ if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
+ return
+ }
+ }
+ e.Attr = append(e.Attr, Attr{
+ Name: Name{
+ Local: "xmlns",
+ },
+ Value: e.Name.Space,
+ })
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+ Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+func makeCopy(b []byte) []byte {
+ b1 := make([]byte, len(b))
+ copy(b1, b)
+ return b1
+}
+
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+ Target string
+ Inst []byte
+}
+
+func (p ProcInst) Copy() ProcInst {
+ p.Inst = makeCopy(p.Inst)
+ return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+ switch v := t.(type) {
+ case CharData:
+ return v.Copy()
+ case Comment:
+ return v.Copy()
+ case Directive:
+ return v.Copy()
+ case ProcInst:
+ return v.Copy()
+ case StartElement:
+ return v.Copy()
+ }
+ return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+ // Strict defaults to true, enforcing the requirements
+ // of the XML specification.
+ // If set to false, the parser allows input containing common
+ // mistakes:
+ // * If an element is missing an end tag, the parser invents
+ // end tags as necessary to keep the return values from Token
+ // properly balanced.
+ // * In attribute values and character data, unknown or malformed
+ // character entities (sequences beginning with &) are left alone.
+ //
+ // Setting:
+ //
+ // d.Strict = false;
+ // d.AutoClose = HTMLAutoClose;
+ // d.Entity = HTMLEntity
+ //
+ // creates a parser that can handle typical HTML.
+ //
+ // Strict mode does not enforce the requirements of the XML name spaces TR.
+ // In particular it does not reject name space tags using undefined prefixes.
+ // Such tags are recorded with the unknown prefix as the name space URL.
+ Strict bool
+
+ // When Strict == false, AutoClose indicates a set of elements to
+ // consider closed immediately after they are opened, regardless
+ // of whether an end element is present.
+ AutoClose []string
+
+ // Entity can be used to map non-standard entity names to string replacements.
+ // The parser behaves as if these standard mappings are present in the map,
+ // regardless of the actual map content:
+ //
+ // "lt": "<",
+ // "gt": ">",
+ // "amp": "&",
+ // "apos": "'",
+ // "quot": `"`,
+ Entity map[string]string
+
+ // CharsetReader, if non-nil, defines a function to generate
+ // charset-conversion readers, converting from the provided
+ // non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+ // returns an error, parsing stops with an error. One of
+ // the CharsetReader's result values must be non-nil.
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+ // DefaultSpace sets the default name space used for unadorned tags,
+ // as if the entire XML stream were wrapped in an element containing
+ // the attribute xmlns="DefaultSpace".
+ DefaultSpace string
+
+ r io.ByteReader
+ buf bytes.Buffer
+ saved *bytes.Buffer
+ stk *stack
+ free *stack
+ needClose bool
+ toClose Name
+ nextToken Token
+ nextByte int
+ ns map[string]string
+ err error
+ line int
+ offset int64
+ unmarshalDepth int
+}
+
+// NewDecoder creates a new XML parser reading from r.
+// If r does not implement io.ByteReader, NewDecoder will
+// do its own buffering.
+func NewDecoder(r io.Reader) *Decoder {
+ d := &Decoder{
+ ns: make(map[string]string),
+ nextByte: -1,
+ line: 1,
+ Strict: true,
+ }
+ d.switchToReader(r)
+ return d
+}
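+
+// Illustrative sketch, not part of the upstream file: a lenient decoder of
+// the kind described in the Strict field comment above. The element list and
+// entity map are illustrative values, not the HTML tables from the standard
+// encoding/xml package.
+//
+//	d := NewDecoder(r)
+//	d.Strict = false
+//	d.AutoClose = []string{"br", "hr", "img"}
+//	d.Entity = map[string]string{"nbsp": "\u00a0"}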
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls.
+//
+// Token guarantees that the StartElement and EndElement
+// tokens it returns are properly nested and matched:
+// if Token encounters an unexpected end element,
+// it will return an error.
+//
+// Token implements XML name spaces as described by
+// http://www.w3.org/TR/REC-xml-names/. Each of the
+// Name structures contained in the Token has the Space
+// set to the URL identifying its name space when known.
+// If Token encounters an unrecognized name space prefix,
+// it uses the prefix as the Space rather than reporting an error.
+func (d *Decoder) Token() (t Token, err error) {
+ if d.stk != nil && d.stk.kind == stkEOF {
+ err = io.EOF
+ return
+ }
+ if d.nextToken != nil {
+ t = d.nextToken
+ d.nextToken = nil
+ } else if t, err = d.rawToken(); err != nil {
+ return
+ }
+
+ if !d.Strict {
+ if t1, ok := d.autoClose(t); ok {
+ d.nextToken = t
+ t = t1
+ }
+ }
+ switch t1 := t.(type) {
+ case StartElement:
+ // In XML name spaces, the translations listed in the
+ // attributes apply to the element name and
+ // to the other attribute names, so process
+ // the translations first.
+ for _, a := range t1.Attr {
+ if a.Name.Space == "xmlns" {
+ v, ok := d.ns[a.Name.Local]
+ d.pushNs(a.Name.Local, v, ok)
+ d.ns[a.Name.Local] = a.Value
+ }
+ if a.Name.Space == "" && a.Name.Local == "xmlns" {
+ // Default space for untagged names
+ v, ok := d.ns[""]
+ d.pushNs("", v, ok)
+ d.ns[""] = a.Value
+ }
+ }
+
+ d.translate(&t1.Name, true)
+ for i := range t1.Attr {
+ d.translate(&t1.Attr[i].Name, false)
+ }
+ d.pushElement(t1.Name)
+ t = t1
+
+ case EndElement:
+ d.translate(&t1.Name, true)
+ if !d.popElement(&t1) {
+ return nil, d.err
+ }
+ t = t1
+ }
+ return
+}
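+
+// Sketch for illustration only (not part of the upstream file): the usual
+// Token loop, treating io.EOF as the normal end of input as documented above.
+// r is any io.Reader supplied by the caller.
+//
+//	d := NewDecoder(r)
+//	var names []string
+//	for {
+//		tok, err := d.Token()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		if se, ok := tok.(StartElement); ok {
+//			names = append(names, se.Name.Local)
+//		}
+//	}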
+
+const xmlURL = "http://www.w3.org/XML/1998/namespace"
+
+// Apply name space translation to name n.
+// The default name space (for Space=="")
+// applies only to element names, not to attribute names.
+func (d *Decoder) translate(n *Name, isElementName bool) {
+ switch {
+ case n.Space == "xmlns":
+ return
+ case n.Space == "" && !isElementName:
+ return
+ case n.Space == "xml":
+ n.Space = xmlURL
+ case n.Space == "" && n.Local == "xmlns":
+ return
+ }
+ if v, ok := d.ns[n.Space]; ok {
+ n.Space = v
+ } else if n.Space == "" {
+ n.Space = d.DefaultSpace
+ }
+}
+
+func (d *Decoder) switchToReader(r io.Reader) {
+ // Get efficient byte at a time reader.
+ // Assume that if reader has its own
+ // ReadByte, it's efficient enough.
+ // Otherwise, use bufio.
+ if rb, ok := r.(io.ByteReader); ok {
+ d.r = rb
+ } else {
+ d.r = bufio.NewReader(r)
+ }
+}
+
+// Parsing state - stack holds old name space translations
+// and the current set of open elements. The translations to pop when
+// ending a given tag are *below* it on the stack, which is
+// more work but forced on us by XML.
+type stack struct {
+ next *stack
+ kind int
+ name Name
+ ok bool
+}
+
+const (
+ stkStart = iota
+ stkNs
+ stkEOF
+)
+
+func (d *Decoder) push(kind int) *stack {
+ s := d.free
+ if s != nil {
+ d.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.next = d.stk
+ s.kind = kind
+ d.stk = s
+ return s
+}
+
+func (d *Decoder) pop() *stack {
+ s := d.stk
+ if s != nil {
+ d.stk = s.next
+ s.next = d.free
+ d.free = s
+ }
+ return s
+}
+
+// Record that after the current element is finished
+// (that element is already pushed on the stack)
+// Token should return EOF until popEOF is called.
+func (d *Decoder) pushEOF() {
+ // Walk down stack to find Start.
+ // It might not be the top, because there might be stkNs
+ // entries above it.
+ start := d.stk
+ for start.kind != stkStart {
+ start = start.next
+ }
+ // The stkNs entries below a start are associated with that
+ // element too; skip over them.
+ for start.next != nil && start.next.kind == stkNs {
+ start = start.next
+ }
+ s := d.free
+ if s != nil {
+ d.free = s.next
+ } else {
+ s = new(stack)
+ }
+ s.kind = stkEOF
+ s.next = start.next
+ start.next = s
+}
+
+// Undo a pushEOF.
+// The element must have been finished, so the EOF should be at the top of the stack.
+func (d *Decoder) popEOF() bool {
+ if d.stk == nil || d.stk.kind != stkEOF {
+ return false
+ }
+ d.pop()
+ return true
+}
+
+// Record that we are starting an element with the given name.
+func (d *Decoder) pushElement(name Name) {
+ s := d.push(stkStart)
+ s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (d *Decoder) pushNs(local string, url string, ok bool) {
+ s := d.push(stkNs)
+ s.name.Local = local
+ s.name.Space = url
+ s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (d *Decoder) syntaxError(msg string) error {
+ return &SyntaxError{Msg: msg, Line: d.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (d *Decoder) popElement(t *EndElement) bool {
+ s := d.pop()
+ name := t.Name
+ switch {
+ case s == nil || s.kind != stkStart:
+ d.err = d.syntaxError("unexpected end element </" + name.Local + ">")
+ return false
+ case s.name.Local != name.Local:
+ if !d.Strict {
+ d.needClose = true
+ d.toClose = t.Name
+ t.Name = s.name
+ return true
+ }
+ d.err = d.syntaxError("element <" + s.name.Local + "> closed by </" + name.Local + ">")
+ return false
+ case s.name.Space != name.Space:
+ d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space +
+ "closed by </" + name.Local + "> in space " + name.Space)
+ return false
+ }
+
+ // Pop stack until a Start or EOF is on the top, undoing the
+ // translations that were associated with the element we just closed.
+ for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF {
+ s := d.pop()
+ if s.ok {
+ d.ns[s.name.Local] = s.name.Space
+ } else {
+ delete(d.ns, s.name.Local)
+ }
+ }
+
+ return true
+}
+
+// If the top element on the stack is autoclosing and
+// t is not the end tag, invent the end tag.
+func (d *Decoder) autoClose(t Token) (Token, bool) {
+ if d.stk == nil || d.stk.kind != stkStart {
+ return nil, false
+ }
+ name := strings.ToLower(d.stk.name.Local)
+ for _, s := range d.AutoClose {
+ if strings.ToLower(s) == name {
+ // This one should be auto closed if t doesn't close it.
+ et, ok := t.(EndElement)
+ if !ok || et.Name.Local != name {
+ return EndElement{d.stk.name}, true
+ }
+ break
+ }
+ }
+ return nil, false
+}
+
+var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method")
+
+// RawToken is like Token but does not verify that
+// start and end elements match and does not translate
+// name space prefixes to their corresponding URLs.
+func (d *Decoder) RawToken() (Token, error) {
+ if d.unmarshalDepth > 0 {
+ return nil, errRawToken
+ }
+ return d.rawToken()
+}
+
+func (d *Decoder) rawToken() (Token, error) {
+ if d.err != nil {
+ return nil, d.err
+ }
+ if d.needClose {
+ // The last element we read was self-closing and
+ // we returned just the StartElement half.
+ // Return the EndElement half now.
+ d.needClose = false
+ return EndElement{d.toClose}, nil
+ }
+
+ b, ok := d.getc()
+ if !ok {
+ return nil, d.err
+ }
+
+ if b != '<' {
+ // Text section.
+ d.ungetc(b)
+ data := d.text(-1, false)
+ if data == nil {
+ return nil, d.err
+ }
+ return CharData(data), nil
+ }
+
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ switch b {
+ case '/':
+ // </: End element
+ var name Name
+ if name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected element name after </")
+ }
+ return nil, d.err
+ }
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '>' {
+ d.err = d.syntaxError("invalid characters between </" + name.Local + " and >")
+ return nil, d.err
+ }
+ return EndElement{name}, nil
+
+ case '?':
+ // <?: Processing instruction.
+ var target string
+ if target, ok = d.name(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected target name after <?")
+ }
+ return nil, d.err
+ }
+ d.space()
+ d.buf.Reset()
+ var b0 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ d.buf.WriteByte(b)
+ if b0 == '?' && b == '>' {
+ break
+ }
+ b0 = b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-2] // chop ?>
+
+ if target == "xml" {
+ content := string(data)
+ ver := procInst("version", content)
+ if ver != "" && ver != "1.0" {
+ d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver)
+ return nil, d.err
+ }
+ enc := procInst("encoding", content)
+ if enc != "" && enc != "utf-8" && enc != "UTF-8" {
+ if d.CharsetReader == nil {
+ d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
+ return nil, d.err
+ }
+ newr, err := d.CharsetReader(enc, d.r.(io.Reader))
+ if err != nil {
+ d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
+ return nil, d.err
+ }
+ if newr == nil {
+ panic("CharsetReader returned a nil Reader for charset " + enc)
+ }
+ d.switchToReader(newr)
+ }
+ }
+ return ProcInst{target, data}, nil
+
+ case '!':
+ // <!: Maybe comment, maybe CDATA.
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ switch b {
+ case '-': // <!-
+ // Probably <!-- for a comment.
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '-' {
+ d.err = d.syntaxError("invalid sequence <!- not part of <!--")
+ return nil, d.err
+ }
+ // Look for terminator.
+ d.buf.Reset()
+ var b0, b1 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ d.buf.WriteByte(b)
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-3] // chop -->
+ return Comment(data), nil
+
+ case '[': // <![
+ // Probably <![CDATA[.
+ for i := 0; i < 6; i++ {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != "CDATA["[i] {
+ d.err = d.syntaxError("invalid <![ sequence")
+ return nil, d.err
+ }
+ }
+ // Have <![CDATA[. Read text until ]]>.
+ data := d.text(-1, true)
+ if data == nil {
+ return nil, d.err
+ }
+ return CharData(data), nil
+ }
+
+ // Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+ // We don't care, but accumulate for caller. Quoted angle
+ // brackets do not count for nesting.
+ d.buf.Reset()
+ d.buf.WriteByte(b)
+ inquote := uint8(0)
+ depth := 0
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if inquote == 0 && b == '>' && depth == 0 {
+ break
+ }
+ HandleB:
+ d.buf.WriteByte(b)
+ switch {
+ case b == inquote:
+ inquote = 0
+
+ case inquote != 0:
+ // in quotes, no special action
+
+ case b == '\'' || b == '"':
+ inquote = b
+
+ case b == '>' && inquote == 0:
+ depth--
+
+ case b == '<' && inquote == 0:
+ // Look for <!-- to begin comment.
+ s := "!--"
+ for i := 0; i < len(s); i++ {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != s[i] {
+ for j := 0; j < i; j++ {
+ d.buf.WriteByte(s[j])
+ }
+ depth++
+ goto HandleB
+ }
+ }
+
+ // Remove < that was written above.
+ d.buf.Truncate(d.buf.Len() - 1)
+
+ // Look for terminator.
+ var b0, b1 byte
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b0 == '-' && b1 == '-' && b == '>' {
+ break
+ }
+ b0, b1 = b1, b
+ }
+ }
+ }
+ return Directive(d.buf.Bytes()), nil
+ }
+
+ // Must be an open element like <a href="foo">
+ d.ungetc(b)
+
+ var (
+ name Name
+ empty bool
+ attr []Attr
+ )
+ if name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected element name after <")
+ }
+ return nil, d.err
+ }
+
+ attr = []Attr{}
+ for {
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b == '/' {
+ empty = true
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '>' {
+ d.err = d.syntaxError("expected /> in element")
+ return nil, d.err
+ }
+ break
+ }
+ if b == '>' {
+ break
+ }
+ d.ungetc(b)
+
+ n := len(attr)
+ if n >= cap(attr) {
+ nCap := 2 * cap(attr)
+ if nCap == 0 {
+ nCap = 4
+ }
+ nattr := make([]Attr, n, nCap)
+ copy(nattr, attr)
+ attr = nattr
+ }
+ attr = attr[0 : n+1]
+ a := &attr[n]
+ if a.Name, ok = d.nsname(); !ok {
+ if d.err == nil {
+ d.err = d.syntaxError("expected attribute name in element")
+ }
+ return nil, d.err
+ }
+ d.space()
+ if b, ok = d.mustgetc(); !ok {
+ return nil, d.err
+ }
+ if b != '=' {
+ if d.Strict {
+ d.err = d.syntaxError("attribute name without = in element")
+ return nil, d.err
+ } else {
+ d.ungetc(b)
+ a.Value = a.Name.Local
+ }
+ } else {
+ d.space()
+ data := d.attrval()
+ if data == nil {
+ return nil, d.err
+ }
+ a.Value = string(data)
+ }
+ }
+ if empty {
+ d.needClose = true
+ d.toClose = name
+ }
+ return StartElement{name, attr}, nil
+}
+
+func (d *Decoder) attrval() []byte {
+ b, ok := d.mustgetc()
+ if !ok {
+ return nil
+ }
+ // Handle quoted attribute values
+ if b == '"' || b == '\'' {
+ return d.text(int(b), false)
+ }
+ // Handle unquoted attribute values for strict parsers
+ if d.Strict {
+ d.err = d.syntaxError("unquoted or missing attribute value in element")
+ return nil
+ }
+ // Handle unquoted attribute values for non-strict parsers
+ d.ungetc(b)
+ d.buf.Reset()
+ for {
+ b, ok = d.mustgetc()
+ if !ok {
+ return nil
+ }
+ // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2
+ if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' {
+ d.buf.WriteByte(b)
+ } else {
+ d.ungetc(b)
+ break
+ }
+ }
+ return d.buf.Bytes()
+}
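+
+// For illustration only (not part of the upstream source): with Strict
+// disabled, attrval accepts HTML-style unquoted values, so input such as
+// <a href=foo> yields the attribute value "foo", and a bare attribute like
+// <input checked> (handled by the caller above) gets the value "checked".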
+
+// Skip spaces if any
+func (d *Decoder) space() {
+ for {
+ b, ok := d.getc()
+ if !ok {
+ return
+ }
+ switch b {
+ case ' ', '\r', '\n', '\t':
+ default:
+ d.ungetc(b)
+ return
+ }
+ }
+}
+
+// Read a single byte.
+// If there is no byte to read, return ok==false
+// and leave the error in d.err.
+// Maintain line number.
+func (d *Decoder) getc() (b byte, ok bool) {
+ if d.err != nil {
+ return 0, false
+ }
+ if d.nextByte >= 0 {
+ b = byte(d.nextByte)
+ d.nextByte = -1
+ } else {
+ b, d.err = d.r.ReadByte()
+ if d.err != nil {
+ return 0, false
+ }
+ if d.saved != nil {
+ d.saved.WriteByte(b)
+ }
+ }
+ if b == '\n' {
+ d.line++
+ }
+ d.offset++
+ return b, true
+}
+
+// InputOffset returns the input stream byte offset of the current decoder position.
+// The offset gives the location of the end of the most recently returned token
+// and the beginning of the next token.
+func (d *Decoder) InputOffset() int64 {
+ return d.offset
+}
+
+// Return saved offset.
+// If we did ungetc (nextByte >= 0), have to back up one.
+func (d *Decoder) savedOffset() int {
+ n := d.saved.Len()
+ if d.nextByte >= 0 {
+ n--
+ }
+ return n
+}
+
+// Must read a single byte.
+// If there is no byte to read,
+// set d.err to SyntaxError("unexpected EOF")
+// and return ok==false
+func (d *Decoder) mustgetc() (b byte, ok bool) {
+ if b, ok = d.getc(); !ok {
+ if d.err == io.EOF {
+ d.err = d.syntaxError("unexpected EOF")
+ }
+ }
+ return
+}
+
+// Unread a single byte.
+func (d *Decoder) ungetc(b byte) {
+ if b == '\n' {
+ d.line--
+ }
+ d.nextByte = int(b)
+ d.offset--
+}
+
+var entity = map[string]int{
+ "lt": '<',
+ "gt": '>',
+ "amp": '&',
+ "apos": '\'',
+ "quot": '"',
+}
+
+// Read plain text section (XML calls it character data).
+// If quote >= 0, we are in a quoted string and need to find the matching quote.
+// If cdata == true, we are in a <![CDATA[ section and need to find ]]>.
+// On failure return nil and leave the error in d.err.
+func (d *Decoder) text(quote int, cdata bool) []byte {
+ var b0, b1 byte
+ var trunc int
+ d.buf.Reset()
+Input:
+ for {
+ b, ok := d.getc()
+ if !ok {
+ if cdata {
+ if d.err == io.EOF {
+ d.err = d.syntaxError("unexpected EOF in CDATA section")
+ }
+ return nil
+ }
+ break Input
+ }
+
+ // <![CDATA[ section ends with ]]>.
+ // It is an error for ]]> to appear in ordinary text.
+ if b0 == ']' && b1 == ']' && b == '>' {
+ if cdata {
+ trunc = 2
+ break Input
+ }
+ d.err = d.syntaxError("unescaped ]]> not in CDATA section")
+ return nil
+ }
+
+ // Stop reading text if we see a <.
+ if b == '<' && !cdata {
+ if quote >= 0 {
+ d.err = d.syntaxError("unescaped < inside quoted string")
+ return nil
+ }
+ d.ungetc('<')
+ break Input
+ }
+ if quote >= 0 && b == byte(quote) {
+ break Input
+ }
+ if b == '&' && !cdata {
+ // Read escaped character expression up to semicolon.
+ // XML in all its glory allows a document to define and use
+ // its own character names with <!ENTITY ...> directives.
+ // Parsers are required to recognize lt, gt, amp, apos, and quot
+ // even if they have not been declared.
+ before := d.buf.Len()
+ d.buf.WriteByte('&')
+ var ok bool
+ var text string
+ var haveText bool
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ if b == '#' {
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ base := 10
+ if b == 'x' {
+ base = 16
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ }
+ start := d.buf.Len()
+ for '0' <= b && b <= '9' ||
+ base == 16 && 'a' <= b && b <= 'f' ||
+ base == 16 && 'A' <= b && b <= 'F' {
+ d.buf.WriteByte(b)
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ }
+ if b != ';' {
+ d.ungetc(b)
+ } else {
+ s := string(d.buf.Bytes()[start:])
+ d.buf.WriteByte(';')
+ n, err := strconv.ParseUint(s, base, 64)
+ if err == nil && n <= unicode.MaxRune {
+ text = string(rune(n))
+ haveText = true
+ }
+ }
+ } else {
+ d.ungetc(b)
+ if !d.readName() {
+ if d.err != nil {
+ return nil
+ }
+ ok = false
+ }
+ if b, ok = d.mustgetc(); !ok {
+ return nil
+ }
+ if b != ';' {
+ d.ungetc(b)
+ } else {
+ name := d.buf.Bytes()[before+1:]
+ d.buf.WriteByte(';')
+ if isName(name) {
+ s := string(name)
+ if r, ok := entity[s]; ok {
+ text = string(rune(r))
+ haveText = true
+ } else if d.Entity != nil {
+ text, haveText = d.Entity[s]
+ }
+ }
+ }
+ }
+
+ if haveText {
+ d.buf.Truncate(before)
+ d.buf.Write([]byte(text))
+ b0, b1 = 0, 0
+ continue Input
+ }
+ if !d.Strict {
+ b0, b1 = 0, 0
+ continue Input
+ }
+ ent := string(d.buf.Bytes()[before:])
+ if ent[len(ent)-1] != ';' {
+ ent += " (no semicolon)"
+ }
+ d.err = d.syntaxError("invalid character entity " + ent)
+ return nil
+ }
+
+ // We must rewrite unescaped \r and \r\n into \n.
+ if b == '\r' {
+ d.buf.WriteByte('\n')
+ } else if b1 == '\r' && b == '\n' {
+ // Skip \r\n--we already wrote \n.
+ } else {
+ d.buf.WriteByte(b)
+ }
+
+ b0, b1 = b1, b
+ }
+ data := d.buf.Bytes()
+ data = data[0 : len(data)-trunc]
+
+ // Inspect each rune for being a disallowed character.
+ buf := data
+ for len(buf) > 0 {
+ r, size := utf8.DecodeRune(buf)
+ if r == utf8.RuneError && size == 1 {
+ d.err = d.syntaxError("invalid UTF-8")
+ return nil
+ }
+ buf = buf[size:]
+ if !isInCharacterRange(r) {
+ d.err = d.syntaxError(fmt.Sprintf("illegal character code %U", r))
+ return nil
+ }
+ }
+
+ return data
+}
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of http://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
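+
+// For illustration only (not part of the upstream source):
+//
+//	isInCharacterRange('\t')   // true: #x9 is explicitly allowed
+//	isInCharacterRange(0x08)   // false: most C0 control codes are excluded
+//	isInCharacterRange(0xFFFE) // false: above the #xFFFD limit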
+
+// Get name space name: name with a : stuck in the middle.
+// The part before the : is the name space identifier.
+func (d *Decoder) nsname() (name Name, ok bool) {
+ s, ok := d.name()
+ if !ok {
+ return
+ }
+ i := strings.Index(s, ":")
+ if i < 0 {
+ name.Local = s
+ } else {
+ name.Space = s[0:i]
+ name.Local = s[i+1:]
+ }
+ return name, true
+}
+
+// Get name: /first(first|second)*/
+// Do not set d.err if the name is missing (unless unexpected EOF is received):
+// let the caller provide better context.
+func (d *Decoder) name() (s string, ok bool) {
+ d.buf.Reset()
+ if !d.readName() {
+ return "", false
+ }
+
+ // Now we check the characters.
+ b := d.buf.Bytes()
+ if !isName(b) {
+ d.err = d.syntaxError("invalid XML name: " + string(b))
+ return "", false
+ }
+ return string(b), true
+}
+
+// Read a name and append its bytes to d.buf.
+// The name is delimited by any single-byte character not valid in names.
+// All multi-byte characters are accepted; the caller must check their validity.
+func (d *Decoder) readName() (ok bool) {
+ var b byte
+ if b, ok = d.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ d.ungetc(b)
+ return false
+ }
+ d.buf.WriteByte(b)
+
+ for {
+ if b, ok = d.mustgetc(); !ok {
+ return
+ }
+ if b < utf8.RuneSelf && !isNameByte(b) {
+ d.ungetc(b)
+ break
+ }
+ d.buf.WriteByte(b)
+ }
+ return true
+}
+
+func isNameByte(c byte) bool {
+ return 'A' <= c && c <= 'Z' ||
+ 'a' <= c && c <= 'z' ||
+ '0' <= c && c <= '9' ||
+ c == '_' || c == ':' || c == '.' || c == '-'
+}
+
+func isName(s []byte) bool {
+ if len(s) == 0 {
+ return false
+ }
+ c, n := utf8.DecodeRune(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) {
+ return false
+ }
+ for n < len(s) {
+ s = s[n:]
+ c, n = utf8.DecodeRune(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) && !unicode.Is(second, c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isNameString(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ c, n := utf8.DecodeRuneInString(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) {
+ return false
+ }
+ for n < len(s) {
+ s = s[n:]
+ c, n = utf8.DecodeRuneInString(s)
+ if c == utf8.RuneError && n == 1 {
+ return false
+ }
+ if !unicode.Is(first, c) && !unicode.Is(second, c) {
+ return false
+ }
+ }
+ return true
+}
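+
+// For illustration only (not part of the upstream source):
+//
+//	isNameString("xml:lang") // true: letters and ':' are valid name characters
+//	isNameString("_id")      // true: '_' may start a name
+//	isNameString("2nd")      // false: a name may not start with a digit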
+
+// These tables were generated by cut and paste from Appendix B of
+// the XML spec at http://www.xml.com/axml/testaxml.htm
+// and then reformatting. First corresponds to (Letter | '_' | ':')
+// and second corresponds to NameChar.
+
+var first = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x003A, 0x003A, 1},
+ {0x0041, 0x005A, 1},
+ {0x005F, 0x005F, 1},
+ {0x0061, 0x007A, 1},
+ {0x00C0, 0x00D6, 1},
+ {0x00D8, 0x00F6, 1},
+ {0x00F8, 0x00FF, 1},
+ {0x0100, 0x0131, 1},
+ {0x0134, 0x013E, 1},
+ {0x0141, 0x0148, 1},
+ {0x014A, 0x017E, 1},
+ {0x0180, 0x01C3, 1},
+ {0x01CD, 0x01F0, 1},
+ {0x01F4, 0x01F5, 1},
+ {0x01FA, 0x0217, 1},
+ {0x0250, 0x02A8, 1},
+ {0x02BB, 0x02C1, 1},
+ {0x0386, 0x0386, 1},
+ {0x0388, 0x038A, 1},
+ {0x038C, 0x038C, 1},
+ {0x038E, 0x03A1, 1},
+ {0x03A3, 0x03CE, 1},
+ {0x03D0, 0x03D6, 1},
+ {0x03DA, 0x03E0, 2},
+ {0x03E2, 0x03F3, 1},
+ {0x0401, 0x040C, 1},
+ {0x040E, 0x044F, 1},
+ {0x0451, 0x045C, 1},
+ {0x045E, 0x0481, 1},
+ {0x0490, 0x04C4, 1},
+ {0x04C7, 0x04C8, 1},
+ {0x04CB, 0x04CC, 1},
+ {0x04D0, 0x04EB, 1},
+ {0x04EE, 0x04F5, 1},
+ {0x04F8, 0x04F9, 1},
+ {0x0531, 0x0556, 1},
+ {0x0559, 0x0559, 1},
+ {0x0561, 0x0586, 1},
+ {0x05D0, 0x05EA, 1},
+ {0x05F0, 0x05F2, 1},
+ {0x0621, 0x063A, 1},
+ {0x0641, 0x064A, 1},
+ {0x0671, 0x06B7, 1},
+ {0x06BA, 0x06BE, 1},
+ {0x06C0, 0x06CE, 1},
+ {0x06D0, 0x06D3, 1},
+ {0x06D5, 0x06D5, 1},
+ {0x06E5, 0x06E6, 1},
+ {0x0905, 0x0939, 1},
+ {0x093D, 0x093D, 1},
+ {0x0958, 0x0961, 1},
+ {0x0985, 0x098C, 1},
+ {0x098F, 0x0990, 1},
+ {0x0993, 0x09A8, 1},
+ {0x09AA, 0x09B0, 1},
+ {0x09B2, 0x09B2, 1},
+ {0x09B6, 0x09B9, 1},
+ {0x09DC, 0x09DD, 1},
+ {0x09DF, 0x09E1, 1},
+ {0x09F0, 0x09F1, 1},
+ {0x0A05, 0x0A0A, 1},
+ {0x0A0F, 0x0A10, 1},
+ {0x0A13, 0x0A28, 1},
+ {0x0A2A, 0x0A30, 1},
+ {0x0A32, 0x0A33, 1},
+ {0x0A35, 0x0A36, 1},
+ {0x0A38, 0x0A39, 1},
+ {0x0A59, 0x0A5C, 1},
+ {0x0A5E, 0x0A5E, 1},
+ {0x0A72, 0x0A74, 1},
+ {0x0A85, 0x0A8B, 1},
+ {0x0A8D, 0x0A8D, 1},
+ {0x0A8F, 0x0A91, 1},
+ {0x0A93, 0x0AA8, 1},
+ {0x0AAA, 0x0AB0, 1},
+ {0x0AB2, 0x0AB3, 1},
+ {0x0AB5, 0x0AB9, 1},
+ {0x0ABD, 0x0AE0, 0x23},
+ {0x0B05, 0x0B0C, 1},
+ {0x0B0F, 0x0B10, 1},
+ {0x0B13, 0x0B28, 1},
+ {0x0B2A, 0x0B30, 1},
+ {0x0B32, 0x0B33, 1},
+ {0x0B36, 0x0B39, 1},
+ {0x0B3D, 0x0B3D, 1},
+ {0x0B5C, 0x0B5D, 1},
+ {0x0B5F, 0x0B61, 1},
+ {0x0B85, 0x0B8A, 1},
+ {0x0B8E, 0x0B90, 1},
+ {0x0B92, 0x0B95, 1},
+ {0x0B99, 0x0B9A, 1},
+ {0x0B9C, 0x0B9C, 1},
+ {0x0B9E, 0x0B9F, 1},
+ {0x0BA3, 0x0BA4, 1},
+ {0x0BA8, 0x0BAA, 1},
+ {0x0BAE, 0x0BB5, 1},
+ {0x0BB7, 0x0BB9, 1},
+ {0x0C05, 0x0C0C, 1},
+ {0x0C0E, 0x0C10, 1},
+ {0x0C12, 0x0C28, 1},
+ {0x0C2A, 0x0C33, 1},
+ {0x0C35, 0x0C39, 1},
+ {0x0C60, 0x0C61, 1},
+ {0x0C85, 0x0C8C, 1},
+ {0x0C8E, 0x0C90, 1},
+ {0x0C92, 0x0CA8, 1},
+ {0x0CAA, 0x0CB3, 1},
+ {0x0CB5, 0x0CB9, 1},
+ {0x0CDE, 0x0CDE, 1},
+ {0x0CE0, 0x0CE1, 1},
+ {0x0D05, 0x0D0C, 1},
+ {0x0D0E, 0x0D10, 1},
+ {0x0D12, 0x0D28, 1},
+ {0x0D2A, 0x0D39, 1},
+ {0x0D60, 0x0D61, 1},
+ {0x0E01, 0x0E2E, 1},
+ {0x0E30, 0x0E30, 1},
+ {0x0E32, 0x0E33, 1},
+ {0x0E40, 0x0E45, 1},
+ {0x0E81, 0x0E82, 1},
+ {0x0E84, 0x0E84, 1},
+ {0x0E87, 0x0E88, 1},
+ {0x0E8A, 0x0E8D, 3},
+ {0x0E94, 0x0E97, 1},
+ {0x0E99, 0x0E9F, 1},
+ {0x0EA1, 0x0EA3, 1},
+ {0x0EA5, 0x0EA7, 2},
+ {0x0EAA, 0x0EAB, 1},
+ {0x0EAD, 0x0EAE, 1},
+ {0x0EB0, 0x0EB0, 1},
+ {0x0EB2, 0x0EB3, 1},
+ {0x0EBD, 0x0EBD, 1},
+ {0x0EC0, 0x0EC4, 1},
+ {0x0F40, 0x0F47, 1},
+ {0x0F49, 0x0F69, 1},
+ {0x10A0, 0x10C5, 1},
+ {0x10D0, 0x10F6, 1},
+ {0x1100, 0x1100, 1},
+ {0x1102, 0x1103, 1},
+ {0x1105, 0x1107, 1},
+ {0x1109, 0x1109, 1},
+ {0x110B, 0x110C, 1},
+ {0x110E, 0x1112, 1},
+ {0x113C, 0x1140, 2},
+ {0x114C, 0x1150, 2},
+ {0x1154, 0x1155, 1},
+ {0x1159, 0x1159, 1},
+ {0x115F, 0x1161, 1},
+ {0x1163, 0x1169, 2},
+ {0x116D, 0x116E, 1},
+ {0x1172, 0x1173, 1},
+ {0x1175, 0x119E, 0x119E - 0x1175},
+ {0x11A8, 0x11AB, 0x11AB - 0x11A8},
+ {0x11AE, 0x11AF, 1},
+ {0x11B7, 0x11B8, 1},
+ {0x11BA, 0x11BA, 1},
+ {0x11BC, 0x11C2, 1},
+ {0x11EB, 0x11F0, 0x11F0 - 0x11EB},
+ {0x11F9, 0x11F9, 1},
+ {0x1E00, 0x1E9B, 1},
+ {0x1EA0, 0x1EF9, 1},
+ {0x1F00, 0x1F15, 1},
+ {0x1F18, 0x1F1D, 1},
+ {0x1F20, 0x1F45, 1},
+ {0x1F48, 0x1F4D, 1},
+ {0x1F50, 0x1F57, 1},
+ {0x1F59, 0x1F5B, 0x1F5B - 0x1F59},
+ {0x1F5D, 0x1F5D, 1},
+ {0x1F5F, 0x1F7D, 1},
+ {0x1F80, 0x1FB4, 1},
+ {0x1FB6, 0x1FBC, 1},
+ {0x1FBE, 0x1FBE, 1},
+ {0x1FC2, 0x1FC4, 1},
+ {0x1FC6, 0x1FCC, 1},
+ {0x1FD0, 0x1FD3, 1},
+ {0x1FD6, 0x1FDB, 1},
+ {0x1FE0, 0x1FEC, 1},
+ {0x1FF2, 0x1FF4, 1},
+ {0x1FF6, 0x1FFC, 1},
+ {0x2126, 0x2126, 1},
+ {0x212A, 0x212B, 1},
+ {0x212E, 0x212E, 1},
+ {0x2180, 0x2182, 1},
+ {0x3007, 0x3007, 1},
+ {0x3021, 0x3029, 1},
+ {0x3041, 0x3094, 1},
+ {0x30A1, 0x30FA, 1},
+ {0x3105, 0x312C, 1},
+ {0x4E00, 0x9FA5, 1},
+ {0xAC00, 0xD7A3, 1},
+ },
+}
+
+var second = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ {0x002D, 0x002E, 1},
+ {0x0030, 0x0039, 1},
+ {0x00B7, 0x00B7, 1},
+ {0x02D0, 0x02D1, 1},
+ {0x0300, 0x0345, 1},
+ {0x0360, 0x0361, 1},
+ {0x0387, 0x0387, 1},
+ {0x0483, 0x0486, 1},
+ {0x0591, 0x05A1, 1},
+ {0x05A3, 0x05B9, 1},
+ {0x05BB, 0x05BD, 1},
+ {0x05BF, 0x05BF, 1},
+ {0x05C1, 0x05C2, 1},
+ {0x05C4, 0x0640, 0x0640 - 0x05C4},
+ {0x064B, 0x0652, 1},
+ {0x0660, 0x0669, 1},
+ {0x0670, 0x0670, 1},
+ {0x06D6, 0x06DC, 1},
+ {0x06DD, 0x06DF, 1},
+ {0x06E0, 0x06E4, 1},
+ {0x06E7, 0x06E8, 1},
+ {0x06EA, 0x06ED, 1},
+ {0x06F0, 0x06F9, 1},
+ {0x0901, 0x0903, 1},
+ {0x093C, 0x093C, 1},
+ {0x093E, 0x094C, 1},
+ {0x094D, 0x094D, 1},
+ {0x0951, 0x0954, 1},
+ {0x0962, 0x0963, 1},
+ {0x0966, 0x096F, 1},
+ {0x0981, 0x0983, 1},
+ {0x09BC, 0x09BC, 1},
+ {0x09BE, 0x09BF, 1},
+ {0x09C0, 0x09C4, 1},
+ {0x09C7, 0x09C8, 1},
+ {0x09CB, 0x09CD, 1},
+ {0x09D7, 0x09D7, 1},
+ {0x09E2, 0x09E3, 1},
+ {0x09E6, 0x09EF, 1},
+ {0x0A02, 0x0A3C, 0x3A},
+ {0x0A3E, 0x0A3F, 1},
+ {0x0A40, 0x0A42, 1},
+ {0x0A47, 0x0A48, 1},
+ {0x0A4B, 0x0A4D, 1},
+ {0x0A66, 0x0A6F, 1},
+ {0x0A70, 0x0A71, 1},
+ {0x0A81, 0x0A83, 1},
+ {0x0ABC, 0x0ABC, 1},
+ {0x0ABE, 0x0AC5, 1},
+ {0x0AC7, 0x0AC9, 1},
+ {0x0ACB, 0x0ACD, 1},
+ {0x0AE6, 0x0AEF, 1},
+ {0x0B01, 0x0B03, 1},
+ {0x0B3C, 0x0B3C, 1},
+ {0x0B3E, 0x0B43, 1},
+ {0x0B47, 0x0B48, 1},
+ {0x0B4B, 0x0B4D, 1},
+ {0x0B56, 0x0B57, 1},
+ {0x0B66, 0x0B6F, 1},
+ {0x0B82, 0x0B83, 1},
+ {0x0BBE, 0x0BC2, 1},
+ {0x0BC6, 0x0BC8, 1},
+ {0x0BCA, 0x0BCD, 1},
+ {0x0BD7, 0x0BD7, 1},
+ {0x0BE7, 0x0BEF, 1},
+ {0x0C01, 0x0C03, 1},
+ {0x0C3E, 0x0C44, 1},
+ {0x0C46, 0x0C48, 1},
+ {0x0C4A, 0x0C4D, 1},
+ {0x0C55, 0x0C56, 1},
+ {0x0C66, 0x0C6F, 1},
+ {0x0C82, 0x0C83, 1},
+ {0x0CBE, 0x0CC4, 1},
+ {0x0CC6, 0x0CC8, 1},
+ {0x0CCA, 0x0CCD, 1},
+ {0x0CD5, 0x0CD6, 1},
+ {0x0CE6, 0x0CEF, 1},
+ {0x0D02, 0x0D03, 1},
+ {0x0D3E, 0x0D43, 1},
+ {0x0D46, 0x0D48, 1},
+ {0x0D4A, 0x0D4D, 1},
+ {0x0D57, 0x0D57, 1},
+ {0x0D66, 0x0D6F, 1},
+ {0x0E31, 0x0E31, 1},
+ {0x0E34, 0x0E3A, 1},
+ {0x0E46, 0x0E46, 1},
+ {0x0E47, 0x0E4E, 1},
+ {0x0E50, 0x0E59, 1},
+ {0x0EB1, 0x0EB1, 1},
+ {0x0EB4, 0x0EB9, 1},
+ {0x0EBB, 0x0EBC, 1},
+ {0x0EC6, 0x0EC6, 1},
+ {0x0EC8, 0x0ECD, 1},
+ {0x0ED0, 0x0ED9, 1},
+ {0x0F18, 0x0F19, 1},
+ {0x0F20, 0x0F29, 1},
+ {0x0F35, 0x0F39, 2},
+ {0x0F3E, 0x0F3F, 1},
+ {0x0F71, 0x0F84, 1},
+ {0x0F86, 0x0F8B, 1},
+ {0x0F90, 0x0F95, 1},
+ {0x0F97, 0x0F97, 1},
+ {0x0F99, 0x0FAD, 1},
+ {0x0FB1, 0x0FB7, 1},
+ {0x0FB9, 0x0FB9, 1},
+ {0x20D0, 0x20DC, 1},
+ {0x20E1, 0x3005, 0x3005 - 0x20E1},
+ {0x302A, 0x302F, 1},
+ {0x3031, 0x3035, 1},
+ {0x3099, 0x309A, 1},
+ {0x309D, 0x309E, 1},
+ {0x30FC, 0x30FE, 1},
+ },
+}
+
+// HTMLEntity is an entity map containing translations for the
+// standard HTML entity characters.
+var HTMLEntity = htmlEntity
+
+var htmlEntity = map[string]string{
+ /*
+ hget http://www.w3.org/TR/html4/sgml/entities.html |
+ ssam '
+ ,y /\&gt;/ x/\&lt;(.|\n)+/ s/\n/ /g
+ ,x v/^\&lt;!ENTITY/d
+ ,s/\&lt;!ENTITY ([^ ]+) .*U\+([0-9A-F][0-9A-F][0-9A-F][0-9A-F]) .+/ "\1": "\\u\2",/g
+ '
+ */
+ "nbsp": "\u00A0",
+ "iexcl": "\u00A1",
+ "cent": "\u00A2",
+ "pound": "\u00A3",
+ "curren": "\u00A4",
+ "yen": "\u00A5",
+ "brvbar": "\u00A6",
+ "sect": "\u00A7",
+ "uml": "\u00A8",
+ "copy": "\u00A9",
+ "ordf": "\u00AA",
+ "laquo": "\u00AB",
+ "not": "\u00AC",
+ "shy": "\u00AD",
+ "reg": "\u00AE",
+ "macr": "\u00AF",
+ "deg": "\u00B0",
+ "plusmn": "\u00B1",
+ "sup2": "\u00B2",
+ "sup3": "\u00B3",
+ "acute": "\u00B4",
+ "micro": "\u00B5",
+ "para": "\u00B6",
+ "middot": "\u00B7",
+ "cedil": "\u00B8",
+ "sup1": "\u00B9",
+ "ordm": "\u00BA",
+ "raquo": "\u00BB",
+ "frac14": "\u00BC",
+ "frac12": "\u00BD",
+ "frac34": "\u00BE",
+ "iquest": "\u00BF",
+ "Agrave": "\u00C0",
+ "Aacute": "\u00C1",
+ "Acirc": "\u00C2",
+ "Atilde": "\u00C3",
+ "Auml": "\u00C4",
+ "Aring": "\u00C5",
+ "AElig": "\u00C6",
+ "Ccedil": "\u00C7",
+ "Egrave": "\u00C8",
+ "Eacute": "\u00C9",
+ "Ecirc": "\u00CA",
+ "Euml": "\u00CB",
+ "Igrave": "\u00CC",
+ "Iacute": "\u00CD",
+ "Icirc": "\u00CE",
+ "Iuml": "\u00CF",
+ "ETH": "\u00D0",
+ "Ntilde": "\u00D1",
+ "Ograve": "\u00D2",
+ "Oacute": "\u00D3",
+ "Ocirc": "\u00D4",
+ "Otilde": "\u00D5",
+ "Ouml": "\u00D6",
+ "times": "\u00D7",
+ "Oslash": "\u00D8",
+ "Ugrave": "\u00D9",
+ "Uacute": "\u00DA",
+ "Ucirc": "\u00DB",
+ "Uuml": "\u00DC",
+ "Yacute": "\u00DD",
+ "THORN": "\u00DE",
+ "szlig": "\u00DF",
+ "agrave": "\u00E0",
+ "aacute": "\u00E1",
+ "acirc": "\u00E2",
+ "atilde": "\u00E3",
+ "auml": "\u00E4",
+ "aring": "\u00E5",
+ "aelig": "\u00E6",
+ "ccedil": "\u00E7",
+ "egrave": "\u00E8",
+ "eacute": "\u00E9",
+ "ecirc": "\u00EA",
+ "euml": "\u00EB",
+ "igrave": "\u00EC",
+ "iacute": "\u00ED",
+ "icirc": "\u00EE",
+ "iuml": "\u00EF",
+ "eth": "\u00F0",
+ "ntilde": "\u00F1",
+ "ograve": "\u00F2",
+ "oacute": "\u00F3",
+ "ocirc": "\u00F4",
+ "otilde": "\u00F5",
+ "ouml": "\u00F6",
+ "divide": "\u00F7",
+ "oslash": "\u00F8",
+ "ugrave": "\u00F9",
+ "uacute": "\u00FA",
+ "ucirc": "\u00FB",
+ "uuml": "\u00FC",
+ "yacute": "\u00FD",
+ "thorn": "\u00FE",
+ "yuml": "\u00FF",
+ "fnof": "\u0192",
+ "Alpha": "\u0391",
+ "Beta": "\u0392",
+ "Gamma": "\u0393",
+ "Delta": "\u0394",
+ "Epsilon": "\u0395",
+ "Zeta": "\u0396",
+ "Eta": "\u0397",
+ "Theta": "\u0398",
+ "Iota": "\u0399",
+ "Kappa": "\u039A",
+ "Lambda": "\u039B",
+ "Mu": "\u039C",
+ "Nu": "\u039D",
+ "Xi": "\u039E",
+ "Omicron": "\u039F",
+ "Pi": "\u03A0",
+ "Rho": "\u03A1",
+ "Sigma": "\u03A3",
+ "Tau": "\u03A4",
+ "Upsilon": "\u03A5",
+ "Phi": "\u03A6",
+ "Chi": "\u03A7",
+ "Psi": "\u03A8",
+ "Omega": "\u03A9",
+ "alpha": "\u03B1",
+ "beta": "\u03B2",
+ "gamma": "\u03B3",
+ "delta": "\u03B4",
+ "epsilon": "\u03B5",
+ "zeta": "\u03B6",
+ "eta": "\u03B7",
+ "theta": "\u03B8",
+ "iota": "\u03B9",
+ "kappa": "\u03BA",
+ "lambda": "\u03BB",
+ "mu": "\u03BC",
+ "nu": "\u03BD",
+ "xi": "\u03BE",
+ "omicron": "\u03BF",
+ "pi": "\u03C0",
+ "rho": "\u03C1",
+ "sigmaf": "\u03C2",
+ "sigma": "\u03C3",
+ "tau": "\u03C4",
+ "upsilon": "\u03C5",
+ "phi": "\u03C6",
+ "chi": "\u03C7",
+ "psi": "\u03C8",
+ "omega": "\u03C9",
+ "thetasym": "\u03D1",
+ "upsih": "\u03D2",
+ "piv": "\u03D6",
+ "bull": "\u2022",
+ "hellip": "\u2026",
+ "prime": "\u2032",
+ "Prime": "\u2033",
+ "oline": "\u203E",
+ "frasl": "\u2044",
+ "weierp": "\u2118",
+ "image": "\u2111",
+ "real": "\u211C",
+ "trade": "\u2122",
+ "alefsym": "\u2135",
+ "larr": "\u2190",
+ "uarr": "\u2191",
+ "rarr": "\u2192",
+ "darr": "\u2193",
+ "harr": "\u2194",
+ "crarr": "\u21B5",
+ "lArr": "\u21D0",
+ "uArr": "\u21D1",
+ "rArr": "\u21D2",
+ "dArr": "\u21D3",
+ "hArr": "\u21D4",
+ "forall": "\u2200",
+ "part": "\u2202",
+ "exist": "\u2203",
+ "empty": "\u2205",
+ "nabla": "\u2207",
+ "isin": "\u2208",
+ "notin": "\u2209",
+ "ni": "\u220B",
+ "prod": "\u220F",
+ "sum": "\u2211",
+ "minus": "\u2212",
+ "lowast": "\u2217",
+ "radic": "\u221A",
+ "prop": "\u221D",
+ "infin": "\u221E",
+ "ang": "\u2220",
+ "and": "\u2227",
+ "or": "\u2228",
+ "cap": "\u2229",
+ "cup": "\u222A",
+ "int": "\u222B",
+ "there4": "\u2234",
+ "sim": "\u223C",
+ "cong": "\u2245",
+ "asymp": "\u2248",
+ "ne": "\u2260",
+ "equiv": "\u2261",
+ "le": "\u2264",
+ "ge": "\u2265",
+ "sub": "\u2282",
+ "sup": "\u2283",
+ "nsub": "\u2284",
+ "sube": "\u2286",
+ "supe": "\u2287",
+ "oplus": "\u2295",
+ "otimes": "\u2297",
+ "perp": "\u22A5",
+ "sdot": "\u22C5",
+ "lceil": "\u2308",
+ "rceil": "\u2309",
+ "lfloor": "\u230A",
+ "rfloor": "\u230B",
+ "lang": "\u2329",
+ "rang": "\u232A",
+ "loz": "\u25CA",
+ "spades": "\u2660",
+ "clubs": "\u2663",
+ "hearts": "\u2665",
+ "diams": "\u2666",
+ "quot": "\u0022",
+ "amp": "\u0026",
+ "lt": "\u003C",
+ "gt": "\u003E",
+ "OElig": "\u0152",
+ "oelig": "\u0153",
+ "Scaron": "\u0160",
+ "scaron": "\u0161",
+ "Yuml": "\u0178",
+ "circ": "\u02C6",
+ "tilde": "\u02DC",
+ "ensp": "\u2002",
+ "emsp": "\u2003",
+ "thinsp": "\u2009",
+ "zwnj": "\u200C",
+ "zwj": "\u200D",
+ "lrm": "\u200E",
+ "rlm": "\u200F",
+ "ndash": "\u2013",
+ "mdash": "\u2014",
+ "lsquo": "\u2018",
+ "rsquo": "\u2019",
+ "sbquo": "\u201A",
+ "ldquo": "\u201C",
+ "rdquo": "\u201D",
+ "bdquo": "\u201E",
+ "dagger": "\u2020",
+ "Dagger": "\u2021",
+ "permil": "\u2030",
+ "lsaquo": "\u2039",
+ "rsaquo": "\u203A",
+ "euro": "\u20AC",
+}
+
+// HTMLAutoClose is the set of HTML elements that
+// should be considered to close automatically.
+var HTMLAutoClose = htmlAutoClose
+
+var htmlAutoClose = []string{
+ /*
+ hget http://www.w3.org/TR/html4/loose.dtd |
+ 9 sed -n 's/<!ELEMENT ([^ ]*) +- O EMPTY.+/ "\1",/p' | tr A-Z a-z
+ */
+ "basefont",
+ "br",
+ "area",
+ "link",
+ "img",
+ "param",
+ "hr",
+ "input",
+ "col",
+ "frame",
+ "isindex",
+ "base",
+ "meta",
+}
+
+var (
+ esc_quot = []byte("&#34;") // shorter than "&quot;"
+ esc_apos = []byte("&#39;") // shorter than "&apos;"
+ esc_amp = []byte("&amp;")
+ esc_lt = []byte("&lt;")
+ esc_gt = []byte("&gt;")
+ esc_tab = []byte("&#x9;")
+ esc_nl = []byte("&#xA;")
+ esc_cr = []byte("&#xD;")
+ esc_fffd = []byte("\uFFFD") // Unicode replacement character
+)
+
+// EscapeText writes to w the properly escaped XML equivalent
+// of the plain text data s.
+func EscapeText(w io.Writer, s []byte) error {
+ return escapeText(w, s, true)
+}
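+
+// For illustration only (not part of the upstream source):
+//
+//	var buf bytes.Buffer
+//	_ = EscapeText(&buf, []byte(`a < b & "c"`))
+//	// buf.String() == `a &lt; b &amp; &#34;c&#34;`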
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. If escapeNewline is true, newline
+// characters will be escaped.
+func escapeText(w io.Writer, s []byte, escapeNewline bool) error {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRune(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ case '\t':
+ esc = esc_tab
+ case '\n':
+ if !escapeNewline {
+ continue
+ }
+ esc = esc_nl
+ case '\r':
+ esc = esc_cr
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = esc_fffd
+ break
+ }
+ continue
+ }
+ if _, err := w.Write(s[last : i-width]); err != nil {
+ return err
+ }
+ if _, err := w.Write(esc); err != nil {
+ return err
+ }
+ last = i
+ }
+ if _, err := w.Write(s[last:]); err != nil {
+ return err
+ }
+ return nil
+}
+
+// EscapeString writes to p the properly escaped XML equivalent
+// of the plain text data s.
+func (p *printer) EscapeString(s string) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = esc_quot
+ case '\'':
+ esc = esc_apos
+ case '&':
+ esc = esc_amp
+ case '<':
+ esc = esc_lt
+ case '>':
+ esc = esc_gt
+ case '\t':
+ esc = esc_tab
+ case '\n':
+ esc = esc_nl
+ case '\r':
+ esc = esc_cr
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = esc_fffd
+ break
+ }
+ continue
+ }
+ p.WriteString(s[last : i-width])
+ p.Write(esc)
+ last = i
+ }
+ p.WriteString(s[last:])
+}
+
+// Escape is like EscapeText but omits the error return value.
+// It is provided for backwards compatibility with Go 1.0.
+// Code targeting Go 1.1 or later should use EscapeText.
+func Escape(w io.Writer, s []byte) {
+ EscapeText(w, s)
+}
+
+// procInst parses the `param="..."` or `param='...'`
+// value out of the provided string, returning "" if not found.
+func procInst(param, s string) string {
+ // TODO: this parsing is somewhat lame and not exact.
+ // It works for all actual cases, though.
+ param = param + "="
+ idx := strings.Index(s, param)
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len(param):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
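+
+// For illustration only (not part of the upstream source):
+//
+//	procInst("encoding", `version="1.0" encoding="UTF-8"`) // "UTF-8"
+//	procInst("standalone", `version="1.0"`)                // "" (not present)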
diff --git a/vendor/golang.org/x/net/webdav/litmus_test_server.go b/vendor/golang.org/x/net/webdav/litmus_test_server.go
new file mode 100644
index 0000000..514db5d
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/litmus_test_server.go
@@ -0,0 +1,94 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+This program is a server for the WebDAV 'litmus' compliance test at
+http://www.webdav.org/neon/litmus/
+To run the test:
+
+go run litmus_test_server.go
+
+and separately, from the downloaded litmus-xxx directory:
+
+make URL=http://localhost:9999/ check
+*/
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/webdav"
+)
+
+var port = flag.Int("port", 9999, "server port")
+
+func main() {
+ flag.Parse()
+ log.SetFlags(0)
+ h := &webdav.Handler{
+ FileSystem: webdav.NewMemFS(),
+ LockSystem: webdav.NewMemLS(),
+ Logger: func(r *http.Request, err error) {
+ litmus := r.Header.Get("X-Litmus")
+ if len(litmus) > 19 {
+ litmus = litmus[:16] + "..."
+ }
+
+ switch r.Method {
+ case "COPY", "MOVE":
+ dst := ""
+ if u, err := url.Parse(r.Header.Get("Destination")); err == nil {
+ dst = u.Path
+ }
+ o := r.Header.Get("Overwrite")
+ log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err)
+ default:
+ log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err)
+ }
+ },
+ }
+
+ // The next line would normally be:
+ // http.Handle("/", h)
+ // but we wrap that HTTP handler h to cater for a special case.
+ //
+ // The propfind_invalid2 litmus test case expects an empty namespace prefix
+ // declaration to be an error. The FAQ in the webdav litmus test says:
+ //
+ // "What does the "propfind_invalid2" test check for?...
+ //
+ // If a request was sent with an XML body which included an empty namespace
+ // prefix declaration (xmlns:ns1=""), then the server must reject that with
+ // a "400 Bad Request" response, as it is invalid according to the XML
+ // Namespace specification."
+ //
+ // On the other hand, the Go standard library's encoding/xml package
+ // accepts an empty xmlns namespace, as per the discussion at
+ // https://github.com/golang/go/issues/8068
+ //
+ // Empty namespaces seem disallowed in the second (2006) edition of the XML
+ // standard, but allowed in a later edition. The grammar differs between
+ // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and
+ // http://www.w3.org/TR/REC-xml-names/#dt-prefix
+ //
+ // Thus, we assume that the propfind_invalid2 test is obsolete, and
+ // hard-code the 400 Bad Request response that the test expects.
+ http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" {
+ http.Error(w, "400 Bad Request", http.StatusBadRequest)
+ return
+ }
+ h.ServeHTTP(w, r)
+ }))
+
+ addr := fmt.Sprintf(":%d", *port)
+ log.Printf("Serving %v", addr)
+ log.Fatal(http.ListenAndServe(addr, nil))
+}
diff --git a/vendor/golang.org/x/net/webdav/lock.go b/vendor/golang.org/x/net/webdav/lock.go
new file mode 100644
index 0000000..344ac5c
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/lock.go
@@ -0,0 +1,445 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "container/heap"
+ "errors"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ // ErrConfirmationFailed is returned by a LockSystem's Confirm method.
+ ErrConfirmationFailed = errors.New("webdav: confirmation failed")
+ // ErrForbidden is returned by a LockSystem's Unlock method.
+ ErrForbidden = errors.New("webdav: forbidden")
+ // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
+ ErrLocked = errors.New("webdav: locked")
+ // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
+ ErrNoSuchLock = errors.New("webdav: no such lock")
+)
+
+// Condition can match a WebDAV resource, based on a token or ETag.
+// Exactly one of Token and ETag should be non-empty.
+type Condition struct {
+ Not bool
+ Token string
+ ETag string
+}
+
+// LockSystem manages access to a collection of named resources. The elements
+// in a lock name are separated by slash ('/', U+002F) characters, regardless
+// of host operating system convention.
+type LockSystem interface {
+ // Confirm confirms that the caller can claim all of the locks specified by
+ // the given conditions, and that holding the union of all of those locks
+ // gives exclusive access to all of the named resources. Up to two resources
+ // can be named. Empty names are ignored.
+ //
+ // Exactly one of release and err will be non-nil. If release is non-nil,
+ // all of the requested locks are held until release is called. Calling
+ // release does not unlock the lock, in the WebDAV UNLOCK sense, but once
+ // Confirm has confirmed that a lock claim is valid, that lock cannot be
+ // Confirmed again until it has been released.
+ //
+ // If Confirm returns ErrConfirmationFailed then the Handler will continue
+ // to try any other set of locks presented (a WebDAV HTTP request can
+ // present more than one set of locks). If it returns any other non-nil
+ // error, the Handler will write a "500 Internal Server Error" HTTP status.
+ Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
+
+ // Create creates a lock with the given depth, duration, owner and root
+ // (name). The depth will either be negative (meaning infinite) or zero.
+ //
+ // If Create returns ErrLocked then the Handler will write a "423 Locked"
+ // HTTP status. If it returns any other non-nil error, the Handler will
+ // write a "500 Internal Server Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
+ // when to use each error.
+ //
+ // The token returned identifies the created lock. It should be an absolute
+ // URI as defined by RFC 3986, Section 4.3. In particular, it should not
+ // contain whitespace.
+ Create(now time.Time, details LockDetails) (token string, err error)
+
+ // Refresh refreshes the lock with the given token.
+ //
+ // If Refresh returns ErrLocked then the Handler will write a "423 Locked"
+ // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
+ // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
+ // error, the Handler will write a "500 Internal Server Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
+ // when to use each error.
+ Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
+
+ // Unlock unlocks the lock with the given token.
+ //
+ // If Unlock returns ErrForbidden then the Handler will write a "403
+ // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
+ // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
+ // then the Handler will write a "409 Conflict" HTTP Status. If it returns
+ // any other non-nil error, the Handler will write a "500 Internal Server
+ // Error" HTTP status.
+ //
+ // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
+ // when to use each error.
+ Unlock(now time.Time, token string) error
+}
+
+// LockDetails are a lock's metadata.
+type LockDetails struct {
+ // Root is the root resource name being locked. For a zero-depth lock, the
+ // root is the only resource being locked.
+ Root string
+ // Duration is the lock timeout. A negative duration means infinite.
+ Duration time.Duration
+ // OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
+ //
+ // TODO: does the "verbatim" nature play well with XML namespaces?
+ // Does the OwnerXML field need to have more structure? See
+ // https://codereview.appspot.com/175140043/#msg2
+ OwnerXML string
+ // ZeroDepth is whether the lock has zero depth. If it does not have zero
+ // depth, it has infinite depth.
+ ZeroDepth bool
+}
+
+// NewMemLS returns a new in-memory LockSystem.
+func NewMemLS() LockSystem {
+ return &memLS{
+ byName: make(map[string]*memLSNode),
+ byToken: make(map[string]*memLSNode),
+ gen: uint64(time.Now().Unix()),
+ }
+}
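+
+// For illustration only (not part of the upstream source), a typical lock
+// lifecycle against the in-memory implementation might look like:
+//
+//	ls := NewMemLS()
+//	now := time.Now()
+//	token, err := ls.Create(now, LockDetails{
+//		Root:      "/some/resource", // hypothetical resource name
+//		Duration:  time.Minute,
+//		ZeroDepth: true,
+//	})
+//	if err == nil {
+//		// ... later, release the lock:
+//		_ = ls.Unlock(time.Now(), token)
+//	}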
+
+type memLS struct {
+ mu sync.Mutex
+ byName map[string]*memLSNode
+ byToken map[string]*memLSNode
+ gen uint64
+ // byExpiry only contains those nodes whose LockDetails have a finite
+ // Duration and are yet to expire.
+ byExpiry byExpiry
+}
+
+func (m *memLS) nextToken() string {
+ m.gen++
+ return strconv.FormatUint(m.gen, 10)
+}
+
+func (m *memLS) collectExpiredNodes(now time.Time) {
+ for len(m.byExpiry) > 0 {
+ if now.Before(m.byExpiry[0].expiry) {
+ break
+ }
+ m.remove(m.byExpiry[0])
+ }
+}
+
+func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ var n0, n1 *memLSNode
+ if name0 != "" {
+ if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
+ return nil, ErrConfirmationFailed
+ }
+ }
+ if name1 != "" {
+ if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
+ return nil, ErrConfirmationFailed
+ }
+ }
+
+ // Don't hold the same node twice.
+ if n1 == n0 {
+ n1 = nil
+ }
+
+ if n0 != nil {
+ m.hold(n0)
+ }
+ if n1 != nil {
+ m.hold(n1)
+ }
+ return func() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if n1 != nil {
+ m.unhold(n1)
+ }
+ if n0 != nil {
+ m.unhold(n0)
+ }
+ }, nil
+}
+
+// lookup returns the node n that locks the named resource, provided that n
+// matches at least one of the given conditions and that lock isn't held by
+// another party. Otherwise, it returns nil.
+//
+// n may be a parent of the named resource, if n is an infinite depth lock.
+func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
+ // TODO: support Condition.Not and Condition.ETag.
+ for _, c := range conditions {
+ n = m.byToken[c.Token]
+ if n == nil || n.held {
+ continue
+ }
+ if name == n.details.Root {
+ return n
+ }
+ if n.details.ZeroDepth {
+ continue
+ }
+ if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
+ return n
+ }
+ }
+ return nil
+}
+
+func (m *memLS) hold(n *memLSNode) {
+ if n.held {
+ panic("webdav: memLS inconsistent held state")
+ }
+ n.held = true
+ if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+}
+
+func (m *memLS) unhold(n *memLSNode) {
+ if !n.held {
+ panic("webdav: memLS inconsistent held state")
+ }
+ n.held = false
+ if n.details.Duration >= 0 {
+ heap.Push(&m.byExpiry, n)
+ }
+}
+
+func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+ details.Root = slashClean(details.Root)
+
+ if !m.canCreate(details.Root, details.ZeroDepth) {
+ return "", ErrLocked
+ }
+ n := m.create(details.Root)
+ n.token = m.nextToken()
+ m.byToken[n.token] = n
+ n.details = details
+ if n.details.Duration >= 0 {
+ n.expiry = now.Add(n.details.Duration)
+ heap.Push(&m.byExpiry, n)
+ }
+ return n.token, nil
+}
+
+func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ n := m.byToken[token]
+ if n == nil {
+ return LockDetails{}, ErrNoSuchLock
+ }
+ if n.held {
+ return LockDetails{}, ErrLocked
+ }
+ if n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+ n.details.Duration = duration
+ if n.details.Duration >= 0 {
+ n.expiry = now.Add(n.details.Duration)
+ heap.Push(&m.byExpiry, n)
+ }
+ return n.details, nil
+}
+
+func (m *memLS) Unlock(now time.Time, token string) error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.collectExpiredNodes(now)
+
+ n := m.byToken[token]
+ if n == nil {
+ return ErrNoSuchLock
+ }
+ if n.held {
+ return ErrLocked
+ }
+ m.remove(n)
+ return nil
+}
+
+func (m *memLS) canCreate(name string, zeroDepth bool) bool {
+ return walkToRoot(name, func(name0 string, first bool) bool {
+ n := m.byName[name0]
+ if n == nil {
+ return true
+ }
+ if first {
+ if n.token != "" {
+ // The target node is already locked.
+ return false
+ }
+ if !zeroDepth {
+ // The requested lock depth is infinite, and the fact that n exists
+ // (n != nil) means that a descendant of the target node is locked.
+ return false
+ }
+ } else if n.token != "" && !n.details.ZeroDepth {
+ // An ancestor of the target node is locked with infinite depth.
+ return false
+ }
+ return true
+ })
+}
+
+func (m *memLS) create(name string) (ret *memLSNode) {
+ walkToRoot(name, func(name0 string, first bool) bool {
+ n := m.byName[name0]
+ if n == nil {
+ n = &memLSNode{
+ details: LockDetails{
+ Root: name0,
+ },
+ byExpiryIndex: -1,
+ }
+ m.byName[name0] = n
+ }
+ n.refCount++
+ if first {
+ ret = n
+ }
+ return true
+ })
+ return ret
+}
+
+func (m *memLS) remove(n *memLSNode) {
+ delete(m.byToken, n.token)
+ n.token = ""
+ walkToRoot(n.details.Root, func(name0 string, first bool) bool {
+ x := m.byName[name0]
+ x.refCount--
+ if x.refCount == 0 {
+ delete(m.byName, name0)
+ }
+ return true
+ })
+ if n.byExpiryIndex >= 0 {
+ heap.Remove(&m.byExpiry, n.byExpiryIndex)
+ }
+}
+
+func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
+ for first := true; ; first = false {
+ if !f(name, first) {
+ return false
+ }
+ if name == "/" {
+ break
+ }
+ name = name[:strings.LastIndex(name, "/")]
+ if name == "" {
+ name = "/"
+ }
+ }
+ return true
+}
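+
+// For illustration only (not part of the upstream source): walkToRoot("/a/b/c", f)
+// calls f("/a/b/c", true), then f("/a/b", false), f("/a", false) and f("/", false),
+// stopping early if any call returns false.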
+
+type memLSNode struct {
+ // details are the lock metadata. Even if this node's name is not explicitly locked,
+ // details.Root will still equal the node's name.
+ details LockDetails
+ // token is the unique identifier for this node's lock. An empty token means that
+ // this node is not explicitly locked.
+ token string
+ // refCount is the number of self-or-descendant nodes that are explicitly locked.
+ refCount int
+ // expiry is when this node's lock expires.
+ expiry time.Time
+ // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
+ // if this node does not expire, or has expired.
+ byExpiryIndex int
+ // held is whether this node's lock is actively held by a Confirm call.
+ held bool
+}
+
+type byExpiry []*memLSNode
+
+func (b *byExpiry) Len() int {
+ return len(*b)
+}
+
+func (b *byExpiry) Less(i, j int) bool {
+ return (*b)[i].expiry.Before((*b)[j].expiry)
+}
+
+func (b *byExpiry) Swap(i, j int) {
+ (*b)[i], (*b)[j] = (*b)[j], (*b)[i]
+ (*b)[i].byExpiryIndex = i
+ (*b)[j].byExpiryIndex = j
+}
+
+func (b *byExpiry) Push(x interface{}) {
+ n := x.(*memLSNode)
+ n.byExpiryIndex = len(*b)
+ *b = append(*b, n)
+}
+
+func (b *byExpiry) Pop() interface{} {
+ i := len(*b) - 1
+ n := (*b)[i]
+ (*b)[i] = nil
+ n.byExpiryIndex = -1
+ *b = (*b)[:i]
+ return n
+}
+
+const infiniteTimeout = -1
+
+// parseTimeout parses the Timeout HTTP header, as per RFC 4918 section 10.7.
+// If s is empty, an infiniteTimeout is returned.
+func parseTimeout(s string) (time.Duration, error) {
+ if s == "" {
+ return infiniteTimeout, nil
+ }
+ if i := strings.IndexByte(s, ','); i >= 0 {
+ s = s[:i]
+ }
+ s = strings.TrimSpace(s)
+ if s == "Infinite" {
+ return infiniteTimeout, nil
+ }
+ const pre = "Second-"
+ if !strings.HasPrefix(s, pre) {
+ return 0, errInvalidTimeout
+ }
+ s = s[len(pre):]
+ if s == "" || s[0] < '0' || '9' < s[0] {
+ return 0, errInvalidTimeout
+ }
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || 1<<32-1 < n {
+ return 0, errInvalidTimeout
+ }
+ return time.Duration(n) * time.Second, nil
+}
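+
+// For illustration only (not part of the upstream source):
+//
+//	parseTimeout("Second-3600")         // one hour
+//	parseTimeout("Infinite, Second-60") // infiniteTimeout; only the first value is used
+//	parseTimeout("Minute-5")            // errInvalidTimeout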
diff --git a/vendor/golang.org/x/net/webdav/prop.go b/vendor/golang.org/x/net/webdav/prop.go
new file mode 100644
index 0000000..e36a3b3
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/prop.go
@@ -0,0 +1,418 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "mime"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strconv"
+
+ "golang.org/x/net/context"
+)
+
+// Proppatch describes a property update instruction as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
+type Proppatch struct {
+ // Remove specifies whether this patch removes properties. If it does not
+ // remove them, it sets them.
+ Remove bool
+ // Props contains the properties to be set or removed.
+ Props []Property
+}
+
+// Propstat describes an XML propstat element as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
+type Propstat struct {
+ // Props contains the properties for which Status applies.
+ Props []Property
+
+ // Status defines the HTTP status code of the properties in Props.
+ // Allowed values include, but are not limited to, the WebDAV status
+ // code extensions for HTTP/1.1.
+ // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
+ Status int
+
+ // XMLError contains the XML representation of the optional error element.
+ // XML content within this field must not rely on any predefined
+ // namespace declarations or prefixes. If empty, the XML error element
+ // is omitted.
+ XMLError string
+
+ // ResponseDescription contains the contents of the optional
+ // responsedescription field. If empty, the XML element is omitted.
+ ResponseDescription string
+}
+
+// makePropstats returns a slice containing those of x and y whose Props slice
+// is non-empty. If both are empty, it returns a slice containing an otherwise
+// zero Propstat whose HTTP status code is 200 OK.
+func makePropstats(x, y Propstat) []Propstat {
+ pstats := make([]Propstat, 0, 2)
+ if len(x.Props) != 0 {
+ pstats = append(pstats, x)
+ }
+ if len(y.Props) != 0 {
+ pstats = append(pstats, y)
+ }
+ if len(pstats) == 0 {
+ pstats = append(pstats, Propstat{
+ Status: http.StatusOK,
+ })
+ }
+ return pstats
+}
+
+// DeadPropsHolder holds the dead properties of a resource.
+//
+// Dead properties are those properties that are explicitly defined. In
+// comparison, live properties, such as DAV:getcontentlength, are implicitly
+// defined by the underlying resource, and cannot be explicitly overridden or
+// removed. See the Terminology section of
+// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
+//
+// There is a whitelist of the names of live properties. This package handles
+// all live properties, and will only pass non-whitelisted names to the Patch
+// method of DeadPropsHolder implementations.
+type DeadPropsHolder interface {
+ // DeadProps returns a copy of the dead properties held.
+ DeadProps() (map[xml.Name]Property, error)
+
+ // Patch patches the dead properties held.
+ //
+ // Patching is atomic; either all or no patches succeed. It returns (nil,
+ // non-nil) if an internal server error occurred, otherwise the Propstats
+ // collectively contain one Property for each proposed patch Property. If
+ // all patches succeed, Patch returns a slice of length one and a Propstat
+ // element with a 200 OK HTTP status code. If none succeed, for reasons
+ // other than an internal server error, no Propstat has status 200 OK.
+ //
+ // For more details on when various HTTP status codes apply, see
+ // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
+ Patch([]Proppatch) ([]Propstat, error)
+}
+
+// liveProps contains all supported, protected DAV: properties.
+var liveProps = map[xml.Name]struct {
+ // findFn implements the propfind function of this property. If nil,
+ // it indicates a hidden property.
+ findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error)
+ // dir is true if the property applies to directories.
+ dir bool
+}{
+ {Space: "DAV:", Local: "resourcetype"}: {
+ findFn: findResourceType,
+ dir: true,
+ },
+ {Space: "DAV:", Local: "displayname"}: {
+ findFn: findDisplayName,
+ dir: true,
+ },
+ {Space: "DAV:", Local: "getcontentlength"}: {
+ findFn: findContentLength,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getlastmodified"}: {
+ findFn: findLastModified,
+ // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
+ // suggests that getlastmodified should only apply to GETable
+ // resources, and this package does not support GET on directories.
+ //
+ // Nonetheless, some WebDAV clients expect child directories to be
+ // sortable by getlastmodified date, so this value is true, not false.
+ // See golang.org/issue/15334.
+ dir: true,
+ },
+ {Space: "DAV:", Local: "creationdate"}: {
+ findFn: nil,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getcontentlanguage"}: {
+ findFn: nil,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getcontenttype"}: {
+ findFn: findContentType,
+ dir: false,
+ },
+ {Space: "DAV:", Local: "getetag"}: {
+ findFn: findETag,
+ // findETag implements ETag as the concatenated hex values of a file's
+ // modification time and size. This is not a reliable synchronization
+ // mechanism for directories, so we do not advertise getetag for DAV
+ // collections.
+ dir: false,
+ },
+
+ // TODO: The lockdiscovery property requires LockSystem to list the
+ // active locks on a resource.
+ {Space: "DAV:", Local: "lockdiscovery"}: {},
+ {Space: "DAV:", Local: "supportedlock"}: {
+ findFn: findSupportedLock,
+ dir: true,
+ },
+}
+
+// TODO(nigeltao) merge props and allprop?
+
+// Props returns the status of the properties named pnames for resource name.
+//
+// Each Propstat has a unique status and each property name will only be part
+// of one Propstat element.
+func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ isDir := fi.IsDir()
+
+ var deadProps map[xml.Name]Property
+ if dph, ok := f.(DeadPropsHolder); ok {
+ deadProps, err = dph.DeadProps()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ pstatOK := Propstat{Status: http.StatusOK}
+ pstatNotFound := Propstat{Status: http.StatusNotFound}
+ for _, pn := range pnames {
+ // If this file has dead properties, check if they contain pn.
+ if dp, ok := deadProps[pn]; ok {
+ pstatOK.Props = append(pstatOK.Props, dp)
+ continue
+ }
+ // Otherwise, it must either be a live property or we don't know it.
+ if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
+ innerXML, err := prop.findFn(ctx, fs, ls, name, fi)
+ if err != nil {
+ return nil, err
+ }
+ pstatOK.Props = append(pstatOK.Props, Property{
+ XMLName: pn,
+ InnerXML: []byte(innerXML),
+ })
+ } else {
+ pstatNotFound.Props = append(pstatNotFound.Props, Property{
+ XMLName: pn,
+ })
+ }
+ }
+ return makePropstats(pstatOK, pstatNotFound), nil
+}
+
+// Propnames returns the property names defined for resource name.
+func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ isDir := fi.IsDir()
+
+ var deadProps map[xml.Name]Property
+ if dph, ok := f.(DeadPropsHolder); ok {
+ deadProps, err = dph.DeadProps()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
+ for pn, prop := range liveProps {
+ if prop.findFn != nil && (prop.dir || !isDir) {
+ pnames = append(pnames, pn)
+ }
+ }
+ for pn := range deadProps {
+ pnames = append(pnames, pn)
+ }
+ return pnames, nil
+}
+
+// Allprop returns the properties defined for resource name and the properties
+// named in include.
+//
+// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
+// within the RFC plus dead properties. Other live properties should only be
+// returned if they are named in 'include'.
+//
+// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) {
+ pnames, err := propnames(ctx, fs, ls, name)
+ if err != nil {
+ return nil, err
+ }
+ // Add names from include if they are not already covered in pnames.
+ nameset := make(map[xml.Name]bool)
+ for _, pn := range pnames {
+ nameset[pn] = true
+ }
+ for _, pn := range include {
+ if !nameset[pn] {
+ pnames = append(pnames, pn)
+ }
+ }
+ return props(ctx, fs, ls, name, pnames)
+}
+
+// Patch patches the properties of resource name. The return values are
+// constrained in the same manner as DeadPropsHolder.Patch.
+func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
+ conflict := false
+loop:
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ if _, ok := liveProps[p.XMLName]; ok {
+ conflict = true
+ break loop
+ }
+ }
+ }
+ if conflict {
+ pstatForbidden := Propstat{
+ Status: http.StatusForbidden,
+ XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
+ }
+ pstatFailedDep := Propstat{
+ Status: StatusFailedDependency,
+ }
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ if _, ok := liveProps[p.XMLName]; ok {
+ pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
+ } else {
+ pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
+ }
+ }
+ }
+ return makePropstats(pstatForbidden, pstatFailedDep), nil
+ }
+
+ f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ if dph, ok := f.(DeadPropsHolder); ok {
+ ret, err := dph.Patch(patches)
+ if err != nil {
+ return nil, err
+ }
+ // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that
+ // "The contents of the prop XML element must only list the names of
+ // properties to which the result in the status element applies."
+ for _, pstat := range ret {
+ for i, p := range pstat.Props {
+ pstat.Props[i] = Property{XMLName: p.XMLName}
+ }
+ }
+ return ret, nil
+ }
+ // The file doesn't implement the optional DeadPropsHolder interface, so
+ // all patches are forbidden.
+ pstat := Propstat{Status: http.StatusForbidden}
+ for _, patch := range patches {
+ for _, p := range patch.Props {
+ pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
+ }
+ }
+ return []Propstat{pstat}, nil
+}
+
+func escapeXML(s string) string {
+ for i := 0; i < len(s); i++ {
+ // As an optimization, if s contains only ASCII letters, digits or a
+ // few special characters, the escaped value is s itself and we don't
+ // need to allocate a buffer and convert between string and []byte.
+ switch c := s[i]; {
+ case c == ' ' || c == '_' ||
+ ('+' <= c && c <= '9') || // Digits as well as + , - . and /
+ ('A' <= c && c <= 'Z') ||
+ ('a' <= c && c <= 'z'):
+ continue
+ }
+ // Otherwise, go through the full escaping process.
+ var buf bytes.Buffer
+ xml.EscapeText(&buf, []byte(s))
+ return buf.String()
+ }
+ return s
+}
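+
+// For illustration only (not part of the upstream source):
+//
+//	escapeXML("report.txt") // "report.txt" (fast path, no allocation)
+//	escapeXML("a&b <c>")    // "a&amp;b &lt;c&gt;"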
+
+func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ if fi.IsDir() {
+ return `<D:collection xmlns:D="DAV:"/>`, nil
+ }
+ return "", nil
+}
+
+func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ if slashClean(name) == "/" {
+ // Hide the real name of a possibly prefixed root directory.
+ return "", nil
+ }
+ return escapeXML(fi.Name()), nil
+}
+
+func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return strconv.FormatInt(fi.Size(), 10), nil
+}
+
+func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return fi.ModTime().Format(http.TimeFormat), nil
+}
+
+func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ // This implementation is based on serveContent's code in the standard net/http package.
+ ctype := mime.TypeByExtension(filepath.Ext(name))
+ if ctype != "" {
+ return ctype, nil
+ }
+ // Read a chunk to decide between utf-8 text and binary.
+ var buf [512]byte
+ n, err := io.ReadFull(f, buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return "", err
+ }
+ ctype = http.DetectContentType(buf[:n])
+ // Rewind file.
+ _, err = f.Seek(0, os.SEEK_SET)
+ return ctype, err
+}
+
+func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ // The Apache http 2.4 web server by default concatenates the
+ // modification time and size of a file. We replicate the heuristic
+ // with nanosecond granularity.
+ return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil
+}
+
+func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) {
+ return `` +
+ `<D:lockentry xmlns:D="DAV:">` +
+ `<D:lockscope><D:exclusive/></D:lockscope>` +
+ `<D:locktype><D:write/></D:locktype>` +
+ `</D:lockentry>`, nil
+}
diff --git a/vendor/golang.org/x/net/webdav/webdav.go b/vendor/golang.org/x/net/webdav/webdav.go
new file mode 100644
index 0000000..7b56687
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/webdav.go
@@ -0,0 +1,702 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package webdav provides a WebDAV server implementation.
+package webdav // import "golang.org/x/net/webdav"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+)
+
+// A Handler is an http.Handler that serves WebDAV requests, dispatching them
+// to the configured FileSystem and LockSystem.
+type Handler struct {
+ // Prefix is the URL path prefix to strip from WebDAV resource paths.
+ Prefix string
+ // FileSystem is the virtual file system.
+ FileSystem FileSystem
+ // LockSystem is the lock management system.
+ LockSystem LockSystem
+ // Logger is an optional error logger. If non-nil, it will be called
+ // for all HTTP requests.
+ Logger func(*http.Request, error)
+}
+
+func (h *Handler) stripPrefix(p string) (string, int, error) {
+ if h.Prefix == "" {
+ return p, http.StatusOK, nil
+ }
+ if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) {
+ return r, http.StatusOK, nil
+ }
+ return p, http.StatusNotFound, errPrefixMismatch
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ status, err := http.StatusBadRequest, errUnsupportedMethod
+ if h.FileSystem == nil {
+ status, err = http.StatusInternalServerError, errNoFileSystem
+ } else if h.LockSystem == nil {
+ status, err = http.StatusInternalServerError, errNoLockSystem
+ } else {
+ switch r.Method {
+ case "OPTIONS":
+ status, err = h.handleOptions(w, r)
+ case "GET", "HEAD", "POST":
+ status, err = h.handleGetHeadPost(w, r)
+ case "DELETE":
+ status, err = h.handleDelete(w, r)
+ case "PUT":
+ status, err = h.handlePut(w, r)
+ case "MKCOL":
+ status, err = h.handleMkcol(w, r)
+ case "COPY", "MOVE":
+ status, err = h.handleCopyMove(w, r)
+ case "LOCK":
+ status, err = h.handleLock(w, r)
+ case "UNLOCK":
+ status, err = h.handleUnlock(w, r)
+ case "PROPFIND":
+ status, err = h.handlePropfind(w, r)
+ case "PROPPATCH":
+ status, err = h.handleProppatch(w, r)
+ }
+ }
+
+ if status != 0 {
+ w.WriteHeader(status)
+ if status != http.StatusNoContent {
+ w.Write([]byte(StatusText(status)))
+ }
+ }
+ if h.Logger != nil {
+ h.Logger(r, err)
+ }
+}
+
+func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) {
+ token, err = h.LockSystem.Create(now, LockDetails{
+ Root: root,
+ Duration: infiniteTimeout,
+ ZeroDepth: true,
+ })
+ if err != nil {
+ if err == ErrLocked {
+ return "", StatusLocked, err
+ }
+ return "", http.StatusInternalServerError, err
+ }
+ return token, 0, nil
+}
+
+func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) {
+ hdr := r.Header.Get("If")
+ if hdr == "" {
+ // An empty If header means that the client hasn't previously created locks.
+ // Even if this client doesn't care about locks, we still need to check that
+ // the resources aren't locked by another client, so we create temporary
+ // locks that would conflict with another client's locks. These temporary
+ // locks are unlocked at the end of the HTTP request.
+ now, srcToken, dstToken := time.Now(), "", ""
+ if src != "" {
+ srcToken, status, err = h.lock(now, src)
+ if err != nil {
+ return nil, status, err
+ }
+ }
+ if dst != "" {
+ dstToken, status, err = h.lock(now, dst)
+ if err != nil {
+ if srcToken != "" {
+ h.LockSystem.Unlock(now, srcToken)
+ }
+ return nil, status, err
+ }
+ }
+
+ return func() {
+ if dstToken != "" {
+ h.LockSystem.Unlock(now, dstToken)
+ }
+ if srcToken != "" {
+ h.LockSystem.Unlock(now, srcToken)
+ }
+ }, 0, nil
+ }
+
+ ih, ok := parseIfHeader(hdr)
+ if !ok {
+ return nil, http.StatusBadRequest, errInvalidIfHeader
+ }
+ // ih is a disjunction (OR) of ifLists, so any ifList will do.
+ for _, l := range ih.lists {
+ lsrc := l.resourceTag
+ if lsrc == "" {
+ lsrc = src
+ } else {
+ u, err := url.Parse(lsrc)
+ if err != nil {
+ continue
+ }
+ if u.Host != r.Host {
+ continue
+ }
+ lsrc, status, err = h.stripPrefix(u.Path)
+ if err != nil {
+ return nil, status, err
+ }
+ }
+ release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...)
+ if err == ErrConfirmationFailed {
+ continue
+ }
+ if err != nil {
+ return nil, http.StatusInternalServerError, err
+ }
+ return release, 0, nil
+ }
+ // Section 10.4.1 says that "If this header is evaluated and all state lists
+ // fail, then the request must fail with a 412 (Precondition Failed) status."
+ // We follow the spec even though the cond_put_corrupt_token test case from
+ // the litmus test warns on seeing a 412 instead of a 423 (Locked).
+ return nil, http.StatusPreconditionFailed, ErrLocked
+}
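To make the If-header handling above concrete, here is a hedged sketch of the two header shapes that parseIfHeader and confirmLocks evaluate; the package name, helper, lock token and URLs are all hypothetical:

package davclient

import (
	"net/http"
	"strings"
)

// buildLockedPut builds a PUT request that presents a previously obtained
// lock token via the If header.
func buildLockedPut(token string) *http.Request {
	req, _ := http.NewRequest("PUT", "http://example.com/dav/a.txt", strings.NewReader("hello"))
	// Untagged list: the token is evaluated against the request-URI.
	req.Header.Set("If", "(<"+token+">)")
	// Tagged list (alternative): the resource tag scopes the token to a
	// named resource instead of the request-URI.
	// req.Header.Set("If", "<http://example.com/dav/a.txt> (<"+token+">)")
	return req
}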
+
+func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ctx := getContext(r)
+ allow := "OPTIONS, LOCK, PUT, MKCOL"
+ if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil {
+ if fi.IsDir() {
+ allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
+ } else {
+ allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
+ }
+ }
+ w.Header().Set("Allow", allow)
+ // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
+ w.Header().Set("DAV", "1, 2")
+ // http://msdn.microsoft.com/en-au/library/cc250217.aspx
+ w.Header().Set("MS-Author-Via", "DAV")
+ return 0, nil
+}
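A minimal sketch, using net/http/httptest and the same assumed wiring as above, of what handleOptions advertises for a directory resource:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptest"

	"golang.org/x/net/webdav"
)

func main() {
	h := &webdav.Handler{
		FileSystem: webdav.Dir("."),
		LockSystem: webdav.NewMemLS(),
	}
	srv := httptest.NewServer(h)
	defer srv.Close()

	req, _ := http.NewRequest("OPTIONS", srv.URL+"/", nil)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// For a directory, Allow lists the collection methods; DAV reports
	// compliance classes 1 and 2.
	fmt.Println("Allow:", resp.Header.Get("Allow"))
	fmt.Println("DAV:  ", resp.Header.Get("DAV"))
}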
+
+func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ // TODO: check locks for read-only access??
+ ctx := getContext(r)
+ f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0)
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ defer f.Close()
+ fi, err := f.Stat()
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ if fi.IsDir() {
+ return http.StatusMethodNotAllowed, nil
+ }
+ etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ w.Header().Set("ETag", etag)
+ // Let ServeContent determine the Content-Type header.
+ http.ServeContent(w, r, reqPath, fi.ModTime(), f)
+ return 0, nil
+}
+
+func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ ctx := getContext(r)
+
+ // TODO: return MultiStatus where appropriate.
+
+ // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll
+ // returns nil (no error)." WebDAV semantics are that it should return a
+ // "404 Not Found". We therefore have to Stat before we RemoveAll.
+ if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil {
+ return http.StatusMethodNotAllowed, err
+ }
+ return http.StatusNoContent, nil
+}
+
+func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+ // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
+ // comments in http.checkEtag.
+ ctx := getContext(r)
+
+ f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return http.StatusNotFound, err
+ }
+ _, copyErr := io.Copy(f, r.Body)
+ fi, statErr := f.Stat()
+ closeErr := f.Close()
+ // TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
+ if copyErr != nil {
+ return http.StatusMethodNotAllowed, copyErr
+ }
+ if statErr != nil {
+ return http.StatusMethodNotAllowed, statErr
+ }
+ if closeErr != nil {
+ return http.StatusMethodNotAllowed, closeErr
+ }
+ etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ w.Header().Set("ETag", etag)
+ return http.StatusCreated, nil
+}
+
+func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ ctx := getContext(r)
+
+ if r.ContentLength > 0 {
+ return http.StatusUnsupportedMediaType, nil
+ }
+ if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusConflict, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ return http.StatusCreated, nil
+}
+
+func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ hdr := r.Header.Get("Destination")
+ if hdr == "" {
+ return http.StatusBadRequest, errInvalidDestination
+ }
+ u, err := url.Parse(hdr)
+ if err != nil {
+ return http.StatusBadRequest, errInvalidDestination
+ }
+ if u.Host != r.Host {
+ return http.StatusBadGateway, errInvalidDestination
+ }
+
+ src, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+
+ dst, status, err := h.stripPrefix(u.Path)
+ if err != nil {
+ return status, err
+ }
+
+ if dst == "" {
+ return http.StatusBadGateway, errInvalidDestination
+ }
+ if dst == src {
+ return http.StatusForbidden, errDestinationEqualsSource
+ }
+
+ ctx := getContext(r)
+
+ if r.Method == "COPY" {
+ // Section 7.5.1 says that a COPY only needs to lock the destination,
+ // not both destination and source. Strictly speaking this is racy if a
+ // concurrent operation modifies the source, even though the COPY itself
+ // does not modify it. However, the litmus test explicitly checks that
+ // COPYing a source locked by another client is OK.
+ release, status, err := h.confirmLocks(r, "", dst)
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // Section 9.8.3 says that "The COPY method on a collection without a Depth
+ // header must act as if a Depth header with value "infinity" was included".
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth != 0 && depth != infiniteDepth {
+ // Section 9.8.3 says that "A client may submit a Depth header on a
+ // COPY on a collection with a value of "0" or "infinity"."
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0)
+ }
+
+ release, status, err := h.confirmLocks(r, src, dst)
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ // Section 9.9.2 says that "The MOVE method on a collection must act as if
+ // a "Depth: infinity" header was used on it. A client must not submit a
+ // Depth header on a MOVE on a collection with any value but "infinity"."
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ if parseDepth(hdr) != infiniteDepth {
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T")
+}
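A hedged sketch of the request headers this handler consumes; note that COPY treats a missing Overwrite header permissively (anything but "F" allows overwrite), while MOVE overwrites only on an explicit "T". The package name, helper and paths are hypothetical:

package davclient

import "net/http"

// buildMove builds a MOVE request; Destination must be an absolute URL on
// the same host as the request-URI.
func buildMove(base string) *http.Request {
	req, _ := http.NewRequest("MOVE", base+"/dav/old.txt", nil)
	req.Header.Set("Destination", base+"/dav/new.txt")
	req.Header.Set("Overwrite", "T") // without "T", the MOVE fails if new.txt exists
	return req
}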
+
+func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) {
+ duration, err := parseTimeout(r.Header.Get("Timeout"))
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ li, status, err := readLockInfo(r.Body)
+ if err != nil {
+ return status, err
+ }
+
+ ctx := getContext(r)
+ token, ld, now, created := "", LockDetails{}, time.Now(), false
+ if li == (lockInfo{}) {
+ // An empty lockInfo means to refresh the lock.
+ ih, ok := parseIfHeader(r.Header.Get("If"))
+ if !ok {
+ return http.StatusBadRequest, errInvalidIfHeader
+ }
+ if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
+ token = ih.lists[0].conditions[0].Token
+ }
+ if token == "" {
+ return http.StatusBadRequest, errInvalidLockToken
+ }
+ ld, err = h.LockSystem.Refresh(now, token, duration)
+ if err != nil {
+ if err == ErrNoSuchLock {
+ return http.StatusPreconditionFailed, err
+ }
+ return http.StatusInternalServerError, err
+ }
+
+ } else {
+ // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
+ // then the request MUST act as if a "Depth:infinity" had been submitted."
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth != 0 && depth != infiniteDepth {
+ // Section 9.10.3 says that "Values other than 0 or infinity must not be
+ // used with the Depth header on a LOCK method".
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ld = LockDetails{
+ Root: reqPath,
+ Duration: duration,
+ OwnerXML: li.Owner.InnerXML,
+ ZeroDepth: depth == 0,
+ }
+ token, err = h.LockSystem.Create(now, ld)
+ if err != nil {
+ if err == ErrLocked {
+ return StatusLocked, err
+ }
+ return http.StatusInternalServerError, err
+ }
+ defer func() {
+ if retErr != nil {
+ h.LockSystem.Unlock(now, token)
+ }
+ }()
+
+ // Create the resource if it didn't previously exist.
+ if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
+ f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ // TODO: detect missing intermediate dirs and return http.StatusConflict?
+ return http.StatusInternalServerError, err
+ }
+ f.Close()
+ created = true
+ }
+
+ // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
+ // Lock-Token value is a Coded-URL. We add angle brackets.
+ w.Header().Set("Lock-Token", "<"+token+">")
+ }
+
+ w.Header().Set("Content-Type", "application/xml; charset=utf-8")
+ if created {
+ // This is "w.WriteHeader(http.StatusCreated)" and not "return
+ // http.StatusCreated, nil" because we write our own (XML) response to w
+ // and Handler.ServeHTTP would otherwise write "Created".
+ w.WriteHeader(http.StatusCreated)
+ }
+ writeLockInfo(w, token, ld)
+ return 0, nil
+}
+
+func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
+ // Lock-Token value is a Coded-URL. We strip its angle brackets.
+ t := r.Header.Get("Lock-Token")
+ if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
+ return http.StatusBadRequest, errInvalidLockToken
+ }
+ t = t[1 : len(t)-1]
+
+ switch err = h.LockSystem.Unlock(time.Now(), t); err {
+ case nil:
+ return http.StatusNoContent, err
+ case ErrForbidden:
+ return http.StatusForbidden, err
+ case ErrLocked:
+ return StatusLocked, err
+ case ErrNoSuchLock:
+ return http.StatusConflict, err
+ default:
+ return http.StatusInternalServerError, err
+ }
+}
+
+func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ ctx := getContext(r)
+ fi, err := h.FileSystem.Stat(ctx, reqPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ depth := infiniteDepth
+ if hdr := r.Header.Get("Depth"); hdr != "" {
+ depth = parseDepth(hdr)
+ if depth == invalidDepth {
+ return http.StatusBadRequest, errInvalidDepth
+ }
+ }
+ pf, status, err := readPropfind(r.Body)
+ if err != nil {
+ return status, err
+ }
+
+ mw := multistatusWriter{w: w}
+
+ walkFn := func(reqPath string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ var pstats []Propstat
+ if pf.Propname != nil {
+ pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath)
+ if err != nil {
+ return err
+ }
+ pstat := Propstat{Status: http.StatusOK}
+ for _, xmlname := range pnames {
+ pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
+ }
+ pstats = append(pstats, pstat)
+ } else if pf.Allprop != nil {
+ pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
+ } else {
+ pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop)
+ }
+ if err != nil {
+ return err
+ }
+ return mw.write(makePropstatResponse(path.Join(h.Prefix, reqPath), pstats))
+ }
+
+ walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn)
+ closeErr := mw.close()
+ if walkErr != nil {
+ return http.StatusInternalServerError, walkErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ return 0, nil
+}
+
+func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) {
+ reqPath, status, err := h.stripPrefix(r.URL.Path)
+ if err != nil {
+ return status, err
+ }
+ release, status, err := h.confirmLocks(r, reqPath, "")
+ if err != nil {
+ return status, err
+ }
+ defer release()
+
+ ctx := getContext(r)
+
+ if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
+ if os.IsNotExist(err) {
+ return http.StatusNotFound, err
+ }
+ return http.StatusMethodNotAllowed, err
+ }
+ patches, status, err := readProppatch(r.Body)
+ if err != nil {
+ return status, err
+ }
+ pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches)
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ mw := multistatusWriter{w: w}
+ writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
+ closeErr := mw.close()
+ if writeErr != nil {
+ return http.StatusInternalServerError, writeErr
+ }
+ if closeErr != nil {
+ return http.StatusInternalServerError, closeErr
+ }
+ return 0, nil
+}
+
+func makePropstatResponse(href string, pstats []Propstat) *response {
+ resp := response{
+ Href: []string{(&url.URL{Path: href}).EscapedPath()},
+ Propstat: make([]propstat, 0, len(pstats)),
+ }
+ for _, p := range pstats {
+ var xmlErr *xmlError
+ if p.XMLError != "" {
+ xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
+ }
+ resp.Propstat = append(resp.Propstat, propstat{
+ Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
+ Prop: p.Props,
+ ResponseDescription: p.ResponseDescription,
+ Error: xmlErr,
+ })
+ }
+ return &resp
+}
+
+const (
+ infiniteDepth = -1
+ invalidDepth = -2
+)
+
+// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
+// infiniteDepth. Parsing any other string returns invalidDepth.
+//
+// Different WebDAV methods have further constraints on valid depths:
+// - PROPFIND has no further restrictions, as per section 9.1.
+// - COPY accepts only "0" or "infinity", as per section 9.8.3.
+// - MOVE accepts only "infinity", as per section 9.9.2.
+// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
+// These constraints are enforced by the handleXxx methods.
+func parseDepth(s string) int {
+ switch s {
+ case "0":
+ return 0
+ case "1":
+ return 1
+ case "infinity":
+ return infiniteDepth
+ }
+ return invalidDepth
+}
+
+// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
+const (
+ StatusMulti = 207
+ StatusUnprocessableEntity = 422
+ StatusLocked = 423
+ StatusFailedDependency = 424
+ StatusInsufficientStorage = 507
+)
+
+func StatusText(code int) string {
+ switch code {
+ case StatusMulti:
+ return "Multi-Status"
+ case StatusUnprocessableEntity:
+ return "Unprocessable Entity"
+ case StatusLocked:
+ return "Locked"
+ case StatusFailedDependency:
+ return "Failed Dependency"
+ case StatusInsufficientStorage:
+ return "Insufficient Storage"
+ }
+ return http.StatusText(code)
+}
+
+var (
+ errDestinationEqualsSource = errors.New("webdav: destination equals source")
+ errDirectoryNotEmpty = errors.New("webdav: directory not empty")
+ errInvalidDepth = errors.New("webdav: invalid depth")
+ errInvalidDestination = errors.New("webdav: invalid destination")
+ errInvalidIfHeader = errors.New("webdav: invalid If header")
+ errInvalidLockInfo = errors.New("webdav: invalid lock info")
+ errInvalidLockToken = errors.New("webdav: invalid lock token")
+ errInvalidPropfind = errors.New("webdav: invalid propfind")
+ errInvalidProppatch = errors.New("webdav: invalid proppatch")
+ errInvalidResponse = errors.New("webdav: invalid response")
+ errInvalidTimeout = errors.New("webdav: invalid timeout")
+ errNoFileSystem = errors.New("webdav: no file system")
+ errNoLockSystem = errors.New("webdav: no lock system")
+ errNotADirectory = errors.New("webdav: not a directory")
+ errPrefixMismatch = errors.New("webdav: prefix mismatch")
+ errRecursionTooDeep = errors.New("webdav: recursion too deep")
+ errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
+ errUnsupportedMethod = errors.New("webdav: unsupported method")
+)
diff --git a/vendor/golang.org/x/net/webdav/xml.go b/vendor/golang.org/x/net/webdav/xml.go
new file mode 100644
index 0000000..790dc81
--- /dev/null
+++ b/vendor/golang.org/x/net/webdav/xml.go
@@ -0,0 +1,519 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package webdav
+
+// The XML encoding is covered by Section 14.
+// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ // As of https://go-review.googlesource.com/#/c/12772/ which was submitted
+ // in July 2015, this package uses an internal fork of the standard
+ // library's encoding/xml package, due to changes in the way namespaces
+ // were encoded. Such changes were introduced in the Go 1.5 cycle, but were
+ // rolled back in response to https://github.com/golang/go/issues/11841
+ //
+ // However, this package's exported API, specifically the Property and
+ // DeadPropsHolder types, need to refer to the standard library's version
+ // of the xml.Name type, as code that imports this package cannot refer to
+ // the internal version.
+ //
+ // This file therefore imports both the internal and external versions, as
+ // ixml and xml, and converts between them.
+ //
+ // In the long term, this package should use the standard library's version
+ // only, and the internal fork deleted, once
+ // https://github.com/golang/go/issues/13400 is resolved.
+ ixml "golang.org/x/net/webdav/internal/xml"
+)
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
+type lockInfo struct {
+ XMLName ixml.Name `xml:"lockinfo"`
+ Exclusive *struct{} `xml:"lockscope>exclusive"`
+ Shared *struct{} `xml:"lockscope>shared"`
+ Write *struct{} `xml:"locktype>write"`
+ Owner owner `xml:"owner"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
+type owner struct {
+ InnerXML string `xml:",innerxml"`
+}
+
+func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
+ c := &countingReader{r: r}
+ if err = ixml.NewDecoder(c).Decode(&li); err != nil {
+ if err == io.EOF {
+ if c.n == 0 {
+ // An empty body means to refresh the lock.
+ // http://www.webdav.org/specs/rfc4918.html#refreshing-locks
+ return lockInfo{}, 0, nil
+ }
+ err = errInvalidLockInfo
+ }
+ return lockInfo{}, http.StatusBadRequest, err
+ }
+ // We only support exclusive (non-shared) write locks. In practice, these are
+ // the only types of locks that seem to matter.
+ if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
+ return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
+ }
+ return li, 0, nil
+}
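A sketch in the package's own test style of the only lockinfo shape readLockInfo accepts (an exclusive write lock), plus the empty-body refresh case; the owner value is hypothetical:

package webdav

import (
	"strings"
	"testing"
)

func TestReadLockInfoSketch(t *testing.T) {
	body := `<?xml version="1.0" encoding="utf-8"?>
<D:lockinfo xmlns:D="DAV:">
  <D:lockscope><D:exclusive/></D:lockscope>
  <D:locktype><D:write/></D:locktype>
  <D:owner>mailto:jane@example.com</D:owner>
</D:lockinfo>`
	li, status, err := readLockInfo(strings.NewReader(body))
	if err != nil || status != 0 {
		t.Fatalf("readLockInfo: status=%d err=%v", status, err)
	}
	if li.Owner.InnerXML != "mailto:jane@example.com" {
		t.Fatalf("owner = %q", li.Owner.InnerXML)
	}

	// An empty body means "refresh the lock named by the If header".
	if li, _, err := readLockInfo(strings.NewReader("")); err != nil || li != (lockInfo{}) {
		t.Fatalf("empty body: li=%+v err=%v", li, err)
	}
}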
+
+type countingReader struct {
+ n int
+ r io.Reader
+}
+
+func (c *countingReader) Read(p []byte) (int, error) {
+ n, err := c.r.Read(p)
+ c.n += n
+ return n, err
+}
+
+func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
+ depth := "infinity"
+ if ld.ZeroDepth {
+ depth = "0"
+ }
+ timeout := ld.Duration / time.Second
+ return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
+ "<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
+ " <D:locktype><D:write/></D:locktype>\n"+
+ " <D:lockscope><D:exclusive/></D:lockscope>\n"+
+ " <D:depth>%s</D:depth>\n"+
+ " <D:owner>%s</D:owner>\n"+
+ " <D:timeout>Second-%d</D:timeout>\n"+
+ " <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
+ " <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
+ "</D:activelock></D:lockdiscovery></D:prop>",
+ depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
+ )
+}
+
+func escape(s string) string {
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"', '&', '\'', '<', '>':
+ b := bytes.NewBuffer(nil)
+ ixml.EscapeText(b, []byte(s))
+ return b.String()
+ }
+ }
+ return s
+}
+
+// Next returns the next token, if any, in the XML stream of d.
+// RFC 4918 requires that comments, processing instructions and
+// directives be ignored.
+// http://www.webdav.org/specs/rfc4918.html#property_values
+// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
+func next(d *ixml.Decoder) (ixml.Token, error) {
+ for {
+ t, err := d.Token()
+ if err != nil {
+ return t, err
+ }
+ switch t.(type) {
+ case ixml.Comment, ixml.Directive, ixml.ProcInst:
+ continue
+ default:
+ return t, nil
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
+type propfindProps []xml.Name
+
+// UnmarshalXML appends the property names enclosed within start to pn.
+//
+// It returns an error if start does not contain any properties or if
+// properties contain values. Character data between properties is ignored.
+func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ switch t.(type) {
+ case ixml.EndElement:
+ if len(*pn) == 0 {
+ return fmt.Errorf("%s must not be empty", start.Name.Local)
+ }
+ return nil
+ case ixml.StartElement:
+ name := t.(ixml.StartElement).Name
+ t, err = next(d)
+ if err != nil {
+ return err
+ }
+ if _, ok := t.(ixml.EndElement); !ok {
+ return fmt.Errorf("unexpected token %T", t)
+ }
+ *pn = append(*pn, xml.Name(name))
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
+type propfind struct {
+ XMLName ixml.Name `xml:"DAV: propfind"`
+ Allprop *struct{} `xml:"DAV: allprop"`
+ Propname *struct{} `xml:"DAV: propname"`
+ Prop propfindProps `xml:"DAV: prop"`
+ Include propfindProps `xml:"DAV: include"`
+}
+
+func readPropfind(r io.Reader) (pf propfind, status int, err error) {
+ c := countingReader{r: r}
+ if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
+ if err == io.EOF {
+ if c.n == 0 {
+ // An empty body means to propfind allprop.
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
+ return propfind{Allprop: new(struct{})}, 0, nil
+ }
+ err = errInvalidPropfind
+ }
+ return propfind{}, http.StatusBadRequest, err
+ }
+
+ if pf.Allprop == nil && pf.Include != nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Prop != nil && pf.Propname != nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
+ return propfind{}, http.StatusBadRequest, errInvalidPropfind
+ }
+ return pf, 0, nil
+}
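A test-style sketch of the two request bodies readPropfind accepts: an explicit <D:prop> list and the allprop form implied by an empty body. The property names chosen here are illustrative:

package webdav

import (
	"strings"
	"testing"
)

func TestReadPropfindSketch(t *testing.T) {
	body := `<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:">
  <D:prop><D:displayname/><D:getcontentlength/></D:prop>
</D:propfind>`
	pf, status, err := readPropfind(strings.NewReader(body))
	if err != nil || status != 0 {
		t.Fatalf("readPropfind: status=%d err=%v", status, err)
	}
	if len(pf.Prop) != 2 {
		t.Fatalf("got %d property names, want 2", len(pf.Prop))
	}

	// An empty body is equivalent to <D:allprop/>.
	pf, _, err = readPropfind(strings.NewReader(""))
	if err != nil || pf.Allprop == nil {
		t.Fatalf("empty body: err=%v allprop=%v", err, pf.Allprop)
	}
}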
+
+// Property represents a single DAV resource property as defined in RFC 4918.
+// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
+type Property struct {
+ // XMLName is the fully qualified name that identifies this property.
+ XMLName xml.Name
+
+ // Lang is an optional xml:lang attribute.
+ Lang string `xml:"xml:lang,attr,omitempty"`
+
+ // InnerXML contains the XML representation of the property value.
+ // See http://www.webdav.org/specs/rfc4918.html#property_values
+ //
+ // Property values of complex type or mixed-content must have fully
+ // expanded XML namespaces or be self-contained with according
+ // XML namespace declarations. They must not rely on any XML
+ // namespace declarations within the scope of the XML document,
+ // even including the DAV: namespace.
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// ixmlProperty is the same as the Property type except it holds an ixml.Name
+// instead of an xml.Name.
+type ixmlProperty struct {
+ XMLName ixml.Name
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
+// See multistatusWriter for the "D:" namespace prefix.
+type xmlError struct {
+ XMLName ixml.Name `xml:"D:error"`
+ InnerXML []byte `xml:",innerxml"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
+// See multistatusWriter for the "D:" namespace prefix.
+type propstat struct {
+ Prop []Property `xml:"D:prop>_ignored_"`
+ Status string `xml:"D:status"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
+// instead of an xml.Name.
+type ixmlPropstat struct {
+ Prop []ixmlProperty `xml:"D:prop>_ignored_"`
+ Status string `xml:"D:status"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
+// before encoding. See multistatusWriter.
+func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
+ // Convert from a propstat to an ixmlPropstat.
+ ixmlPs := ixmlPropstat{
+ Prop: make([]ixmlProperty, len(ps.Prop)),
+ Status: ps.Status,
+ Error: ps.Error,
+ ResponseDescription: ps.ResponseDescription,
+ }
+ for k, prop := range ps.Prop {
+ ixmlPs.Prop[k] = ixmlProperty{
+ XMLName: ixml.Name(prop.XMLName),
+ Lang: prop.Lang,
+ InnerXML: prop.InnerXML,
+ }
+ }
+
+ for k, prop := range ixmlPs.Prop {
+ if prop.XMLName.Space == "DAV:" {
+ prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
+ ixmlPs.Prop[k] = prop
+ }
+ }
+ // Distinct type to avoid infinite recursion of MarshalXML.
+ type newpropstat ixmlPropstat
+ return e.EncodeElement(newpropstat(ixmlPs), start)
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
+// See multistatusWriter for the "D:" namespace prefix.
+type response struct {
+ XMLName ixml.Name `xml:"D:response"`
+ Href []string `xml:"D:href"`
+ Propstat []propstat `xml:"D:propstat"`
+ Status string `xml:"D:status,omitempty"`
+ Error *xmlError `xml:"D:error"`
+ ResponseDescription string `xml:"D:responsedescription,omitempty"`
+}
+
+// A multistatusWriter marshals one or more responses into an XML
+// multistatus response.
+// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
+// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
+// "DAV:" on this element, is prepended on the nested response, as well as on all
+// its nested elements. All property names in the DAV: namespace are prefixed as
+// well. This is because some versions of Mini-Redirector (on windows 7) ignore
+// elements with a default namespace (no prefixed namespace). A less intrusive fix
+// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
+type multistatusWriter struct {
+ // responseDescription contains the optional responsedescription
+ // of the multistatus XML element. Only the latest content before
+ // close will be emitted. Empty response descriptions are not
+ // written.
+ responseDescription string
+
+ w http.ResponseWriter
+ enc *ixml.Encoder
+}
+
+// Write validates and emits a DAV response as part of a multistatus response
+// element.
+//
+// It sets the HTTP status code of its underlying http.ResponseWriter to 207
+// (Multi-Status) and populates the Content-Type header. If r is the
+// first, valid response to be written, Write prepends the XML representation
+// of r with a multistatus tag. Callers must call close after the last response
+// has been written.
+func (w *multistatusWriter) write(r *response) error {
+ switch len(r.Href) {
+ case 0:
+ return errInvalidResponse
+ case 1:
+ if len(r.Propstat) > 0 != (r.Status == "") {
+ return errInvalidResponse
+ }
+ default:
+ if len(r.Propstat) > 0 || r.Status == "" {
+ return errInvalidResponse
+ }
+ }
+ err := w.writeHeader()
+ if err != nil {
+ return err
+ }
+ return w.enc.Encode(r)
+}
+
+// writeHeader writes an XML multistatus start element on w's underlying
+// http.ResponseWriter and returns the result of the write operation.
+// After the first write attempt, writeHeader becomes a no-op.
+func (w *multistatusWriter) writeHeader() error {
+ if w.enc != nil {
+ return nil
+ }
+ w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
+ w.w.WriteHeader(StatusMulti)
+ _, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
+ if err != nil {
+ return err
+ }
+ w.enc = ixml.NewEncoder(w.w)
+ return w.enc.EncodeToken(ixml.StartElement{
+ Name: ixml.Name{
+ Space: "DAV:",
+ Local: "multistatus",
+ },
+ Attr: []ixml.Attr{{
+ Name: ixml.Name{Space: "xmlns", Local: "D"},
+ Value: "DAV:",
+ }},
+ })
+}
+
+// Close completes the marshalling of the multistatus response. It returns
+// an error if the multistatus response could not be completed. If both the
+// return value and field enc of w are nil, then no multistatus response has
+// been written.
+func (w *multistatusWriter) close() error {
+ if w.enc == nil {
+ return nil
+ }
+ var end []ixml.Token
+ if w.responseDescription != "" {
+ name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
+ end = append(end,
+ ixml.StartElement{Name: name},
+ ixml.CharData(w.responseDescription),
+ ixml.EndElement{Name: name},
+ )
+ }
+ end = append(end, ixml.EndElement{
+ Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
+ })
+ for _, t := range end {
+ err := w.enc.EncodeToken(t)
+ if err != nil {
+ return err
+ }
+ }
+ return w.enc.Flush()
+}
+
+var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
+
+func xmlLang(s ixml.StartElement, d string) string {
+ for _, attr := range s.Attr {
+ if attr.Name == xmlLangName {
+ return attr.Value
+ }
+ }
+ return d
+}
+
+type xmlValue []byte
+
+func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ // The XML value of a property can be arbitrary, mixed-content XML.
+ // To make sure that the unmarshalled value contains all required
+ // namespaces, we encode all the property value XML tokens into a
+ // buffer. This forces the encoder to redeclare any used namespaces.
+ var b bytes.Buffer
+ e := ixml.NewEncoder(&b)
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
+ break
+ }
+ if err = e.EncodeToken(t); err != nil {
+ return err
+ }
+ }
+ err := e.Flush()
+ if err != nil {
+ return err
+ }
+ *v = b.Bytes()
+ return nil
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
+type proppatchProps []Property
+
+// UnmarshalXML appends the property names and values enclosed within start
+// to ps.
+//
+// An xml:lang attribute that is defined either on the DAV:prop or property
+// name XML element is propagated to the property's Lang field.
+//
+// UnmarshalXML returns an error if start does not contain any properties or if
+// property values contain syntactically incorrect XML.
+func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
+ lang := xmlLang(start, "")
+ for {
+ t, err := next(d)
+ if err != nil {
+ return err
+ }
+ switch elem := t.(type) {
+ case ixml.EndElement:
+ if len(*ps) == 0 {
+ return fmt.Errorf("%s must not be empty", start.Name.Local)
+ }
+ return nil
+ case ixml.StartElement:
+ p := Property{
+ XMLName: xml.Name(t.(ixml.StartElement).Name),
+ Lang: xmlLang(t.(ixml.StartElement), lang),
+ }
+ err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
+ if err != nil {
+ return err
+ }
+ *ps = append(*ps, p)
+ }
+ }
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
+type setRemove struct {
+ XMLName ixml.Name
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ Prop proppatchProps `xml:"DAV: prop"`
+}
+
+// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
+type propertyupdate struct {
+ XMLName ixml.Name `xml:"DAV: propertyupdate"`
+ Lang string `xml:"xml:lang,attr,omitempty"`
+ SetRemove []setRemove `xml:",any"`
+}
+
+func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
+ var pu propertyupdate
+ if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
+ return nil, http.StatusBadRequest, err
+ }
+ for _, op := range pu.SetRemove {
+ remove := false
+ switch op.XMLName {
+ case ixml.Name{Space: "DAV:", Local: "set"}:
+ // No-op.
+ case ixml.Name{Space: "DAV:", Local: "remove"}:
+ for _, p := range op.Prop {
+ if len(p.InnerXML) > 0 {
+ return nil, http.StatusBadRequest, errInvalidProppatch
+ }
+ }
+ remove = true
+ default:
+ return nil, http.StatusBadRequest, errInvalidProppatch
+ }
+ patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
+ }
+ return patches, 0, nil
+}
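Finally, a test-style sketch of a propertyupdate body readProppatch accepts, with one set and one remove; the Z: namespace and property names are hypothetical:

package webdav

import (
	"strings"
	"testing"
)

func TestReadProppatchSketch(t *testing.T) {
	body := `<?xml version="1.0" encoding="utf-8"?>
<D:propertyupdate xmlns:D="DAV:" xmlns:Z="http://example.com/ns/">
  <D:set><D:prop><Z:author>Jane Doe</Z:author></D:prop></D:set>
  <D:remove><D:prop><Z:obsolete/></D:prop></D:remove>
</D:propertyupdate>`
	patches, status, err := readProppatch(strings.NewReader(body))
	if err != nil || status != 0 {
		t.Fatalf("readProppatch: status=%d err=%v", status, err)
	}
	// The set comes first and keeps its value; the remove carries an empty prop.
	if len(patches) != 2 || patches[0].Remove || !patches[1].Remove {
		t.Fatalf("unexpected patches: %+v", patches)
	}
	if got := string(patches[0].Props[0].InnerXML); got != "Jane Doe" {
		t.Fatalf("set value = %q, want %q", got, "Jane Doe")
	}
}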