From 7ea2d24011edd19c7133141c18d17b3f9b4bde06 Mon Sep 17 00:00:00 2001 From: Eric Chiang Date: Mon, 9 Jan 2017 14:51:47 -0800 Subject: [PATCH] vendor: revendor --- glide.lock | 10 +- vendor/github.com/beevik/etree/LICENSE | 24 + vendor/github.com/beevik/etree/etree.go | 938 ++++++++++++++++++ vendor/github.com/beevik/etree/helpers.go | 188 ++++ vendor/github.com/beevik/etree/path.go | 470 +++++++++ vendor/github.com/jonboulle/clockwork/LICENSE | 201 ++++ .../jonboulle/clockwork/clockwork.go | 179 ++++ .../russellhaering/goxmldsig/LICENSE | 175 ++++ .../russellhaering/goxmldsig/canonicalize.go | 251 +++++ .../russellhaering/goxmldsig/clock.go | 55 + .../russellhaering/goxmldsig/keystore.go | 63 ++ .../russellhaering/goxmldsig/sign.go | 186 ++++ .../russellhaering/goxmldsig/tls_keystore.go | 34 + .../russellhaering/goxmldsig/validate.go | 397 ++++++++ .../russellhaering/goxmldsig/xml_constants.go | 78 ++ 15 files changed, 3247 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/beevik/etree/LICENSE create mode 100644 vendor/github.com/beevik/etree/etree.go create mode 100644 vendor/github.com/beevik/etree/helpers.go create mode 100644 vendor/github.com/beevik/etree/path.go create mode 100644 vendor/github.com/jonboulle/clockwork/LICENSE create mode 100644 vendor/github.com/jonboulle/clockwork/clockwork.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/LICENSE create mode 100644 vendor/github.com/russellhaering/goxmldsig/canonicalize.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/clock.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/keystore.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/sign.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/tls_keystore.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/validate.go create mode 100644 vendor/github.com/russellhaering/goxmldsig/xml_constants.go diff --git a/glide.lock b/glide.lock index 81ab6445..0ae0472a 100644 --- a/glide.lock +++ b/glide.lock @@ -1,6 +1,8 @@ -hash: 4d7d84f09a330d27458fb821ae7ada243cfa825808dc7ab116db28a08f9166a2 -updated: 2017-01-08T19:23:40.352046548+01:00 +hash: 2f68b742168a81ebbe604be42801d37e9da71dff5aeb6b8f8e91ed81ff0edec0 +updated: 2017-01-09T14:51:09.514065012-08:00 imports: +- name: github.com/beevik/etree + version: 4cd0dd976db869f817248477718071a28e978df0 - name: github.com/cockroachdb/cockroach-go version: 31611c0501c812f437d4861d87d117053967c955 subpackages: @@ -26,6 +28,8 @@ imports: version: e7e23673cac3f529f49e22f94e4af6d12bb49dba - name: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/jonboulle/clockwork + version: bcac9884e7502bb2b474c0339d889cb981a2f27f - name: github.com/kylelemons/godebug version: eadb3ce320cbab8393bea5ca17bebac3f78a021b subpackages: @@ -41,6 +45,8 @@ imports: version: c97913dcbd76de40b051a9b4cd827f7eaeb7a868 subpackages: - cacheobject +- name: github.com/russellhaering/goxmldsig + version: d9f653eb27ee8b145f7d5a45172e81a93def0860 - name: github.com/Sirupsen/logrus version: d26492970760ca5d33129d2d799e34be5c4782eb - name: github.com/spf13/cobra diff --git a/vendor/github.com/beevik/etree/LICENSE b/vendor/github.com/beevik/etree/LICENSE new file mode 100644 index 00000000..e14ad682 --- /dev/null +++ b/vendor/github.com/beevik/etree/LICENSE @@ -0,0 +1,24 @@ +Copyright 2015 Brett Vickers. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/etree/etree.go b/vendor/github.com/beevik/etree/etree.go new file mode 100644 index 00000000..21f83522 --- /dev/null +++ b/vendor/github.com/beevik/etree/etree.go @@ -0,0 +1,938 @@ +// Copyright 2015 Brett Vickers. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package etree provides XML services through an Element Tree +// abstraction. +package etree + +import ( + "bufio" + "bytes" + "encoding/xml" + "errors" + "io" + "os" + "strings" +) + +const ( + // NoIndent is used with Indent to disable all indenting. + NoIndent = -1 +) + +// ErrXML is returned when XML parsing fails due to incorrect formatting. +var ErrXML = errors.New("etree: invalid XML format") + +// ReadSettings allow for changing the default behavior of the ReadFrom* +// methods. +type ReadSettings struct { + // CharsetReader to be passed to standard xml.Decoder. Default: nil. + CharsetReader func(charset string, input io.Reader) (io.Reader, error) +} + +// newReadSettings creates a default ReadSettings record. +func newReadSettings() ReadSettings { + return ReadSettings{} +} + +// WriteSettings allow for changing the serialization behavior of the WriteTo* +// methods. +type WriteSettings struct { + // CanonicalEndTags forces the production of XML end tags, even for + // elements that have no child elements. Default: false. + CanonicalEndTags bool + + // CanonicalText forces the production of XML character references for + // text data characters &, <, and >. If false, XML character references + // are also produced for " and '. Default: false. + CanonicalText bool + + // CanonicalAttrVal forces the production of XML character references for + // attribute value characters &, < and ". If false, XML character + // references are also produced for > and '. Default: false. + CanonicalAttrVal bool +} + +// newWriteSettings creates a default WriteSettings record. +func newWriteSettings() WriteSettings { + return WriteSettings{ + CanonicalEndTags: false, + CanonicalText: false, + CanonicalAttrVal: false, + } +} + +// A Token is an empty interface that represents an Element, CharData, +// Comment, Directive, or ProcInst. 
+type Token interface { + Parent() *Element + dup(parent *Element) Token + setParent(parent *Element) + writeTo(w *bufio.Writer, s *WriteSettings) +} + +// A Document is a container holding a complete XML hierarchy. Its embedded +// element contains zero or more children, one of which is usually the root +// element. The embedded element may include other children such as +// processing instructions or BOM CharData tokens. +type Document struct { + Element + ReadSettings ReadSettings + WriteSettings WriteSettings +} + +// An Element represents an XML element, its attributes, and its child tokens. +type Element struct { + Space, Tag string // namespace and tag + Attr []Attr // key-value attribute pairs + Child []Token // child tokens (elements, comments, etc.) + parent *Element // parent element +} + +// An Attr represents a key-value attribute of an XML element. +type Attr struct { + Space, Key string // The attribute's namespace and key + Value string // The attribute value string +} + +// CharData represents character data within XML. +type CharData struct { + Data string + parent *Element + whitespace bool +} + +// A Comment represents an XML comment. +type Comment struct { + Data string + parent *Element +} + +// A Directive represents an XML directive. +type Directive struct { + Data string + parent *Element +} + +// A ProcInst represents an XML processing instruction. +type ProcInst struct { + Target string + Inst string + parent *Element +} + +// NewDocument creates an XML document without a root element. +func NewDocument() *Document { + return &Document{ + Element{Child: make([]Token, 0)}, + newReadSettings(), + newWriteSettings(), + } +} + +// Copy returns a recursive, deep copy of the document. +func (d *Document) Copy() *Document { + return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings} +} + +// Root returns the root element of the document, or nil if there is no root +// element. +func (d *Document) Root() *Element { + for _, t := range d.Child { + if c, ok := t.(*Element); ok { + return c + } + } + return nil +} + +// SetRoot replaces the document's root element with e. If the document +// already has a root when this function is called, then the document's +// original root is unbound first. If the element e is bound to another +// document (or to another element within a document), then it is unbound +// first. +func (d *Document) SetRoot(e *Element) { + if e.parent != nil { + e.parent.RemoveChild(e) + } + e.setParent(&d.Element) + + for i, t := range d.Child { + if _, ok := t.(*Element); ok { + t.setParent(nil) + d.Child[i] = e + return + } + } + d.Child = append(d.Child, e) +} + +// ReadFrom reads XML from the reader r into the document d. It returns the +// number of bytes read and any error encountered. +func (d *Document) ReadFrom(r io.Reader) (n int64, err error) { + return d.Element.readFrom(r, d.ReadSettings.CharsetReader) +} + +// ReadFromFile reads XML from the string s into the document d. +func (d *Document) ReadFromFile(filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + _, err = d.ReadFrom(f) + return err +} + +// ReadFromBytes reads XML from the byte slice b into the document d. +func (d *Document) ReadFromBytes(b []byte) error { + _, err := d.ReadFrom(bytes.NewReader(b)) + return err +} + +// ReadFromString reads XML from the string s into the document d. 
+func (d *Document) ReadFromString(s string) error { + _, err := d.ReadFrom(strings.NewReader(s)) + return err +} + +// WriteTo serializes an XML document into the writer w. It +// returns the number of bytes written and any error encountered. +func (d *Document) WriteTo(w io.Writer) (n int64, err error) { + cw := newCountWriter(w) + b := bufio.NewWriter(cw) + for _, c := range d.Child { + c.writeTo(b, &d.WriteSettings) + } + err, n = b.Flush(), cw.bytes + return +} + +// WriteToFile serializes an XML document into the file named +// filename. +func (d *Document) WriteToFile(filename string) error { + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + _, err = d.WriteTo(f) + return err +} + +// WriteToBytes serializes the XML document into a slice of +// bytes. +func (d *Document) WriteToBytes() (b []byte, err error) { + var buf bytes.Buffer + if _, err = d.WriteTo(&buf); err != nil { + return + } + return buf.Bytes(), nil +} + +// WriteToString serializes the XML document into a string. +func (d *Document) WriteToString() (s string, err error) { + var b []byte + if b, err = d.WriteToBytes(); err != nil { + return + } + return string(b), nil +} + +type indentFunc func(depth int) string + +// Indent modifies the document's element tree by inserting CharData entities +// containing carriage returns and indentation. The amount of indentation per +// depth level is given as spaces. Pass etree.NoIndent for spaces if you want +// no indentation at all. +func (d *Document) Indent(spaces int) { + var indent indentFunc + switch { + case spaces < 0: + indent = func(depth int) string { return "" } + default: + indent = func(depth int) string { return crIndent(depth*spaces, crsp) } + } + d.Element.indent(0, indent) +} + +// IndentTabs modifies the document's element tree by inserting CharData +// entities containing carriage returns and tabs for indentation. One tab is +// used per indentation level. +func (d *Document) IndentTabs() { + indent := func(depth int) string { return crIndent(depth, crtab) } + d.Element.indent(0, indent) +} + +// NewElement creates an unparented element with the specified tag. The tag +// may be prefixed by a namespace and a colon. +func NewElement(tag string) *Element { + space, stag := spaceDecompose(tag) + return newElement(space, stag, nil) +} + +// newElement is a helper function that creates an element and binds it to +// a parent element if possible. +func newElement(space, tag string, parent *Element) *Element { + e := &Element{ + Space: space, + Tag: tag, + Attr: make([]Attr, 0), + Child: make([]Token, 0), + parent: parent, + } + if parent != nil { + parent.addChild(e) + } + return e +} + +// Copy creates a recursive, deep copy of the element and all its attributes +// and children. The returned element has no parent but can be parented to a +// another element using AddElement, or to a document using SetRoot. +func (e *Element) Copy() *Element { + var parent *Element + return e.dup(parent).(*Element) +} + +// Text returns the characters immediately following the element's +// opening tag. +func (e *Element) Text() string { + if len(e.Child) == 0 { + return "" + } + if cd, ok := e.Child[0].(*CharData); ok { + return cd.Data + } + return "" +} + +// SetText replaces an element's subsidiary CharData text with a new string. 
+func (e *Element) SetText(text string) { + if len(e.Child) > 0 { + if cd, ok := e.Child[0].(*CharData); ok { + cd.Data = text + return + } + } + cd := newCharData(text, false, e) + copy(e.Child[1:], e.Child[0:]) + e.Child[0] = cd +} + +// CreateElement creates an element with the specified tag and adds it as the +// last child element of the element e. The tag may be prefixed by a namespace +// and a colon. +func (e *Element) CreateElement(tag string) *Element { + space, stag := spaceDecompose(tag) + return newElement(space, stag, e) +} + +// AddChild adds the token t as the last child of element e. If token t was +// already the child of another element, it is first removed from its current +// parent element. +func (e *Element) AddChild(t Token) { + if t.Parent() != nil { + t.Parent().RemoveChild(t) + } + t.setParent(e) + e.addChild(t) +} + +// InsertChild inserts the token t before e's existing child token ex. If ex +// is nil (or if ex is not a child of e), then t is added to the end of e's +// child token list. If token t was already the child of another element, it +// is first removed from its current parent element. +func (e *Element) InsertChild(ex Token, t Token) { + if t.Parent() != nil { + t.Parent().RemoveChild(t) + } + t.setParent(e) + + for i, c := range e.Child { + if c == ex { + e.Child = append(e.Child, nil) + copy(e.Child[i+1:], e.Child[i:]) + e.Child[i] = t + return + } + } + e.addChild(t) +} + +// RemoveChild attempts to remove the token t from element e's list of +// children. If the token t is a child of e, then it is returned. Otherwise, +// nil is returned. +func (e *Element) RemoveChild(t Token) Token { + for i, c := range e.Child { + if c == t { + e.Child = append(e.Child[:i], e.Child[i+1:]...) + c.setParent(nil) + return t + } + } + return nil +} + +// ReadFrom reads XML from the reader r and stores the result as a new child +// of element e. +func (e *Element) readFrom(ri io.Reader, charsetReader func(charset string, input io.Reader) (io.Reader, error)) (n int64, err error) { + r := newCountReader(ri) + dec := xml.NewDecoder(r) + dec.CharsetReader = charsetReader + var stack stack + stack.push(e) + for { + t, err := dec.RawToken() + switch { + case err == io.EOF: + return r.bytes, nil + case err != nil: + return r.bytes, err + case stack.empty(): + return r.bytes, ErrXML + } + + top := stack.peek().(*Element) + + switch t := t.(type) { + case xml.StartElement: + e := newElement(t.Name.Space, t.Name.Local, top) + for _, a := range t.Attr { + e.createAttr(a.Name.Space, a.Name.Local, a.Value) + } + stack.push(e) + case xml.EndElement: + stack.pop() + case xml.CharData: + data := string(t) + newCharData(data, isWhitespace(data), top) + case xml.Comment: + newComment(string(t), top) + case xml.Directive: + newDirective(string(t), top) + case xml.ProcInst: + newProcInst(t.Target, string(t.Inst), top) + } + } +} + +// SelectAttr finds an element attribute matching the requested key and +// returns it if found. The key may be prefixed by a namespace and a colon. +func (e *Element) SelectAttr(key string) *Attr { + space, skey := spaceDecompose(key) + for i, a := range e.Attr { + if spaceMatch(space, a.Space) && skey == a.Key { + return &e.Attr[i] + } + } + return nil +} + +// SelectAttrValue finds an element attribute matching the requested key and +// returns its value if found. The key may be prefixed by a namespace and a +// colon. If the key is not found, the dflt value is returned instead. 
+func (e *Element) SelectAttrValue(key, dflt string) string { + space, skey := spaceDecompose(key) + for _, a := range e.Attr { + if spaceMatch(space, a.Space) && skey == a.Key { + return a.Value + } + } + return dflt +} + +// ChildElements returns all elements that are children of element e. +func (e *Element) ChildElements() []*Element { + var elements []*Element + for _, t := range e.Child { + if c, ok := t.(*Element); ok { + elements = append(elements, c) + } + } + return elements +} + +// SelectElement returns the first child element with the given tag. The tag +// may be prefixed by a namespace and a colon. +func (e *Element) SelectElement(tag string) *Element { + space, stag := spaceDecompose(tag) + for _, t := range e.Child { + if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { + return c + } + } + return nil +} + +// SelectElements returns a slice of all child elements with the given tag. +// The tag may be prefixed by a namespace and a colon. +func (e *Element) SelectElements(tag string) []*Element { + space, stag := spaceDecompose(tag) + var elements []*Element + for _, t := range e.Child { + if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { + elements = append(elements, c) + } + } + return elements +} + +// FindElement returns the first element matched by the XPath-like path +// string. Panics if an invalid path string is supplied. +func (e *Element) FindElement(path string) *Element { + return e.FindElementPath(MustCompilePath(path)) +} + +// FindElementPath returns the first element matched by the XPath-like path +// string. +func (e *Element) FindElementPath(path Path) *Element { + p := newPather() + elements := p.traverse(e, path) + switch { + case len(elements) > 0: + return elements[0] + default: + return nil + } +} + +// FindElements returns a slice of elements matched by the XPath-like path +// string. Panics if an invalid path string is supplied. +func (e *Element) FindElements(path string) []*Element { + return e.FindElementsPath(MustCompilePath(path)) +} + +// FindElementsPath returns a slice of elements matched by the Path object. +func (e *Element) FindElementsPath(path Path) []*Element { + p := newPather() + return p.traverse(e, path) +} + +// indent recursively inserts proper indentation between an +// XML element's child tokens. +func (e *Element) indent(depth int, indent indentFunc) { + e.stripIndent() + n := len(e.Child) + if n == 0 { + return + } + + oldChild := e.Child + e.Child = make([]Token, 0, n*2+1) + isCharData, firstNonCharData := false, true + for _, c := range oldChild { + + // Insert CR+indent before child if it's not character data. + // Exceptions: when it's the first non-character-data child, or when + // the child is at root depth. + _, isCharData = c.(*CharData) + if !isCharData { + if !firstNonCharData || depth > 0 { + newCharData(indent(depth), true, e) + } + firstNonCharData = false + } + + e.addChild(c) + + // Recursively process child elements. + if ce, ok := c.(*Element); ok { + ce.indent(depth+1, indent) + } + } + + // Insert CR+indent before the last child. + if !isCharData { + if !firstNonCharData || depth > 0 { + newCharData(indent(depth-1), true, e) + } + } +} + +// stripIndent removes any previously inserted indentation. 
+func (e *Element) stripIndent() {
+	// Count the number of non-indent child tokens
+	n := len(e.Child)
+	for _, c := range e.Child {
+		if cd, ok := c.(*CharData); ok && cd.whitespace {
+			n--
+		}
+	}
+	if n == len(e.Child) {
+		return
+	}
+
+	// Strip out indent CharData
+	newChild := make([]Token, n)
+	j := 0
+	for _, c := range e.Child {
+		if cd, ok := c.(*CharData); ok && cd.whitespace {
+			continue
+		}
+		newChild[j] = c
+		j++
+	}
+	e.Child = newChild
+}
+
+// dup duplicates the element.
+func (e *Element) dup(parent *Element) Token {
+	ne := &Element{
+		Space:  e.Space,
+		Tag:    e.Tag,
+		Attr:   make([]Attr, len(e.Attr)),
+		Child:  make([]Token, len(e.Child)),
+		parent: parent,
+	}
+	for i, t := range e.Child {
+		ne.Child[i] = t.dup(ne)
+	}
+	for i, a := range e.Attr {
+		ne.Attr[i] = a
+	}
+	return ne
+}
+
+// Parent returns the element token's parent element, or nil if it has no
+// parent.
+func (e *Element) Parent() *Element {
+	return e.parent
+}
+
+// setParent replaces the element token's parent.
+func (e *Element) setParent(parent *Element) {
+	e.parent = parent
+}
+
+// writeTo serializes the element to the writer w.
+func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteByte('<')
+	if e.Space != "" {
+		w.WriteString(e.Space)
+		w.WriteByte(':')
+	}
+	w.WriteString(e.Tag)
+	for _, a := range e.Attr {
+		w.WriteByte(' ')
+		a.writeTo(w, s)
+	}
+	if len(e.Child) > 0 {
+		w.WriteString(">")
+		for _, c := range e.Child {
+			c.writeTo(w, s)
+		}
+		w.Write([]byte{'<', '/'})
+		if e.Space != "" {
+			w.WriteString(e.Space)
+			w.WriteByte(':')
+		}
+		w.WriteString(e.Tag)
+		w.WriteByte('>')
+	} else {
+		if s.CanonicalEndTags {
+			w.Write([]byte{'>', '<', '/'})
+			if e.Space != "" {
+				w.WriteString(e.Space)
+				w.WriteByte(':')
+			}
+			w.WriteString(e.Tag)
+			w.WriteByte('>')
+		} else {
+			w.Write([]byte{'/', '>'})
+		}
+	}
+}
+
+// addChild adds a child token to the element e.
+func (e *Element) addChild(t Token) {
+	e.Child = append(e.Child, t)
+}
+
+// CreateAttr creates an attribute and adds it to element e. The key may be
+// prefixed by a namespace and a colon. If an attribute with the key already
+// exists, its value is replaced.
+func (e *Element) CreateAttr(key, value string) *Attr {
+	space, skey := spaceDecompose(key)
+	return e.createAttr(space, skey, value)
+}
+
+// createAttr is a helper function that creates attributes.
+func (e *Element) createAttr(space, key, value string) *Attr {
+	for i, a := range e.Attr {
+		if space == a.Space && key == a.Key {
+			e.Attr[i].Value = value
+			return &e.Attr[i]
+		}
+	}
+	a := Attr{space, key, value}
+	e.Attr = append(e.Attr, a)
+	return &e.Attr[len(e.Attr)-1]
+}
+
+// RemoveAttr removes and returns the first attribute of the element whose key
+// matches the given key. The key may be prefixed by a namespace and a colon.
+// If an equal attribute does not exist, nil is returned.
+func (e *Element) RemoveAttr(key string) *Attr {
+	space, skey := spaceDecompose(key)
+	for i, a := range e.Attr {
+		if space == a.Space && skey == a.Key {
+			e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...)
+			return &a
+		}
+	}
+	return nil
+}
+
+var xmlReplacerNormal = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	">", "&gt;",
+	"'", "&apos;",
+	`"`, "&quot;",
+)
+
+var xmlReplacerCanonicalText = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	">", "&gt;",
+	"\r", "&#xD;",
+)
+
+var xmlReplacerCanonicalAttrVal = strings.NewReplacer(
+	"&", "&amp;",
+	"<", "&lt;",
+	`"`, "&quot;",
+	"\t", "&#x9;",
+	"\n", "&#xA;",
+	"\r", "&#xD;",
+)
+
+// writeTo serializes the attribute to the writer.
+func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) {
+	if a.Space != "" {
+		w.WriteString(a.Space)
+		w.WriteByte(':')
+	}
+	w.WriteString(a.Key)
+	w.WriteString(`="`)
+	var r *strings.Replacer
+	if s.CanonicalAttrVal {
+		r = xmlReplacerCanonicalAttrVal
+	} else {
+		r = xmlReplacerNormal
+	}
+	w.WriteString(r.Replace(a.Value))
+	w.WriteByte('"')
+}
+
+// NewCharData creates a parentless XML character data entity.
+func NewCharData(data string) *CharData {
+	return newCharData(data, false, nil)
+}
+
+// newCharData creates an XML character data entity and binds it to a parent
+// element. If parent is nil, the CharData token remains unbound.
+func newCharData(data string, whitespace bool, parent *Element) *CharData {
+	c := &CharData{
+		Data:       data,
+		whitespace: whitespace,
+		parent:     parent,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateCharData creates an XML character data entity and adds it as a child
+// of element e.
+func (e *Element) CreateCharData(data string) *CharData {
+	return newCharData(data, false, e)
+}
+
+// dup duplicates the character data.
+func (c *CharData) dup(parent *Element) Token {
+	return &CharData{
+		Data:       c.Data,
+		whitespace: c.whitespace,
+		parent:     parent,
+	}
+}
+
+// Parent returns the character data token's parent element, or nil if it has
+// no parent.
+func (c *CharData) Parent() *Element {
+	return c.parent
+}
+
+// setParent replaces the character data token's parent.
+func (c *CharData) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// writeTo serializes the character data entity to the writer.
+func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) {
+	var r *strings.Replacer
+	if s.CanonicalText {
+		r = xmlReplacerCanonicalText
+	} else {
+		r = xmlReplacerNormal
+	}
+	w.WriteString(r.Replace(c.Data))
+}
+
+// NewComment creates a parentless XML comment.
+func NewComment(comment string) *Comment {
+	return newComment(comment, nil)
+}
+
+// newComment creates an XML comment and binds it to a parent element. If
+// parent is nil, the Comment remains unbound.
+func newComment(comment string, parent *Element) *Comment {
+	c := &Comment{
+		Data:   comment,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(c)
+	}
+	return c
+}
+
+// CreateComment creates an XML comment and adds it as a child of element e.
+func (e *Element) CreateComment(comment string) *Comment {
+	return newComment(comment, e)
+}
+
+// dup duplicates the comment.
+func (c *Comment) dup(parent *Element) Token {
+	return &Comment{
+		Data:   c.Data,
+		parent: parent,
+	}
+}
+
+// Parent returns comment token's parent element, or nil if it has no parent.
+func (c *Comment) Parent() *Element {
+	return c.parent
+}
+
+// setParent replaces the comment token's parent.
+func (c *Comment) setParent(parent *Element) {
+	c.parent = parent
+}
+
+// writeTo serializes the comment to the writer.
+func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<!--")
+	w.WriteString(c.Data)
+	w.WriteString("-->")
+}
+
+// NewDirective creates a parentless XML directive.
+func NewDirective(data string) *Directive {
+	return newDirective(data, nil)
+}
+
+// newDirective creates an XML directive and binds it to a parent element. If
+// parent is nil, the Directive remains unbound.
+func newDirective(data string, parent *Element) *Directive {
+	d := &Directive{
+		Data:   data,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(d)
+	}
+	return d
+}
+
+// CreateDirective creates an XML directive and adds it as the last child of
+// element e.
+func (e *Element) CreateDirective(data string) *Directive {
+	return newDirective(data, e)
+}
+
+// dup duplicates the directive.
+func (d *Directive) dup(parent *Element) Token {
+	return &Directive{
+		Data:   d.Data,
+		parent: parent,
+	}
+}
+
+// Parent returns directive token's parent element, or nil if it has no
+// parent.
+func (d *Directive) Parent() *Element {
+	return d.parent
+}
+
+// setParent replaces the directive token's parent.
+func (d *Directive) setParent(parent *Element) {
+	d.parent = parent
+}
+
+// writeTo serializes the XML directive to the writer.
+func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<!")
+	w.WriteString(d.Data)
+	w.WriteString(">")
+}
+
+// NewProcInst creates a parentless XML processing instruction.
+func NewProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, nil)
+}
+
+// newProcInst creates an XML processing instruction and binds it to a parent
+// element. If parent is nil, the ProcInst remains unbound.
+func newProcInst(target, inst string, parent *Element) *ProcInst {
+	p := &ProcInst{
+		Target: target,
+		Inst:   inst,
+		parent: parent,
+	}
+	if parent != nil {
+		parent.addChild(p)
+	}
+	return p
+}
+
+// CreateProcInst creates a processing instruction and adds it as a child of
+// element e.
+func (e *Element) CreateProcInst(target, inst string) *ProcInst {
+	return newProcInst(target, inst, e)
+}
+
+// dup duplicates the procinst.
+func (p *ProcInst) dup(parent *Element) Token {
+	return &ProcInst{
+		Target: p.Target,
+		Inst:   p.Inst,
+		parent: parent,
+	}
+}
+
+// Parent returns processing instruction token's parent element, or nil if it
+// has no parent.
+func (p *ProcInst) Parent() *Element {
+	return p.parent
+}
+
+// setParent replaces the processing instruction token's parent.
+func (p *ProcInst) setParent(parent *Element) {
+	p.parent = parent
+}
+
+// writeTo serializes the processing instruction to the writer.
+func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) {
+	w.WriteString("<?")
+	w.WriteString(p.Target)
+	w.WriteString(" ")
+	w.WriteString(p.Inst)
+	w.WriteString("?>")
+}
diff --git a/vendor/github.com/beevik/etree/helpers.go b/vendor/github.com/beevik/etree/helpers.go
new file mode 100644
index 00000000..4f8350e7
--- /dev/null
+++ b/vendor/github.com/beevik/etree/helpers.go
@@ -0,0 +1,188 @@
+// Copyright 2015 Brett Vickers.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package etree
+
+import (
+	"io"
+	"strings"
+)
+
+// A simple stack
+type stack struct {
+	data []interface{}
+}
+
+func (s *stack) empty() bool {
+	return len(s.data) == 0
+}
+
+func (s *stack) push(value interface{}) {
+	s.data = append(s.data, value)
+}
+
+func (s *stack) pop() interface{} {
+	value := s.data[len(s.data)-1]
+	s.data[len(s.data)-1] = nil
+	s.data = s.data[:len(s.data)-1]
+	return value
+}
+
+func (s *stack) peek() interface{} {
+	return s.data[len(s.data)-1]
+}
+
+// A fifo is a simple first-in-first-out queue.
+type fifo struct { + data []interface{} + head, tail int +} + +func (f *fifo) add(value interface{}) { + if f.len()+1 >= len(f.data) { + f.grow() + } + f.data[f.tail] = value + if f.tail++; f.tail == len(f.data) { + f.tail = 0 + } +} + +func (f *fifo) remove() interface{} { + value := f.data[f.head] + f.data[f.head] = nil + if f.head++; f.head == len(f.data) { + f.head = 0 + } + return value +} + +func (f *fifo) len() int { + if f.tail >= f.head { + return f.tail - f.head + } + return len(f.data) - f.head + f.tail +} + +func (f *fifo) grow() { + c := len(f.data) * 2 + if c == 0 { + c = 4 + } + buf, count := make([]interface{}, c), f.len() + if f.tail >= f.head { + copy(buf[0:count], f.data[f.head:f.tail]) + } else { + hindex := len(f.data) - f.head + copy(buf[0:hindex], f.data[f.head:]) + copy(buf[hindex:count], f.data[:f.tail]) + } + f.data, f.head, f.tail = buf, 0, count +} + +// countReader implements a proxy reader that counts the number of +// bytes read from its encapsulated reader. +type countReader struct { + r io.Reader + bytes int64 +} + +func newCountReader(r io.Reader) *countReader { + return &countReader{r: r} +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + b, err := cr.r.Read(p) + cr.bytes += int64(b) + return b, err +} + +// countWriter implements a proxy writer that counts the number of +// bytes written by its encapsulated writer. +type countWriter struct { + w io.Writer + bytes int64 +} + +func newCountWriter(w io.Writer) *countWriter { + return &countWriter{w: w} +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + b, err := cw.w.Write(p) + cw.bytes += int64(b) + return b, err +} + +// isWhitespace returns true if the byte slice contains only +// whitespace characters. +func isWhitespace(s string) bool { + for i := 0; i < len(s); i++ { + if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' { + return false + } + } + return true +} + +// spaceMatch returns true if namespace a is the empty string +// or if namespace a equals namespace b. +func spaceMatch(a, b string) bool { + switch { + case a == "": + return true + default: + return a == b + } +} + +// spaceDecompose breaks a namespace:tag identifier at the ':' +// and returns the two parts. +func spaceDecompose(str string) (space, key string) { + colon := strings.IndexByte(str, ':') + if colon == -1 { + return "", str + } + return str[:colon], str[colon+1:] +} + +// Strings used by crIndent +const ( + crsp = "\n " + crtab = "\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t" +) + +// crIndent returns a carriage return followed by n copies of the +// first non-CR character in the source string. +func crIndent(n int, source string) string { + switch { + case n < 0: + return source[:1] + case n < len(source): + return source[:n+1] + default: + return source + strings.Repeat(source[1:2], n-len(source)+1) + } +} + +// nextIndex returns the index of the next occurrence of sep in s, +// starting from offset. It returns -1 if the sep string is not found. +func nextIndex(s, sep string, offset int) int { + switch i := strings.Index(s[offset:], sep); i { + case -1: + return -1 + default: + return offset + i + } +} + +// isInteger returns true if the string s contains an integer. 
+func isInteger(s string) bool { + for i := 0; i < len(s); i++ { + if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') { + return false + } + } + return true +} diff --git a/vendor/github.com/beevik/etree/path.go b/vendor/github.com/beevik/etree/path.go new file mode 100644 index 00000000..126eb154 --- /dev/null +++ b/vendor/github.com/beevik/etree/path.go @@ -0,0 +1,470 @@ +// Copyright 2015 Brett Vickers. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package etree + +import ( + "strconv" + "strings" +) + +/* +A Path is an object that represents an optimized version of an +XPath-like search string. Although path strings are XPath-like, +only the following limited syntax is supported: + + . Selects the current element + .. Selects the parent of the current element + * Selects all child elements + // Selects all descendants of the current element + tag Selects all child elements with the given tag + [#] Selects the element of the given index (1-based, + negative starts from the end) + [@attrib] Selects all elements with the given attribute + [@attrib='val'] Selects all elements with the given attribute set to val + [tag] Selects all elements with a child element named tag + [tag='val'] Selects all elements with a child element named tag + and text equal to val + +Examples: + +Select the title elements of all descendant book elements having a +'category' attribute of 'WEB': + //book[@category='WEB']/title + +Select the first book element with a title child containing the text +'Great Expectations': + .//book[title='Great Expectations'][1] + +Starting from the current element, select all children of book elements +with an attribute 'language' set to 'english': + ./book/*[@language='english'] + +Select all descendant book elements whose title element has an attribute +'language' set to 'french': + //book/title[@language='french']/.. +*/ +type Path struct { + segments []segment +} + +// ErrPath is returned by path functions when an invalid etree path is provided. +type ErrPath string + +// Error returns the string describing a path error. +func (err ErrPath) Error() string { + return "etree: " + string(err) +} + +// CompilePath creates an optimized version of an XPath-like string that +// can be used to query elements in an element tree. +func CompilePath(path string) (Path, error) { + var comp compiler + segments := comp.parsePath(path) + if comp.err != ErrPath("") { + return Path{nil}, comp.err + } + return Path{segments}, nil +} + +// MustCompilePath creates an optimized version of an XPath-like string that +// can be used to query elements in an element tree. Panics if an error +// occurs. Use this function to create Paths when you know the path is +// valid (i.e., if it's hard-coded). +func MustCompilePath(path string) Path { + p, err := CompilePath(path) + if err != nil { + panic(err) + } + return p +} + +// A segment is a portion of a path between "/" characters. +// It contains one selector and zero or more [filters]. +type segment struct { + sel selector + filters []filter +} + +func (seg *segment) apply(e *Element, p *pather) { + seg.sel.apply(e, p) + for _, f := range seg.filters { + f.apply(p) + } +} + +// A selector selects XML elements for consideration by the +// path traversal. +type selector interface { + apply(e *Element, p *pather) +} + +// A filter pares down a list of candidate XML elements based +// on a path filter in [brackets]. 
+type filter interface { + apply(p *pather) +} + +// A pather is helper object that traverses an element tree using +// a Path object. It collects and deduplicates all elements matching +// the path query. +type pather struct { + queue fifo + results []*Element + inResults map[*Element]bool + candidates []*Element + scratch []*Element // used by filters +} + +// A node represents an element and the remaining path segments that +// should be applied against it by the pather. +type node struct { + e *Element + segments []segment +} + +func newPather() *pather { + return &pather{ + results: make([]*Element, 0), + inResults: make(map[*Element]bool), + candidates: make([]*Element, 0), + scratch: make([]*Element, 0), + } +} + +// traverse follows the path from the element e, collecting +// and then returning all elements that match the path's selectors +// and filters. +func (p *pather) traverse(e *Element, path Path) []*Element { + for p.queue.add(node{e, path.segments}); p.queue.len() > 0; { + p.eval(p.queue.remove().(node)) + } + return p.results +} + +// eval evalutes the current path node by applying the remaining +// path's selector rules against the node's element. +func (p *pather) eval(n node) { + p.candidates = p.candidates[0:0] + seg, remain := n.segments[0], n.segments[1:] + seg.apply(n.e, p) + + if len(remain) == 0 { + for _, c := range p.candidates { + if in := p.inResults[c]; !in { + p.inResults[c] = true + p.results = append(p.results, c) + } + } + } else { + for _, c := range p.candidates { + p.queue.add(node{c, remain}) + } + } +} + +// A compiler generates a compiled path from a path string. +type compiler struct { + err ErrPath +} + +// parsePath parses an XPath-like string describing a path +// through an element tree and returns a slice of segment +// descriptors. +func (c *compiler) parsePath(path string) []segment { + // If path starts or ends with //, fix it + if strings.HasPrefix(path, "//") { + path = "." + path + } + if strings.HasSuffix(path, "//") { + path = path + "*" + } + + // Paths cannot be absolute + if strings.HasPrefix(path, "/") { + c.err = ErrPath("paths cannot be absolute.") + return nil + } + + // Split path into segment objects + var segments []segment + for _, s := range splitPath(path) { + segments = append(segments, c.parseSegment(s)) + if c.err != ErrPath("") { + break + } + } + return segments +} + +func splitPath(path string) []string { + pieces := make([]string, 0) + start := 0 + inquote := false + for i := 0; i+1 <= len(path); i++ { + if path[i] == '\'' { + inquote = !inquote + } else if path[i] == '/' && !inquote { + pieces = append(pieces, path[start:i]) + start = i + 1 + } + } + return append(pieces, path[start:]) +} + +// parseSegment parses a path segment between / characters. +func (c *compiler) parseSegment(path string) segment { + pieces := strings.Split(path, "[") + seg := segment{ + sel: c.parseSelector(pieces[0]), + filters: make([]filter, 0), + } + for i := 1; i < len(pieces); i++ { + fpath := pieces[i] + if fpath[len(fpath)-1] != ']' { + c.err = ErrPath("path has invalid filter [brackets].") + break + } + seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) + } + return seg +} + +// parseSelector parses a selector at the start of a path segment. 
+func (c *compiler) parseSelector(path string) selector { + switch path { + case ".": + return new(selectSelf) + case "..": + return new(selectParent) + case "*": + return new(selectChildren) + case "": + return new(selectDescendants) + default: + return newSelectChildrenByTag(path) + } +} + +// parseFilter parses a path filter contained within [brackets]. +func (c *compiler) parseFilter(path string) filter { + if len(path) == 0 { + c.err = ErrPath("path contains an empty filter expression.") + return nil + } + + // Filter contains [@attr='val'] or [tag='val']? + eqindex := strings.Index(path, "='") + if eqindex >= 0 { + rindex := nextIndex(path, "'", eqindex+2) + if rindex != len(path)-1 { + c.err = ErrPath("path has mismatched filter quotes.") + return nil + } + switch { + case path[0] == '@': + return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex]) + default: + return newFilterChildText(path[:eqindex], path[eqindex+2:rindex]) + } + } + + // Filter contains [@attr], [N] or [tag] + switch { + case path[0] == '@': + return newFilterAttr(path[1:]) + case isInteger(path): + pos, _ := strconv.Atoi(path) + switch { + case pos > 0: + return newFilterPos(pos - 1) + default: + return newFilterPos(pos) + } + default: + return newFilterChild(path) + } +} + +// selectSelf selects the current element into the candidate list. +type selectSelf struct{} + +func (s *selectSelf) apply(e *Element, p *pather) { + p.candidates = append(p.candidates, e) +} + +// selectParent selects the element's parent into the candidate list. +type selectParent struct{} + +func (s *selectParent) apply(e *Element, p *pather) { + if e.parent != nil { + p.candidates = append(p.candidates, e.parent) + } +} + +// selectChildren selects the element's child elements into the +// candidate list. +type selectChildren struct{} + +func (s *selectChildren) apply(e *Element, p *pather) { + for _, c := range e.Child { + if c, ok := c.(*Element); ok { + p.candidates = append(p.candidates, c) + } + } +} + +// selectDescendants selects all descendant child elements +// of the element into the candidate list. +type selectDescendants struct{} + +func (s *selectDescendants) apply(e *Element, p *pather) { + var queue fifo + for queue.add(e); queue.len() > 0; { + e := queue.remove().(*Element) + p.candidates = append(p.candidates, e) + for _, c := range e.Child { + if c, ok := c.(*Element); ok { + queue.add(c) + } + } + } +} + +// selectChildrenByTag selects into the candidate list all child +// elements of the element having the specified tag. +type selectChildrenByTag struct { + space, tag string +} + +func newSelectChildrenByTag(path string) *selectChildrenByTag { + s, l := spaceDecompose(path) + return &selectChildrenByTag{s, l} +} + +func (s *selectChildrenByTag) apply(e *Element, p *pather) { + for _, c := range e.Child { + if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag { + p.candidates = append(p.candidates, c) + } + } +} + +// filterPos filters the candidate list, keeping only the +// candidate at the specified index. 
+type filterPos struct { + index int +} + +func newFilterPos(pos int) *filterPos { + return &filterPos{pos} +} + +func (f *filterPos) apply(p *pather) { + if f.index >= 0 { + if f.index < len(p.candidates) { + p.scratch = append(p.scratch, p.candidates[f.index]) + } + } else { + if -f.index <= len(p.candidates) { + p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index]) + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterAttr filters the candidate list for elements having +// the specified attribute. +type filterAttr struct { + space, key string +} + +func newFilterAttr(str string) *filterAttr { + s, l := spaceDecompose(str) + return &filterAttr{s, l} +} + +func (f *filterAttr) apply(p *pather) { + for _, c := range p.candidates { + for _, a := range c.Attr { + if spaceMatch(f.space, a.Space) && f.key == a.Key { + p.scratch = append(p.scratch, c) + break + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterAttrVal filters the candidate list for elements having +// the specified attribute with the specified value. +type filterAttrVal struct { + space, key, val string +} + +func newFilterAttrVal(str, value string) *filterAttrVal { + s, l := spaceDecompose(str) + return &filterAttrVal{s, l, value} +} + +func (f *filterAttrVal) apply(p *pather) { + for _, c := range p.candidates { + for _, a := range c.Attr { + if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value { + p.scratch = append(p.scratch, c) + break + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterChild filters the candidate list for elements having +// a child element with the specified tag. +type filterChild struct { + space, tag string +} + +func newFilterChild(str string) *filterChild { + s, l := spaceDecompose(str) + return &filterChild{s, l} +} + +func (f *filterChild) apply(p *pather) { + for _, c := range p.candidates { + for _, cc := range c.Child { + if cc, ok := cc.(*Element); ok && + spaceMatch(f.space, cc.Space) && + f.tag == cc.Tag { + p.scratch = append(p.scratch, c) + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} + +// filterChildText filters the candidate list for elements having +// a child element with the specified tag and text. +type filterChildText struct { + space, tag, text string +} + +func newFilterChildText(str, text string) *filterChildText { + s, l := spaceDecompose(str) + return &filterChildText{s, l, text} +} + +func (f *filterChildText) apply(p *pather) { + for _, c := range p.candidates { + for _, cc := range c.Child { + if cc, ok := cc.(*Element); ok && + spaceMatch(f.space, cc.Space) && + f.tag == cc.Tag && + f.text == cc.Text() { + p.scratch = append(p.scratch, c) + } + } + } + p.candidates, p.scratch = p.scratch, p.candidates[0:0] +} diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE new file mode 100644 index 00000000..5c304d1a --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
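The clockwork package vendored in the next file exposes a small Clock interface (After, Sleep, Now, Since) plus a FakeClock that tests can advance manually; goxmldsig threads such a clock through its validation code so that time-sensitive checks stay deterministic in tests. The following is a minimal usage sketch, not code from this repository — waitAndReport is a hypothetical helper introduced only for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/jonboulle/clockwork"
)

// waitAndReport sleeps on the supplied Clock, so a test can substitute a FakeClock.
func waitAndReport(c clockwork.Clock, d time.Duration) string {
	start := c.Now()
	c.Sleep(d)
	return fmt.Sprintf("slept %s", c.Since(start))
}

func main() {
	// Production code passes the real clock.
	_ = waitAndReport(clockwork.NewRealClock(), 10*time.Millisecond)

	// A test drives time by hand instead of waiting.
	fc := clockwork.NewFakeClock()
	done := make(chan string)
	go func() { done <- waitAndReport(fc, time.Hour) }()
	fc.BlockUntil(1)      // wait until the goroutine is sleeping on the fake clock
	fc.Advance(time.Hour) // wake it immediately
	fmt.Println(<-done)
}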
diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go new file mode 100644 index 00000000..999fddd5 --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -0,0 +1,179 @@ +package clockwork + +import ( + "sync" + "time" +) + +// Clock provides an interface that packages can use instead of directly +// using the time module, so that chronology-related behavior can be tested +type Clock interface { + After(d time.Duration) <-chan time.Time + Sleep(d time.Duration) + Now() time.Time + Since(t time.Time) time.Duration +} + +// FakeClock provides an interface for a clock which can be +// manually advanced through time +type FakeClock interface { + Clock + // Advance advances the FakeClock to a new point in time, ensuring any existing + // sleepers are notified appropriately before returning + Advance(d time.Duration) + // BlockUntil will block until the FakeClock has the given number of + // sleepers (callers of Sleep or After) + BlockUntil(n int) +} + +// NewRealClock returns a Clock which simply delegates calls to the actual time +// package; it should be used by packages in production. +func NewRealClock() Clock { + return &realClock{} +} + +// NewFakeClock returns a FakeClock implementation which can be +// manually advanced through time for testing. The initial time of the +// FakeClock will be an arbitrary non-zero time. +func NewFakeClock() FakeClock { + // use a fixture that does not fulfill Time.IsZero() + return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) +} + +// NewFakeClockAt returns a FakeClock initialised at the given time.Time. +func NewFakeClockAt(t time.Time) FakeClock { + return &fakeClock{ + time: t, + } +} + +type realClock struct{} + +func (rc *realClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (rc *realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +func (rc *realClock) Now() time.Time { + return time.Now() +} + +func (rc *realClock) Since(t time.Time) time.Duration { + return rc.Now().Sub(t) +} + +type fakeClock struct { + sleepers []*sleeper + blockers []*blocker + time time.Time + + l sync.RWMutex +} + +// sleeper represents a caller of After or Sleep +type sleeper struct { + until time.Time + done chan time.Time +} + +// blocker represents a caller of BlockUntil +type blocker struct { + count int + ch chan struct{} +} + +// After mimics time.After; it waits for the given duration to elapse on the +// fakeClock, then sends the current time on the returned channel. +func (fc *fakeClock) After(d time.Duration) <-chan time.Time { + fc.l.Lock() + defer fc.l.Unlock() + now := fc.time + done := make(chan time.Time, 1) + if d.Nanoseconds() == 0 { + // special case - trigger immediately + done <- now + } else { + // otherwise, add to the set of sleepers + s := &sleeper{ + until: now.Add(d), + done: done, + } + fc.sleepers = append(fc.sleepers, s) + // and notify any blockers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + } + return done +} + +// notifyBlockers notifies all the blockers waiting until the +// given number of sleepers are waiting on the fakeClock. It +// returns an updated slice of blockers (i.e. 
those still waiting) +func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { + for _, b := range blockers { + if b.count == count { + close(b.ch) + } else { + newBlockers = append(newBlockers, b) + } + } + return +} + +// Sleep blocks until the given duration has passed on the fakeClock +func (fc *fakeClock) Sleep(d time.Duration) { + <-fc.After(d) +} + +// Time returns the current time of the fakeClock +func (fc *fakeClock) Now() time.Time { + fc.l.RLock() + t := fc.time + fc.l.RUnlock() + return t +} + +// Since returns the duration that has passed since the given time on the fakeClock +func (fc *fakeClock) Since(t time.Time) time.Duration { + return fc.Now().Sub(t) +} + +// Advance advances fakeClock to a new point in time, ensuring channels from any +// previous invocations of After are notified appropriately before returning +func (fc *fakeClock) Advance(d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + end := fc.time.Add(d) + var newSleepers []*sleeper + for _, s := range fc.sleepers { + if end.Sub(s.until) >= 0 { + s.done <- end + } else { + newSleepers = append(newSleepers, s) + } + } + fc.sleepers = newSleepers + fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) + fc.time = end +} + +// BlockUntil will block until the fakeClock has the given number of sleepers +// (callers of Sleep or After) +func (fc *fakeClock) BlockUntil(n int) { + fc.l.Lock() + // Fast path: current number of sleepers is what we're looking for + if len(fc.sleepers) == n { + fc.l.Unlock() + return + } + // Otherwise, set up a new blocker + b := &blocker{ + count: n, + ch: make(chan struct{}), + } + fc.blockers = append(fc.blockers, b) + fc.l.Unlock() + <-b.ch +} diff --git a/vendor/github.com/russellhaering/goxmldsig/LICENSE b/vendor/github.com/russellhaering/goxmldsig/LICENSE new file mode 100644 index 00000000..67db8588 --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/russellhaering/goxmldsig/canonicalize.go b/vendor/github.com/russellhaering/goxmldsig/canonicalize.go new file mode 100644 index 00000000..7488ef5a --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/canonicalize.go @@ -0,0 +1,251 @@ +package dsig + +import ( + "sort" + "strings" + + "github.com/beevik/etree" +) + +// Canonicalizer is an implementation of a canonicalization algorithm. +type Canonicalizer interface { + Canonicalize(el *etree.Element) ([]byte, error) + Algorithm() AlgorithmID +} + +type c14N10ExclusiveCanonicalizer struct { + InclusiveNamespaces map[string]struct{} +} + +// MakeC14N10ExclusiveCanonicalizerWithPrefixList constructs an exclusive Canonicalizer +// from a PrefixList in NMTOKENS format (a white space separated list). +func MakeC14N10ExclusiveCanonicalizerWithPrefixList(prefixList string) Canonicalizer { + prefixes := strings.Fields(prefixList) + prefixSet := make(map[string]struct{}, len(prefixes)) + + for _, prefix := range prefixes { + prefixSet[prefix] = struct{}{} + } + + return &c14N10ExclusiveCanonicalizer{ + InclusiveNamespaces: prefixSet, + } +} + +// Canonicalize transforms the input Element into a serialized XML document in canonical form. +func (c *c14N10ExclusiveCanonicalizer) Canonicalize(el *etree.Element) ([]byte, error) { + scope := make(map[string]c14nSpace) + return canonicalSerialize(excCanonicalPrep(el, scope, c.InclusiveNamespaces)) +} + +func (c *c14N10ExclusiveCanonicalizer) Algorithm() AlgorithmID { + return CanonicalXML10ExclusiveAlgorithmId +} + +type c14N11Canonicalizer struct{} + +// MakeC14N11Canonicalizer constructs an inclusive canonicalizer. +func MakeC14N11Canonicalizer() Canonicalizer { + return &c14N11Canonicalizer{} +} + +// Canonicalize transforms the input Element into a serialized XML document in canonical form. 
+func (c *c14N11Canonicalizer) Canonicalize(el *etree.Element) ([]byte, error) { + scope := make(map[string]struct{}) + return canonicalSerialize(canonicalPrep(el, scope)) +} + +func (c *c14N11Canonicalizer) Algorithm() AlgorithmID { + return CanonicalXML11AlgorithmId +} + +func composeAttr(space, key string) string { + if space != "" { + return space + ":" + key + } + + return key +} + +type attrsByKey []etree.Attr + +func (a attrsByKey) Len() int { + return len(a) +} + +func (a attrsByKey) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a attrsByKey) Less(i, j int) bool { + // As I understand it: any "xmlns" attribute should come first, followed by any + // any "xmlns:prefix" attributes, presumably ordered by prefix. Lastly any other + // attributes in lexicographical order. + if a[i].Space == "" && a[i].Key == "xmlns" { + return true + } + + if a[i].Space == "xmlns" { + if a[j].Space == "xmlns" { + return a[i].Key < a[j].Key + } + return true + } + + if a[j].Space == "xmlns" { + return false + } + + return composeAttr(a[i].Space, a[i].Key) < composeAttr(a[j].Space, a[j].Key) +} + +type c14nSpace struct { + a etree.Attr + used bool +} + +const nsSpace = "xmlns" + +// excCanonicalPrep accepts an *etree.Element and recursively transforms it into one +// which is ready for serialization to exclusive canonical form. Specifically this +// entails: +// +// 1. Stripping re-declarations of namespaces +// 2. Stripping unused namespaces +// 3. Sorting attributes into canonical order. +// +// NOTE(russell_h): Currently this function modifies the passed element. +func excCanonicalPrep(el *etree.Element, _nsAlreadyDeclared map[string]c14nSpace, inclusiveNamespaces map[string]struct{}) *etree.Element { + //Copy alreadyDeclared map (only contains namespaces) + nsAlreadyDeclared := make(map[string]c14nSpace, len(_nsAlreadyDeclared)) + for k := range _nsAlreadyDeclared { + nsAlreadyDeclared[k] = _nsAlreadyDeclared[k] + } + + //Track the namespaces used on the current element + nsUsedHere := make(map[string]struct{}) + + //Make sure to track the element namespace for the case: + // + if el.Space != "" { + nsUsedHere[el.Space] = struct{}{} + } + + toRemove := make([]string, 0, 0) + + for _, a := range el.Attr { + switch a.Space { + case nsSpace: + + //For simplicity, remove all xmlns attribues; to be added in one pass + //later. Otherwise, we need another map/set to track xmlns attributes + //that we left alone. + toRemove = append(toRemove, a.Space+":"+a.Key) + if _, ok := nsAlreadyDeclared[a.Key]; !ok { + //If we're not tracking ancestor state already for this namespace, add + //it to the map + nsAlreadyDeclared[a.Key] = c14nSpace{a: a, used: false} + } + + // This algorithm accepts a set of namespaces which should be treated + // in an inclusive fashion. Specifically that means we should keep the + // declaration of that namespace closest to the root of the tree. We can + // accomplish that be pretending it was used by this element. + _, inclusive := inclusiveNamespaces[a.Key] + if inclusive { + nsUsedHere[a.Key] = struct{}{} + } + + default: + //We only track namespaces, so ignore attributes without one. + if a.Space != "" { + nsUsedHere[a.Space] = struct{}{} + } + } + } + + //Remove all attributes so that we can add them with much-simpler logic + for _, attrK := range toRemove { + el.RemoveAttr(attrK) + } + + //For all namespaces used on the current element, declare them if they were + //not declared (and used) in an ancestor. 
+ for k := range nsUsedHere { + spc := nsAlreadyDeclared[k] + //If previously unused, mark as used + if !spc.used { + el.Attr = append(el.Attr, spc.a) + spc.used = true + + //Assignment here is only to update the pre-existing `used` tracking value + nsAlreadyDeclared[k] = spc + } + } + + //Canonicalize all children, passing down the ancestor tracking map + for _, child := range el.ChildElements() { + excCanonicalPrep(child, nsAlreadyDeclared, inclusiveNamespaces) + } + + //Sort attributes lexicographically + sort.Sort(attrsByKey(el.Attr)) + + return el.Copy() +} + +// canonicalPrep accepts an *etree.Element and transforms it into one which is ready +// for serialization into inclusive canonical form. Specifically this +// entails: +// +// 1. Stripping re-declarations of namespaces +// 2. Sorting attributes into canonical order +// +// Inclusive canonicalization does not strip unused namespaces. +// +// TODO(russell_h): This is very similar to excCanonicalPrep - perhaps they should +// be unified into one parameterized function? +func canonicalPrep(el *etree.Element, seenSoFar map[string]struct{}) *etree.Element { + _seenSoFar := make(map[string]struct{}) + for k, v := range seenSoFar { + _seenSoFar[k] = v + } + + ne := el.Copy() + sort.Sort(attrsByKey(ne.Attr)) + if len(ne.Attr) != 0 { + for _, attr := range ne.Attr { + if attr.Space != nsSpace { + continue + } + key := attr.Space + ":" + attr.Key + if _, seen := _seenSoFar[key]; seen { + ne.RemoveAttr(attr.Space + ":" + attr.Key) + } else { + _seenSoFar[key] = struct{}{} + } + } + } + + for i, token := range ne.Child { + childElement, ok := token.(*etree.Element) + if ok { + ne.Child[i] = canonicalPrep(childElement, _seenSoFar) + } + } + + return ne +} + +func canonicalSerialize(el *etree.Element) ([]byte, error) { + doc := etree.NewDocument() + doc.SetRoot(el) + + doc.WriteSettings = etree.WriteSettings{ + CanonicalAttrVal: true, + CanonicalEndTags: true, + CanonicalText: true, + } + + return doc.WriteToBytes() +} diff --git a/vendor/github.com/russellhaering/goxmldsig/clock.go b/vendor/github.com/russellhaering/goxmldsig/clock.go new file mode 100644 index 00000000..cceaaa54 --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/clock.go @@ -0,0 +1,55 @@ +package dsig + +import ( + "time" + + "github.com/jonboulle/clockwork" +) + +// Clock wraps a clockwork.Clock (which could be real or fake) in order +// to default to a real clock when a nil *Clock is used. In other words, +// if you attempt to use a nil *Clock it will defer to the real system +// clock. This allows Clock to be easily added to structs with methods +// that currently reference the time package, without requiring every +// instantiation of that struct to be updated. 
+type Clock struct { + wrapped clockwork.Clock +} + +func (c *Clock) getWrapped() clockwork.Clock { + if c == nil { + return clockwork.NewRealClock() + } + + return c.wrapped +} + +func (c *Clock) After(d time.Duration) <-chan time.Time { + return c.getWrapped().After(d) +} + +func (c *Clock) Sleep(d time.Duration) { + c.getWrapped().Sleep(d) +} + +func (c *Clock) Now() time.Time { + return c.getWrapped().Now() +} + +func NewRealClock() *Clock { + return &Clock{ + wrapped: clockwork.NewRealClock(), + } +} + +func NewFakeClock(wrapped clockwork.Clock) *Clock { + return &Clock{ + wrapped: wrapped, + } +} + +func NewFakeClockAt(t time.Time) *Clock { + return &Clock{ + wrapped: clockwork.NewFakeClockAt(t), + } +} diff --git a/vendor/github.com/russellhaering/goxmldsig/keystore.go b/vendor/github.com/russellhaering/goxmldsig/keystore.go new file mode 100644 index 00000000..81487f08 --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/keystore.go @@ -0,0 +1,63 @@ +package dsig + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "math/big" + "time" +) + +type X509KeyStore interface { + GetKeyPair() (privateKey *rsa.PrivateKey, cert []byte, err error) +} + +type X509CertificateStore interface { + Certificates() (roots []*x509.Certificate, err error) +} + +type MemoryX509CertificateStore struct { + Roots []*x509.Certificate +} + +func (mX509cs *MemoryX509CertificateStore) Certificates() ([]*x509.Certificate, error) { + return mX509cs.Roots, nil +} + +type MemoryX509KeyStore struct { + privateKey *rsa.PrivateKey + cert []byte +} + +func (ks *MemoryX509KeyStore) GetKeyPair() (*rsa.PrivateKey, []byte, error) { + return ks.privateKey, ks.cert, nil +} + +func RandomKeyStoreForTest() X509KeyStore { + key, err := rsa.GenerateKey(rand.Reader, 1024) + if err != nil { + panic(err) + } + + now := time.Now() + + template := &x509.Certificate{ + SerialNumber: big.NewInt(0), + NotBefore: now.Add(-5 * time.Minute), + NotAfter: now.Add(365 * 24 * time.Hour), + + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{}, + BasicConstraintsValid: true, + } + + cert, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) + if err != nil { + panic(err) + } + + return &MemoryX509KeyStore{ + privateKey: key, + cert: cert, + } +} diff --git a/vendor/github.com/russellhaering/goxmldsig/sign.go b/vendor/github.com/russellhaering/goxmldsig/sign.go new file mode 100644 index 00000000..1fcdee62 --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/sign.go @@ -0,0 +1,186 @@ +package dsig + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + + "github.com/beevik/etree" +) + +type SigningContext struct { + Hash crypto.Hash + KeyStore X509KeyStore + IdAttribute string + Prefix string + Canonicalizer Canonicalizer +} + +func NewDefaultSigningContext(ks X509KeyStore) *SigningContext { + return &SigningContext{ + Hash: crypto.SHA256, + KeyStore: ks, + IdAttribute: DefaultIdAttr, + Prefix: DefaultPrefix, + Canonicalizer: MakeC14N11Canonicalizer(), + } +} + +func (ctx *SigningContext) SetSignatureMethod(algorithmID string) error { + hash, ok := signatureMethodsByIdentifier[algorithmID] + if !ok { + return fmt.Errorf("Unknown SignatureMethod: %s", algorithmID) + } + + ctx.Hash = hash + + return nil +} + +func (ctx *SigningContext) digest(el *etree.Element) ([]byte, error) { + canonical, err := ctx.Canonicalizer.Canonicalize(el) + if err != nil { + return nil, err + } + + hash := 
ctx.Hash.New() + _, err = hash.Write(canonical) + if err != nil { + return nil, err + } + + return hash.Sum(nil), nil +} + +func (ctx *SigningContext) constructSignedInfo(el *etree.Element, enveloped bool) (*etree.Element, error) { + digestAlgorithmIdentifier, ok := digestAlgorithmIdentifiers[ctx.Hash] + if !ok { + return nil, errors.New("unsupported hash mechanism") + } + + signatureMethodIdentifier, ok := signatureMethodIdentifiers[ctx.Hash] + if !ok { + return nil, errors.New("unsupported signature method") + } + + digest, err := ctx.digest(el) + if err != nil { + return nil, err + } + + signedInfo := &etree.Element{ + Tag: SignedInfoTag, + Space: ctx.Prefix, + } + + // /SignedInfo/CanonicalizationMethod + canonicalizationMethod := ctx.createNamespacedElement(signedInfo, CanonicalizationMethodTag) + canonicalizationMethod.CreateAttr(AlgorithmAttr, string(ctx.Canonicalizer.Algorithm())) + + // /SignedInfo/SignatureMethod + signatureMethod := ctx.createNamespacedElement(signedInfo, SignatureMethodTag) + signatureMethod.CreateAttr(AlgorithmAttr, signatureMethodIdentifier) + + // /SignedInfo/Reference + reference := ctx.createNamespacedElement(signedInfo, ReferenceTag) + + dataId := el.SelectAttrValue(DefaultIdAttr, "") + if dataId == "" { + return nil, errors.New("Missing data ID") + } + + reference.CreateAttr(URIAttr, "#"+dataId) + + // /SignedInfo/Reference/Transforms + transforms := ctx.createNamespacedElement(reference, TransformsTag) + if enveloped { + envelopedTransform := ctx.createNamespacedElement(transforms, TransformTag) + envelopedTransform.CreateAttr(AlgorithmAttr, EnvelopedSignatureAltorithmId.String()) + } + canonicalizationAlgorithm := ctx.createNamespacedElement(transforms, TransformTag) + canonicalizationAlgorithm.CreateAttr(AlgorithmAttr, string(ctx.Canonicalizer.Algorithm())) + + // /SignedInfo/Reference/DigestMethod + digestMethod := ctx.createNamespacedElement(reference, DigestMethodTag) + digestMethod.CreateAttr(AlgorithmAttr, digestAlgorithmIdentifier) + + // /SignedInfo/Reference/DigestValue + digestValue := ctx.createNamespacedElement(reference, DigestValueTag) + digestValue.SetText(base64.StdEncoding.EncodeToString(digest)) + + return signedInfo, nil +} + +func (ctx *SigningContext) constructSignature(el *etree.Element, enveloped bool) (*etree.Element, error) { + signedInfo, err := ctx.constructSignedInfo(el, enveloped) + if err != nil { + return nil, err + } + + sig := &etree.Element{ + Tag: SignatureTag, + Space: ctx.Prefix, + } + + xmlns := "xmlns" + if ctx.Prefix != "" { + xmlns += ":" + ctx.Prefix + } + + sig.CreateAttr(xmlns, Namespace) + + sig.Child = append(sig.Child, signedInfo) + + // Must propagate down the attributes to the 'SignedInfo' before digesting + for _, attr := range sig.Attr { + signedInfo.CreateAttr(attr.Space+":"+attr.Key, attr.Value) + } + + digest, err := ctx.digest(signedInfo) + if err != nil { + return nil, err + } + + key, cert, err := ctx.KeyStore.GetKeyPair() + if err != nil { + return nil, err + } + + rawSignature, err := rsa.SignPKCS1v15(rand.Reader, key, ctx.Hash, digest) + if err != nil { + return nil, err + } + + signatureValue := ctx.createNamespacedElement(sig, SignatureValueTag) + signatureValue.SetText(base64.StdEncoding.EncodeToString(rawSignature)) + + keyInfo := ctx.createNamespacedElement(sig, KeyInfoTag) + x509Data := ctx.createNamespacedElement(keyInfo, X509DataTag) + x509Certificate := ctx.createNamespacedElement(x509Data, X509CertificateTag) + x509Certificate.SetText(base64.StdEncoding.EncodeToString(cert)) + + 
return sig, nil +} + +func (ctx *SigningContext) createNamespacedElement(el *etree.Element, tag string) *etree.Element { + child := el.CreateElement(tag) + child.Space = ctx.Prefix + return child +} + +func (ctx *SigningContext) SignEnveloped(el *etree.Element) (*etree.Element, error) { + sig, err := ctx.constructSignature(el, true) + if err != nil { + return nil, err + } + + ret := el.Copy() + ret.Child = append(ret.Child, sig) + + return ret, nil +} diff --git a/vendor/github.com/russellhaering/goxmldsig/tls_keystore.go b/vendor/github.com/russellhaering/goxmldsig/tls_keystore.go new file mode 100644 index 00000000..c98f312c --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/tls_keystore.go @@ -0,0 +1,34 @@ +package dsig + +import ( + "crypto/rsa" + "crypto/tls" + "fmt" +) + +//Well-known errors +var ( + ErrNonRSAKey = fmt.Errorf("Private key was not RSA") + ErrMissingCertificates = fmt.Errorf("No public certificates provided") +) + +//TLSCertKeyStore wraps the stdlib tls.Certificate to return its contained key +//and certs. +type TLSCertKeyStore tls.Certificate + +//GetKeyPair implements X509KeyStore using the underlying tls.Certificate +func (d TLSCertKeyStore) GetKeyPair() (*rsa.PrivateKey, []byte, error) { + pk, ok := d.PrivateKey.(*rsa.PrivateKey) + + if !ok { + return nil, nil, ErrNonRSAKey + } + + if len(d.Certificate) < 1 { + return nil, nil, ErrMissingCertificates + } + + crt := d.Certificate[0] + + return pk, crt, nil +} diff --git a/vendor/github.com/russellhaering/goxmldsig/validate.go b/vendor/github.com/russellhaering/goxmldsig/validate.go new file mode 100644 index 00000000..51a3d0b4 --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/validate.go @@ -0,0 +1,397 @@ +package dsig + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "regexp" + + "github.com/beevik/etree" +) + +var uriRegexp = regexp.MustCompile("^#[a-zA-Z_][\\w.-]*$") + +type ValidationContext struct { + CertificateStore X509CertificateStore + IdAttribute string + Clock *Clock +} + +func NewDefaultValidationContext(certificateStore X509CertificateStore) *ValidationContext { + return &ValidationContext{ + CertificateStore: certificateStore, + IdAttribute: DefaultIdAttr, + } +} + +// TODO(russell_h): More flexible namespace support. This might barely work. +func inNamespace(el *etree.Element, ns string) bool { + for _, attr := range el.Attr { + if attr.Value == ns { + if attr.Space == "" && attr.Key == "xmlns" { + return el.Space == "" + } else if attr.Space == "xmlns" { + return el.Space == attr.Key + } + } + } + + return false +} + +func childPath(space, tag string) string { + if space == "" { + return "./" + tag + } else { + return "./" + space + ":" + tag + } +} + +// The RemoveElement method on etree.Element isn't recursive... +func recursivelyRemoveElement(tree, el *etree.Element) bool { + if tree.RemoveChild(el) != nil { + return true + } + + for _, child := range tree.Child { + if childElement, ok := child.(*etree.Element); ok { + if recursivelyRemoveElement(childElement, el) { + return true + } + } + } + + return false +} + +// transform applies the passed set of transforms to the specified root element. +// +// The functionality of transform is currently very limited and purpose-specific. +// +// NOTE(russell_h): Ideally this wouldn't mutate the root passed to it, and would +// instead return a copy. Unfortunately copying the tree makes it difficult to +// correctly locate the signature. 
I'm opting, for now, to simply mutate the root +// parameter. +func (ctx *ValidationContext) transform(root, sig *etree.Element, transforms []*etree.Element) (*etree.Element, Canonicalizer, error) { + if len(transforms) != 2 { + return nil, nil, errors.New("Expected Enveloped and C14N transforms") + } + + var canonicalizer Canonicalizer + + for _, transform := range transforms { + algo := transform.SelectAttr(AlgorithmAttr) + if algo == nil { + return nil, nil, errors.New("Missing Algorithm attribute") + } + + switch AlgorithmID(algo.Value) { + case EnvelopedSignatureAltorithmId: + if !recursivelyRemoveElement(root, sig) { + return nil, nil, errors.New("Error applying canonicalization transform: Signature not found") + } + + case CanonicalXML10ExclusiveAlgorithmId: + var prefixList string + ins := transform.FindElement(childPath("", InclusiveNamespacesTag)) + if ins != nil { + prefixListEl := ins.SelectAttr(PrefixListAttr) + if prefixListEl != nil { + prefixList = prefixListEl.Value + } + } + + canonicalizer = MakeC14N10ExclusiveCanonicalizerWithPrefixList(prefixList) + + case CanonicalXML11AlgorithmId: + canonicalizer = MakeC14N11Canonicalizer() + + default: + return nil, nil, errors.New("Unknown Transform Algorithm: " + algo.Value) + } + } + + if canonicalizer == nil { + return nil, nil, errors.New("Expected canonicalization transform") + } + + return root, canonicalizer, nil +} + +func (ctx *ValidationContext) digest(el *etree.Element, digestAlgorithmId string, canonicalizer Canonicalizer) ([]byte, error) { + data, err := canonicalizer.Canonicalize(el) + if err != nil { + return nil, err + } + + digestAlgorithm, ok := digestAlgorithmsByIdentifier[digestAlgorithmId] + if !ok { + return nil, errors.New("Unknown digest algorithm: " + digestAlgorithmId) + } + + hash := digestAlgorithm.New() + _, err = hash.Write(data) + if err != nil { + return nil, err + } + + return hash.Sum(nil), nil +} + +func (ctx *ValidationContext) verifySignedInfo(signatureElement *etree.Element, canonicalizer Canonicalizer, signatureMethodId string, cert *x509.Certificate, sig []byte) error { + signedInfo := signatureElement.FindElement(childPath(signatureElement.Space, SignedInfoTag)) + if signedInfo == nil { + return errors.New("Missing SignedInfo") + } + + // Any attributes from the 'Signature' element must be pushed down into the 'SignedInfo' element before it is canonicalized + for _, attr := range signatureElement.Attr { + signedInfo.CreateAttr(attr.Space+":"+attr.Key, attr.Value) + } + + // Canonicalize the xml + canonical, err := canonicalizer.Canonicalize(signedInfo) + if err != nil { + return err + } + + signatureAlgorithm, ok := signatureMethodsByIdentifier[signatureMethodId] + if !ok { + return errors.New("Unknown signature method: " + signatureMethodId) + } + + hash := signatureAlgorithm.New() + _, err = hash.Write(canonical) + if err != nil { + return err + } + + hashed := hash.Sum(nil) + + pubKey, ok := cert.PublicKey.(*rsa.PublicKey) + if !ok { + return errors.New("Invalid public key") + } + + // Verify that the private key matching the public key from the cert was what was used to sign the 'SignedInfo' and produce the 'SignatureValue' + err = rsa.VerifyPKCS1v15(pubKey, signatureAlgorithm, hashed[:], sig) + if err != nil { + return err + } + + return nil +} + +func (ctx *ValidationContext) validateSignature(el *etree.Element, cert *x509.Certificate) (*etree.Element, error) { + el = el.Copy() + + // Verify the document minus the signedInfo against the 'DigestValue' + // Find the 'Signature' element + 
sig := el.FindElement(SignatureTag) + + if sig == nil { + return nil, errors.New("Missing Signature") + } + + if !inNamespace(sig, Namespace) { + return nil, errors.New("Signature element is in the wrong namespace") + } + + // Get the 'SignedInfo' element + signedInfo := sig.FindElement(childPath(sig.Space, SignedInfoTag)) + if signedInfo == nil { + return nil, errors.New("Missing SignedInfo") + } + + reference := signedInfo.FindElement(childPath(sig.Space, ReferenceTag)) + if reference == nil { + return nil, errors.New("Missing Reference") + } + + transforms := reference.FindElement(childPath(sig.Space, TransformsTag)) + if transforms == nil { + return nil, errors.New("Missing Transforms") + } + + uri := reference.SelectAttr("URI") + if uri == nil { + // TODO(russell_h): It is permissible to leave this out. We should be + // able to fall back to finding the referenced element some other way. + return nil, errors.New("Reference is missing URI attribute") + } + + if !uriRegexp.MatchString(uri.Value) { + return nil, errors.New("Invalid URI: " + uri.Value) + } + + // Get the element referenced in the 'SignedInfo' + referencedElement := el.FindElement(fmt.Sprintf("//[@%s='%s']", ctx.IdAttribute, uri.Value[1:])) + if referencedElement == nil { + return nil, errors.New("Unable to find referenced element: " + uri.Value) + } + + // Perform all transformations listed in the 'SignedInfo' + // Basically, this means removing the 'SignedInfo' + transformed, canonicalizer, err := ctx.transform(referencedElement, sig, transforms.ChildElements()) + if err != nil { + return nil, err + } + + digestMethod := reference.FindElement(childPath(sig.Space, DigestMethodTag)) + if digestMethod == nil { + return nil, errors.New("Missing DigestMethod") + } + + digestValue := reference.FindElement(childPath(sig.Space, DigestValueTag)) + if digestValue == nil { + return nil, errors.New("Missing DigestValue") + } + + digestAlgorithmAttr := digestMethod.SelectAttr(AlgorithmAttr) + if digestAlgorithmAttr == nil { + return nil, errors.New("Missing DigestMethod Algorithm attribute") + } + + // Digest the transformed XML and compare it to the 'DigestValue' from the 'SignedInfo' + digest, err := ctx.digest(transformed, digestAlgorithmAttr.Value, canonicalizer) + if err != nil { + return nil, err + } + + decodedDigestValue, err := base64.StdEncoding.DecodeString(digestValue.Text()) + if err != nil { + return nil, err + } + + if !bytes.Equal(digest, decodedDigestValue) { + return nil, errors.New("Signature could not be verified") + } + + //Verify the signed info + signatureMethod := signedInfo.FindElement(childPath(sig.Space, SignatureMethodTag)) + if signatureMethod == nil { + return nil, errors.New("Missing SignatureMethod") + } + + signatureMethodAlgorithmAttr := signatureMethod.SelectAttr(AlgorithmAttr) + if digestAlgorithmAttr == nil { + return nil, errors.New("Missing SignatureMethod Algorithm attribute") + } + + // Decode the 'SignatureValue' so we can compare against it + signatureValue := sig.FindElement(childPath(sig.Space, SignatureValueTag)) + if signatureValue == nil { + return nil, errors.New("Missing SignatureValue") + } + + decodedSignature, err := base64.StdEncoding.DecodeString(signatureValue.Text()) + + if err != nil { + return nil, errors.New("Could not decode signature") + } + // Actually verify the 'SignedInfo' was signed by a trusted source + err = ctx.verifySignedInfo(sig, canonicalizer, signatureMethodAlgorithmAttr.Value, cert, decodedSignature) + if err != nil { + return nil, err + } + + return 
transformed, nil +} + +func contains(roots []*x509.Certificate, cert *x509.Certificate) bool { + for _, root := range roots { + if root.Equal(cert) { + return true + } + } + return false +} + +func (ctx *ValidationContext) verifyCertificate(el *etree.Element) (*x509.Certificate, error) { + now := ctx.Clock.Now() + el = el.Copy() + + idAttr := el.SelectAttr(DefaultIdAttr) + if idAttr == nil || idAttr.Value == "" { + return nil, errors.New("Missing ID attribute") + } + + signatureElements := el.FindElements("//" + SignatureTag) + var signatureElement *etree.Element + + // Find the Signature element that references the whole Response element + for _, e := range signatureElements { + e2 := e.Copy() + + signedInfo := e2.FindElement(childPath(e2.Space, SignedInfoTag)) + if signedInfo == nil { + return nil, errors.New("Missing SignedInfo") + } + + referenceElement := signedInfo.FindElement(childPath(e2.Space, ReferenceTag)) + if referenceElement == nil { + return nil, errors.New("Missing Reference Element") + } + + uriAttr := referenceElement.SelectAttr(URIAttr) + if uriAttr == nil || uriAttr.Value == "" { + return nil, errors.New("Missing URI attribute") + } + + if uriAttr.Value[1:] == idAttr.Value { + signatureElement = e + break + } + } + + if signatureElement == nil { + return nil, errors.New("Missing signature referencing the top-level element") + } + + // Get the x509 element from the signature + x509Element := signatureElement.FindElement("//" + childPath(signatureElement.Space, X509CertificateTag)) + if x509Element == nil { + return nil, errors.New("Missing x509 Element") + } + + x509Text := "-----BEGIN CERTIFICATE-----\n" + x509Element.Text() + "\n-----END CERTIFICATE-----" + block, _ := pem.Decode([]byte(x509Text)) + if block == nil { + return nil, errors.New("Failed to parse certificate PEM") + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + + roots, err := ctx.CertificateStore.Certificates() + if err != nil { + return nil, err + } + + // Verify that the certificate is one we trust + if !contains(roots, cert) { + return nil, errors.New("Could not verify certificate against trusted certs") + } + + if now.Before(cert.NotBefore) || now.After(cert.NotAfter) { + return nil, errors.New("Cert is not valid at this time") + } + + return cert, nil +} + +func (ctx *ValidationContext) Validate(el *etree.Element) (*etree.Element, error) { + cert, err := ctx.verifyCertificate(el) + + if err != nil { + return nil, err + } + + return ctx.validateSignature(el, cert) +} diff --git a/vendor/github.com/russellhaering/goxmldsig/xml_constants.go b/vendor/github.com/russellhaering/goxmldsig/xml_constants.go new file mode 100644 index 00000000..5c9cb693 --- /dev/null +++ b/vendor/github.com/russellhaering/goxmldsig/xml_constants.go @@ -0,0 +1,78 @@ +package dsig + +import "crypto" + +const ( + DefaultPrefix = "ds" + Namespace = "http://www.w3.org/2000/09/xmldsig#" +) + +// Tags +const ( + SignatureTag = "Signature" + SignedInfoTag = "SignedInfo" + CanonicalizationMethodTag = "CanonicalizationMethod" + SignatureMethodTag = "SignatureMethod" + ReferenceTag = "Reference" + TransformsTag = "Transforms" + TransformTag = "Transform" + DigestMethodTag = "DigestMethod" + DigestValueTag = "DigestValue" + SignatureValueTag = "SignatureValue" + KeyInfoTag = "KeyInfo" + X509DataTag = "X509Data" + X509CertificateTag = "X509Certificate" + InclusiveNamespacesTag = "InclusiveNamespaces" +) + +const ( + AlgorithmAttr = "Algorithm" + URIAttr = "URI" + DefaultIdAttr = "ID" + 
PrefixListAttr = "PrefixList" +) + +type AlgorithmID string + +func (id AlgorithmID) String() string { + return string(id) +} + +const ( + RSASHA1SignatureMethod = "http://www.w3.org/2000/09/xmldsig#rsa-sha1" + RSASHA256SignatureMethod = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256" + RSASHA512SignatureMethod = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha512" +) + +//Well-known signature algorithms +const ( + // Supported canonicalization algorithms + CanonicalXML10ExclusiveAlgorithmId AlgorithmID = "http://www.w3.org/2001/10/xml-exc-c14n#" + CanonicalXML11AlgorithmId AlgorithmID = "http://www.w3.org/2006/12/xml-c14n11" + + EnvelopedSignatureAltorithmId AlgorithmID = "http://www.w3.org/2000/09/xmldsig#enveloped-signature" +) + +var digestAlgorithmIdentifiers = map[crypto.Hash]string{ + crypto.SHA1: "http://www.w3.org/2000/09/xmldsig#sha1", + crypto.SHA256: "http://www.w3.org/2001/04/xmlenc#sha256", + crypto.SHA512: "http://www.w3.org/2001/04/xmlenc#sha512", +} + +var digestAlgorithmsByIdentifier = map[string]crypto.Hash{} +var signatureMethodsByIdentifier = map[string]crypto.Hash{} + +func init() { + for hash, id := range digestAlgorithmIdentifiers { + digestAlgorithmsByIdentifier[id] = hash + } + for hash, id := range signatureMethodIdentifiers { + signatureMethodsByIdentifier[id] = hash + } +} + +var signatureMethodIdentifiers = map[crypto.Hash]string{ + crypto.SHA1: RSASHA1SignatureMethod, + crypto.SHA256: RSASHA256SignatureMethod, + crypto.SHA512: RSASHA512SignatureMethod, +}
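
For reviewers, a minimal sketch of how the newly vendored packages fit together, using only exported APIs added in this patch (RandomKeyStoreForTest, NewDefaultSigningContext, SignEnveloped, MemoryX509CertificateStore, NewDefaultValidationContext, NewFakeClockAt, Validate, plus the etree element helpers the dsig code itself calls). The element tag, ID value, and main function are illustrative only; this is not part of the patch.

    package main

    import (
        "crypto/x509"
        "fmt"
        "time"

        "github.com/beevik/etree"
        dsig "github.com/russellhaering/goxmldsig"
    )

    func main() {
        // Throwaway RSA key and self-signed certificate (test helper from keystore.go).
        keyStore := dsig.RandomKeyStoreForTest()

        // Build a trivial element carrying the default "ID" attribute that
        // sign.go and validate.go key off of. The struct-literal construction
        // mirrors how sign.go builds elements; tag and ID value are arbitrary.
        el := &etree.Element{Tag: "Example"}
        el.CreateAttr("ID", "_id1234")
        el.CreateElement("Child").SetText("hello")

        // Produce an enveloped signature over the element.
        signingCtx := dsig.NewDefaultSigningContext(keyStore)
        signed, err := signingCtx.SignEnveloped(el)
        if err != nil {
            panic(err)
        }

        // Trust the certificate that was just generated, pin the validation
        // clock (clock.go wraps jonboulle/clockwork), and validate the result.
        _, certDER, _ := keyStore.GetKeyPair()
        cert, err := x509.ParseCertificate(certDER)
        if err != nil {
            panic(err)
        }
        validationCtx := dsig.NewDefaultValidationContext(&dsig.MemoryX509CertificateStore{
            Roots: []*x509.Certificate{cert},
        })
        validationCtx.Clock = dsig.NewFakeClockAt(time.Now())

        validated, err := validationCtx.Validate(signed)
        if err != nil {
            panic(err)
        }
        fmt.Println("validated element:", validated.Tag)
    }

Leaving validationCtx.Clock nil would defer to the real system clock; the fake clock is shown here only to illustrate why clockwork is being vendored alongside goxmldsig.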