author     Dave Henderson <dhenderson@gmail.com>  2018-11-09 23:45:23 -0500
committer  Dave Henderson <dhenderson@gmail.com>  2018-11-09 23:46:55 -0500
commit     27078d2f3b249202b8f799d2cf9e0b3cd80d69ec (patch)
tree       dbd2afd1539294b818b303615e36d850845daf78 /vendor/github.com
parent     06955432208ae0c4caf8f2a436af8ba94c97d0ce (diff)
Vendoring github.com/Shopify/ejson
Signed-off-by: Dave Henderson <dhenderson@gmail.com>
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/Shopify/ejson/LICENSE.txt              22
-rw-r--r--  vendor/github.com/Shopify/ejson/crypto/boxed_message.go  104
-rw-r--r--  vendor/github.com/Shopify/ejson/crypto/crypto.go         160
-rw-r--r--  vendor/github.com/Shopify/ejson/ejson.go                 197
-rw-r--r--  vendor/github.com/Shopify/ejson/json/key.go              62
-rw-r--r--  vendor/github.com/Shopify/ejson/json/pipeline.go         73
-rw-r--r--  vendor/github.com/Shopify/ejson/json/walker.go           133
-rw-r--r--  vendor/github.com/dustin/gojson/LICENSE                  27
-rw-r--r--  vendor/github.com/dustin/gojson/decode.go                1089
-rw-r--r--  vendor/github.com/dustin/gojson/encode.go                1183
-rw-r--r--  vendor/github.com/dustin/gojson/fold.go                  143
-rw-r--r--  vendor/github.com/dustin/gojson/indent.go                137
-rw-r--r--  vendor/github.com/dustin/gojson/scanner.go               629
-rw-r--r--  vendor/github.com/dustin/gojson/stream.go                200
-rw-r--r--  vendor/github.com/dustin/gojson/tags.go                  44
15 files changed, 4203 insertions, 0 deletions
diff --git a/vendor/github.com/Shopify/ejson/LICENSE.txt b/vendor/github.com/Shopify/ejson/LICENSE.txt
new file mode 100644
index 00000000..c7a793ba
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2014 Shopify
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/ejson/crypto/boxed_message.go b/vendor/github.com/Shopify/ejson/crypto/boxed_message.go
new file mode 100644
index 00000000..5dc86980
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/crypto/boxed_message.go
@@ -0,0 +1,104 @@
+package crypto
+
+import (
+ "encoding/base64"
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+var messageParser = regexp.MustCompile("\\AEJ\\[(\\d):([A-Za-z0-9+=/]{44}):([A-Za-z0-9+=/]{32}):(.+)\\]\\z")
+
+// boxedMessage dumps and loads the wire format for encrypted messages. The
+// schema is fairly simple:
+//
+// "EJ["
+// SchemaVersion ( "1" )
+// ":"
+// EncrypterPublic :: base64-encoded 32-byte key
+// ":"
+// Nonce :: base64-encoded 24-byte nonce
+// ":"
+// Box :: base64-encoded encrypted message
+// "]"
+type boxedMessage struct {
+ SchemaVersion int
+ EncrypterPublic [32]byte
+ Nonce [24]byte
+ Box []byte
+}
+
+// IsBoxedMessage tests whether a value is formatted using the boxedMessage
+// format. This can be used to determine whether a string value requires
+// encryption or is already encrypted.
+func IsBoxedMessage(data []byte) bool {
+ return messageParser.Find(data) != nil
+}
+
+// Dump dumps to the wire format
+func (b *boxedMessage) Dump() []byte {
+ pub := base64.StdEncoding.EncodeToString(b.EncrypterPublic[:])
+ nonce := base64.StdEncoding.EncodeToString(b.Nonce[:])
+ box := base64.StdEncoding.EncodeToString(b.Box)
+
+ str := fmt.Sprintf("EJ[%d:%s:%s:%s]",
+ b.SchemaVersion, pub, nonce, box)
+ return []byte(str)
+}
+
+// Load restores from the wire format.
+func (b *boxedMessage) Load(from []byte) error {
+ var ssver, spub, snonce, sbox string
+ var err error
+
+ allMatches := messageParser.FindAllStringSubmatch(string(from), -1) // -> [][]string
+ if len(allMatches) != 1 {
+ return fmt.Errorf("invalid message format")
+ }
+ matches := allMatches[0]
+ if len(matches) != 5 {
+ return fmt.Errorf("invalid message format")
+ }
+
+ ssver = matches[1]
+ spub = matches[2]
+ snonce = matches[3]
+ sbox = matches[4]
+
+ b.SchemaVersion, err = strconv.Atoi(ssver)
+ if err != nil {
+ return err
+ }
+
+ pub, err := base64.StdEncoding.DecodeString(spub)
+ if err != nil {
+ return err
+ }
+ pubBytes := []byte(pub)
+ if len(pubBytes) != 32 {
+ return fmt.Errorf("public key invalid")
+ }
+ var public [32]byte
+ copy(public[:], pubBytes[0:32])
+ b.EncrypterPublic = public
+
+ nnc, err := base64.StdEncoding.DecodeString(snonce)
+ if err != nil {
+ return err
+ }
+ nonceBytes := []byte(nnc)
+ if len(nonceBytes) != 24 {
+ return fmt.Errorf("nonce invalid")
+ }
+ var nonce [24]byte
+ copy(nonce[:], nonceBytes[0:24])
+ b.Nonce = nonce
+
+ box, err := base64.StdEncoding.DecodeString(sbox)
+ if err != nil {
+ return err
+ }
+ b.Box = []byte(box)
+
+ return nil
+}
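
The wire format above round-trips through Dump and Load. A minimal sketch, written as if it lived inside package crypto (boxedMessage is unexported) and using placeholder field values; it is not part of the vendored diff:

func exampleRoundTrip() {
	in := boxedMessage{
		SchemaVersion:   1,
		EncrypterPublic: [32]byte{1, 2, 3}, // placeholder key bytes
		Nonce:           [24]byte{4, 5, 6}, // placeholder nonce bytes
		Box:             []byte("ciphertext"),
	}

	wire := in.Dump()        // EJ[1:<b64 pub>:<b64 nonce>:<b64 box>]
	_ = IsBoxedMessage(wire) // true: the value matches the EJ[...] pattern

	var out boxedMessage
	if err := out.Load(wire); err != nil {
		panic(err)
	}
	_ = out.Box // []byte("ciphertext") again
}
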
diff --git a/vendor/github.com/Shopify/ejson/crypto/crypto.go b/vendor/github.com/Shopify/ejson/crypto/crypto.go
new file mode 100644
index 00000000..63de1c6a
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/crypto/crypto.go
@@ -0,0 +1,160 @@
+// Package crypto implements a simple convenience wrapper around
+// golang.org/x/crypto/nacl/box. It ultimately models a situation where you
+// don't care about authenticating the encryptor, so the nonce and encryption
+// public key are prepended to the encrypted message.
+//
+// Shared key precomputation is used when encrypting but not when decrypting.
+// This is not an inherent limitation, but it would complicate the
+// implementation a little bit to do precomputation during decryption also.
+// If performance becomes an issue (highly unlikely), it's completely feasible
+// to add.
+package crypto
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+
+ "golang.org/x/crypto/nacl/box"
+)
+
+// Keypair models a Curve25519 keypair. To generate a new Keypair, declare an
+// empty one and call Generate() on it.
+type Keypair struct {
+ Public [32]byte
+ Private [32]byte
+}
+
+// Encrypter is generated from a keypair (typically a newly-generated ephemeral
+// keypair, used only for this session) with the public key of an authorized
+// decrypter. It is then capable of encrypting messages to that decrypter's
+// private key. An instance should normally be obtained only by calling
+// Encrypter() on a Keypair instance.
+type Encrypter struct {
+ Keypair *Keypair
+ PeerPublic [32]byte
+ SharedKey [32]byte
+}
+
+// Decrypter is generated from a keypair (a fixed keypair, generally, whose
+// private key is stored in configuration management or otherwise), and used to
+// decrypt messages. It should normally be obtained by calling Decrypter() on a
+// Keypair instance.
+type Decrypter struct {
+ Keypair *Keypair
+}
+
+// ErrDecryptionFailed means the decryption didn't work. This normally
+// indicates that the message was corrupted or the wrong keypair was used.
+var ErrDecryptionFailed = errors.New("couldn't decrypt message")
+
+// Generate generates a new Curve25519 keypair into a (presumably) empty Keypair
+// structure.
+func (k *Keypair) Generate() (err error) {
+ var pub, priv *[32]byte
+ pub, priv, err = box.GenerateKey(rand.Reader)
+ if err != nil {
+ return
+ }
+ k.Public = *pub
+ k.Private = *priv
+ return
+}
+
+// PublicString returns the public key in the canonical hex-encoded printable form.
+func (k *Keypair) PublicString() string {
+ return fmt.Sprintf("%x", k.Public)
+}
+
+// PrivateString returns the private key in the canonical hex-encoded printable form.
+func (k *Keypair) PrivateString() string {
+ return fmt.Sprintf("%x", k.Private)
+}
+
+// Encrypter returns an Encrypter instance, given a public key, to encrypt
+// messages to the paired, unknown, private key.
+func (k *Keypair) Encrypter(peerPublic [32]byte) *Encrypter {
+ return NewEncrypter(k, peerPublic)
+}
+
+// Decrypter returns a Decrypter instance, used to decrypt properly formatted
+// messages from arbitrary encrypters.
+func (k *Keypair) Decrypter() *Decrypter {
+ return &Decrypter{Keypair: k}
+}
+
+// NewEncrypter instantiates an Encrypter after pre-computing the shared key for
+// the owned keypair and the given decrypter public key.
+func NewEncrypter(kp *Keypair, peerPublic [32]byte) *Encrypter {
+ var shared [32]byte
+ box.Precompute(&shared, &peerPublic, &kp.Private)
+ return &Encrypter{
+ Keypair: kp,
+ PeerPublic: peerPublic,
+ SharedKey: shared,
+ }
+}
+
+func (e *Encrypter) encrypt(message []byte) (*boxedMessage, error) {
+ nonce, err := genNonce()
+ if err != nil {
+ return nil, err
+ }
+
+ out := box.SealAfterPrecomputation(nil, []byte(message), &nonce, &e.SharedKey)
+
+ return &boxedMessage{
+ SchemaVersion: 1,
+ EncrypterPublic: e.Keypair.Public,
+ Nonce: nonce,
+ Box: out,
+ }, nil
+}
+
+// Encrypt takes a plaintext message and returns an encrypted message. Unlike
+// raw nacl/box encryption, this message is decryptable without passing the
+// nonce or public key out-of-band, as it includes both. This is not less
+// secure; it just doesn't allow for authorizing the encryptor. That's fine,
+// since authorization isn't a desired property of this particular cryptosystem.
+func (e *Encrypter) Encrypt(message []byte) ([]byte, error) {
+ if IsBoxedMessage(message) {
+ return message, nil
+ }
+ boxedMessage, err := e.encrypt(message)
+ if err != nil {
+ return nil, err
+ }
+ return boxedMessage.Dump(), nil
+}
+
+// Decrypt is passed an encrypted message of a particular format (the format
+// generated by (*Encrypter)Encrypt()), which includes the nonce and public key
+// used to create the ciphertext. It returns the decrypted plaintext. Note that,
+// unlike with encryption, shared-key precomputation is not used for decryption.
+func (d *Decrypter) Decrypt(message []byte) ([]byte, error) {
+ var bm boxedMessage
+ if err := bm.Load(message); err != nil {
+ return nil, err
+ }
+ return d.decrypt(&bm)
+}
+
+func (d *Decrypter) decrypt(bm *boxedMessage) ([]byte, error) {
+ plaintext, ok := box.Open(nil, bm.Box, &bm.Nonce, &bm.EncrypterPublic, &d.Keypair.Private)
+ if !ok {
+ return nil, ErrDecryptionFailed
+ }
+ return plaintext, nil
+}
+
+func genNonce() (nonce [24]byte, err error) {
+ var n int
+ n, err = rand.Read(nonce[0:24])
+ if err != nil {
+ return
+ }
+ if n != 24 {
+ err = fmt.Errorf("not enough bytes returned from rand.Reader")
+ }
+ return
+}
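
The intended flow of this package is: the decrypting party holds a long-lived keypair, the encrypting party generates an ephemeral keypair per session and encrypts toward the decrypter's public key. A minimal sketch using only the exported API above (both keypairs are generated in memory here rather than loaded from a keydir):

package main

import (
	"fmt"

	"github.com/Shopify/ejson/crypto"
)

func main() {
	// Long-lived keypair of the party that will decrypt.
	var peer crypto.Keypair
	if err := peer.Generate(); err != nil {
		panic(err)
	}

	// Ephemeral keypair used only for this encryption session.
	var mine crypto.Keypair
	if err := mine.Generate(); err != nil {
		panic(err)
	}

	enc := mine.Encrypter(peer.Public)
	boxed, err := enc.Encrypt([]byte("secret"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(boxed)) // EJ[1:...:...:...]

	plain, err := peer.Decrypter().Decrypt(boxed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // secret
}
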
diff --git a/vendor/github.com/Shopify/ejson/ejson.go b/vendor/github.com/Shopify/ejson/ejson.go
new file mode 100644
index 00000000..b30bca37
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/ejson.go
@@ -0,0 +1,197 @@
+// Package ejson implements the primary interface to interact with ejson
+// documents and keypairs. The CLI implemented by cmd/ejson is a fairly thin
+// wrapper around this package.
+package ejson
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/Shopify/ejson/crypto"
+ "github.com/Shopify/ejson/json"
+)
+
+// GenerateKeypair is used to create a new ejson keypair. It returns the keys as
+// hex-encoded strings, suitable for printing to the screen. hex.DecodeString
+// can be used to load the true representation if necessary.
+func GenerateKeypair() (pub string, priv string, err error) {
+ var kp crypto.Keypair
+ if err := kp.Generate(); err != nil {
+ return "", "", err
+ }
+ return kp.PublicString(), kp.PrivateString(), nil
+}
+
+// Encrypt reads all contents from 'in', extracts the pubkey
+// and performs the requested encryption operation, writing
+// the resulting data to 'out'.
+// Returns the number of bytes written and any error that might have
+// occurred.
+func Encrypt(in io.Reader, out io.Writer) (int, error) {
+ data, err := ioutil.ReadAll(in)
+ if err != nil {
+ return -1, err
+ }
+
+ var myKP crypto.Keypair
+ if err = myKP.Generate(); err != nil {
+ return -1, err
+ }
+
+ pubkey, err := json.ExtractPublicKey(data)
+ if err != nil {
+ return -1, err
+ }
+
+ encrypter := myKP.Encrypter(pubkey)
+ walker := json.Walker{
+ Action: encrypter.Encrypt,
+ }
+
+ newdata, err := walker.Walk(data)
+ if err != nil {
+ return -1, err
+ }
+
+ return out.Write(newdata)
+}
+
+// EncryptFileInPlace takes a path to a file on disk, which must be a valid EJSON file
+// (see README.md for more on what constitutes a valid EJSON file). Any
+// encryptable-but-unencrypted fields in the file will be encrypted using the
+// public key embedded in the file, and the resulting text will be written over
+// the file present on disk.
+func EncryptFileInPlace(filePath string) (int, error) {
+ var fileMode os.FileMode
+ if stat, err := os.Stat(filePath); err == nil {
+ fileMode = stat.Mode()
+ } else {
+ return -1, err
+ }
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return -1, err
+ }
+
+ var outBuffer bytes.Buffer
+
+ written, err := Encrypt(file, &outBuffer)
+ if err != nil {
+ return -1, err
+ }
+
+ if err = file.Close(); err != nil {
+ return -1, err
+ }
+
+ if err := ioutil.WriteFile(filePath, outBuffer.Bytes(), fileMode); err != nil {
+ return -1, err
+ }
+
+ return written, nil
+}
+
+// Decrypt reads an ejson stream from 'in' and writes the decrypted data to 'out'.
+// The private key is expected to be under 'keydir'.
+// Returns error upon failure, or nil on success.
+func Decrypt(in io.Reader, out io.Writer, keydir string, userSuppliedPrivateKey string) error {
+ data, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ pubkey, err := json.ExtractPublicKey(data)
+ if err != nil {
+ return err
+ }
+
+ privkey, err := findPrivateKey(pubkey, keydir, userSuppliedPrivateKey)
+ if err != nil {
+ return err
+ }
+
+ myKP := crypto.Keypair{
+ Public: pubkey,
+ Private: privkey,
+ }
+
+ decrypter := myKP.Decrypter()
+ walker := json.Walker{
+ Action: decrypter.Decrypt,
+ }
+
+ newdata, err := walker.Walk(data)
+ if err != nil {
+ return err
+ }
+
+ _, err = out.Write(newdata)
+
+ return err
+}
+
+// DecryptFile takes a path to an encrypted EJSON file and returns the data
+// decrypted. The public key used to encrypt the values is embedded in the
+// referenced document, and the matching private key is searched for in keydir.
+// There must exist a file in keydir whose name is the public key from the
+// EJSON document, and whose contents are the corresponding private key. See
+// README.md for more details on this.
+func DecryptFile(filePath, keydir string, userSuppliedPrivateKey string) ([]byte, error) {
+ if _, err := os.Stat(filePath); err != nil {
+ return nil, err
+ }
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ var outBuffer bytes.Buffer
+
+ err = Decrypt(file, &outBuffer, keydir, userSuppliedPrivateKey)
+
+ return outBuffer.Bytes(), err
+}
+
+func readPrivateKeyFromDisk(pubkey [32]byte, keydir string) (privkey string, err error) {
+ keyFile := fmt.Sprintf("%s/%x", keydir, pubkey)
+ var fileContents []byte
+ fileContents, err = ioutil.ReadFile(keyFile)
+ if err != nil {
+ err = fmt.Errorf("couldn't read key file (%s)", err.Error())
+ return
+ }
+ privkey = string(fileContents)
+ return
+}
+
+func findPrivateKey(pubkey [32]byte, keydir string, userSuppliedPrivateKey string) (privkey [32]byte, err error) {
+ var privkeyString string
+ if userSuppliedPrivateKey != "" {
+ privkeyString = userSuppliedPrivateKey
+ } else {
+ privkeyString, err = readPrivateKeyFromDisk(pubkey, keydir)
+ if err != nil {
+ return privkey, err
+ }
+ }
+
+ privkeyBytes, err := hex.DecodeString(strings.TrimSpace(privkeyString))
+ if err != nil {
+ return
+ }
+
+ if len(privkeyBytes) != 32 {
+ err = fmt.Errorf("invalid private key")
+ return
+ }
+ copy(privkey[:], privkeyBytes)
+ return
+}
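
A minimal end-to-end sketch of the package-level API: generate a keypair, write an EJSON document and a matching key file, encrypt the document in place, then decrypt it again. The /tmp paths and the document contents are hypothetical, chosen only to make the sketch self-contained:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/Shopify/ejson"
)

func main() {
	pub, priv, err := ejson.GenerateKeypair()
	if err != nil {
		panic(err)
	}

	// Hypothetical locations; the keydir must contain a file named after the
	// hex public key whose contents are the hex private key.
	keydir := "/tmp/ejson-keys"
	docPath := "/tmp/secrets.ejson"

	doc := fmt.Sprintf(`{"_public_key": %q, "password": "hunter2"}`, pub)
	if err := os.MkdirAll(keydir, 0700); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(docPath, []byte(doc), 0644); err != nil {
		panic(err)
	}
	if err := ioutil.WriteFile(keydir+"/"+pub, []byte(priv), 0600); err != nil {
		panic(err)
	}

	// Encrypt every eligible value in place, then decrypt the file again.
	if _, err := ejson.EncryptFileInPlace(docPath); err != nil {
		panic(err)
	}
	plain, err := ejson.DecryptFile(docPath, keydir, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plain)) // "password" is back to "hunter2"
}
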
diff --git a/vendor/github.com/Shopify/ejson/json/key.go b/vendor/github.com/Shopify/ejson/json/key.go
new file mode 100644
index 00000000..66177c88
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/json/key.go
@@ -0,0 +1,62 @@
+package json
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+)
+
+const (
+ // PublicKeyField is the key name at which the public key should be
+ // stored in an EJSON document.
+ PublicKeyField = "_public_key"
+)
+
+// ErrPublicKeyMissing indicates that the PublicKeyField key was not found
+// at the top level of the JSON document provided.
+var ErrPublicKeyMissing = errors.New("public key not present in EJSON file")
+
+// ErrPublicKeyInvalid means that the PublicKeyField key was found, but the
+// value could not be parsed into a valid key.
+var ErrPublicKeyInvalid = errors.New("public key has invalid format")
+
+// ExtractPublicKey finds the _public_key value in an EJSON document and
+// parses it into a key usable with the crypto library.
+func ExtractPublicKey(data []byte) (key [32]byte, err error) {
+ var (
+ obj map[string]interface{}
+ ks string
+ ok bool
+ bs []byte
+ )
+ err = json.Unmarshal(data, &obj)
+ if err != nil {
+ return
+ }
+ k, ok := obj[PublicKeyField]
+ if !ok {
+ goto missing
+ }
+ ks, ok = k.(string)
+ if !ok {
+ goto invalid
+ }
+ if len(ks) != 64 {
+ goto invalid
+ }
+ bs, err = hex.DecodeString(ks)
+ if err != nil {
+ goto invalid
+ }
+ if len(bs) != 32 {
+ goto invalid
+ }
+ copy(key[:], bs)
+ return
+missing:
+ err = ErrPublicKeyMissing
+ return
+invalid:
+ err = ErrPublicKeyInvalid
+ return
+}
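
ExtractPublicKey only needs the top-level _public_key field to hold a 64-character hex string. A small sketch with an obviously fake placeholder key, not taken from the vendored code:

package main

import (
	"fmt"
	"strings"

	ejsonjson "github.com/Shopify/ejson/json"
)

func main() {
	// Placeholder 32-byte key, hex-encoded (64 characters); not a real key.
	pub := strings.Repeat("ab", 32)
	doc := []byte(fmt.Sprintf(`{"_public_key": %q, "secret": "..."}`, pub))

	key, err := ejsonjson.ExtractPublicKey(doc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key) // the same 64 hex characters, now as a [32]byte
}
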
diff --git a/vendor/github.com/Shopify/ejson/json/pipeline.go b/vendor/github.com/Shopify/ejson/json/pipeline.go
new file mode 100644
index 00000000..4527a18d
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/json/pipeline.go
@@ -0,0 +1,73 @@
+package json
+
+type pipeline struct {
+ final []byte
+ err error
+ pendingBytes []byte
+ queue chan queueItem
+ done chan struct{}
+}
+
+type queueItem struct {
+ pr <-chan promiseResult
+ bs []byte
+ term bool
+}
+
+type promiseResult struct {
+ bytes []byte
+ err error
+}
+
+func newPipeline() *pipeline {
+ pl := &pipeline{
+ queue: make(chan queueItem, 512),
+ done: make(chan struct{}),
+ }
+ go pl.run()
+ return pl
+}
+
+func (p *pipeline) run() {
+ for qi := range p.queue {
+ if qi.term {
+ close(p.done)
+ } else if qi.pr != nil {
+ res := <-qi.pr
+ if res.err != nil {
+ p.err = res.err
+ }
+ p.final = append(p.final, res.bytes...)
+ } else {
+ p.final = append(p.final, qi.bs...)
+ }
+ }
+}
+
+func (p *pipeline) appendBytes(bs []byte) {
+ p.pendingBytes = append(p.pendingBytes, bs...)
+}
+
+func (p *pipeline) appendByte(b byte) {
+ p.pendingBytes = append(p.pendingBytes, b)
+}
+
+func (p *pipeline) appendPromise(ch <-chan promiseResult) {
+ p.flushPendingBytes()
+ p.queue <- queueItem{pr: ch}
+}
+
+func (p *pipeline) flush() ([]byte, error) {
+ p.flushPendingBytes()
+ p.queue <- queueItem{term: true}
+ <-p.done
+ close(p.queue)
+ return p.final, p.err
+}
+
+func (p *pipeline) flushPendingBytes() {
+ if len(p.pendingBytes) > 0 {
+ p.queue <- queueItem{bs: p.pendingBytes}
+ p.pendingBytes = nil
+ }
+}
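
The pipeline exists so the walker can run its Action on each value concurrently while still emitting output bytes in their original order: plain bytes are buffered and flushed into the queue, and promised results are spliced in at the position where they were queued once they resolve. A minimal sketch, written as if inside this package since pipeline is unexported:

// examplePipeline shows plain bytes and a promised (asynchronously computed)
// chunk being emitted in submission order.
func examplePipeline() ([]byte, error) {
	p := newPipeline()
	p.appendBytes([]byte(`{"a":`))

	res := make(chan promiseResult)
	go func() {
		// Stand-in for a slow Action such as encryption.
		res <- promiseResult{bytes: []byte(`"ENCRYPTED"`)}
		close(res)
	}()
	p.appendPromise(res)

	p.appendByte('}')
	return p.flush() // {"a":"ENCRYPTED"}, nil
}
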
diff --git a/vendor/github.com/Shopify/ejson/json/walker.go b/vendor/github.com/Shopify/ejson/json/walker.go
new file mode 100644
index 00000000..b377f67e
--- /dev/null
+++ b/vendor/github.com/Shopify/ejson/json/walker.go
@@ -0,0 +1,133 @@
+// Package json implements functions to load the Public key data from an EJSON
+// file, and to walk that data file, encrypting or decrypting any keys which,
+// according to the specification, are marked as encryptable (see README.md for
+// details).
+//
+// It may be non-obvious why this is implemented using a scanner and not by
+// loading the structure, manipulating it, then dumping it. Since Go's maps are
+// explicitly randomized, that would cause the entire structure to be randomized
+// each time the file was written, rendering diffs over time essentially
+// useless.
+package json
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/dustin/gojson"
+)
+
+// Walker takes an Action, which will run on fields selected by EJSON for
+// encryption, and provides a Walk method, which iterates on all the fields in
+// a JSON text, running the Action on all selected fields. Fields are selected
+// if they are a Value (not a Key) of type string, and their referencing Key did
+// *not* begin with an Underscore. Note that this
+// underscore-to-disable-encryption syntax does not propagate down the hierarchy
+// to children.
+// That is:
+// * In {"_a": "b"}, Action will not be run at all.
+// * In {"a": "b"}, Action will be run with "b", and the return value will
+// replace "b".
+// * In {"k": {"a": ["b"]}}, Action will run on "b".
+// * In {"_k": {"a": ["b"]}}, Action will run on "b".
+// * In {"k": {"_a": ["b"]}}, Action will not run.
+type Walker struct {
+ Action func([]byte) ([]byte, error)
+}
+
+// Walk walks an entire JSON structure, running the ejsonWalker.Action on each
+// actionable node. A node is actionable if it's a string *value*, and its
+// referencing key doesn't begin with an underscore. For each actionable node,
+// the contents are replaced with the result of Action. Everything else is
+// unchanged.
+func (ew *Walker) Walk(data []byte) ([]byte, error) {
+ var (
+ inLiteral bool
+ literalStart int
+ isComment bool
+ scanner json.Scanner
+ )
+ scanner.Reset()
+ pline := newPipeline()
+ for i, c := range data {
+ switch v := scanner.Step(&scanner, int(c)); v {
+ case json.ScanContinue, json.ScanSkipSpace:
+ // Uninteresting byte. Just advance to next.
+ case json.ScanBeginLiteral:
+ inLiteral = true
+ literalStart = i
+ case json.ScanObjectKey:
+ // The literal we just finished reading was a Key. Decide whether it was
+ // encryptable by checking whether the first byte after the '"' was an
+ // underscore, then append it verbatim to the output buffer.
+ inLiteral = false
+ isComment = data[literalStart+1] == '_'
+ pline.appendBytes(data[literalStart:i])
+ case json.ScanError:
+ // Some error happened; just bail.
+ pline.flush()
+ return nil, fmt.Errorf("invalid json")
+ case json.ScanEnd:
+ // We successfully hit the end of input.
+ pline.appendByte(c)
+ return pline.flush()
+ default:
+ if inLiteral {
+ inLiteral = false
+ // We finished reading some literal, and it wasn't a Key, meaning it's
+ // potentially encryptable. If it was a string, and the most recent Key
+ // encountered didn't begin with a '_', we are to encrypt it. In any
+ // other case, we append it verbatim to the output buffer.
+ if isComment || data[literalStart] != '"' {
+ pline.appendBytes(data[literalStart:i])
+ } else {
+ res := make(chan promiseResult)
+ go func(subData []byte) {
+ actioned, err := ew.runAction(subData)
+ res <- promiseResult{actioned, err}
+ close(res)
+ }(data[literalStart:i])
+ pline.appendPromise(res)
+ }
+ }
+ }
+ if !inLiteral {
+ // If we're in a literal, we save up bytes because we may have to encrypt
+ // them. Outside of a literal, we simply append each byte as we read it.
+ pline.appendByte(c)
+ }
+ }
+ if scanner.EOF() == json.ScanError {
+ // Unexpected EOF => malformed JSON
+ pline.flush()
+ return nil, fmt.Errorf("invalid json")
+ }
+ return pline.flush()
+}
+
+func (ew *Walker) runAction(data []byte) ([]byte, error) {
+ trimmed := bytes.TrimSpace(data)
+ unquoted, ok := json.UnquoteBytes(trimmed)
+ if !ok {
+ return nil, fmt.Errorf("invalid json")
+ }
+ done, err := ew.Action(unquoted)
+ if err != nil {
+ return nil, err
+ }
+ quoted, err := quoteBytes(done)
+ if err != nil {
+ return nil, err
+ }
+ return append(quoted, data[len(trimmed):]...), nil
+}
+
+// probably a better way to do this, but...
+func quoteBytes(in []byte) ([]byte, error) {
+ data := []string{string(in)}
+ out, err := json.Marshal(data)
+ if err != nil {
+ return nil, err
+ }
+ return out[1 : len(out)-1], nil
+}
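
A small usage sketch of the exported Walker: the toy Action below just upper-cases values, which makes it easy to see that only string values whose key does not start with an underscore are touched, and that the document's original formatting is preserved. The input document is made up for illustration:

package main

import (
	"bytes"
	"fmt"

	ejsonjson "github.com/Shopify/ejson/json"
)

func main() {
	w := ejsonjson.Walker{
		// Toy Action: upper-case each selected value instead of encrypting it.
		Action: func(v []byte) ([]byte, error) {
			return bytes.ToUpper(v), nil
		},
	}

	in := []byte(`{"_public_key": "abc", "user": "alice", "_comment": "left alone"}`)
	out, err := w.Walk(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"_public_key": "abc", "user": "ALICE", "_comment": "left alone"}
}
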
diff --git a/vendor/github.com/dustin/gojson/LICENSE b/vendor/github.com/dustin/gojson/LICENSE
new file mode 100644
index 00000000..74487567
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/dustin/gojson/decode.go b/vendor/github.com/dustin/gojson/decode.go
new file mode 100644
index 00000000..cf467d57
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/decode.go
@@ -0,0 +1,1089 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshalling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.Reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan Scanner
+ nextscan Scanner // for calls to NextValue
+ savedError error
+ useNumber bool
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := NextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+ // invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.Step(&d.scan, '}')
+ } else {
+ d.scan.Step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.EOF()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := int(d.data[d.off])
+ d.off++
+ newOp = d.scan.Step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := NextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.Step = stateBeginValue
+ }
+ d.scan.Step(&d.scan, '"')
+ d.scan.Step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.Step(&d.scan, ':')
+ d.scan.Step(&d.scan, '"')
+ d.scan.Step(&d.scan, '"')
+ d.scan.Step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(ScanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case ScanBeginArray:
+ d.array(v)
+
+ case ScanBeginObject:
+ d.object(v)
+
+ case ScanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(ScanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case ScanBeginArray:
+ d.array(reflect.Value{})
+
+ case ScanBeginObject:
+ d.object(reflect.Value{})
+
+ case ScanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type()})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type()})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(ScanSkipSpace)
+ if op == ScanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(ScanSkipSpace)
+ if op == ScanEndArray {
+ break
+ }
+ if op != ScanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(ScanSkipSpace)
+ if op == ScanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != ScanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(ScanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := UnquoteBytes(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == ScanSkipSpace {
+ op = d.scanWhile(ScanSkipSpace)
+ }
+ if op != ScanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value %q into %v", item, v.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(ScanSkipSpace)
+ if op == ScanEndObject {
+ break
+ }
+ if op != ScanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(ScanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ //Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ }
+ }
+ s, ok := UnquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+ }
+ }
+
+ case '"': // string
+ s, ok := UnquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ case reflect.Slice:
+ if v.Type() != byteSliceType {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.Set(reflect.ValueOf(b[0:n]))
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type()})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type()})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(ScanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case ScanBeginArray:
+ return d.arrayInterface()
+ case ScanBeginObject:
+ return d.objectInterface()
+ case ScanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(ScanSkipSpace)
+ if op == ScanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(ScanSkipSpace)
+ if op == ScanEndArray {
+ break
+ }
+ if op != ScanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(ScanSkipSpace)
+ if op == ScanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != ScanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(ScanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Read : before value.
+ if op == ScanSkipSpace {
+ op = d.scanWhile(ScanSkipSpace)
+ }
+ if op != ScanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(ScanSkipSpace)
+ if op == ScanEndObject {
+ break
+ }
+ if op != ScanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(ScanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = UnquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func UnquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ s = bytes.TrimSpace(s)
+
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ }
+
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
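
This fork keeps the standard-library Unmarshal semantics but exports helpers such as UnquoteBytes and the Scanner that ejson's walker builds on. A minimal sketch of plain decoding plus the exported unquoting helper; the Config type and the input data are made up for illustration:

package main

import (
	"fmt"

	json "github.com/dustin/gojson"
)

// Config is a hypothetical target type for illustration.
type Config struct {
	Name  string `json:"name"`
	Port  int    `json:"port"`
	Debug bool   `json:"debug"`
}

func main() {
	data := []byte(`{"name": "api", "port": 8080, "debug": true}`)

	var c Config
	if err := json.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:api Port:8080 Debug:true}

	// The fork also exports the unquoting helper used by ejson's walker.
	s, ok := json.UnquoteBytes([]byte(`"hello\nworld"`))
	fmt.Println(ok, string(s)) // true, then "hello" and "world" on separate lines
}
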
diff --git a/vendor/github.com/dustin/gojson/encode.go b/vendor/github.com/dustin/gojson/encode.go
new file mode 100644
index 00000000..fca2a098
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/encode.go
@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// http://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// or integer types. This extra level of encoding is sometimes used when
+// communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the object keys are used directly
+// as map keys.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
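+
+// Illustrative sketch, not part of the upstream gojson source: a hypothetical
+// type exercising the tag options documented above ("myName", "omitempty" and
+// "string"). It compiles against this package and needs no extra imports.
+func exampleMarshalTagsSketch() (string, error) {
+ type record struct {
+ Name string `json:"myName"`
+ Note string `json:"note,omitempty"` // dropped when empty
+ Count int64 `json:",string"` // encoded as a quoted number
+ }
+ b, err := Marshal(record{Name: "a", Count: 7})
+ if err != nil {
+ return "", err
+ }
+ // b is {"myName":"a","Count":"7"}: Note is omitted, Count is quoted.
+ return string(b), nil
+}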
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
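+
+// Illustrative sketch, not part of the upstream source: the effect of
+// HTMLEscape on output destined for a <script> tag. The function name is
+// hypothetical; it only uses the bytes package already imported above.
+func exampleHTMLEscapeSketch() string {
+ var out bytes.Buffer
+ HTMLEscape(&out, []byte(`{"html":"<b>hi</b> & bye"}`))
+ // out now holds {"html":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}
+ return out.String()
+}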
+
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ return e
+ }
+ return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if s, ok := r.(string); ok {
+ panic(s)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+var byteSliceType = reflect.TypeOf([]byte(nil))
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value) {
+ valueEncoder(v)(e, v, false)
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
+
+var encoderCache struct {
+ sync.RWMutex
+ m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ encoderCache.RLock()
+ f := encoderCache.m[t]
+ encoderCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // To deal with recursive types, populate the map with an
+ // indirect func before we build it. This type waits on the
+ // real func (f) to be ready and then calls it. This indirect
+ // func is only used for recursive types.
+ encoderCache.Lock()
+ if encoderCache.m == nil {
+ encoderCache.m = make(map[reflect.Type]encoderFunc)
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
+ wg.Wait()
+ f(e, v, quoted)
+ }
+ encoderCache.Unlock()
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = newTypeEncoder(t, true)
+ wg.Done()
+ encoderCache.Lock()
+ encoderCache.m[t] = f
+ encoderCache.Unlock()
+ return f
+}
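+
+// Illustrative sketch, not upstream code: the wait-group indirection above is
+// what lets a self-referential type such as this hypothetical linked list be
+// encoded - building its encoder re-enters typeEncoder for the same type.
+func exampleRecursiveTypeSketch() ([]byte, error) {
+ type node struct {
+ Value int `json:"value"`
+ Next *node `json:"next,omitempty"`
+ }
+ // Yields {"value":1,"next":{"value":2}}.
+ return Marshal(node{Value: 1, Next: &node{Value: 2}})
+}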
+
+var (
+ marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+ textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err == nil {
+ _, err = e.stringBytes(b)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err == nil {
+ _, err = e.stringBytes(b)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ f := v.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+ }
+ b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Type() == numberType {
+ numStr := v.String()
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ e.WriteString(numStr)
+ return
+ }
+ if quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb))
+ } else {
+ e.string(v.String())
+ }
+}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem())
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields []field
+ fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteByte('{')
+ first := true
+ for i, f := range se.fields {
+ fv := fieldByIndex(v, f.index)
+ if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(f.name)
+ e.WriteByte(':')
+ se.fieldEncs[i](e, fv, f.quoted)
+ }
+ e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ fields := cachedTypeFields(t)
+ se := &structEncoder{
+ fields: fields,
+ fieldEncs: make([]encoderFunc, len(fields)),
+ }
+ for i, f := range fields {
+ se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+ }
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.WriteByte('{')
+ var sv stringValues = v.MapKeys()
+ sort.Sort(sv)
+ for i, k := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(k.String())
+ e.WriteByte(':')
+ me.elemEnc(e, v.MapIndex(k), false)
+ }
+ e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ if t.Key().Kind() != reflect.String {
+ return unsupportedTypeEncoder
+ }
+ me := &mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
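+
+// Illustrative sketch, not upstream code: []byte values take the base64 path
+// above, while other slice types encode as JSON arrays.
+func exampleByteSliceSketch() (string, error) {
+ b, err := Marshal(map[string][]byte{"raw": []byte("hi")})
+ if err != nil {
+ return "", err
+ }
+ // b is {"raw":"aGk="} - "hi" base64-encoded rather than [104,105].
+ return string(b), nil
+}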
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ se.arrayEnc(e, v, false)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteSlice
+ }
+ enc := &sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), false)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := &arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ pe.elemEnc(e, v.Elem(), quoted)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := &ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, quoted)
+ } else {
+ ce.elseEnc(e, v, quoted)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+// stringValues is a slice of reflect.Value holding values of string kind.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string) (int, error) {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as <, > and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0, nil
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte) (int, error) {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as <, >, and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0, nil
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
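+
+// Illustrative sketch, not upstream code: two embedded structs that both carry
+// an untagged field named ID cancel each other out, while tagging one of them
+// makes it dominant, as dominantField above decides. The type names are
+// hypothetical.
+func exampleEmbeddedConflictSketch() (string, string, error) {
+ type A struct{ ID int }
+ type B struct{ ID int }
+ type C struct {
+ ID int `json:"ID"`
+ }
+ type untagged struct {
+ A
+ B
+ }
+ type tagged struct {
+ A
+ C
+ }
+ u, err := Marshal(untagged{})
+ if err != nil {
+ return "", "", err
+ }
+ t, err := Marshal(tagged{})
+ if err != nil {
+ return "", "", err
+ }
+ // u is {} (the conflicting ID fields are dropped);
+ // t is {"ID":0} (the tagged field in C wins over the untagged one in A).
+ return string(u), string(t), nil
+}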
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/dustin/gojson/fold.go b/vendor/github.com/dustin/gojson/fold.go
new file mode 100644
index 00000000..d6f77c93
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "unicode/utf8"
+)
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
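+
+// Illustrative sketch, not upstream code: how the classification above plays
+// out for a few hypothetical keys. All three comparisons below report true.
+func exampleFoldFuncSketch() bool {
+ simple := foldFunc([]byte("Name")) // simpleLetterEqualFold: letters only
+ ascii := foldFunc([]byte("user_id")) // asciiEqualFold: contains '_'
+ special := foldFunc([]byte("kind")) // equalFoldRight: contains 'k'
+ return simple([]byte("Name"), []byte("NAME")) &&
+ ascii([]byte("user_id"), []byte("USER_ID")) &&
+ special([]byte("kind"), []byte("KIND"))
+}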
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/dustin/gojson/indent.go b/vendor/github.com/dustin/gojson/indent.go
new file mode 100644
index 00000000..f95fac91
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/indent.go
@@ -0,0 +1,137 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
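+
+// Illustrative sketch, not upstream code: Compact strips insignificant
+// whitespace without re-encoding the value.
+func exampleCompactSketch() (string, error) {
+ var dst bytes.Buffer
+ if err := Compact(&dst, []byte("{ \"a\" : [ 1, 2 ] }")); err != nil {
+ return "", err
+ }
+ // dst now holds {"a":[1,2]}
+ return dst.String(), nil
+}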
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ var scan Scanner
+ scan.Reset()
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.Step(&scan, int(c))
+ if v >= ScanSkipSpace {
+ if v == ScanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.EOF() == ScanError {
+ dst.Truncate(origLen)
+ return scan.Err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, and has no trailing newline, to make it
+// easier to embed inside other formatted JSON data.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan Scanner
+ scan.Reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.Step(&scan, int(c))
+ if v == ScanSkipSpace {
+ continue
+ }
+ if v == ScanError {
+ break
+ }
+ if needIndent && v != ScanEndObject && v != ScanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == ScanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.EOF() == ScanError {
+ dst.Truncate(origLen)
+ return scan.Err
+ }
+ return nil
+}
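+
+// Illustrative sketch, not upstream code: Indent re-formats already-encoded
+// JSON; prefix starts every new line and indent repeats once per nesting level.
+func exampleIndentSketch() (string, error) {
+ var dst bytes.Buffer
+ if err := Indent(&dst, []byte(`{"a":[1,2]}`), "", "  "); err != nil {
+ return "", err
+ }
+ // dst now holds:
+ // {
+ //   "a": [
+ //     1,
+ //     2
+ //   ]
+ // }
+ return dst.String(), nil
+}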
diff --git a/vendor/github.com/dustin/gojson/scanner.go b/vendor/github.com/dustin/gojson/scanner.go
new file mode 100644
index 00000000..377510d2
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/scanner.go
@@ -0,0 +1,629 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, NextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *Scanner) error {
+ scan.Reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.Step(scan, int(c)) == ScanError {
+ return scan.Err
+ }
+ }
+ if scan.EOF() == ScanError {
+ return scan.Err
+ }
+ return nil
+}
+
+// Validate checks that data is valid JSON-encoded data.
+// It returns nil iff the JSON is valid.
+func Validate(data []byte) error {
+ s := &Scanner{}
+ return checkValid(data, s)
+}
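+
+// Illustrative sketch, not upstream code: Validate as a plain validity check.
+func exampleValidateSketch() (bool, bool) {
+ okValid := Validate([]byte(`{"a":1}`)) == nil // true
+ okTruncated := Validate([]byte(`{"a":1`)) == nil // false: unexpected end of JSON input
+ return okValid, okTruncated
+}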
+
+// NextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by NextValue to avoid an allocation.
+func NextValue(data []byte, scan *Scanner) (value, rest []byte, err error) {
+ scan.Reset()
+ for i, c := range data {
+ v := scan.Step(scan, int(c))
+ if v >= ScanEnd {
+ switch v {
+ case ScanError:
+ return nil, nil, scan.Err
+ case ScanEnd:
+ return data[0:i], data[i:], nil
+ }
+ }
+ }
+ if scan.EOF() == ScanError {
+ return nil, nil, scan.Err
+ }
+ return data, nil, nil
+}
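+
+// Illustrative sketch, not upstream code: NextValue can walk a buffer holding
+// several concatenated JSON values.
+func exampleNextValueSketch() (string, string, error) {
+ var s Scanner
+ value, rest, err := NextValue([]byte(`{"a":1} {"b":2}`), &s)
+ if err != nil {
+ return "", "", err
+ }
+ // value is {"a":1}; rest is ` {"b":2}` (ScanEnd fires on the byte after
+ // the value, so the separating space is left in rest).
+ return string(value), string(rest), nil
+}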
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A Scanner is a JSON scanning state machine.
+// Callers call scan.Reset() and then pass bytes in one at a time
+// by calling scan.Step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value ScanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type Scanner struct {
+ // Step is the func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ Step func(*Scanner, int) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ Err error
+
+ // 1-byte redo (see undo method)
+ redo bool
+ redoCode int
+ redoState func(*Scanner, int) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
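+
+// Illustrative sketch, not upstream code: the opcodes returned by Step let a
+// caller follow along without decoding - here, counting object keys in one
+// pass. The function name is hypothetical.
+func exampleCountKeysSketch(data []byte) (int, error) {
+ var s Scanner
+ s.Reset()
+ keys := 0
+ for _, c := range data {
+ s.bytes++
+ switch s.Step(&s, int(c)) {
+ case ScanObjectKey:
+ keys++
+ case ScanError:
+ return 0, s.Err
+ }
+ }
+ if s.EOF() == ScanError {
+ return 0, s.Err
+ }
+ return keys, nil
+}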
+
+// These values are returned by the state transition functions
+// assigned to Scanner.Step and by the method Scanner.EOF.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to Scanner.Step: if one call returns ScanError,
+// every subsequent call will return ScanError too.
+const (
+ // Continue.
+ ScanContinue = iota // uninteresting byte
+ ScanBeginLiteral // end implied by next result != ScanContinue
+ ScanBeginObject // begin object
+ ScanObjectKey // just finished object key (string)
+ ScanObjectValue // just finished non-last object value
+ ScanEndObject // end object (implies ScanObjectValue if possible)
+ ScanBeginArray // begin array
+ ScanArrayValue // just finished array value
+ ScanEndArray // end array (implies ScanArrayValue if possible)
+ ScanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ ScanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ ScanError // hit an error, Scanner.Err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+)
+
+// Reset prepares the scanner for use.
+// It must be called before calling s.Step.
+func (s *Scanner) Reset() {
+ s.Step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.Err = nil
+ s.redo = false
+ s.endTop = false
+}
+
+// EOF tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.Step does.
+func (s *Scanner) EOF() int {
+ if s.Err != nil {
+ return ScanError
+ }
+ if s.endTop {
+ return ScanEnd
+ }
+ s.Step(s, ' ')
+ if s.endTop {
+ return ScanEnd
+ }
+ if s.Err == nil {
+ s.Err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return ScanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *Scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.Step accordingly.
+func (s *Scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ s.redo = false
+ if n == 0 {
+ s.Step = stateEndTop
+ s.endTop = true
+ } else {
+ s.Step = stateEndValue
+ }
+}
+
+func isSpace(c rune) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *Scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return ScanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *Scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return ScanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.Step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return ScanBeginObject
+ case '[':
+ s.Step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return ScanBeginArray
+ case '"':
+ s.Step = stateInString
+ return ScanBeginLiteral
+ case '-':
+ s.Step = stateNeg
+ return ScanBeginLiteral
+ case '0': // beginning of 0.123
+ s.Step = state0
+ return ScanBeginLiteral
+ case 't': // beginning of true
+ s.Step = stateT
+ return ScanBeginLiteral
+ case 'f': // beginning of false
+ s.Step = stateF
+ return ScanBeginLiteral
+ case 'n': // beginning of null
+ s.Step = stateN
+ return ScanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.Step = state1
+ return ScanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *Scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return ScanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *Scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return ScanSkipSpace
+ }
+ if c == '"' {
+ s.Step = stateInString
+ return ScanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *Scanner, c int) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.Step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(rune(c)) {
+ s.Step = stateEndValue
+ return ScanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.Step = stateBeginValue
+ return ScanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.Step = stateBeginString
+ return ScanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return ScanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.Step = stateBeginValue
+ return ScanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return ScanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *Scanner, c int) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return ScanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *Scanner, c int) int {
+ if c == '"' {
+ s.Step = stateEndValue
+ return ScanContinue
+ }
+ if c == '\\' {
+ s.Step = stateInStringEsc
+ return ScanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return ScanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *Scanner, c int) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.Step = stateInString
+ return ScanContinue
+ }
+ if c == 'u' {
+ s.Step = stateInStringEscU
+ return ScanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.Step = stateInStringEscU1
+ return ScanContinue
+ }
+ // not a hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.Step = stateInStringEscU12
+ return ScanContinue
+ }
+ // not a hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.Step = stateInStringEscU123
+ return ScanContinue
+ }
+ // not a hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.Step = stateInString
+ return ScanContinue
+ }
+ // not a hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *Scanner, c int) int {
+ if c == '0' {
+ s.Step = state0
+ return ScanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.Step = state1
+ return ScanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.Step = state1
+ return ScanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *Scanner, c int) int {
+ if c == '.' {
+ s.Step = stateDot
+ return ScanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.Step = stateE
+ return ScanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.Step = stateDot0
+ return ScanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.Step = stateDot0
+ return ScanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.Step = stateE
+ return ScanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *Scanner, c int) int {
+ if c == '+' {
+ s.Step = stateESign
+ return ScanContinue
+ }
+ if c == '-' {
+ s.Step = stateESign
+ return ScanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.Step = stateE0
+ return ScanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *Scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.Step = stateE0
+ return ScanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *Scanner, c int) int {
+ if c == 'r' {
+ s.Step = stateTr
+ return ScanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *Scanner, c int) int {
+ if c == 'u' {
+ s.Step = stateTru
+ return ScanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *Scanner, c int) int {
+ if c == 'e' {
+ s.Step = stateEndValue
+ return ScanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *Scanner, c int) int {
+ if c == 'a' {
+ s.Step = stateFa
+ return ScanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *Scanner, c int) int {
+ if c == 'l' {
+ s.Step = stateFal
+ return ScanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *Scanner, c int) int {
+ if c == 's' {
+ s.Step = stateFals
+ return ScanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *Scanner, c int) int {
+ if c == 'e' {
+ s.Step = stateEndValue
+ return ScanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *Scanner, c int) int {
+ if c == 'u' {
+ s.Step = stateNu
+ return ScanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *Scanner, c int) int {
+ if c == 'l' {
+ s.Step = stateNul
+ return ScanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *Scanner, c int) int {
+ if c == 'l' {
+ s.Step = stateEndValue
+ return ScanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *Scanner, c int) int {
+ return ScanError
+}
+
+// error records an error and switches to the error state.
+func (s *Scanner) error(c int, context string) int {
+ s.Step = stateError
+ s.Err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return ScanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c int) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *Scanner) undo(scanCode int) {
+ if s.redo {
+ panic("json: invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.Step
+ s.Step = stateRedo
+ s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *Scanner, c int) int {
+ s.redo = false
+ s.Step = s.redoState
+ return s.redoCode
+}
diff --git a/vendor/github.com/dustin/gojson/stream.go b/vendor/github.com/dustin/gojson/stream.go
new file mode 100644
index 00000000..82a59044
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/stream.go
@@ -0,0 +1,200 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON objects from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scan Scanner
+ err error
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ dec.d.init(dec.buf[0:n])
+ err = dec.d.unmarshal(v)
+
+ // Slide rest of data down.
+ rest := copy(dec.buf, dec.buf[n:])
+ dec.buf = dec.buf[0:rest]
+
+ return err
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf)
+}
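+
+// Illustrative sketch, not upstream code: Decode reads one value per call and
+// leaves any trailing input for later calls (or for Buffered above).
+func exampleDecoderSketch() ([]interface{}, error) {
+ dec := NewDecoder(bytes.NewReader([]byte(`{"a":1} {"b":2}`)))
+ var out []interface{}
+ for {
+ var v interface{}
+ err := dec.Decode(&v)
+ if err == io.EOF {
+ return out, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, v)
+ }
+}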
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.Reset()
+
+ scanp := 0
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.Step(&dec.scan, int(c))
+ if v == ScanEnd {
+ scanp += i
+ break Input
+ }
+ // ScanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if (v == ScanEndObject || v == ScanEndArray) && dec.scan.Step(&dec.scan, ' ') == ScanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == ScanError {
+ dec.err = dec.scan.Err
+ return 0, dec.scan.Err
+ }
+ }
+ scanp = len(dec.buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.Step(&dec.scan, ' ') == ScanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ // Make room to read more into the buffer.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ var n int
+ n, err = dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+ }
+ return scanp, nil
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(rune(c)) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON objects to an output stream.
+type Encoder struct {
+ w io.Writer
+ err error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ if _, err = enc.w.Write(e.Bytes()); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
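+
+// Illustrative sketch, not upstream code: each Encode call appends one value
+// plus the trailing newline discussed above, so the output streams line by line.
+func exampleEncoderSketch() (string, error) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ for _, v := range []interface{}{map[string]int{"a": 1}, 2, "three"} {
+ if err := enc.Encode(v); err != nil {
+ return "", err
+ }
+ }
+ // buf now holds three lines: {"a":1}, 2 and "three".
+ return buf.String(), nil
+}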
+
+// RawMessage is a raw encoded JSON object.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
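+
+// Illustrative sketch, not upstream code: RawMessage defers decoding part of a
+// document - here a hypothetical envelope whose "params" shape depends on the
+// accompanying "method" field.
+func exampleRawMessageSketch(data []byte) (string, RawMessage, error) {
+ var env struct {
+ Method string `json:"method"`
+ Params RawMessage `json:"params"`
+ }
+ if err := Unmarshal(data, &env); err != nil {
+ return "", nil, err
+ }
+ // env.Params still holds the raw bytes of the "params" value; it can be
+ // unmarshaled later, once env.Method has been inspected.
+ return env.Method, env.Params, nil
+}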
diff --git a/vendor/github.com/dustin/gojson/tags.go b/vendor/github.com/dustin/gojson/tags.go
new file mode 100644
index 00000000..c38fd510
--- /dev/null
+++ b/vendor/github.com/dustin/gojson/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the option optionName. optionName must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
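+
+// Illustrative sketch, not upstream code: how parseTag and Contains split the
+// value of a tag such as `json:"myName,omitempty,string"`.
+func exampleParseTagSketch() (string, bool, bool) {
+ name, opts := parseTag("myName,omitempty,string")
+ // name is "myName"; opts is "omitempty,string".
+ return name, opts.Contains("omitempty"), opts.Contains("nonexistent")
+}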