author    Dave Henderson <dhenderson@gmail.com>  2019-08-17 11:53:32 -0400
committer Dave Henderson <dhenderson@gmail.com>  2019-10-13 12:03:59 -0400
commit    b2a1fb672bdd6d5e804e22766618cafa334af752 (patch)
tree      0177dbfe1b80aad8e3b166291efb931cfd6d5728 /vendor/github.com
parent    d0d67b1dd388ee9ae0c4afd750cf1e1f11d3032a (diff)
Supporting s3 datasources
Signed-off-by: Dave Henderson <dhenderson@gmail.com>
Diffstat (limited to 'vendor/github.com')
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go | 57
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go | 144
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go | 199
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go | 114
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go | 23
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go | 196
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go | 24
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go | 166
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go | 501
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go | 103
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go | 79
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 24952
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go | 249
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go | 107
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go | 75
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/doc.go | 26
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go | 123
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/errors.go | 48
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go | 155
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go | 8
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go | 28
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go | 443
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go | 529
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go | 88
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go | 81
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go | 7
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go | 5
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go | 7
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go | 5
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go | 3
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go | 597
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go | 65
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go | 774
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go | 129
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go | 75
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/service.go | 99
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 84
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go | 40
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go | 88
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go | 214
-rw-r--r-- vendor/github.com/golang/protobuf/AUTHORS | 3
-rw-r--r-- vendor/github.com/golang/protobuf/CONTRIBUTORS | 3
-rw-r--r-- vendor/github.com/golang/protobuf/LICENSE | 28
-rw-r--r-- vendor/github.com/golang/protobuf/proto/clone.go | 253
-rw-r--r-- vendor/github.com/golang/protobuf/proto/decode.go | 427
-rw-r--r-- vendor/github.com/golang/protobuf/proto/deprecated.go | 63
-rw-r--r-- vendor/github.com/golang/protobuf/proto/discard.go | 350
-rw-r--r-- vendor/github.com/golang/protobuf/proto/encode.go | 203
-rw-r--r-- vendor/github.com/golang/protobuf/proto/equal.go | 301
-rw-r--r-- vendor/github.com/golang/protobuf/proto/extensions.go | 607
-rw-r--r-- vendor/github.com/golang/protobuf/proto/lib.go | 965
-rw-r--r-- vendor/github.com/golang/protobuf/proto/message_set.go | 181
-rw-r--r-- vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 360
-rw-r--r-- vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 313
-rw-r--r-- vendor/github.com/golang/protobuf/proto/properties.go | 545
-rw-r--r-- vendor/github.com/golang/protobuf/proto/table_marshal.go | 2776
-rw-r--r-- vendor/github.com/golang/protobuf/proto/table_merge.go | 654
-rw-r--r-- vendor/github.com/golang/protobuf/proto/table_unmarshal.go | 2053
-rw-r--r-- vendor/github.com/golang/protobuf/proto/text.go | 843
-rw-r--r-- vendor/github.com/golang/protobuf/proto/text_parser.go | 880
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/any.go | 141
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/any/any.pb.go | 200
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/any/any.proto | 154
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/doc.go | 35
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/duration.go | 102
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go | 161
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/duration/duration.proto | 117
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/timestamp.go | 132
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go | 179
-rw-r--r-- vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto | 135
-rw-r--r-- vendor/github.com/google/wire/.codecov.yml | 13
-rw-r--r-- vendor/github.com/google/wire/.contributebot | 4
-rw-r--r-- vendor/github.com/google/wire/.travis.yml | 55
-rw-r--r-- vendor/github.com/google/wire/AUTHORS | 18
-rw-r--r-- vendor/github.com/google/wire/CODE_OF_CONDUCT.md | 10
-rw-r--r-- vendor/github.com/google/wire/CONTRIBUTING.md | 152
-rw-r--r-- vendor/github.com/google/wire/CONTRIBUTORS | 43
-rw-r--r-- vendor/github.com/google/wire/LICENSE | 202
-rw-r--r-- vendor/github.com/google/wire/README.md | 57
-rw-r--r-- vendor/github.com/google/wire/go.mod | 10
-rw-r--r-- vendor/github.com/google/wire/go.sum | 20
-rw-r--r-- vendor/github.com/google/wire/wire.go | 191
-rw-r--r-- vendor/github.com/googleapis/gax-go/.gitignore | 1
-rw-r--r-- vendor/github.com/googleapis/gax-go/.travis.yml | 12
-rw-r--r-- vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md | 43
-rw-r--r-- vendor/github.com/googleapis/gax-go/CONTRIBUTING.md | 27
-rw-r--r-- vendor/github.com/googleapis/gax-go/LICENSE | 27
-rw-r--r-- vendor/github.com/googleapis/gax-go/README.md | 29
-rw-r--r-- vendor/github.com/googleapis/gax-go/call_option.go | 157
-rw-r--r-- vendor/github.com/googleapis/gax-go/gax.go | 38
-rw-r--r-- vendor/github.com/googleapis/gax-go/header.go | 24
-rw-r--r-- vendor/github.com/googleapis/gax-go/invoke.go | 89
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/.gitignore | 9
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/.gitmodules | 3
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/Dockerfile | 5
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/GoFakeS3.png | bin 0 -> 25065 bytes
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/LICENSE | 21
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/README.md | 163
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/backend.go | 307
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/backend/s3mem/backend.go | 501
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/backend/s3mem/bucket.go | 274
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/backend/s3mem/versionid.go | 67
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/ca-certificates.crt | 3646
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/constants.go | 44
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/cors.go | 43
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/error.go | 344
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/go.mod | 16
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/go.sum | 35
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/gofakes3.go | 967
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/hash.go | 78
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/internal/goskipiter/iter.go | 66
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/internal/s3io/io.go | 13
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/log.go | 111
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/logo.sketch | bin 0 -> 65536 bytes
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/makefile.go | 202
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/messages.go | 527
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/option.go | 82
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/prefix.go | 171
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/range.go | 126
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/routing.go | 196
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/time.go | 58
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/uploader.go | 517
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/util.go | 58
-rw-r--r-- vendor/github.com/johannesboyne/gofakes3/validation.go | 55
-rw-r--r-- vendor/github.com/ryszard/goskiplist/AUTHORS | 12
-rw-r--r-- vendor/github.com/ryszard/goskiplist/CONTRIBUTORS | 35
-rw-r--r-- vendor/github.com/ryszard/goskiplist/LICENSE | 202
-rw-r--r-- vendor/github.com/ryszard/goskiplist/skiplist/skiplist.go | 635
-rw-r--r-- vendor/github.com/shabbyrobe/gocovmerge/LICENSE | 22
-rw-r--r-- vendor/github.com/shabbyrobe/gocovmerge/README.md | 22
-rw-r--r-- vendor/github.com/shabbyrobe/gocovmerge/gocovmerge.go | 103
131 files changed, 54832 insertions, 0 deletions
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
new file mode 100644
index 00000000..0b9b0dfc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+ awserr.RequestFailure
+
+ hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+ return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+ r.StatusCode(), r.RequestID(), r.hostID)
+ return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+ return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+ return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 host ID (X-Amz-Id-2) from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+ return request.NamedHandler{
+ Name: "awssdk.s3.errorHandler",
+ Fn: func(req *request.Request) {
+ reqErr, ok := req.Error.(awserr.RequestFailure)
+ if !ok || reqErr == nil {
+ return
+ }
+
+ hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+ if req.Error == nil {
+ return
+ }
+
+ req.Error = NewRequestFailure(reqErr, hostID)
+ },
+ }
+}
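Callers recover the host ID by type-asserting the returned error. A minimal, hypothetical sketch (not part of this diff): since s3err is an internal package, code outside the SDK asserts on a small interface rather than the concrete type. Assumes the standard library log package.

	// logHostID logs the X-Amz-Id-2 value carried by any error that
	// RequestFailureWrapperHandler has decorated. The anonymous
	// interface matches RequestFailure's HostID method.
	func logHostID(err error) {
		if hostIDer, ok := err.(interface{ HostID() string }); ok {
			log.Printf("S3 host id: %s", hostIDer.HostID())
		}
	}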
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
new file mode 100644
index 00000000..ecc7bf82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
@@ -0,0 +1,144 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+type decodedMessage struct {
+ rawMessage
+ Headers decodedHeaders `json:"headers"`
+}
+type jsonMessage struct {
+ Length json.Number `json:"total_length"`
+ HeadersLen json.Number `json:"headers_length"`
+ PreludeCRC json.Number `json:"prelude_crc"`
+ Headers decodedHeaders `json:"headers"`
+ Payload []byte `json:"payload"`
+ CRC json.Number `json:"message_crc"`
+}
+
+func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) {
+ var jsonMsg jsonMessage
+ if err = json.Unmarshal(b, &jsonMsg); err != nil {
+ return err
+ }
+
+ d.Length, err = numAsUint32(jsonMsg.Length)
+ if err != nil {
+ return err
+ }
+ d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen)
+ if err != nil {
+ return err
+ }
+ d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC)
+ if err != nil {
+ return err
+ }
+ d.Headers = jsonMsg.Headers
+ d.Payload = jsonMsg.Payload
+ d.CRC, err = numAsUint32(jsonMsg.CRC)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d *decodedMessage) MarshalJSON() ([]byte, error) {
+ jsonMsg := jsonMessage{
+ Length: json.Number(strconv.Itoa(int(d.Length))),
+ HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))),
+ PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))),
+ Headers: d.Headers,
+ Payload: d.Payload,
+ CRC: json.Number(strconv.Itoa(int(d.CRC))),
+ }
+
+ return json.Marshal(jsonMsg)
+}
+
+func numAsUint32(n json.Number) (uint32, error) {
+ v, err := n.Int64()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get int64 json number, %v", err)
+ }
+
+ return uint32(v), nil
+}
+
+func (d decodedMessage) Message() Message {
+ return Message{
+ Headers: Headers(d.Headers),
+ Payload: d.Payload,
+ }
+}
+
+type decodedHeaders Headers
+
+func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
+ var jsonHeaders []struct {
+ Name string `json:"name"`
+ Type valueType `json:"type"`
+ Value interface{} `json:"value"`
+ }
+
+ decoder := json.NewDecoder(bytes.NewReader(b))
+ decoder.UseNumber()
+ if err := decoder.Decode(&jsonHeaders); err != nil {
+ return err
+ }
+
+ var headers Headers
+ for _, h := range jsonHeaders {
+ value, err := valueFromType(h.Type, h.Value)
+ if err != nil {
+ return err
+ }
+ headers.Set(h.Name, value)
+ }
+ (*hs) = decodedHeaders(headers)
+
+ return nil
+}
+
+func valueFromType(typ valueType, val interface{}) (Value, error) {
+ switch typ {
+ case trueValueType:
+ return BoolValue(true), nil
+ case falseValueType:
+ return BoolValue(false), nil
+ case int8ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int8Value(int8(v)), err
+ case int16ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int16Value(int16(v)), err
+ case int32ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int32Value(int32(v)), err
+ case int64ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int64Value(v), err
+ case bytesValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ return BytesValue(v), err
+ case stringValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ return StringValue(string(v)), err
+ case timestampValueType:
+ v, err := val.(json.Number).Int64()
+ return TimestampValue(timeFromEpochMilli(v)), err
+ case uuidValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ var tv UUIDValue
+ copy(tv[:], v)
+ return tv, err
+ default:
+ panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
new file mode 100644
index 00000000..4b972b2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
@@ -0,0 +1,199 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// Decoder provides decoding of Event Stream messages.
+type Decoder struct {
+ r io.Reader
+ logger aws.Logger
+}
+
+// NewDecoder initializes and returns a Decoder for decoding event
+// stream messages from the reader provided.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ r: r,
+ }
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// It returns the event stream message, or an error if Decode fails to read
+// the message from the stream.
+func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
+ reader := d.r
+ if d.logger != nil {
+ debugMsgBuf := bytes.NewBuffer(nil)
+ reader = io.TeeReader(reader, debugMsgBuf)
+ defer func() {
+ logMessageDecode(d.logger, debugMsgBuf, m, err)
+ }()
+ }
+
+ crc := crc32.New(crc32IEEETable)
+ hashReader := io.TeeReader(reader, crc)
+
+ prelude, err := decodePrelude(hashReader, crc)
+ if err != nil {
+ return Message{}, err
+ }
+
+ if prelude.HeadersLen > 0 {
+ lr := io.LimitReader(hashReader, int64(prelude.HeadersLen))
+ m.Headers, err = decodeHeaders(lr)
+ if err != nil {
+ return Message{}, err
+ }
+ }
+
+ if payloadLen := prelude.PayloadLen(); payloadLen > 0 {
+ buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen)))
+ if err != nil {
+ return Message{}, err
+ }
+ m.Payload = buf
+ }
+
+ msgCRC := crc.Sum32()
+ if err := validateCRC(reader, msgCRC); err != nil {
+ return Message{}, err
+ }
+
+ return m, nil
+}
+
+// UseLogger specifies the Logger that the decoder should use to log the
+// message decode to.
+func (d *Decoder) UseLogger(logger aws.Logger) {
+ d.logger = logger
+}
+
+func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
+ w := bytes.NewBuffer(nil)
+ defer func() { logger.Log(w.String()) }()
+
+ fmt.Fprintf(w, "Raw message:\n%s\n",
+ hex.Dump(msgBuf.Bytes()))
+
+ if decodeErr != nil {
+ fmt.Fprintf(w, "Decode error: %v\n", decodeErr)
+ return
+ }
+
+ rawMsg, err := msg.rawMessage()
+ if err != nil {
+ fmt.Fprintf(w, "failed to create raw message, %v\n", err)
+ return
+ }
+
+ decodedMsg := decodedMessage{
+ rawMessage: rawMsg,
+ Headers: decodedHeaders(msg.Headers),
+ }
+
+ fmt.Fprintf(w, "Decoded message:\n")
+ encoder := json.NewEncoder(w)
+ if err := encoder.Encode(decodedMsg); err != nil {
+ fmt.Fprintf(w, "failed to generate decoded message, %v\n", err)
+ }
+}
+
+func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) {
+ var p messagePrelude
+
+ var err error
+ p.Length, err = decodeUint32(r)
+ if err != nil {
+ return messagePrelude{}, err
+ }
+
+ p.HeadersLen, err = decodeUint32(r)
+ if err != nil {
+ return messagePrelude{}, err
+ }
+
+ if err := p.ValidateLens(); err != nil {
+ return messagePrelude{}, err
+ }
+
+ preludeCRC := crc.Sum32()
+ if err := validateCRC(r, preludeCRC); err != nil {
+ return messagePrelude{}, err
+ }
+
+ p.PreludeCRC = preludeCRC
+
+ return p, nil
+}
+
+func decodePayload(buf []byte, r io.Reader) ([]byte, error) {
+ w := bytes.NewBuffer(buf[0:0])
+
+ _, err := io.Copy(w, r)
+ return w.Bytes(), err
+}
+
+func decodeUint8(r io.Reader) (uint8, error) {
+ type byteReader interface {
+ ReadByte() (byte, error)
+ }
+
+ if br, ok := r.(byteReader); ok {
+ v, err := br.ReadByte()
+ return uint8(v), err
+ }
+
+ var b [1]byte
+ _, err := io.ReadFull(r, b[:])
+ return uint8(b[0]), err
+}
+func decodeUint16(r io.Reader) (uint16, error) {
+ var b [2]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(bs), nil
+}
+func decodeUint32(r io.Reader) (uint32, error) {
+ var b [4]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint32(bs), nil
+}
+func decodeUint64(r io.Reader) (uint64, error) {
+ var b [8]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(bs), nil
+}
+
+func validateCRC(r io.Reader, expect uint32) error {
+ msgCRC, err := decodeUint32(r)
+ if err != nil {
+ return err
+ }
+
+ if msgCRC != expect {
+ return ChecksumError{}
+ }
+
+ return nil
+}
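A minimal decode sketch, assuming raw holds one complete framed event stream message (the helper name is illustrative):

	// decodeOne parses a single message from raw. Decode accepts an
	// optional scratch buffer for the payload; nil is acceptable.
	func decodeOne(raw []byte) (eventstream.Message, error) {
		dec := eventstream.NewDecoder(bytes.NewReader(raw))
		return dec.Decode(nil)
	}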
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
new file mode 100644
index 00000000..150a6098
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
@@ -0,0 +1,114 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "hash"
+ "hash/crc32"
+ "io"
+)
+
+// Encoder provides EventStream message encoding.
+type Encoder struct {
+ w io.Writer
+
+ headersBuf *bytes.Buffer
+}
+
+// NewEncoder initializes and returns an Encoder to encode Event Stream
+// messages to an io.Writer.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ headersBuf: bytes.NewBuffer(nil),
+ }
+}
+
+// Encode encodes a single EventStream message to the io.Writer the Encoder
+// was created with. An error is returned if writing the message fails.
+func (e *Encoder) Encode(msg Message) error {
+ e.headersBuf.Reset()
+
+ err := encodeHeaders(e.headersBuf, msg.Headers)
+ if err != nil {
+ return err
+ }
+
+ crc := crc32.New(crc32IEEETable)
+ hashWriter := io.MultiWriter(e.w, crc)
+
+ headersLen := uint32(e.headersBuf.Len())
+ payloadLen := uint32(len(msg.Payload))
+
+ if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
+ return err
+ }
+
+ if headersLen > 0 {
+ if _, err := io.Copy(hashWriter, e.headersBuf); err != nil {
+ return err
+ }
+ }
+
+ if payloadLen > 0 {
+ if _, err := hashWriter.Write(msg.Payload); err != nil {
+ return err
+ }
+ }
+
+ msgCRC := crc.Sum32()
+ return binary.Write(e.w, binary.BigEndian, msgCRC)
+}
+
+func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
+ p := messagePrelude{
+ Length: minMsgLen + headersLen + payloadLen,
+ HeadersLen: headersLen,
+ }
+ if err := p.ValidateLens(); err != nil {
+ return err
+ }
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ p.Length,
+ p.HeadersLen,
+ )
+ if err != nil {
+ return err
+ }
+
+ p.PreludeCRC = crc.Sum32()
+ err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func encodeHeaders(w io.Writer, headers Headers) error {
+ for _, h := range headers {
+ hn := headerName{
+ Len: uint8(len(h.Name)),
+ }
+ copy(hn.Name[:hn.Len], h.Name)
+ if err := hn.encode(w); err != nil {
+ return err
+ }
+
+ if err := h.Value.encode(w); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
+ for _, v := range vs {
+ if err := binary.Write(w, order, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
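A short round-trip sketch pairing the Encoder with the Decoder above; the header name and payload are illustrative values:

	// roundTrip encodes a message into a buffer and decodes it back,
	// exercising the prelude, header, and CRC framing end to end.
	func roundTrip() (eventstream.Message, error) {
		var buf bytes.Buffer
		enc := eventstream.NewEncoder(&buf)
		msg := eventstream.Message{
			Headers: eventstream.Headers{
				{Name: ":message-type", Value: eventstream.StringValue("event")},
			},
			Payload: []byte(`{"hello":"s3"}`),
		}
		if err := enc.Encode(msg); err != nil {
			return eventstream.Message{}, err
		}
		return eventstream.NewDecoder(&buf).Decode(nil)
	}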
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
new file mode 100644
index 00000000..5481ef30
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
@@ -0,0 +1,23 @@
+package eventstream
+
+import "fmt"
+
+// LengthError provides the error for items being larger than a maximum length.
+type LengthError struct {
+ Part string
+ Want int
+ Have int
+ Value interface{}
+}
+
+func (e LengthError) Error() string {
+ return fmt.Sprintf("%s length invalid, %d/%d, %v",
+ e.Part, e.Want, e.Have, e.Value)
+}
+
+// ChecksumError provides the error for message checksum invalidation errors.
+type ChecksumError struct{}
+
+func (e ChecksumError) Error() string {
+ return "message checksum mismatch"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
new file mode 100644
index 00000000..97937c8e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
@@ -0,0 +1,196 @@
+package eventstreamapi
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Unmarshaler provides the interface for unmarshaling an EventStream
+// message into an SDK type.
+type Unmarshaler interface {
+ UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
+}
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+ MessageTypeHeader = `:message-type` // Identifies type of message.
+ EventMessageType = `event`
+ ErrorMessageType = `error`
+ ExceptionMessageType = `exception`
+
+ // Message Events
+ EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+ // Message Error
+ ErrorCodeHeader = `:error-code`
+ ErrorMessageHeader = `:error-message`
+
+ // Message Exception
+ ExceptionTypeHeader = `:exception-type`
+)
+
+// EventReader provides reading from the EventStream of a reader.
+type EventReader struct {
+ reader io.ReadCloser
+ decoder *eventstream.Decoder
+
+ unmarshalerForEventType func(string) (Unmarshaler, error)
+ payloadUnmarshaler protocol.PayloadUnmarshaler
+
+ payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use the ReadEvent method to start reading from the EventStream.
+func NewEventReader(
+ reader io.ReadCloser,
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+ return &EventReader{
+ reader: reader,
+ decoder: eventstream.NewDecoder(reader),
+ payloadUnmarshaler: payloadUnmarshaler,
+ unmarshalerForEventType: unmarshalerForEventType,
+ payloadBuf: make([]byte, 10*1024),
+ }
+}
+
+// UseLogger instructs the EventReader to use the logger and log level
+// specified.
+func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
+ if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
+ r.decoder.UseLogger(logger)
+ }
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload buffer is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+ msg, err := r.decoder.Decode(r.payloadBuf)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ // Reclaim payload buffer for next message read.
+ r.payloadBuf = msg.Payload[0:0]
+ }()
+
+ typ, err := GetHeaderString(msg, MessageTypeHeader)
+ if err != nil {
+ return nil, err
+ }
+
+ switch typ {
+ case EventMessageType:
+ return r.unmarshalEventMessage(msg)
+ case ExceptionMessageType:
+ err = r.unmarshalEventException(msg)
+ return nil, err
+ case ErrorMessageType:
+ return nil, r.unmarshalErrorMessage(msg)
+ default:
+ return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
+ }
+}
+
+func (r *EventReader) unmarshalEventMessage(
+ msg eventstream.Message,
+) (event interface{}, err error) {
+ eventType, err := GetHeaderString(msg, EventTypeHeader)
+ if err != nil {
+ return nil, err
+ }
+
+ ev, err := r.unmarshalerForEventType(eventType)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ return ev, nil
+}
+
+func (r *EventReader) unmarshalEventException(
+ msg eventstream.Message,
+) (err error) {
+ eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
+ if err != nil {
+ return err
+ }
+
+ ev, err := r.unmarshalerForEventType(eventType)
+ if err != nil {
+ return err
+ }
+
+ err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
+ if err != nil {
+ return err
+ }
+
+ var ok bool
+ err, ok = ev.(error)
+ if !ok {
+ err = messageError{
+ code: "SerializationError",
+ msg: fmt.Sprintf(
+ "event stream exception %s mapped to non-error %T, %v",
+ eventType, ev, ev,
+ ),
+ }
+ }
+
+ return err
+}
+
+func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
+ var msgErr messageError
+
+ msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader)
+ if err != nil {
+ return err
+ }
+
+ msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader)
+ if err != nil {
+ return err
+ }
+
+ return msgErr
+}
+
+// Close closes the EventReader's EventStream reader.
+func (r *EventReader) Close() error {
+ return r.reader.Close()
+}
+
+// GetHeaderString returns the value of the header as a string. If the header
+// is not set or the value is not a string, an error will be returned.
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) {
+ headerVal := msg.Headers.Get(headerName)
+ if headerVal == nil {
+ return "", fmt.Errorf("error header %s not present", headerName)
+ }
+
+ v, ok := headerVal.Get().(string)
+ if !ok {
+ return "", fmt.Errorf("error header value is not a string, %T", headerVal)
+ }
+
+ return v, nil
+}
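A hedged sketch of the read loop a stream consumer would run over an EventReader; io.EOF signals a cleanly closed stream, and any other error (including the messageError values above) aborts. The helper name is illustrative:

	// drain reads events until the stream ends, leaving dispatch on
	// each event's concrete type to the caller.
	func drain(r *eventstreamapi.EventReader) error {
		defer r.Close()
		for {
			event, err := r.ReadEvent()
			if err != nil {
				if err == io.EOF {
					return nil
				}
				return err
			}
			_ = event // switch on the concrete event type here
		}
	}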
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
new file mode 100644
index 00000000..5ea5a988
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
@@ -0,0 +1,24 @@
+package eventstreamapi
+
+import "fmt"
+
+type messageError struct {
+ code string
+ msg string
+}
+
+func (e messageError) Code() string {
+ return e.code
+}
+
+func (e messageError) Message() string {
+ return e.msg
+}
+
+func (e messageError) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.msg)
+}
+
+func (e messageError) OrigErr() error {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
new file mode 100644
index 00000000..3b44dde2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
@@ -0,0 +1,166 @@
+package eventstream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Headers are a collection of EventStream header values.
+type Headers []Header
+
+// Header is a single EventStream Key Value header pair.
+type Header struct {
+ Name string
+ Value Value
+}
+
+// Set associates the name with a value. If the header name already exists in
+// the Headers, the value will be replaced with the new one.
+func (hs *Headers) Set(name string, value Value) {
+ var i int
+ for ; i < len(*hs); i++ {
+ if (*hs)[i].Name == name {
+ (*hs)[i].Value = value
+ return
+ }
+ }
+
+ *hs = append(*hs, Header{
+ Name: name, Value: value,
+ })
+}
+
+// Get returns the Value associated with the header. Nil is returned if the
+// value does not exist.
+func (hs Headers) Get(name string) Value {
+ for i := 0; i < len(hs); i++ {
+ if h := hs[i]; h.Name == name {
+ return h.Value
+ }
+ }
+ return nil
+}
+
+// Del deletes the value in the Headers if it exists.
+func (hs *Headers) Del(name string) {
+ for i := 0; i < len(*hs); i++ {
+ if (*hs)[i].Name == name {
+ copy((*hs)[i:], (*hs)[i+1:])
+ (*hs) = (*hs)[:len(*hs)-1]
+ }
+ }
+}
+
+func decodeHeaders(r io.Reader) (Headers, error) {
+ hs := Headers{}
+
+ for {
+ name, err := decodeHeaderName(r)
+ if err != nil {
+ if err == io.EOF {
+ // EOF while getting header name means no more headers
+ break
+ }
+ return nil, err
+ }
+
+ value, err := decodeHeaderValue(r)
+ if err != nil {
+ return nil, err
+ }
+
+ hs.Set(name, value)
+ }
+
+ return hs, nil
+}
+
+func decodeHeaderName(r io.Reader) (string, error) {
+ var n headerName
+
+ var err error
+ n.Len, err = decodeUint8(r)
+ if err != nil {
+ return "", err
+ }
+
+ name := n.Name[:n.Len]
+ if _, err := io.ReadFull(r, name); err != nil {
+ return "", err
+ }
+
+ return string(name), nil
+}
+
+func decodeHeaderValue(r io.Reader) (Value, error) {
+ var raw rawValue
+
+ typ, err := decodeUint8(r)
+ if err != nil {
+ return nil, err
+ }
+ raw.Type = valueType(typ)
+
+ var v Value
+
+ switch raw.Type {
+ case trueValueType:
+ v = BoolValue(true)
+ case falseValueType:
+ v = BoolValue(false)
+ case int8ValueType:
+ var tv Int8Value
+ err = tv.decode(r)
+ v = tv
+ case int16ValueType:
+ var tv Int16Value
+ err = tv.decode(r)
+ v = tv
+ case int32ValueType:
+ var tv Int32Value
+ err = tv.decode(r)
+ v = tv
+ case int64ValueType:
+ var tv Int64Value
+ err = tv.decode(r)
+ v = tv
+ case bytesValueType:
+ var tv BytesValue
+ err = tv.decode(r)
+ v = tv
+ case stringValueType:
+ var tv StringValue
+ err = tv.decode(r)
+ v = tv
+ case timestampValueType:
+ var tv TimestampValue
+ err = tv.decode(r)
+ v = tv
+ case uuidValueType:
+ var tv UUIDValue
+ err = tv.decode(r)
+ v = tv
+ default:
+ panic(fmt.Sprintf("unknown value type %d", raw.Type))
+ }
+
+ // Error could be EOF, let caller deal with it
+ return v, err
+}
+
+const maxHeaderNameLen = 255
+
+type headerName struct {
+ Len uint8
+ Name [maxHeaderNameLen]byte
+}
+
+func (v headerName) encode(w io.Writer) error {
+ if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
+ return err
+ }
+
+ _, err := w.Write(v.Name[:v.Len])
+ return err
+}
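A small usage sketch of the Headers collection above, assuming the fmt package:

	var hs eventstream.Headers
	hs.Set("name", eventstream.StringValue("first"))
	hs.Set("name", eventstream.StringValue("second")) // replaces "first"
	if v := hs.Get("name"); v != nil {
		fmt.Println(v.String()) // prints "second"
	}
	hs.Del("name") // Get("name") now returns nil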
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
new file mode 100644
index 00000000..e3fc0766
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
@@ -0,0 +1,501 @@
+package eventstream
+
+import (
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+)
+
+const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
+
+// valueType is the EventStream header value type.
+type valueType uint8
+
+// Header value types
+const (
+ trueValueType valueType = iota
+ falseValueType
+ int8ValueType // Byte
+ int16ValueType // Short
+ int32ValueType // Integer
+ int64ValueType // Long
+ bytesValueType
+ stringValueType
+ timestampValueType
+ uuidValueType
+)
+
+func (t valueType) String() string {
+ switch t {
+ case trueValueType:
+ return "bool"
+ case falseValueType:
+ return "bool"
+ case int8ValueType:
+ return "int8"
+ case int16ValueType:
+ return "int16"
+ case int32ValueType:
+ return "int32"
+ case int64ValueType:
+ return "int64"
+ case bytesValueType:
+ return "byte_array"
+ case stringValueType:
+ return "string"
+ case timestampValueType:
+ return "timestamp"
+ case uuidValueType:
+ return "uuid"
+ default:
+ return fmt.Sprintf("unknown value type %d", uint8(t))
+ }
+}
+
+type rawValue struct {
+ Type valueType
+ Len uint16 // Only set for variable length slices
+ Value []byte // byte representation of value, BigEndian encoding.
+}
+
+func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
+ return binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ v,
+ )
+}
+
+func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
+ binary.Write(w, binary.BigEndian, r.Type)
+
+ _, err := w.Write(v)
+ return err
+}
+
+func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
+ if len(v) > maxHeaderValueLen {
+ return LengthError{
+ Part: "header value",
+ Want: maxHeaderValueLen, Have: len(v),
+ Value: v,
+ }
+ }
+ r.Len = uint16(len(v))
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ r.Len,
+ )
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(v)
+ return err
+}
+
+func (r rawValue) encodeString(w io.Writer, v string) error {
+ if len(v) > maxHeaderValueLen {
+ return LengthError{
+ Part: "header value",
+ Want: maxHeaderValueLen, Have: len(v),
+ Value: v,
+ }
+ }
+ r.Len = uint16(len(v))
+
+ type stringWriter interface {
+ WriteString(string) (int, error)
+ }
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ r.Len,
+ )
+ if err != nil {
+ return err
+ }
+
+ if sw, ok := w.(stringWriter); ok {
+ _, err = sw.WriteString(v)
+ } else {
+ _, err = w.Write([]byte(v))
+ }
+
+ return err
+}
+
+func decodeFixedBytesValue(r io.Reader, buf []byte) error {
+ _, err := io.ReadFull(r, buf)
+ return err
+}
+
+func decodeBytesValue(r io.Reader) ([]byte, error) {
+ var raw rawValue
+ var err error
+ raw.Len, err = decodeUint16(r)
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, raw.Len)
+ _, err = io.ReadFull(r, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
+}
+
+func decodeStringValue(r io.Reader) (string, error) {
+ v, err := decodeBytesValue(r)
+ return string(v), err
+}
+
+// Value represents the abstract header value.
+type Value interface {
+ Get() interface{}
+ String() string
+ valueType() valueType
+ encode(io.Writer) error
+}
+
+// A BoolValue provides eventstream encoding and representation
+// of a Go bool value.
+type BoolValue bool
+
+// Get returns the underlying type
+func (v BoolValue) Get() interface{} {
+ return bool(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (v BoolValue) valueType() valueType {
+ if v {
+ return trueValueType
+ }
+ return falseValueType
+}
+
+func (v BoolValue) String() string {
+ return strconv.FormatBool(bool(v))
+}
+
+// encode encodes the BoolValue into an eventstream binary value
+// representation.
+func (v BoolValue) encode(w io.Writer) error {
+ return binary.Write(w, binary.BigEndian, v.valueType())
+}
+
+// An Int8Value provides eventstream encoding, and representation of a Go
+// int8 value.
+type Int8Value int8
+
+// Get returns the underlying value.
+func (v Int8Value) Get() interface{} {
+ return int8(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int8Value) valueType() valueType {
+ return int8ValueType
+}
+
+func (v Int8Value) String() string {
+ return fmt.Sprintf("0x%02x", int8(v))
+}
+
+// encode encodes the Int8Value into an eventstream binary value
+// representation.
+func (v Int8Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int8Value) decode(r io.Reader) error {
+ n, err := decodeUint8(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int8Value(n)
+ return nil
+}
+
+// An Int16Value provides eventstream encoding, and representation of a Go
+// int16 value.
+type Int16Value int16
+
+// Get returns the underlying value.
+func (v Int16Value) Get() interface{} {
+ return int16(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int16Value) valueType() valueType {
+ return int16ValueType
+}
+
+func (v Int16Value) String() string {
+ return fmt.Sprintf("0x%04x", int16(v))
+}
+
+// encode encodes the Int16Value into an eventstream binary value
+// representation.
+func (v Int16Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int16Value) decode(r io.Reader) error {
+ n, err := decodeUint16(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int16Value(n)
+ return nil
+}
+
+// An Int32Value provides eventstream encoding, and representation of a Go
+// int32 value.
+type Int32Value int32
+
+// Get returns the underlying value.
+func (v Int32Value) Get() interface{} {
+ return int32(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int32Value) valueType() valueType {
+ return int32ValueType
+}
+
+func (v Int32Value) String() string {
+ return fmt.Sprintf("0x%08x", int32(v))
+}
+
+// encode encodes the Int32Value into an eventstream binary value
+// representation.
+func (v Int32Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int32Value) decode(r io.Reader) error {
+ n, err := decodeUint32(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int32Value(n)
+ return nil
+}
+
+// An Int64Value provides eventstream encoding, and representation of a Go
+// int64 value.
+type Int64Value int64
+
+// Get returns the underlying value.
+func (v Int64Value) Get() interface{} {
+ return int64(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int64Value) valueType() valueType {
+ return int64ValueType
+}
+
+func (v Int64Value) String() string {
+ return fmt.Sprintf("0x%016x", int64(v))
+}
+
+// encode encodes the Int64Value into an eventstream binary value
+// representation.
+func (v Int64Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int64Value) decode(r io.Reader) error {
+ n, err := decodeUint64(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int64Value(n)
+ return nil
+}
+
+// A BytesValue provides eventstream encoding and representation of a Go
+// byte slice.
+type BytesValue []byte
+
+// Get returns the underlying value.
+func (v BytesValue) Get() interface{} {
+ return []byte(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (BytesValue) valueType() valueType {
+ return bytesValueType
+}
+
+func (v BytesValue) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+// encode encodes the BytesValue into an eventstream binary value
+// representation.
+func (v BytesValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeBytes(w, []byte(v))
+}
+
+func (v *BytesValue) decode(r io.Reader) error {
+ buf, err := decodeBytesValue(r)
+ if err != nil {
+ return err
+ }
+
+ *v = BytesValue(buf)
+ return nil
+}
+
+// A StringValue provides eventstream encoding and representation of a Go
+// string.
+type StringValue string
+
+// Get returns the underlying value.
+func (v StringValue) Get() interface{} {
+ return string(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (StringValue) valueType() valueType {
+ return stringValueType
+}
+
+func (v StringValue) String() string {
+ return string(v)
+}
+
+// encode encodes the StringValue into an eventstream binary value
+// representation.
+func (v StringValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeString(w, string(v))
+}
+
+func (v *StringValue) decode(r io.Reader) error {
+ s, err := decodeStringValue(r)
+ if err != nil {
+ return err
+ }
+
+ *v = StringValue(s)
+ return nil
+}
+
+// A TimestampValue provides eventstream encoding and representation of a Go
+// timestamp.
+type TimestampValue time.Time
+
+// Get returns the underlying value.
+func (v TimestampValue) Get() interface{} {
+ return time.Time(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (TimestampValue) valueType() valueType {
+ return timestampValueType
+}
+
+func (v TimestampValue) epochMilli() int64 {
+ nano := time.Time(v).UnixNano()
+ msec := nano / int64(time.Millisecond)
+ return msec
+}
+
+func (v TimestampValue) String() string {
+ msec := v.epochMilli()
+ return strconv.FormatInt(msec, 10)
+}
+
+// encode encodes the TimestampValue into an eventstream binary value
+// representation.
+func (v TimestampValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ msec := v.epochMilli()
+ return raw.encodeScalar(w, msec)
+}
+
+func (v *TimestampValue) decode(r io.Reader) error {
+ n, err := decodeUint64(r)
+ if err != nil {
+ return err
+ }
+
+ *v = TimestampValue(timeFromEpochMilli(int64(n)))
+ return nil
+}
+
+func timeFromEpochMilli(t int64) time.Time {
+ secs := t / 1e3
+ msec := t % 1e3
+ return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
+}
+
+// A UUIDValue provides eventstream encoding and representation of a UUID
+// value.
+type UUIDValue [16]byte
+
+// Get returns the underlying value.
+func (v UUIDValue) Get() interface{} {
+ return v[:]
+}
+
+// valueType returns the EventStream header value type value.
+func (UUIDValue) valueType() valueType {
+ return uuidValueType
+}
+
+func (v UUIDValue) String() string {
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:])
+}
+
+// encode encodes the UUIDValue into an eventstream binary value
+// representation.
+func (v UUIDValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeFixedSlice(w, v[:])
+}
+
+func (v *UUIDValue) decode(r io.Reader) error {
+ tv := (*v)[:]
+ return decodeFixedBytesValue(r, tv)
+}
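One precision note worth a sketch: TimestampValue is carried on the wire as epoch milliseconds, so sub-millisecond precision survives in memory but not across an encode/decode round trip. The timestamp below is an illustrative value:

	t := time.Date(2019, 8, 17, 11, 53, 32, 123456789, time.UTC)
	tv := eventstream.TimestampValue(t)
	fmt.Println(tv.String()) // "1566042812123": epoch millis, nanoseconds truncated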
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
new file mode 100644
index 00000000..2dc012a6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
@@ -0,0 +1,103 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "hash/crc32"
+)
+
+const preludeLen = 8
+const preludeCRCLen = 4
+const msgCRCLen = 4
+const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
+const maxPayloadLen = 1024 * 1024 * 16 // 16MB
+const maxHeadersLen = 1024 * 128 // 128KB
+const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
+
+var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
+
+// A Message provides the eventstream message representation.
+type Message struct {
+ Headers Headers
+ Payload []byte
+}
+
+func (m *Message) rawMessage() (rawMessage, error) {
+ var raw rawMessage
+
+ if len(m.Headers) > 0 {
+ var headers bytes.Buffer
+ if err := encodeHeaders(&headers, m.Headers); err != nil {
+ return rawMessage{}, err
+ }
+ raw.Headers = headers.Bytes()
+ raw.HeadersLen = uint32(len(raw.Headers))
+ }
+
+ raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
+
+ hash := crc32.New(crc32IEEETable)
+ binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
+ raw.PreludeCRC = hash.Sum32()
+
+ binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
+
+ if raw.HeadersLen > 0 {
+ hash.Write(raw.Headers)
+ }
+
+ // Read payload bytes and update hash for it as well.
+ if len(m.Payload) > 0 {
+ raw.Payload = m.Payload
+ hash.Write(raw.Payload)
+ }
+
+ raw.CRC = hash.Sum32()
+
+ return raw, nil
+}
+
+type messagePrelude struct {
+ Length uint32
+ HeadersLen uint32
+ PreludeCRC uint32
+}
+
+func (p messagePrelude) PayloadLen() uint32 {
+ return p.Length - p.HeadersLen - minMsgLen
+}
+
+func (p messagePrelude) ValidateLens() error {
+ if p.Length == 0 || p.Length > maxMsgLen {
+ return LengthError{
+ Part: "message prelude",
+ Want: maxMsgLen,
+ Have: int(p.Length),
+ }
+ }
+ if p.HeadersLen > maxHeadersLen {
+ return LengthError{
+ Part: "message headers",
+ Want: maxHeadersLen,
+ Have: int(p.HeadersLen),
+ }
+ }
+ if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
+ return LengthError{
+ Part: "message payload",
+ Want: maxPayloadLen,
+ Have: int(payloadLen),
+ }
+ }
+
+ return nil
+}
+
+type rawMessage struct {
+ messagePrelude
+
+ Headers []byte
+ Payload []byte
+
+ CRC uint32
+}
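A worked example of the framing constants above, valid within the package (messagePrelude is unexported): the prelude carries two 4-byte lengths, followed by a 4-byte prelude CRC, the headers, the payload, and a 4-byte message CRC, so minMsgLen is 16.

	// For 20 bytes of headers and a 100-byte payload:
	p := messagePrelude{Length: 16 + 20 + 100, HeadersLen: 20}
	fmt.Println(p.PayloadLen()) // 100 = Length - HeadersLen - minMsgLen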
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 00000000..07a6187e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,79 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to encode rest XML request", err),
+ 0,
+ r.RequestID,
+ )
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode REST XML response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+ query.UnmarshalError(r)
+}
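A hedged sketch of how a REST XML service client wires these named handlers in; svc stands for any client.Client-backed service (for example the S3 client added later in this diff), whose generated constructor does the equivalent:

	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)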
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 00000000..b4a4e8c4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,24952 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/eventstream"
+ "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates an "aws/request.Request" representing the
+// client's request for the AbortMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AbortMultipartUpload for more information on using the AbortMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AbortMultipartUploadRequest method.
+// req, resp := client.AbortMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opAbortMultipartUpload,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &AbortMultipartUploadInput{}
+ }
+
+ output = &AbortMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AbortMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Aborts a multipart upload.
+//
+// To verify that all parts have been removed (so you don't get charged for
+// part storage), call the List Parts operation and ensure the parts list
+// is empty.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation AbortMultipartUpload for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchUpload "NoSuchUpload"
+// The specified multipart upload does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AbortMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
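A minimal sketch of the context-aware variant; the bucket, key, and upload ID are placeholder values, and svc is an *s3.S3 client:

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := svc.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("my-key"),
		UploadId: aws.String("upload-id"),
	})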
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates an "aws/request.Request" representing the
+// client's request for the CompleteMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CompleteMultipartUploadRequest method.
+// req, resp := client.CompleteMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCompleteMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CompleteMultipartUploadInput{}
+ }
+
+ output = &CompleteMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CompleteMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Completes a multipart upload by assembling previously uploaded parts.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CompleteMultipartUpload for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CompleteMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
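+
+// A minimal usage sketch (svc, uploadID, and completedParts are placeholders):
+// once every part has been uploaded with UploadPart, the collected part
+// numbers and ETags are assembled into the final object.
+//
+//     out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//         Bucket:   aws.String("my-bucket"),
+//         Key:      aws.String("my-key"),
+//         UploadId: aws.String(uploadID),
+//         MultipartUpload: &s3.CompletedMultipartUpload{
+//             Parts: completedParts, // []*s3.CompletedPart, ordered by PartNumber
+//         },
+//     })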
+
+const opCopyObject = "CopyObject"
+
+// CopyObjectRequest generates an "aws/request.Request" representing the
+// client's request for the CopyObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CopyObject for more information on using the CopyObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CopyObjectRequest method.
+// req, resp := client.CopyObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
+ op := &request.Operation{
+ Name: opCopyObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CopyObjectInput{}
+ }
+
+ output = &CopyObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CopyObject API operation for Amazon Simple Storage Service.
+//
+// Creates a copy of an object that is already stored in Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CopyObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
+// The source object of the COPY operation is not in the active tier and is
+// only stored in Amazon Glacier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ return out, req.Send()
+}
+
+// CopyObjectWithContext is the same as CopyObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CopyObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
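+
+// A minimal usage sketch (svc and the bucket/key names are placeholders): the
+// CopySource field names the source object as "bucket/key".
+//
+//     out, err := svc.CopyObject(&s3.CopyObjectInput{
+//         Bucket:     aws.String("dest-bucket"),
+//         Key:        aws.String("dest-key"),
+//         CopySource: aws.String("src-bucket/src-key"), // URL-encode keys with special characters
+//     })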
+
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates an "aws/request.Request" representing the
+// client's request for the CreateBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateBucket for more information on using the CreateBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CreateBucketRequest method.
+// req, resp := client.CreateBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
+ op := &request.Operation{
+ Name: opCreateBucket,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &CreateBucketInput{}
+ }
+
+ output = &CreateBucketOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateBucket API operation for Amazon Simple Storage Service.
+//
+// Creates a new bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBucketAlreadyExists "BucketAlreadyExists"
+// The requested bucket name is not available. The bucket namespace is shared
+// by all users of the system. Please select a different name and try again.
+//
+// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
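+
+// A minimal usage sketch (svc and the bucket name are placeholders): outside
+// us-east-1 the Region is named in a LocationConstraint, and the
+// BucketAlreadyOwnedByYou code can be detected with an awserr.Error type
+// assertion as described above.
+//
+//     _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//         Bucket: aws.String("my-bucket"),
+//         CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//             LocationConstraint: aws.String("us-west-2"),
+//         },
+//     })
+//     if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyOwnedByYou {
+//         // Bucket already exists and is owned by this account.
+//     }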
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates an "aws/request.Request" representing the
+// client's request for the CreateMultipartUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateMultipartUpload for more information on using the CreateMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CreateMultipartUploadRequest method.
+// req, resp := client.CreateMultipartUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCreateMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?uploads",
+ }
+
+ if input == nil {
+ input = &CreateMultipartUploadInput{}
+ }
+
+ output = &CreateMultipartUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload to stop being charged
+// for storage of the uploaded parts. Amazon S3 frees up the parts storage and
+// stops charging you for it only after you either complete or abort the
+// multipart upload.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateMultipartUpload for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ return out, req.Send()
+}
+
+// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
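+
+// A minimal usage sketch (svc and the names are placeholders): the returned
+// UploadId threads through the subsequent UploadPart and
+// CompleteMultipartUpload (or AbortMultipartUpload) calls.
+//
+//     create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//         Bucket: aws.String("my-bucket"),
+//         Key:    aws.String("my-key"),
+//     })
+//     if err != nil {
+//         return err
+//     }
+//     uploadID := aws.StringValue(create.UploadId)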
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucket for more information on using the DeleteBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketRequest method.
+// req, resp := client.DeleteBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucket,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInput{}
+ }
+
+ output = &DeleteBucketOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucket API operation for Amazon Simple Storage Service.
+//
+// Deletes the bucket. All objects (including all object versions and Delete
+// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucket for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWithContext is the same as DeleteBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
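+
+// A minimal usage sketch (svc and the bucket name are placeholders): the call
+// fails with a BucketNotEmpty error unless every object, object version, and
+// delete marker has been removed first.
+//
+//     _, err := svc.DeleteBucket(&s3.DeleteBucketInput{
+//         Bucket: aws.String("my-bucket"),
+//     })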
+
+const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration"
+
+// DeleteBucketAnalyticsConfigurationRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketAnalyticsConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &DeleteBucketAnalyticsConfigurationInput{}
+ }
+
+ output = &DeleteBucketAnalyticsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketAnalyticsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
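+
+// A minimal usage sketch (svc and the names are placeholders): the
+// configuration to remove is addressed by bucket plus analytics configuration
+// ID. The inventory and metrics variants below follow the same shape with
+// their own Id fields.
+//
+//     _, err := svc.DeleteBucketAnalyticsConfiguration(&s3.DeleteBucketAnalyticsConfigurationInput{
+//         Bucket: aws.String("my-bucket"),
+//         Id:     aws.String("my-analytics-config"),
+//     })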
+
+const opDeleteBucketCors = "DeleteBucketCors"
+
+// DeleteBucketCorsRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketCors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketCors for more information on using the DeleteBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketCorsRequest method.
+// req, resp := client.DeleteBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketCors,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &DeleteBucketCorsInput{}
+ }
+
+ output = &DeleteBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketCors API operation for Amazon Simple Storage Service.
+//
+// Deletes the CORS configuration information set for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketCors for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketEncryption = "DeleteBucketEncryption"
+
+// DeleteBucketEncryptionRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketEncryption operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketEncryptionRequest method.
+// req, resp := client.DeleteBucketEncryptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption
+func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketEncryption,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?encryption",
+ }
+
+ if input == nil {
+ input = &DeleteBucketEncryptionInput{}
+ }
+
+ output = &DeleteBucketEncryptionOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketEncryption API operation for Amazon Simple Storage Service.
+//
+// Deletes the server-side encryption configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketEncryption for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption
+func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) {
+ req, out := c.DeleteBucketEncryptionRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) {
+ req, out := c.DeleteBucketEncryptionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"
+
+// DeleteBucketInventoryConfigurationRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketInventoryConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInventoryConfigurationInput{}
+ }
+
+ output = &DeleteBucketInventoryConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketInventoryConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
+ req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) {
+ req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketLifecycle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketLifecycleRequest method.
+// req, resp := client.DeleteBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketLifecycle,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &DeleteBucketLifecycleInput{}
+ }
+
+ output = &DeleteBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deletes the lifecycle configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
+
+// DeleteBucketMetricsConfigurationRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
+// req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketMetricsConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &DeleteBucketMetricsConfigurationInput{}
+ }
+
+ output = &DeleteBucketMetricsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketMetricsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
+ req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) {
+ req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketPolicy = "DeleteBucketPolicy"
+
+// DeleteBucketPolicyRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketPolicyRequest method.
+// req, resp := client.DeleteBucketPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketPolicy,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &DeleteBucketPolicyInput{}
+ }
+
+ output = &DeleteBucketPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Deletes the policy from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketReplication for more information on using the DeleteBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketReplicationRequest method.
+// req, resp := client.DeleteBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketReplication,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &DeleteBucketReplicationInput{}
+ }
+
+ output = &DeleteBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Deletes the replication configuration from the bucket. For information about
+// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// in the Amazon S3 Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketTagging = "DeleteBucketTagging"
+
+// DeleteBucketTaggingRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketTagging for more information on using the DeleteBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketTaggingRequest method.
+// req, resp := client.DeleteBucketTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteBucketTaggingInput{}
+ }
+
+ output = &DeleteBucketTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Deletes the tags from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteBucketWebsite = "DeleteBucketWebsite"
+
+// DeleteBucketWebsiteRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteBucketWebsiteRequest method.
+// req, resp := client.DeleteBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketWebsite,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &DeleteBucketWebsiteInput{}
+ }
+
+ output = &DeleteBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// This operation removes the website configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObject = "DeleteObject"
+
+// DeleteObjectRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObject for more information on using the DeleteObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectRequest method.
+// req, resp := client.DeleteObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
+ op := &request.Operation{
+ Name: opDeleteObject,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &DeleteObjectInput{}
+ }
+
+ output = &DeleteObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObject API operation for Amazon Simple Storage Service.
+//
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a
+// null version, Amazon S3 does not remove any objects.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
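+
+// A minimal usage sketch (svc and the names are placeholders): on a versioned
+// bucket this inserts a delete marker; set VersionId on the input to remove a
+// specific version permanently.
+//
+//     _, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//         Bucket: aws.String("my-bucket"),
+//         Key:    aws.String("my-key"),
+//     })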
+
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectTaggingRequest method.
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjectTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteObjectTaggingInput{}
+ }
+
+ output = &DeleteObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the tag-set from an existing object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates an "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteObjectsRequest method.
+// req, resp := client.DeleteObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjects,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}?delete",
+ }
+
+ if input == nil {
+ input = &DeleteObjectsInput{}
+ }
+
+ output = &DeleteObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. You may specify up to 1000 keys.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjects for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
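+
+// A minimal usage sketch (svc and the names are placeholders): up to 1000 keys
+// per request, listed as ObjectIdentifiers; Quiet suppresses per-key success
+// entries in the response.
+//
+//     out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//         Bucket: aws.String("my-bucket"),
+//         Delete: &s3.Delete{
+//             Objects: []*s3.ObjectIdentifier{
+//                 {Key: aws.String("key-1")},
+//                 {Key: aws.String("key-2")},
+//             },
+//             Quiet: aws.Bool(true),
+//         },
+//     })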
+
+const opDeletePublicAccessBlock = "DeletePublicAccessBlock"
+
+// DeletePublicAccessBlockRequest generates an "aws/request.Request" representing the
+// client's request for the DeletePublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeletePublicAccessBlockRequest method.
+// req, resp := client.DeletePublicAccessBlockRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock
+func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) {
+ op := &request.Operation{
+ Name: opDeletePublicAccessBlock,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?publicAccessBlock",
+ }
+
+ if input == nil {
+ input = &DeletePublicAccessBlockInput{}
+ }
+
+ output = &DeletePublicAccessBlockOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Removes the PublicAccessBlock configuration from an Amazon S3 bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeletePublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock
+func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) {
+ req, out := c.DeletePublicAccessBlockRequest(input)
+ return out, req.Send()
+}
+
+// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeletePublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) {
+ req, out := c.DeletePublicAccessBlockRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
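+// A sketch of the custom-request pattern described above: the Request form
+// lets you adjust the request (for example, add a header) before sending.
+// The header name and bucket are hypothetical; the session is assumed to be
+// configured.
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    req, _ := svc.DeletePublicAccessBlockRequest(&s3.DeletePublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
+//    if err := req.Send(); err == nil {
+//        fmt.Println("public access block removed")
+//    }
+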
+const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
+
+// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAccelerateConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?accelerate",
+ }
+
+ if input == nil {
+ input = &GetBucketAccelerateConfigurationInput{}
+ }
+
+ output = &GetBucketAccelerateConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the accelerate configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAccelerateConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
+ req, out := c.GetBucketAccelerateConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) {
+ req, out := c.GetBucketAccelerateConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketAcl = "GetBucketAcl"
+
+// GetBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketAcl for more information on using the GetBucketAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketAclRequest method.
+// req, resp := client.GetBucketAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &GetBucketAclInput{}
+ }
+
+ output = &GetBucketAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Gets the access control policy for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
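+// A sketch of reading the grants returned by GetBucketAcl, assuming a
+// configured client ("svc"); the bucket name is hypothetical.
+//
+//    acl, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, g := range acl.Grants {
+//            fmt.Println(aws.StringValue(g.Permission), aws.StringValue(g.Grantee.Type))
+//        }
+//    }
+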
+const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
+
+// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAnalyticsConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &GetBucketAnalyticsConfigurationInput{}
+ }
+
+ output = &GetBucketAnalyticsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAnalyticsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketCors = "GetBucketCors"
+
+// GetBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketCors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketCors for more information on using the GetBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketCorsRequest method.
+// req, resp := client.GetBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opGetBucketCors,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &GetBucketCorsInput{}
+ }
+
+ output = &GetBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketCors API operation for Amazon Simple Storage Service.
+//
+// Returns the CORS configuration for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketCors for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketEncryption = "GetBucketEncryption"
+
+// GetBucketEncryptionRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketEncryption operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketEncryption for more information on using the GetBucketEncryption
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketEncryptionRequest method.
+// req, resp := client.GetBucketEncryptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption
+func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) {
+ op := &request.Operation{
+ Name: opGetBucketEncryption,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?encryption",
+ }
+
+ if input == nil {
+ input = &GetBucketEncryptionInput{}
+ }
+
+ output = &GetBucketEncryptionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketEncryption API operation for Amazon Simple Storage Service.
+//
+// Returns the server-side encryption configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketEncryption for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption
+func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) {
+ req, out := c.GetBucketEncryptionRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) {
+ req, out := c.GetBucketEncryptionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
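+// A sketch of inspecting a bucket's default encryption, assuming a configured
+// client ("svc"); the bucket name is hypothetical. A bucket with no encryption
+// configuration returns an error with code
+// ErrCodeServerSideEncryptionConfigurationNotFoundError (see errors.go).
+//
+//    enc, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, r := range enc.ServerSideEncryptionConfiguration.Rules {
+//            fmt.Println(aws.StringValue(r.ApplyServerSideEncryptionByDefault.SSEAlgorithm))
+//        }
+//    }
+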
+const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
+
+// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketInventoryConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketInventoryConfigurationRequest method.
+// req, resp := client.GetBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketInventoryConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &GetBucketInventoryConfigurationInput{}
+ }
+
+ output = &GetBucketInventoryConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketInventoryConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
+ req, out := c.GetBucketInventoryConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) {
+ req, out := c.GetBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketLifecycle = "GetBucketLifecycle"
+
+// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketLifecycle for more information on using the GetBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLifecycleRequest method.
+// req, resp := client.GetBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
+//
+// Deprecated: GetBucketLifecycle has been deprecated
+func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opGetBucketLifecycle,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleInput{}
+ }
+
+ output = &GetBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// No longer used; see the GetBucketLifecycleConfiguration operation instead.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
+//
+// Deprecated: GetBucketLifecycle has been deprecated
+func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+//
+// Deprecated: GetBucketLifecycleWithContext has been deprecated
+func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
+
+// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLifecycleConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleConfigurationInput{}
+ }
+
+ output = &GetBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the lifecycle configuration information set on the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
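+// A sketch of listing lifecycle rules with this non-deprecated operation
+// (preferred over GetBucketLifecycle above), assuming a configured client
+// ("svc"); the bucket name is hypothetical.
+//
+//    lc, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, r := range lc.Rules {
+//            fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
+//        }
+//    }
+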
+const opGetBucketLocation = "GetBucketLocation"
+
+// GetBucketLocationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLocation operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketLocation for more information on using the GetBucketLocation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLocationRequest method.
+// req, resp := client.GetBucketLocationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLocation,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?location",
+ }
+
+ if input == nil {
+ input = &GetBucketLocationInput{}
+ }
+
+ output = &GetBucketLocationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLocation API operation for Amazon Simple Storage Service.
+//
+// Returns the region the bucket resides in.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLocation for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLocation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
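+// A sketch of resolving a bucket's region, assuming a configured client
+// ("svc"); the bucket name is hypothetical. An empty LocationConstraint
+// denotes us-east-1, which NormalizeBucketLocation (bucket_location.go)
+// maps for you.
+//
+//    loc, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(s3.NormalizeBucketLocation(aws.StringValue(loc.LocationConstraint)))
+//    }
+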
+const opGetBucketLogging = "GetBucketLogging"
+
+// GetBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLogging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketLogging for more information on using the GetBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketLoggingRequest method.
+// req, resp := client.GetBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLogging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &GetBucketLoggingInput{}
+ }
+
+ output = &GetBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Returns the logging status of a bucket and the permissions users have to
+// view and modify that status. To use GET, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
+
+// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketMetricsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketMetricsConfigurationRequest method.
+// req, resp := client.GetBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketMetricsConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &GetBucketMetricsConfigurationInput{}
+ }
+
+ output = &GetBucketMetricsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketMetricsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
+ req, out := c.GetBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) {
+ req, out := c.GetBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketNotification = "GetBucketNotification"
+
+// GetBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketNotification operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketNotification for more information on using the GetBucketNotification
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketNotificationRequest method.
+// req, resp := client.GetBucketNotificationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
+//
+// Deprecated: GetBucketNotification has been deprecated
+func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opGetBucketNotification,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ output = &NotificationConfigurationDeprecated{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketNotification API operation for Amazon Simple Storage Service.
+//
+// No longer used; see the GetBucketNotificationConfiguration operation instead.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotification for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
+//
+// Deprecated: GetBucketNotification has been deprecated
+func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+//
+// Deprecated: GetBucketNotificationWithContext has been deprecated
+func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"
+
+// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketNotificationConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketNotificationConfigurationRequest method.
+// req, resp := client.GetBucketNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
+func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
+ op := &request.Operation{
+ Name: opGetBucketNotificationConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ output = &NotificationConfiguration{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the notification configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotificationConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
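+// A sketch of reading a bucket's notification configuration with this
+// non-deprecated operation, assuming a configured client ("svc"); the bucket
+// name is hypothetical. Note that the input type is named
+// GetBucketNotificationConfigurationRequest, unlike other inputs.
+//
+//    nc, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(len(nc.TopicConfigurations), "topic configurations")
+//    }
+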
+const opGetBucketPolicy = "GetBucketPolicy"
+
+// GetBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketPolicy for more information on using the GetBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketPolicyRequest method.
+// req, resp := client.GetBucketPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
+func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetBucketPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &GetBucketPolicyInput{}
+ }
+
+ output = &GetBucketPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Returns the policy of a specified bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
+func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
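+// A sketch of fetching a bucket policy, assuming a configured client ("svc");
+// the bucket name is hypothetical. The returned Policy field is the policy
+// JSON as a string.
+//
+//    pol, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(pol.Policy))
+//    }
+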
+const opGetBucketPolicyStatus = "GetBucketPolicyStatus"
+
+// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketPolicyStatus operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketPolicyStatusRequest method.
+// req, resp := client.GetBucketPolicyStatusRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus
+func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) {
+ op := &request.Operation{
+ Name: opGetBucketPolicyStatus,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?policyStatus",
+ }
+
+ if input == nil {
+ input = &GetBucketPolicyStatusInput{}
+ }
+
+ output = &GetBucketPolicyStatusOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketPolicyStatus API operation for Amazon Simple Storage Service.
+//
+// Retrieves the policy status for an Amazon S3 bucket, indicating whether the
+// bucket is public.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketPolicyStatus for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus
+func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) {
+ req, out := c.GetBucketPolicyStatusRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketPolicyStatus for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) {
+ req, out := c.GetBucketPolicyStatusRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
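+// A sketch of checking whether a bucket is public, assuming a configured
+// client ("svc"); the bucket name is hypothetical.
+//
+//    ps, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil && aws.BoolValue(ps.PolicyStatus.IsPublic) {
+//        fmt.Println("bucket is public")
+//    }
+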
+const opGetBucketReplication = "GetBucketReplication"
+
+// GetBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketReplication for more information on using the GetBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketReplicationRequest method.
+// req, resp := client.GetBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ output = &GetBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while for a newly put or deleted replication configuration
+// to propagate to all Amazon S3 systems, so a get request made soon after a
+// put or delete can return an outdated result.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
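+// A sketch of reading replication rules, assuming a configured client
+// ("svc"); the bucket name is hypothetical. Because changes propagate
+// asynchronously (see above), a result fetched right after a put or delete
+// may be outdated.
+//
+//    rep, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        for _, r := range rep.ReplicationConfiguration.Rules {
+//            fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
+//        }
+//    }
+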
+const opGetBucketRequestPayment = "GetBucketRequestPayment"
+
+// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketRequestPayment operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketRequestPaymentRequest method.
+// req, resp := client.GetBucketRequestPaymentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
+ op := &request.Operation{
+ Name: opGetBucketRequestPayment,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &GetBucketRequestPaymentInput{}
+ }
+
+ output = &GetBucketRequestPaymentOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
+// Returns the request payment configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketRequestPayment for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
+func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketTagging = "GetBucketTagging"
+
+// GetBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the
+// service. The "output" return value is not valid until after Send returns
+// without error.
+//
+// See GetBucketTagging for more information on using the GetBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetBucketTaggingRequest method.
+// req, resp := client.GetBucketTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
+func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &GetBucketTaggingInput{}
+ }
+
+ output = &GetBucketTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag set associated with the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
+func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
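+
+// A minimal sketch of reading the returned tag set, assuming "client" is an
+// *S3 client; the bucket name is hypothetical:
+//
+//    out, err := client.GetBucketTagging(&s3.GetBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        for _, tag := range out.TagSet {
+//            fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//        }
+//    }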
+
+const opGetBucketVersioning = "GetBucketVersioning"
+
+// GetBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketVersioning for more information on using the GetBucketVersioning
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketVersioningRequest method.
+// req, resp := client.GetBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opGetBucketVersioning,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &GetBucketVersioningInput{}
+ }
+
+ output = &GetBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Returns the versioning state of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketVersioning for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
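+
+// A minimal sketch of checking the versioning state, assuming "client" is an
+// *S3 client; the bucket name is hypothetical. Status is unset for buckets
+// that have never had versioning configured:
+//
+//    out, err := client.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil && aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled {
+//        fmt.Println("versioning is enabled")
+//    }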
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetBucketWebsiteRequest method.
+// req, resp := client.GetBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ output = &GetBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a "aws/request.Request" representing the
+// client's request for the GetObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObject for more information on using the GetObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectRequest method.
+// req, resp := client.GetObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
+ op := &request.Operation{
+ Name: opGetObject,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &GetObjectInput{}
+ }
+
+ output = &GetObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObject API operation for Amazon Simple Storage Service.
+//
+// Retrieves objects from Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectWithContext is the same as GetObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
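+
+// A minimal sketch of fetching an object under a deadline via
+// GetObjectWithContext, assuming "client" is an *S3 client; the bucket, key,
+// and timeout are hypothetical, and context/time/io/os are imported:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := client.GetObjectWithContext(ctx, &s3.GetObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example/key.txt"),
+//    })
+//    if err == nil {
+//        defer out.Body.Close()       // caller must close the Body
+//        io.Copy(os.Stdout, out.Body) // stream the object contents
+//    }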
+
+const opGetObjectAcl = "GetObjectAcl"
+
+// GetObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectAcl for more information on using the GetObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectAclRequest method.
+// req, resp := client.GetObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
+ op := &request.Operation{
+ Name: opGetObjectAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &GetObjectAclInput{}
+ }
+
+ output = &GetObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Returns the access control list (ACL) of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
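+
+// A minimal sketch of inspecting the returned grants, assuming "client" is an
+// *S3 client; the bucket and key are hypothetical:
+//
+//    out, err := client.GetObjectAcl(&s3.GetObjectAclInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example/key.txt"),
+//    })
+//    if err == nil {
+//        for _, g := range out.Grants {
+//            fmt.Println(aws.StringValue(g.Permission), g.Grantee)
+//        }
+//    }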
+
+const opGetObjectLegalHold = "GetObjectLegalHold"
+
+// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectLegalHold operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectLegalHold for more information on using the GetObjectLegalHold
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectLegalHoldRequest method.
+// req, resp := client.GetObjectLegalHoldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold
+func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) {
+ op := &request.Operation{
+ Name: opGetObjectLegalHold,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?legal-hold",
+ }
+
+ if input == nil {
+ input = &GetObjectLegalHoldInput{}
+ }
+
+ output = &GetObjectLegalHoldOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectLegalHold API operation for Amazon Simple Storage Service.
+//
+// Gets an object's current Legal Hold status.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectLegalHold for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold
+func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) {
+ req, out := c.GetObjectLegalHoldRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectLegalHold for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) {
+ req, out := c.GetObjectLegalHoldRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObjectLockConfiguration = "GetObjectLockConfiguration"
+
+// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectLockConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectLockConfigurationRequest method.
+// req, resp := client.GetObjectLockConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration
+func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetObjectLockConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?object-lock",
+ }
+
+ if input == nil {
+ input = &GetObjectLockConfigurationInput{}
+ }
+
+ output = &GetObjectLockConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectLockConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets the object lock configuration for a bucket. The rule specified in the
+// object lock configuration will be applied by default to every new object
+// placed in the specified bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectLockConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration
+func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) {
+ req, out := c.GetObjectLockConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectLockConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) {
+ req, out := c.GetObjectLockConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
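+
+// A minimal sketch of reading the default retention rule, if any, assuming
+// "client" is an *S3 client; the bucket name is hypothetical:
+//
+//    out, err := client.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil && out.ObjectLockConfiguration != nil && out.ObjectLockConfiguration.Rule != nil {
+//        if ret := out.ObjectLockConfiguration.Rule.DefaultRetention; ret != nil {
+//            fmt.Println(aws.StringValue(ret.Mode), aws.Int64Value(ret.Days))
+//        }
+//    }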
+
+const opGetObjectRetention = "GetObjectRetention"
+
+// GetObjectRetentionRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectRetention operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectRetention for more information on using the GetObjectRetention
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectRetentionRequest method.
+// req, resp := client.GetObjectRetentionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention
+func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) {
+ op := &request.Operation{
+ Name: opGetObjectRetention,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?retention",
+ }
+
+ if input == nil {
+ input = &GetObjectRetentionInput{}
+ }
+
+ output = &GetObjectRetentionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectRetention API operation for Amazon Simple Storage Service.
+//
+// Retrieves an object's retention settings.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectRetention for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention
+func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) {
+ req, out := c.GetObjectRetentionRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectRetention for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) {
+ req, out := c.GetObjectRetentionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectTagging for more information on using the GetObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectTaggingRequest method.
+// req, resp := client.GetObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &GetObjectTaggingInput{}
+ }
+
+ output = &GetObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetObjectTorrent = "GetObjectTorrent"
+
+// GetObjectTorrentRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTorrent operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetObjectTorrent for more information on using the GetObjectTorrent
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetObjectTorrentRequest method.
+// req, resp := client.GetObjectTorrentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
+func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTorrent,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?torrent",
+ }
+
+ if input == nil {
+ input = &GetObjectTorrentInput{}
+ }
+
+ output = &GetObjectTorrentOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetObjectTorrent API operation for Amazon Simple Storage Service.
+//
+// Returns torrent files from a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTorrent for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
+func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ return out, req.Send()
+}
+
+// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTorrent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetPublicAccessBlock = "GetPublicAccessBlock"
+
+// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the GetPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetPublicAccessBlockRequest method.
+// req, resp := client.GetPublicAccessBlockRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock
+func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) {
+ op := &request.Operation{
+ Name: opGetPublicAccessBlock,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?publicAccessBlock",
+ }
+
+ if input == nil {
+ input = &GetPublicAccessBlockInput{}
+ }
+
+ output = &GetPublicAccessBlockOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetPublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetPublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock
+func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) {
+ req, out := c.GetPublicAccessBlockRequest(input)
+ return out, req.Send()
+}
+
+// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetPublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) {
+ req, out := c.GetPublicAccessBlockRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
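+
+// A minimal sketch of reading the returned configuration, assuming "client" is
+// an *S3 client; the bucket name is hypothetical:
+//
+//    out, err := client.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil && out.PublicAccessBlockConfiguration != nil {
+//        cfg := out.PublicAccessBlockConfiguration
+//        fmt.Println(aws.BoolValue(cfg.BlockPublicAcls), aws.BoolValue(cfg.BlockPublicPolicy))
+//    }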
+
+const opHeadBucket = "HeadBucket"
+
+// HeadBucketRequest generates a "aws/request.Request" representing the
+// client's request for the HeadBucket operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See HeadBucket for more information on using the HeadBucket
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the HeadBucketRequest method.
+// req, resp := client.HeadBucketRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
+ op := &request.Operation{
+ Name: opHeadBucket,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &HeadBucketInput{}
+ }
+
+ output = &HeadBucketOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// HeadBucket API operation for Amazon Simple Storage Service.
+//
+// This operation is useful for determining whether a bucket exists and whether
+// you have permission to access it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadBucket for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ return out, req.Send()
+}
+
+// HeadBucketWithContext is the same as HeadBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
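+
+// A minimal sketch of an existence/permission check via HeadBucket, assuming
+// "client" is an *S3 client and awserr is imported; the bucket name is
+// hypothetical. HEAD responses carry no body, so the error code is the main
+// signal:
+//
+//    _, err := client.HeadBucket(&s3.HeadBucketInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println("cannot access bucket:", aerr.Code())
+//    }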
+
+const opHeadObject = "HeadObject"
+
+// HeadObjectRequest generates a "aws/request.Request" representing the
+// client's request for the HeadObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See HeadObject for more information on using the HeadObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the HeadObjectRequest method.
+// req, resp := client.HeadObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
+ op := &request.Operation{
+ Name: opHeadObject,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &HeadObjectInput{}
+ }
+
+ output = &HeadObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// HeadObject API operation for Amazon Simple Storage Service.
+//
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're only interested in an object's
+// metadata. To use HEAD, you must have READ access to the object.
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+// for more information on returned errors.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ return out, req.Send()
+}
+
+// HeadObjectWithContext is the same as HeadObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
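+
+// A minimal sketch of reading object metadata without the body, assuming
+// "client" is an *S3 client; the bucket and key are hypothetical:
+//
+//    out, err := client.HeadObject(&s3.HeadObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example/key.txt"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(out.ContentLength), aws.TimeValue(out.LastModified))
+//    }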
+
+const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
+
+// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketAnalyticsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &ListBucketAnalyticsConfigurationsInput{}
+ }
+
+ output = &ListBucketAnalyticsConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the analytics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketAnalyticsConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketAnalyticsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
+
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketInventoryConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ output = &ListBucketInventoryConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// Returns a list of inventory configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketInventoryConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
+ req, out := c.ListBucketInventoryConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketInventoryConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) {
+ req, out := c.ListBucketInventoryConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
+
+// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketMetricsConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketMetricsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &ListBucketMetricsConfigurationsInput{}
+ }
+
+ output = &ListBucketMetricsConfigurationsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the metrics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketMetricsConfigurations for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
+ req, out := c.ListBucketMetricsConfigurationsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketMetricsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) {
+ req, out := c.ListBucketMetricsConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBuckets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBuckets for more information on using the ListBuckets
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListBucketsRequest method.
+// req, resp := client.ListBucketsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
+ op := &request.Operation{
+ Name: opListBuckets,
+ HTTPMethod: "GET",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBucketsInput{}
+ }
+
+ output = &ListBucketsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListBuckets API operation for Amazon Simple Storage Service.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBuckets for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ return out, req.Send()
+}
+
+// ListBucketsWithContext is the same as ListBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
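+
+// A minimal sketch of enumerating the caller's buckets, assuming "client" is
+// an *S3 client:
+//
+//    out, err := client.ListBuckets(&s3.ListBucketsInput{})
+//    if err == nil {
+//        for _, b := range out.Buckets {
+//            fmt.Println(aws.StringValue(b.Name))
+//        }
+//    }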
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListMultipartUploads for more information on using the ListMultipartUploads
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListMultipartUploadsRequest method.
+// req, resp := client.ListMultipartUploadsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+ op := &request.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?uploads",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "UploadIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
+ LimitToken: "MaxUploads",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ output = &ListMultipartUploadsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This operation lists in-progress multipart uploads.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ return out, req.Send()
+}
+
+// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListMultipartUploads for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListMultipartUploads method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListMultipartUploads operation.
+// pageNum := 0
+// err := client.ListMultipartUploadsPages(params,
+// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
+ return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListMultipartUploadsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListMultipartUploadsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
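The paginator doc above only sketches the callback contract, so here is a minimal, self-contained sketch of draining the ListMultipartUploads paginator. The region, bucket name, and session setup are placeholders for illustration, not part of this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder region and bucket; adjust for a real environment.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Walk every page of in-progress multipart uploads. Returning true from
	// the callback tells the paginator to fetch the next page.
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("my-bucket"),
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```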
+
+const opListObjectVersions = "ListObjectVersions"
+
+// ListObjectVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjectVersions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListObjectVersions for more information on using the ListObjectVersions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListObjectVersionsRequest method.
+// req, resp := client.ListObjectVersionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
+ op := &request.Operation{
+ Name: opListObjectVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versions",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "VersionIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectVersionsInput{}
+ }
+
+ output = &ListObjectVersionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjectVersions API operation for Amazon Simple Storage Service.
+//
+// Returns metadata about all of the versions of objects in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectVersions for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ return out, req.Send()
+}
+
+// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectVersions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectVersions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjectVersions operation.
+// pageNum := 0
+// err := client.ListObjectVersionsPages(params,
+// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
+ return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectVersionsPagesWithContext is the same as ListObjectVersionsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectVersionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectVersionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
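Since the WithContext variants above only state the non-nil-context requirement, a short sketch of bounding a version listing with a timeout may help; a plain context.Context satisfies aws.Context, and everything named here (region, bucket) is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Cancelling the context aborts whichever page request is in flight.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := svc.ListObjectVersionsPagesWithContext(ctx, &s3.ListObjectVersionsInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```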
+
+const opListObjects = "ListObjects"
+
+// ListObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjects operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListObjects for more information on using the ListObjects
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListObjectsRequest method.
+// req, resp := client.ListObjectsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
+ op := &request.Operation{
+ Name: opListObjects,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker || Contents[-1].Key"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsInput{}
+ }
+
+ output = &ListObjectsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjects API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjects for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ return out, req.Send()
+}
+
+// ListObjectsWithContext is the same as ListObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectsPages iterates over the pages of a ListObjects operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjects method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjects operation.
+// pageNum := 0
+// err := client.ListObjectsPages(params,
+// func(page *s3.ListObjectsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+ return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsPagesWithContext is the same as ListObjectsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
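The Request doc above repeatedly mentions injecting custom behavior (headers, retry logic) via the Request/Send split; a sketch of that pattern follows. The `X-Debug-Run` header and bucket name are hypothetical, chosen only to show where a mutation would go:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Build the request without sending it, tweak it, then Send. The output
	// struct is only populated after Send returns without error.
	req, resp := svc.ListObjectsRequest(&s3.ListObjectsInput{
		Bucket:  aws.String("my-bucket"), // placeholder
		MaxKeys: aws.Int64(100),
	})
	req.HTTPRequest.Header.Set("X-Debug-Run", "1") // hypothetical custom header

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	for _, obj := range resp.Contents {
		fmt.Println(aws.StringValue(obj.Key))
	}
}
```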
+
+const opListObjectsV2 = "ListObjectsV2"
+
+// ListObjectsV2Request generates a "aws/request.Request" representing the
+// client's request for the ListObjectsV2 operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListObjectsV2 for more information on using the ListObjectsV2
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListObjectsV2Request method.
+// req, resp := client.ListObjectsV2Request(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
+ op := &request.Operation{
+ Name: opListObjectsV2,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?list-type=2",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ContinuationToken"},
+ OutputTokens: []string{"NextContinuationToken"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsV2Input{}
+ }
+
+ output = &ListObjectsV2Output{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListObjectsV2 API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend
+// you use this revised API for new application development.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectsV2 for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ return out, req.Send()
+}
+
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectsV2 for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectsV2 method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListObjectsV2 operation.
+// pageNum := 0
+// err := client.ListObjectsV2Pages(params,
+// func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+ return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsV2PagesWithContext is the same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsV2Input
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsV2Request(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+ }
+ return p.Err()
+}
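Because the V2 paginator above threads NextContinuationToken between requests automatically, the callback only has to collect results. A minimal sketch, with placeholder bucket and prefix:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	var keys []string
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("my-bucket"), // placeholder
		Prefix: aws.String("logs/"),     // placeholder prefix
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			keys = append(keys, aws.StringValue(obj.Key))
		}
		return true // continue until the last page
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(keys), "objects")
}
```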
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a "aws/request.Request" representing the
+// client's request for the ListParts operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListParts for more information on using the ListParts
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListPartsRequest method.
+// req, resp := client.ListPartsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+ op := &request.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"PartNumberMarker"},
+ OutputTokens: []string{"NextPartNumberMarker"},
+ LimitToken: "MaxParts",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ output = &ListPartsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListParts API operation for Amazon Simple Storage Service.
+//
+// Lists the parts that have been uploaded for a specific multipart upload.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListParts for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ return out, req.Send()
+}
+
+// ListPartsWithContext is the same as ListParts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListParts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListPartsPages iterates over the pages of a ListParts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListParts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListParts operation.
+// pageNum := 0
+// err := client.ListPartsPages(params,
+// func(page *s3.ListPartsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
+ return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPartsPagesWithContext is the same as ListPartsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListPartsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListPartsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
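ListParts pages over PartNumberMarker, as the operation definition above shows; a sketch of inspecting the parts of one multipart upload. The upload ID here is a placeholder that would normally come from CreateMultipartUpload:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	uploadID := "example-upload-id" // placeholder; normally returned by CreateMultipartUpload

	err := svc.ListPartsPages(&s3.ListPartsInput{
		Bucket:   aws.String("my-bucket"), // placeholder
		Key:      aws.String("big-object.bin"),
		UploadId: aws.String(uploadID),
	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
		for _, p := range page.Parts {
			fmt.Printf("part %d: %d bytes\n", aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```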
+
+const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
+
+// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
+// req, resp := client.PutBucketAccelerateConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
+func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAccelerateConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?accelerate",
+ }
+
+ if input == nil {
+ input = &PutBucketAccelerateConfigurationInput{}
+ }
+
+ output = &PutBucketAccelerateConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the accelerate configuration of an existing bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAccelerateConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
+func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
+ req, out := c.PutBucketAccelerateConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
+ req, out := c.PutBucketAccelerateConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
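As a concrete counterpart to the summary above, a sketch of enabling transfer acceleration using the SDK's status constant; the bucket name is a placeholder, and the output struct carries no fields:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Turn acceleration on; use s3.BucketAccelerateStatusSuspended to pause it.
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```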
+
+const opPutBucketAcl = "PutBucketAcl"
+
+// PutBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketAcl for more information on using the PutBucketAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketAclRequest method.
+// req, resp := client.PutBucketAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &PutBucketAclInput{}
+ }
+
+ output = &PutBucketAclOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Sets the permissions on a bucket using access control lists (ACL).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
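For the ACL operation above, the simplest form is a canned ACL rather than an explicit grant list; a sketch with a placeholder bucket:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Reset the bucket to the "private" canned ACL; other canned values are
	// exposed as s3.BucketCannedACL* constants.
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket: aws.String("my-bucket"), // placeholder
		ACL:    aws.String(s3.BucketCannedACLPrivate),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```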
+
+const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
+
+// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
+// req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAnalyticsConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &PutBucketAnalyticsConfigurationInput{}
+ }
+
+ output = &PutBucketAnalyticsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAnalyticsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketCors = "PutBucketCors"
+
+// PutBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketCors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketCors for more information on using the PutBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketCorsRequest method.
+// req, resp := client.PutBucketCorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opPutBucketCors,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &PutBucketCorsInput{}
+ }
+
+ output = &PutBucketCorsOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketCors API operation for Amazon Simple Storage Service.
+//
+// Sets the CORS configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketCors for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
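A minimal sketch of the CORS operation above, allowing read-only browser access from a single origin; the bucket and origin are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// One rule permitting GET/HEAD from a single origin, cached for 3000s.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("my-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: aws.StringSlice([]string{"GET", "HEAD"}),
				AllowedOrigins: aws.StringSlice([]string{"https://example.com"}),
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```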
+
+const opPutBucketEncryption = "PutBucketEncryption"
+
+// PutBucketEncryptionRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketEncryption operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketEncryption for more information on using the PutBucketEncryption
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketEncryptionRequest method.
+// req, resp := client.PutBucketEncryptionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
+func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) {
+ op := &request.Operation{
+ Name: opPutBucketEncryption,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?encryption",
+ }
+
+ if input == nil {
+ input = &PutBucketEncryptionInput{}
+ }
+
+ output = &PutBucketEncryptionOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketEncryption API operation for Amazon Simple Storage Service.
+//
+// Creates a new server-side encryption configuration (or replaces an existing
+// one, if present).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketEncryption for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption
+func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) {
+ req, out := c.PutBucketEncryptionRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketEncryption for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) {
+ req, out := c.PutBucketEncryptionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
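To make the create-or-replace semantics above concrete, a sketch that sets SSE-S3 (AES-256) as the bucket default; the bucket name is a placeholder:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Default every new object to AES-256 server-side encryption; this call
	// replaces any configuration already on the bucket.
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```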
+
+const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
+
+// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketInventoryConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketInventoryConfigurationRequest method.
+// req, resp := client.PutBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketInventoryConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &PutBucketInventoryConfigurationInput{}
+ }
+
+ output = &PutBucketInventoryConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Adds an inventory configuration (identified by the inventory ID) to the
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketInventoryConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
+ req, out := c.PutBucketInventoryConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) {
+ req, out := c.PutBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketLifecycle = "PutBucketLifecycle"
+
+// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycle operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLifecycle for more information on using the PutBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLifecycleRequest method.
+// req, resp := client.PutBucketLifecycleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+//
+// Deprecated: PutBucketLifecycle has been deprecated
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opPutBucketLifecycle,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleInput{}
+ }
+
+ output = &PutBucketLifecycleOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// No longer used, see the PutBucketLifecycleConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+//
+// Deprecated: PutBucketLifecycle has been deprecated
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+//
+// Deprecated: PutBucketLifecycleWithContext has been deprecated
+func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ output = &PutBucketLifecycleConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the lifecycle configuration for your bucket. If a lifecycle configuration
+// already exists, this operation replaces it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycleConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
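Since PutBucketLifecycleConfiguration replaces the whole configuration, a sketch sending one complete rule may clarify the shape of the input; the bucket, rule ID, and prefix are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Expire objects under logs/ after 30 days. Note the full rule set is
	// replaced, so include every rule the bucket should keep.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-old-logs"), // placeholder rule ID
				Status:     aws.String(s3.ExpirationStatusEnabled),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```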
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketLogging for more information on using the PutBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketLoggingRequest method.
+// req, resp := client.PutBucketLoggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ output = &PutBucketLoggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify the logging parameters. To set the logging status of
+// a bucket, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
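A sketch of the logging operation above, directing server access logs to a second bucket; both bucket names and the prefix are placeholders, and the target bucket is assumed to already grant log delivery permissions:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Send access logs for my-bucket to my-log-bucket under access-logs/.
	// Omitting LoggingEnabled entirely would turn logging off.
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("my-bucket"), // placeholder
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("my-log-bucket"), // placeholder
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```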
+
+const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
+
+// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketMetricsConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketMetricsConfigurationRequest method.
+// req, resp := client.PutBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketMetricsConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &PutBucketMetricsConfigurationInput{}
+ }
+
+ output = &PutBucketMetricsConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID)
+// for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketMetricsConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotification operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketNotification for more information on using the PutBucketNotification
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutBucketNotificationRequest method.
+// req, resp := client.PutBucketNotificationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+//
+// Deprecated: PutBucketNotification has been deprecated
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opPutBucketNotification,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationInput{}
+ }
+
+ output = &PutBucketNotificationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketNotification API operation for Amazon Simple Storage Service.
+//
+// No longer used, see the PutBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotification for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+//
+// Deprecated: PutBucketNotification has been deprecated
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+//
+// Deprecated: PutBucketNotificationWithContext has been deprecated
+func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotificationConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
+// req, resp := client.PutBucketNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketNotificationConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationConfigurationInput{}
+ }
+
+ output = &PutBucketNotificationConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Enables notifications of specified events for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotificationConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
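+
+// A minimal usage sketch, assuming svc is an *s3.S3 client (for example
+// s3.New(session.Must(session.NewSession()))); the bucket name and SNS topic
+// ARN below are placeholders:
+//
+//    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            TopicConfigurations: []*s3.TopicConfiguration{{
+//                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // placeholder
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//            }},
+//        },
+//    })
+//    if err != nil {
+//        // inspect err as an awserr.Error, as described above
+//    }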
+
+const opPutBucketPolicy = "PutBucketPolicy"
+
+// PutBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketPolicy for more information on using the PutBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketPolicyRequest method.
+// req, resp := client.PutBucketPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opPutBucketPolicy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &PutBucketPolicyInput{}
+ }
+
+ output = &PutBucketPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketPolicy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
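+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket name
+// and policy document are placeholders:
+//
+//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
+//        "Principal":"*","Action":"s3:GetObject",
+//        "Resource":"arn:aws:s3:::my-bucket/*"}]}`
+//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        Policy: aws.String(policy),
+//    })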
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketReplication operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketReplication for more information on using the PutBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketReplicationRequest method.
+// req, resp := client.PutBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketReplication,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &PutBucketReplicationInput{}
+ }
+
+ output = &PutBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Creates a replication configuration or replaces an existing one. For more
+// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// in the Amazon S3 Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
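+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the IAM role ARN,
+// bucket names, and rule ID are placeholders, and versioning must already be
+// enabled on both buckets:
+//
+//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("source-bucket"), // placeholder
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // placeholder
+//            Rules: []*s3.ReplicationRule{{
+//                ID:     aws.String("replicate-everything"), // placeholder
+//                Prefix: aws.String(""),
+//                Status: aws.String("Enabled"),
+//                Destination: &s3.Destination{
+//                    Bucket: aws.String("arn:aws:s3:::destination-bucket"), // placeholder
+//                },
+//            }},
+//        },
+//    })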
+
+const opPutBucketRequestPayment = "PutBucketRequestPayment"
+
+// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketRequestPayment operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketRequestPaymentRequest method.
+// req, resp := client.PutBucketRequestPaymentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
+func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
+ op := &request.Operation{
+ Name: opPutBucketRequestPayment,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &PutBucketRequestPaymentInput{}
+ }
+
+ output = &PutBucketRequestPaymentOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download
+// will be charged for it. Documentation on Requester Pays buckets can be found
+// at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketRequestPayment for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
+func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
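+
+// A minimal usage sketch, assuming svc is an *s3.S3 client and the bucket
+// name is a placeholder:
+//
+//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//            Payer: aws.String("Requester"), // or "BucketOwner" (the default)
+//        },
+//    })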
+
+const opPutBucketTagging = "PutBucketTagging"
+
+// PutBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketTagging for more information on using the PutBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketTaggingRequest method.
+// req, resp := client.PutBucketTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &PutBucketTaggingInput{}
+ }
+
+ output = &PutBucketTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the tags for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
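+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket name
+// and tag values are placeholders. Note that this call replaces any existing
+// tag set on the bucket:
+//
+//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{
+//                Key:   aws.String("Environment"), // placeholder
+//                Value: aws.String("production"),  // placeholder
+//            }},
+//        },
+//    })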
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketVersioningRequest method.
+// req, resp := client.PutBucketVersioningRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opPutBucketVersioning,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &PutBucketVersioningInput{}
+ }
+
+ output = &PutBucketVersioningOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketVersioning for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
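+
+// A minimal usage sketch, assuming svc is an *s3.S3 client and the bucket
+// name is a placeholder:
+//
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String(s3.BucketVersioningStatusEnabled), // "Enabled" or "Suspended"
+//        },
+//    })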
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketWebsite for more information on using the PutBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutBucketWebsiteRequest method.
+// req, resp := client.PutBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opPutBucketWebsite,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &PutBucketWebsiteInput{}
+ }
+
+ output = &PutBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Set the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
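+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket name
+// and document names are placeholders:
+//
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })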
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a "aws/request.Request" representing the
+// client's request for the PutObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObject for more information on using the PutObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutObjectRequest method.
+// req, resp := client.PutObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+ op := &request.Operation{
+ Name: opPutObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &PutObjectInput{}
+ }
+
+ output = &PutObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObject API operation for Amazon Simple Storage Service.
+//
+// Adds an object to a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
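+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket name,
+// key, and body are placeholders. The Body must be an io.ReadSeeker (for
+// example a strings.Reader, bytes.Reader, or *os.File):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
+//        Bucket:      aws.String("my-bucket"), // placeholder
+//        Key:         aws.String("hello.txt"), // placeholder
+//        Body:        strings.NewReader("hello, world"),
+//        ContentType: aws.String("text/plain"),
+//    })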
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectAcl for more information on using the PutObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutObjectAclRequest method.
+// req, resp := client.PutObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+ op := &request.Operation{
+ Name: opPutObjectAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectAclInput{}
+ }
+
+ output = &PutObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
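+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket and
+// key are placeholders, and the canned ACL "public-read" grants anonymous
+// read access:
+//
+//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//        Bucket: aws.String("my-bucket"),  // placeholder
+//        Key:    aws.String("report.pdf"), // placeholder
+//        ACL:    aws.String("public-read"),
+//    })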
+
+const opPutObjectLegalHold = "PutObjectLegalHold"
+
+// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectLegalHold operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectLegalHold for more information on using the PutObjectLegalHold
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutObjectLegalHoldRequest method.
+// req, resp := client.PutObjectLegalHoldRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold
+func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) {
+ op := &request.Operation{
+ Name: opPutObjectLegalHold,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?legal-hold",
+ }
+
+ if input == nil {
+ input = &PutObjectLegalHoldInput{}
+ }
+
+ output = &PutObjectLegalHoldOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectLegalHold API operation for Amazon Simple Storage Service.
+//
+// Applies a Legal Hold configuration to the specified object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectLegalHold for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold
+func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) {
+ req, out := c.PutObjectLegalHoldRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectLegalHold for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) {
+ req, out := c.PutObjectLegalHoldRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
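+
+// A minimal usage sketch, assuming svc is an *s3.S3 client, the bucket and
+// key are placeholders, and the bucket was created with object lock enabled:
+//
+//    _, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
+//        Bucket: aws.String("my-bucket"),    // placeholder
+//        Key:    aws.String("evidence.csv"), // placeholder
+//        LegalHold: &s3.ObjectLockLegalHold{
+//            Status: aws.String("ON"), // "ON" or "OFF"
+//        },
+//    })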
+
+const opPutObjectLockConfiguration = "PutObjectLockConfiguration"
+
+// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectLockConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutObjectLockConfigurationRequest method.
+// req, resp := client.PutObjectLockConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration
+func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutObjectLockConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?object-lock",
+ }
+
+ if input == nil {
+ input = &PutObjectLockConfigurationInput{}
+ }
+
+ output = &PutObjectLockConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectLockConfiguration API operation for Amazon Simple Storage Service.
+//
+// Places an object lock configuration on the specified bucket. The rule specified
+// in the object lock configuration will be applied by default to every new
+// object placed in the specified bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectLockConfiguration for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration
+func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) {
+ req, out := c.PutObjectLockConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectLockConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) {
+ req, out := c.PutObjectLockConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
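+
+// A minimal usage sketch, assuming svc is an *s3.S3 client and the bucket
+// name is a placeholder; the bucket must have been created with object lock
+// enabled:
+//
+//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//            ObjectLockEnabled: aws.String("Enabled"),
+//            Rule: &s3.ObjectLockRule{
+//                DefaultRetention: &s3.DefaultRetention{
+//                    Mode: aws.String("GOVERNANCE"), // or "COMPLIANCE"
+//                    Days: aws.Int64(30),
+//                },
+//            },
+//        },
+//    })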
+
+const opPutObjectRetention = "PutObjectRetention"
+
+// PutObjectRetentionRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectRetention operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectRetention for more information on using the PutObjectRetention
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutObjectRetentionRequest method.
+// req, resp := client.PutObjectRetentionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention
+func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) {
+ op := &request.Operation{
+ Name: opPutObjectRetention,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?retention",
+ }
+
+ if input == nil {
+ input = &PutObjectRetentionInput{}
+ }
+
+ output = &PutObjectRetentionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectRetention API operation for Amazon Simple Storage Service.
+//
+// Places an Object Retention configuration on an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectRetention for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention
+func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) {
+ req, out := c.PutObjectRetentionRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectRetention for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) {
+ req, out := c.PutObjectRetentionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
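+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket, key,
+// and retention period are placeholders:
+//
+//    _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
+//        Bucket: aws.String("my-bucket"),  // placeholder
+//        Key:    aws.String("ledger.csv"), // placeholder
+//        Retention: &s3.ObjectLockRetention{
+//            Mode:            aws.String("GOVERNANCE"), // or "COMPLIANCE"
+//            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//        },
+//    })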
+
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectTagging for more information on using the PutObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutObjectTaggingRequest method.
+// req, resp := client.PutObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutObjectTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &PutObjectTaggingInput{}
+ }
+
+ output = &PutObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the supplied tag-set on an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
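+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket, key,
+// and tags are placeholders. The supplied tag-set replaces any existing tags
+// on the object:
+//
+//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        Key:    aws.String("photo.jpg"), // placeholder
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{Key: aws.String("classification"), Value: aws.String("public")}},
+//        },
+//    })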
+
+const opPutPublicAccessBlock = "PutPublicAccessBlock"
+
+// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the PutPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutPublicAccessBlockRequest method.
+// req, resp := client.PutPublicAccessBlockRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock
+func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) {
+ op := &request.Operation{
+ Name: opPutPublicAccessBlock,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?publicAccessBlock",
+ }
+
+ if input == nil {
+ input = &PutPublicAccessBlockInput{}
+ }
+
+ output = &PutPublicAccessBlockOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutPublicAccessBlock API operation for Amazon Simple Storage Service.
+//
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutPublicAccessBlock for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock
+func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) {
+ req, out := c.PutPublicAccessBlockRequest(input)
+ return out, req.Send()
+}
+
+// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutPublicAccessBlock for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) {
+ req, out := c.PutPublicAccessBlockRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
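+
+// A minimal usage sketch, assuming svc is an *s3.S3 client and the bucket
+// name is a placeholder; this blocks all four categories of public access:
+//
+//    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//            BlockPublicAcls:       aws.Bool(true),
+//            BlockPublicPolicy:     aws.Bool(true),
+//            IgnorePublicAcls:      aws.Bool(true),
+//            RestrictPublicBuckets: aws.Bool(true),
+//        },
+//    })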
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the RestoreObjectRequest method.
+// req, resp := client.RestoreObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+ op := &request.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ output = &RestoreObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+// This operation is not allowed against this storage tier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
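+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket, key,
+// and restore window are placeholders. This requests a temporary copy of an
+// archived (e.g. GLACIER storage class) object:
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-bucket"),   // placeholder
+//        Key:    aws.String("archive.tar"), // placeholder
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(2),
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String("Standard"), // "Expedited", "Standard", or "Bulk"
+//            },
+//        },
+//    })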
+
+const opSelectObjectContent = "SelectObjectContent"
+
+// SelectObjectContentRequest generates a "aws/request.Request" representing the
+// client's request for the SelectObjectContent operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SelectObjectContent for more information on using the SelectObjectContent
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the SelectObjectContentRequest method.
+// req, resp := client.SelectObjectContentRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) {
+ op := &request.Operation{
+ Name: opSelectObjectContent,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?select&select-type=2",
+ }
+
+ if input == nil {
+ input = &SelectObjectContentInput{}
+ }
+
+ output = &SelectObjectContentOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop)
+ return
+}
+
+// SelectObjectContent API operation for Amazon Simple Storage Service.
+//
+// This operation filters the contents of an Amazon S3 object based on a simple
+// Structured Query Language (SQL) statement. In the request, along with the
+// SQL expression, you must also specify a data serialization format (JSON or
+// CSV) of the object. Amazon S3 uses this to parse object data into records,
+// and returns only records that match the specified SQL expression. You must
+// also specify the data serialization format for the response.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation SelectObjectContent for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
+ req, out := c.SelectObjectContentRequest(input)
+ return out, req.Send()
+}
+
+// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SelectObjectContent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
+ req, out := c.SelectObjectContentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
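+
+// A minimal usage sketch, assuming svc is an *s3.S3 client; the bucket, key,
+// and SQL expression are placeholders. The matching records are consumed from
+// the returned event stream:
+//
+//    resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//        Bucket:         aws.String("my-bucket"), // placeholder
+//        Key:            aws.String("data.csv"),  // placeholder
+//        Expression:     aws.String("SELECT * FROM S3Object s LIMIT 10"),
+//        ExpressionType: aws.String("SQL"),
+//        InputSerialization: &s3.InputSerialization{
+//            CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")},
+//        },
+//        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    defer resp.EventStream.Close()
+//    for event := range resp.EventStream.Events() {
+//        if records, ok := event.(*s3.RecordsEvent); ok {
+//            fmt.Print(string(records.Payload))
+//        }
+//    }
+//    if err := resp.EventStream.Err(); err != nil {
+//        // handle error
+//    }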
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+ op := &request.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ output = &UploadPartOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// being charged for storage of the uploaded parts. Only after you either
+// complete or abort the multipart upload does Amazon S3 free up the parts
+// storage and stop charging you for it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
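+
+// A minimal usage sketch, assuming svc is an *s3.S3 client, uploadID comes
+// from a prior CreateMultipartUpload call, and part is an io.ReadSeeker
+// holding this part's bytes (every part except the last must be at least
+// 5 MB):
+//
+//    out, err := svc.UploadPart(&s3.UploadPartInput{
+//        Bucket:     aws.String("my-bucket"), // placeholder
+//        Key:        aws.String("big.bin"),   // placeholder
+//        UploadId:   aws.String(uploadID),
+//        PartNumber: aws.Int64(1),
+//        Body:       part,
+//    })
+//    // out.ETag must be recorded and passed to CompleteMultipartUpload.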
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+ op := &request.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ output = &UploadPartCopyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
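+//
+// // Illustrative sketch only: all names and the byte range are placeholder
+// // values, and "client" is assumed to be an *s3.S3.
+// resp, err := client.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
+//     Bucket:          aws.String("dest-bucket"),
+//     Key:             aws.String("dest-key"),
+//     UploadId:        aws.String(uploadID),
+//     PartNumber:      aws.Int64(1),
+//     CopySource:      aws.String("source-bucket/source-key"),
+//     CopySourceRange: aws.String("bytes=0-5242879"), // first 5 MiB of the source
+// })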
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon Simple Storage Service Developer Guide.
+type AbortIncompleteMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+ // upload.
+ DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AbortIncompleteMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortIncompleteMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload {
+ s.DaysAfterInitiation = &v
+ return s
+}
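+
+// An illustrative sketch of embedding AbortIncompleteMultipartUpload in a
+// lifecycle rule; the rule ID and day count are placeholder values, and
+// LifecycleRule is defined elsewhere in this package.
+//
+// rule := &s3.LifecycleRule{
+//     ID:     aws.String("abort-stale-mpu"),
+//     Status: aws.String(s3.ExpirationStatusEnabled),
+//     Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
+//     AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
+//         DaysAfterInitiation: aws.Int64(7),
+//     },
+// }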
+
+type AbortMultipartUploadInput struct {
+ _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key of the object for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID that identifies the multipart upload.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *AbortMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
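+
+// An illustrative sketch of aborting an upload with the chained setters; the
+// bucket, key, and upload ID are placeholder values and "client" is assumed
+// to be an *s3.S3.
+//
+// input := &s3.AbortMultipartUploadInput{}
+// input.SetBucket("my-bucket").SetKey("my-key").SetUploadId(uploadID)
+// if _, err := client.AbortMultipartUpload(input); err != nil {
+//     // handle the error; incomplete parts keep accruing charges until
+//     // the upload is aborted or completed
+// }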
+
+type AbortMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Configures the transfer acceleration state for an Amazon S3 bucket. For more
+// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
+// in the Amazon Simple Storage Service Developer Guide.
+type AccelerateConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the transfer acceleration status of the bucket.
+ Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s AccelerateConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccelerateConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
+ s.Status = &v
+ return s
+}
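+
+// An illustrative sketch of enabling transfer acceleration on a bucket; the
+// bucket name is a placeholder and "client" is assumed to be an *s3.S3.
+//
+// _, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     AccelerateConfiguration: &s3.AccelerateConfiguration{
+//         Status: aws.String(s3.BucketAccelerateStatusEnabled),
+//     },
+// })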
+
+// Contains the elements that set the ACL permissions for an object per grantee.
+type AccessControlPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ // Container for the bucket owner's display name and ID.
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s AccessControlPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessControlPolicy) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlPolicy) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
+ if s.Grants != nil {
+ for i, v := range s.Grants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrants sets the Grants field's value.
+func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
+ s.Owner = v
+ return s
+}
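+
+// An illustrative sketch of granting public READ access on an object via an
+// access control policy; the bucket, key, and owner ID are placeholder values
+// and "client" is assumed to be an *s3.S3.
+//
+// _, err := client.PutObjectAcl(&s3.PutObjectAclInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("my-key"),
+//     AccessControlPolicy: &s3.AccessControlPolicy{
+//         Owner: &s3.Owner{ID: aws.String(ownerID)},
+//         Grants: []*s3.Grant{{
+//             Grantee: &s3.Grantee{
+//                 Type: aws.String(s3.TypeGroup),
+//                 URI:  aws.String("http://acs.amazonaws.com/groups/global/AllUsers"),
+//             },
+//             Permission: aws.String(s3.PermissionRead),
+//         }},
+//     },
+// })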
+
+// A container for information about access control for replicas.
+type AccessControlTranslation struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the replica ownership. For default and valid values, see PUT bucket
+ // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+ // in the Amazon Simple Storage Service API Reference.
+ //
+ // Owner is a required field
+ Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
+}
+
+// String returns the string representation
+func (s AccessControlTranslation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessControlTranslation) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlTranslation) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"}
+ if s.Owner == nil {
+ invalidParams.Add(request.NewErrParamRequired("Owner"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation {
+ s.Owner = &v
+ return s
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating an
+// analytics filter. The operator must have at least two predicates in any
+// combination, and an object must match all of the predicates for the filter
+// to apply.
+type AnalyticsAndOperator struct {
+ _ struct{} `type:"structure"`
+
+// The prefix to use when evaluating an AND predicate: the prefix that an object
+// must have to be included in the analytics results.
+ Prefix *string `type:"string"`
+
+ // The list of tags to use when evaluating an AND predicate.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
+ s.Tags = v
+ return s
+}
+
+// Specifies the configuration and any analyses for the analytics filter of
+// an Amazon S3 bucket.
+//
+// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html)
+// in the Amazon Simple Storage Service API Reference.
+type AnalyticsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The filter used to describe a set of objects for analyses. A filter must
+ // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).
+ // If no filter is provided, all objects will be considered in any analysis.
+ Filter *AnalyticsFilter `type:"structure"`
+
+ // The ID that identifies the analytics configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // Contains data related to access patterns to be collected and made available
+ // to analyze the tradeoffs between different storage classes.
+ //
+ // StorageClassAnalysis is a required field
+ StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.StorageClassAnalysis == nil {
+ invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.StorageClassAnalysis != nil {
+ if err := s.StorageClassAnalysis.Validate(); err != nil {
+ invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetStorageClassAnalysis sets the StorageClassAnalysis field's value.
+func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration {
+ s.StorageClassAnalysis = v
+ return s
+}
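+
+// An illustrative sketch of a complete analytics configuration; the ID and
+// destination bucket ARN are placeholder values. Id and StorageClassAnalysis
+// are the two required fields.
+//
+// cfg := &s3.AnalyticsConfiguration{
+//     Id: aws.String("storage-class-report"),
+//     StorageClassAnalysis: &s3.StorageClassAnalysis{
+//         DataExport: &s3.StorageClassAnalysisDataExport{
+//             OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//             Destination: &s3.AnalyticsExportDestination{
+//                 S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//                     Bucket: aws.String("arn:aws:s3:::results-bucket"),
+//                     Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//                 },
+//             },
+//         },
+//     },
+// }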
+
+// Where to publish the analytics results.
+type AnalyticsExportDestination struct {
+ _ struct{} `type:"structure"`
+
+ // A destination signifying output to an S3 bucket.
+ //
+ // S3BucketDestination is a required field
+ S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsExportDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsExportDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsExportDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"}
+ if s.S3BucketDestination == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+ }
+ if s.S3BucketDestination != nil {
+ if err := s.S3BucketDestination.Validate(); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination {
+ s.S3BucketDestination = v
+ return s
+}
+
+type AnalyticsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating an
+ // analytics filter. The operator must have at least two predicates.
+ And *AnalyticsAndOperator `type:"structure"`
+
+ // The prefix to use when evaluating an analytics filter.
+ Prefix *string `type:"string"`
+
+ // The tag to use when evaluating an analytics filter.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s AnalyticsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
+ s.Tag = v
+ return s
+}
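+
+// An illustrative sketch of an analytics filter whose AND conjunction carries
+// the required minimum of two predicates (here, one prefix and one tag); the
+// prefix and tag values are placeholders.
+//
+// filter := &s3.AnalyticsFilter{
+//     And: &s3.AnalyticsAndOperator{
+//         Prefix: aws.String("logs/"),
+//         Tags: []*s3.Tag{
+//             {Key: aws.String("team"), Value: aws.String("data")},
+//         },
+//     },
+// }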
+
+type AnalyticsS3BucketDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the bucket to which data is exported.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // The account ID that owns the destination bucket. If no account ID is provided,
+ // the owner will not be validated prior to exporting data.
+ BucketAccountId *string `type:"string"`
+
+ // Specifies the file format used when exporting data to Amazon S3.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
+
+ // The prefix to use when exporting data. The prefix is prepended to all results.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AnalyticsS3BucketDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsS3BucketDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsS3BucketDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
+ s.Bucket = &v
+ return s
+}
+
+func (s *AnalyticsS3BucketDestination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBucketAccountId sets the BucketAccountId field's value.
+func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
+ s.BucketAccountId = &v
+ return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
+ s.Format = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
+ s.Prefix = &v
+ return s
+}
+
+type Bucket struct {
+ _ struct{} `type:"structure"`
+
+ // Date the bucket was created.
+ CreationDate *time.Time `type:"timestamp"`
+
+ // The name of the bucket.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Bucket) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Bucket) GoString() string {
+ return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+ s.CreationDate = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+ s.Name = &v
+ return s
+}
+
+// Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
+type BucketLifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A lifecycle rule for individual objects in an Amazon S3 bucket.
+ //
+ // Rules is a required field
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s BucketLifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration {
+ s.Rules = v
+ return s
+}
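+
+// An illustrative sketch of applying a lifecycle configuration; the bucket
+// name, rule ID, prefix, and day count are placeholder values and "client"
+// is assumed to be an *s3.S3.
+//
+// _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//         Rules: []*s3.LifecycleRule{{
+//             ID:         aws.String("expire-tmp"),
+//             Status:     aws.String(s3.ExpirationStatusEnabled),
+//             Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
+//             Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//         }},
+//     },
+// })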
+
+type BucketLoggingStatus struct {
+ _ struct{} `type:"structure"`
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to
+ // all log object keys for a bucket. For more information, see PUT Bucket logging
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+ // in the Amazon Simple Storage Service API Reference.
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s BucketLoggingStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLoggingStatus) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLoggingStatus) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
+ if s.LoggingEnabled != nil {
+ if err := s.LoggingEnabled.Validate(); err != nil {
+ invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus {
+ s.LoggingEnabled = v
+ return s
+}
+
+// Describes the cross-origin access configuration for objects in an Amazon
+// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// Simple Storage Service Developer Guide.
+type CORSConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // A set of allowed origins and methods.
+ //
+ // CORSRules is a required field
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s CORSConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
+ if s.CORSRules == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSRules"))
+ }
+ if s.CORSRules != nil {
+ for i, v := range s.CORSRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
+ s.CORSRules = v
+ return s
+}
+
+// Specifies a cross-origin access rule for an Amazon S3 bucket.
+type CORSRule struct {
+ _ struct{} `type:"structure"`
+
+ // Headers that are specified in the Access-Control-Request-Headers header.
+ // These headers are allowed in a preflight OPTIONS request. In response to
+ // any preflight OPTIONS request, Amazon S3 returns any requested headers that
+ // are allowed.
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
+
+ // An HTTP method that you allow the origin to execute. Valid values are GET,
+ // PUT, HEAD, POST, and DELETE.
+ //
+ // AllowedMethods is a required field
+ AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
+
+ // One or more origins you want customers to be able to access the bucket from.
+ //
+ // AllowedOrigins is a required field
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+ // The time in seconds that your browser is to cache the preflight response
+ // for the specified resource.
+ MaxAgeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CORSRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
+ if s.AllowedMethods == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
+ }
+ if s.AllowedOrigins == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllowedHeaders sets the AllowedHeaders field's value.
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
+ s.AllowedHeaders = v
+ return s
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
+ s.AllowedMethods = v
+ return s
+}
+
+// SetAllowedOrigins sets the AllowedOrigins field's value.
+func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
+ s.AllowedOrigins = v
+ return s
+}
+
+// SetExposeHeaders sets the ExposeHeaders field's value.
+func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
+ s.ExposeHeaders = v
+ return s
+}
+
+// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
+func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
+ s.MaxAgeSeconds = &v
+ return s
+}
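+
+// An illustrative sketch of a CORS rule allowing browser GETs from a single
+// origin; the origin and max-age are placeholder values and "client" is
+// assumed to be an *s3.S3.
+//
+// rule := &s3.CORSRule{
+//     AllowedMethods: []*string{aws.String("GET")},
+//     AllowedOrigins: []*string{aws.String("https://example.com")},
+//     AllowedHeaders: []*string{aws.String("*")},
+//     MaxAgeSeconds:  aws.Int64(3000),
+// }
+// _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
+//     Bucket:            aws.String("my-bucket"),
+//     CORSConfiguration: &s3.CORSConfiguration{CORSRules: []*s3.CORSRule{rule}},
+// })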
+
+// Describes how a CSV-formatted input object is formatted.
+type CSVInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies that CSV field values may contain quoted record delimiters and
+ // such records should be allowed. Default value is FALSE. Setting this value
+ // to TRUE may lower performance.
+ AllowQuotedRecordDelimiter *bool `type:"boolean"`
+
+ // The single character used to indicate a row should be ignored when present
+ // at the start of a row.
+ Comments *string `type:"string"`
+
+ // The value used to separate individual fields in a record.
+ FieldDelimiter *string `type:"string"`
+
+ // Describes the first line of input. Valid values: None, Ignore, Use.
+ FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
+
+ // Value used for escaping where the field delimiter is part of the value.
+ QuoteCharacter *string `type:"string"`
+
+ // The single character used for escaping the quote character inside an already
+ // escaped value.
+ QuoteEscapeCharacter *string `type:"string"`
+
+ // The value used to separate individual records.
+ RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CSVInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CSVInput) GoString() string {
+ return s.String()
+}
+
+// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
+func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
+ s.AllowQuotedRecordDelimiter = &v
+ return s
+}
+
+// SetComments sets the Comments field's value.
+func (s *CSVInput) SetComments(v string) *CSVInput {
+ s.Comments = &v
+ return s
+}
+
+// SetFieldDelimiter sets the FieldDelimiter field's value.
+func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput {
+ s.FieldDelimiter = &v
+ return s
+}
+
+// SetFileHeaderInfo sets the FileHeaderInfo field's value.
+func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput {
+ s.FileHeaderInfo = &v
+ return s
+}
+
+// SetQuoteCharacter sets the QuoteCharacter field's value.
+func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput {
+ s.QuoteCharacter = &v
+ return s
+}
+
+// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
+func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput {
+ s.QuoteEscapeCharacter = &v
+ return s
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput {
+ s.RecordDelimiter = &v
+ return s
+}
+
+// Describes how CSV-formatted results are formatted.
+type CSVOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The value used to separate individual fields in a record.
+ FieldDelimiter *string `type:"string"`
+
+ // The value used for escaping where the field delimiter is part of the value.
+ QuoteCharacter *string `type:"string"`
+
+// The single character used for escaping the quote character inside an already
+// escaped value.
+ QuoteEscapeCharacter *string `type:"string"`
+
+ // Indicates whether or not all output fields should be quoted.
+ QuoteFields *string `type:"string" enum:"QuoteFields"`
+
+ // The value used to separate individual records.
+ RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CSVOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CSVOutput) GoString() string {
+ return s.String()
+}
+
+// SetFieldDelimiter sets the FieldDelimiter field's value.
+func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput {
+ s.FieldDelimiter = &v
+ return s
+}
+
+// SetQuoteCharacter sets the QuoteCharacter field's value.
+func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput {
+ s.QuoteCharacter = &v
+ return s
+}
+
+// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
+func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput {
+ s.QuoteEscapeCharacter = &v
+ return s
+}
+
+// SetQuoteFields sets the QuoteFields field's value.
+func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput {
+ s.QuoteFields = &v
+ return s
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput {
+ s.RecordDelimiter = &v
+ return s
+}
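+
+// An illustrative sketch of wiring CSVInput and CSVOutput into an S3 Select
+// request; the bucket, key, and SQL expression are placeholder values and
+// "client" is assumed to be an *s3.S3.
+//
+// out, err := client.SelectObjectContent(&s3.SelectObjectContentInput{
+//     Bucket:         aws.String("my-bucket"),
+//     Key:            aws.String("data.csv"),
+//     Expression:     aws.String("SELECT s.name FROM S3Object s"),
+//     ExpressionType: aws.String(s3.ExpressionTypeSql),
+//     InputSerialization: &s3.InputSerialization{
+//         CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
+//     },
+//     OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+// })
+// // on success, read the records from out.EventStream.Events()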
+
+type CloudFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ CloudFunction *string `type:"string"`
+
+ // The bucket event for which to send notifications.
+ //
+ // Deprecated: Event has been deprecated
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ InvocationRole *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CloudFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetCloudFunction sets the CloudFunction field's value.
+func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration {
+ s.CloudFunction = &v
+ return s
+}
+
+// SetEvent sets the Event field's value.
+func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetInvocationRole sets the InvocationRole field's value.
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration {
+ s.InvocationRole = &v
+ return s
+}
+
+type CommonPrefix struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CommonPrefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommonPrefix) GoString() string {
+ return s.String()
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
+ s.Prefix = &v
+ return s
+}
+
+type CompleteMultipartUploadInput struct {
+ _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CompleteMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMultipartUpload sets the MultipartUpload field's value.
+func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
+ s.MultipartUpload = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+type CompleteMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `type:"string"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Location *string `type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CompleteMultipartUploadOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
+ s.Location = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+ s.VersionId = &v
+ return s
+}
+
+type CompletedMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CompletedMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+ s.Parts = v
+ return s
+}
+
+type CompletedPart struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
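+
+// An illustrative sketch of completing a multipart upload from the ETags
+// collected while uploading parts; all names are placeholder values and
+// "client" is assumed to be an *s3.S3.
+//
+// _, err := client.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//     Bucket:   aws.String("my-bucket"),
+//     Key:      aws.String("my-key"),
+//     UploadId: aws.String(uploadID),
+//     MultipartUpload: &s3.CompletedMultipartUpload{
+//         Parts: []*s3.CompletedPart{
+//             {PartNumber: aws.Int64(1), ETag: part1.ETag},
+//             {PartNumber: aws.Int64(2), ETag: part2.ETag},
+//         },
+//     },
+// })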
+
+// Specifies a condition that must be met for a redirect to apply.
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+// The object key name prefix when the redirect is applied. For example, to
+// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+// To redirect requests for all pages with the prefix docs/, the key prefix will
+// be docs/, which identifies all objects in the docs/ folder. Required when
+// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+// is not specified. If both conditions are specified, both must be true for
+// the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
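+
+// An illustrative sketch of a routing rule whose Condition redirects the
+// docs/ prefix; the prefixes are placeholder values, and RoutingRule and
+// Redirect are defined elsewhere in this package.
+//
+// rule := &s3.RoutingRule{
+//     Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
+//     Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
+// }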
+
+type ContinuationEvent struct {
+ _ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuationEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuationEvent) GoString() string {
+ return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ return nil
+}
+
+type CopyObjectInput struct {
+ _ struct{} `locationName:"CopyObjectRequest" type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Specifies whether you want to apply a Legal Hold to the copied object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The object lock mode that you want to apply to the copied object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want the copied object's object lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+// The tag-set for the destination object. This value must be used in conjunction
+// with the TaggingDirective. The tag-set must be encoded as URL query
+// parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+// Specifies whether the object tag-set is copied from the source object or
+// replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
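+// An illustrative sketch of a copy request; per the CopySource documentation
+// above, the "bucket/key" value must be URL-encoded. The bucket and key names
+// are placeholder values, "client" is assumed to be an *s3.S3, and url is
+// net/url.
+//
+// src := "source-bucket/" + url.PathEscape("source key.txt")
+// _, err := client.CopyObject(&s3.CopyObjectInput{
+//     Bucket:     aws.String("dest-bucket"),
+//     Key:        aws.String("dest-key.txt"),
+//     CopySource: aws.String(src),
+// })
+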
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CopyObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) {
+ if s.CopySourceSSECustomerKey == nil {
+ return v
+ }
+ return *s.CopySourceSSECustomerKey
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetMetadataDirective sets the MetadataDirective field's value.
+func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput {
+ s.MetadataDirective = &v
+ return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput {
+ s.ObjectLockLegalHoldStatus = &v
+ return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput {
+ s.ObjectLockMode = &v
+ return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput {
+ s.ObjectLockRetainUntilDate = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *CopyObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetTaggingDirective sets the TaggingDirective field's value.
+func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput {
+ s.TaggingDirective = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
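+// An illustrative caller-side sketch of the fluent setters above (bucket and
+// key names are hypothetical; error handling elided for brevity):
+//
+//	svc := s3.New(sess) // sess is a *session.Session created elsewhere
+//	out, err := svc.CopyObject((&s3.CopyObjectInput{}).
+//		SetBucket("dest-bucket").
+//		SetCopySource("src-bucket/src-key"). // URL-encode keys containing special characters
+//		SetKey("dest-key"))
+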
+type CopyObjectOutput struct {
+ _ struct{} `type:"structure" payload:"CopyObjectResult"`
+
+ CopyObjectResult *CopyObjectResult `type:"structure"`
+
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the AWS KMS Encryption Context to use for object encryption.
+ // The value of this header is a base64-encoded UTF-8 string holding JSON with
+ // the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version ID of the newly created copy.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetCopyObjectResult sets the CopyObjectResult field's value.
+func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
+ s.CopyObjectResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+type CopyObjectResult struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ LastModified *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s CopyObjectResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectResult) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
+ s.LastModified = &v
+ return s
+}
+
+type CopyPartResult struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s CopyPartResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyPartResult) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
+ s.LastModified = &v
+ return s
+}
+
+type CreateBucketConfiguration struct {
+ _ struct{} `type:"structure"`
+
+	// Specifies the region where the bucket will be created. If you don't specify
+	// a region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s CreateBucketConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
+ s.LocationConstraint = &v
+ return s
+}
+
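+// Illustrative sketch (hypothetical names): outside us-east-1 the target
+// region must be passed as the LocationConstraint, since the bucket would
+// otherwise be created in us-east-1:
+//
+//	_, err := svc.CreateBucket(&s3.CreateBucketInput{
+//		Bucket: aws.String("my-bucket"),
+//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//			LocationConstraint: aws.String("eu-west-1"),
+//		},
+//	})
+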
+type CreateBucketInput struct {
+ _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Specifies whether you want Amazon S3 object lock to be enabled for the new
+ // bucket.
+ ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
+func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
+ s.CreateBucketConfiguration = v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value.
+func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput {
+ s.ObjectLockEnabledForBucket = &v
+ return s
+}
+
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+ s.Location = &v
+ return s
+}
+
+type CreateMultipartUploadInput struct {
+ _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether you want to apply a Legal Hold to the uploaded object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // Specifies the object lock mode that you want to apply to the uploaded object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // Specifies the date and time when you want the object lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For details
+	// on downloading objects from requester-pays buckets, see
+	// http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// S3 does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// The tag-set for the object. The tag-set must be encoded as URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
+ s.Metadata = v
+ return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput {
+ s.ObjectLockLegalHoldStatus = &v
+ return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput {
+ s.ObjectLockMode = &v
+ return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput {
+ s.ObjectLockRetainUntilDate = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+type CreateMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+	// Date when the multipart upload becomes eligible for abort by a lifecycle rule.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+	// ID of the lifecycle rule that makes the multipart upload eligible for the
+	// abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `locationName:"Bucket" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the AWS KMS Encryption Context to use for object encryption.
+ // The value of this header is a base64-encoded UTF-8 string holding JSON with
+ // the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // ID for the initiated multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateMultipartUploadOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
+ s.UploadId = &v
+ return s
+}
+
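+// Illustrative sketch of the create/upload/complete flow these types support
+// (hypothetical names; real code must handle errors and abort the upload on
+// failure):
+//
+//	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//		Bucket: aws.String("my-bucket"), Key: aws.String("big-object"),
+//	})
+//	// ... call UploadPart with mpu.UploadId for each part, collecting the
+//	// returned ETags into parts ([]*s3.CompletedPart) ...
+//	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//		Bucket: mpu.Bucket, Key: mpu.Key, UploadId: mpu.UploadId,
+//		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
+//	})
+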
+// The container element for specifying the default object lock retention settings
+// for new objects placed in the specified bucket.
+type DefaultRetention struct {
+ _ struct{} `type:"structure"`
+
+ // The number of days that you want to specify for the default retention period.
+ Days *int64 `type:"integer"`
+
+ // The default object lock retention mode you want to apply to new objects placed
+ // in the specified bucket.
+ Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
+
+ // The number of years that you want to specify for the default retention period.
+ Years *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s DefaultRetention) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DefaultRetention) GoString() string {
+ return s.String()
+}
+
+// SetDays sets the Days field's value.
+func (s *DefaultRetention) SetDays(v int64) *DefaultRetention {
+ s.Days = &v
+ return s
+}
+
+// SetMode sets the Mode field's value.
+func (s *DefaultRetention) SetMode(v string) *DefaultRetention {
+ s.Mode = &v
+ return s
+}
+
+// SetYears sets the Years field's value.
+func (s *DefaultRetention) SetYears(v int64) *DefaultRetention {
+ s.Years = &v
+ return s
+}
+
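+// Illustrative sketch (hypothetical bucket name): DefaultRetention takes
+// effect through the bucket's object lock configuration, here assumed to be
+// applied via the PutObjectLockConfiguration API:
+//
+//	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//		Bucket: aws.String("my-locked-bucket"),
+//		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//			ObjectLockEnabled: aws.String("Enabled"),
+//			Rule: &s3.ObjectLockRule{
+//				DefaultRetention: &s3.DefaultRetention{
+//					Mode: aws.String("GOVERNANCE"),
+//					Days: aws.Int64(30),
+//				},
+//			},
+//		},
+//	})
+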
+type Delete struct {
+ _ struct{} `type:"structure"`
+
+ // Objects is a required field
+ Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ Quiet *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Delete) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Delete) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Delete"}
+ if s.Objects == nil {
+ invalidParams.Add(request.NewErrParamRequired("Objects"))
+ }
+ if s.Objects != nil {
+ for i, v := range s.Objects {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetObjects sets the Objects field's value.
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete {
+ s.Objects = v
+ return s
+}
+
+// SetQuiet sets the Quiet field's value.
+func (s *Delete) SetQuiet(v bool) *Delete {
+ s.Quiet = &v
+ return s
+}
+
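+// Illustrative sketch (hypothetical names): Delete is the request payload of
+// the batch DeleteObjects API:
+//
+//	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("my-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("a.txt")},
+//				{Key: aws.String("b.txt")},
+//			},
+//			Quiet: aws.Bool(true),
+//		},
+//	})
+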
+type DeleteBucketAnalyticsConfigurationInput struct {
+ _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"`
+
+ // The name of the bucket from which an analytics configuration is deleted.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID that identifies the analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type DeleteBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketCorsInput struct {
+ _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketEncryptionInput struct {
+ _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"`
+
+ // The name of the bucket containing the server-side encryption configuration
+ // to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketEncryptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketEncryptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketEncryptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketEncryptionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketEncryptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketEncryptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketEncryptionOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketInput struct {
+ _ struct{} `locationName:"DeleteBucketRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketInventoryConfigurationInput struct {
+ _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"`
+
+ // The name of the bucket containing the inventory configuration to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type DeleteBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketLifecycleInput struct {
+ _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketMetricsConfigurationInput struct {
+ _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"`
+
+ // The name of the bucket containing the metrics configuration to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type DeleteBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketPolicyInput struct {
+ _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketReplicationInput struct {
+ _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"`
+
+ // The bucket name.
+ //
+ // It can take a while to propagate the deletion of a replication configuration
+ // to all Amazon S3 systems.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketTaggingInput struct {
+ _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketWebsiteInput struct {
+ _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of the object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+ return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
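+
+// Usage sketch (not part of the generated SDK): DeleteMarkerEntry values are
+// returned in the DeleteMarkers field of a ListObjectVersions response. The
+// bucket name and svc client are placeholder assumptions.
+//
+//	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		for _, m := range out.DeleteMarkers {
+//			fmt.Println(aws.StringValue(m.Key), aws.StringValue(m.VersionId))
+//		}
+//	}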
+
+// Specifies whether Amazon S3 should replicate delete markers.
+type DeleteMarkerReplication struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the delete marker replication.
+ //
+ // In the current implementation, Amazon S3 doesn't replicate the delete markers.
+ // The status must be Disabled.
+ Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerReplication) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerReplication) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+ s.Status = &v
+ return s
+}
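+
+// Usage sketch (not part of the generated SDK): since delete markers are not
+// replicated in the current implementation, Disabled is the only valid status,
+// e.g. when building a ReplicationRule:
+//
+//	dmr := &s3.DeleteMarkerReplication{
+//		Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
+//	}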
+
+type DeleteObjectInput struct {
+ _ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions
+ // to process this operation.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value.
+func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput {
+ s.BypassGovernanceRetention = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
+ s.VersionId = &v
+ return s
+}
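+
+// Usage sketch (not part of the generated SDK): deleting one specific version
+// of an object. The bucket, key, and version ID are placeholder assumptions.
+//
+//	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//		Bucket:    aws.String("my-bucket"),
+//		Key:       aws.String("path/to/object"),
+//		VersionId: aws.String("EXAMPLEVERSIONID"),
+//	})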
+
+type DeleteObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+type DeleteObjectTaggingInput struct {
+ _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The versionId of the object that the tag-set will be removed from.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+type DeleteObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The versionId of the object the tag-set was removed from.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+type DeleteObjectsInput struct {
+ _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies whether you want to delete this object even if it has a Governance-type
+ // object lock in place. You must have sufficient permissions to perform this
+ // operation.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // Delete is a required field
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Delete == nil {
+ invalidParams.Add(request.NewErrParamRequired("Delete"))
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value.
+func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput {
+ s.BypassGovernanceRetention = &v
+ return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
+ s.Delete = v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
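+
+// Usage sketch (not part of the generated SDK): a batch delete carries the
+// object identifiers in the Delete payload rather than in the URI. All names
+// below are placeholders.
+//
+//	input := &s3.DeleteObjectsInput{
+//		Bucket: aws.String("my-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("a.txt")},
+//				{Key: aws.String("b.txt")},
+//			},
+//			Quiet: aws.Bool(true),
+//		},
+//	}
+//	out, err := svc.DeleteObjects(input)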
+
+type DeleteObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+ Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleted sets the Deleted field's value.
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
+ s.Deleted = v
+ return s
+}
+
+// SetErrors sets the Errors field's value.
+func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
+ s.Errors = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
+ s.RequestCharged = &v
+ return s
+}
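+
+// Usage sketch (not part of the generated SDK): a batch delete can partially
+// succeed, so inspect both lists; out is assumed to be a *s3.DeleteObjectsOutput
+// from a prior DeleteObjects call.
+//
+//	for _, d := range out.Deleted {
+//		fmt.Println("deleted:", aws.StringValue(d.Key))
+//	}
+//	for _, e := range out.Errors {
+//		fmt.Println("failed:", aws.StringValue(e.Key), aws.StringValue(e.Code))
+//	}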
+
+type DeletePublicAccessBlockInput struct {
+ _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"`
+
+ // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeletePublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeletePublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeletePublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeletePublicAccessBlockOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeletePublicAccessBlockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletePublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+type DeletedObject struct {
+ _ struct{} `type:"structure"`
+
+ DeleteMarker *bool `type:"boolean"`
+
+ DeleteMarkerVersionId *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+ s.DeleteMarkerVersionId = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies the destination for replicated objects in an Amazon S3 replication
+// configuration: the bucket that stores the replicas, plus optional ownership,
+// encryption, and storage-class settings.
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+	// Specify this only in a cross-account scenario (where the source and destination
+	// bucket owners are not the same), and only when you want to change replica
+	// ownership to the AWS account that owns the destination bucket. If this is not
+	// specified in the replication configuration, the replicas are owned by the same
+	// AWS account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation `type:"structure"`
+
+ // Destination bucket owner account ID. In a cross-account scenario, if you
+ // direct Amazon S3 to change replica ownership to the AWS account that owns
+ // the destination bucket by specifying the AccessControlTranslation property,
+ // this is the account ID of the destination bucket owner. For more information,
+ // see Cross-Region Replication Additional Configuration: Change Replica Owner
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in
+ // the Amazon Simple Storage Service Developer Guide.
+ Account *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
+ // store replicas of the object identified by the rule.
+ //
+ // A replication configuration can replicate objects to only one destination
+ // bucket. If there are multiple rules in your replication configuration, all
+ // rules must specify the same destination bucket.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // A container that provides information about encryption. If SourceSelectionCriteria
+ // is specified, you must specify this element.
+ EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+ // The storage class to use when replicating objects, such as standard or reduced
+ // redundancy. By default, Amazon S3 uses the storage class of the source object
+ // to create the object replica.
+ //
+ // For valid values, see the StorageClass element of the PUT Bucket replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+ // action in the Amazon Simple Storage Service API Reference.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Destination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Destination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.AccessControlTranslation != nil {
+ if err := s.AccessControlTranslation.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessControlTranslation sets the AccessControlTranslation field's value.
+func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination {
+ s.AccessControlTranslation = v
+ return s
+}
+
+// SetAccount sets the Account field's value.
+func (s *Destination) SetAccount(v string) *Destination {
+ s.Account = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *Destination) SetBucket(v string) *Destination {
+ s.Bucket = &v
+ return s
+}
+
+func (s *Destination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
+func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination {
+ s.EncryptionConfiguration = v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Destination) SetStorageClass(v string) *Destination {
+ s.StorageClass = &v
+ return s
+}
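+
+// Usage sketch (not part of the generated SDK): a cross-account replication
+// destination that reassigns replica ownership and encrypts replicas with a
+// KMS key. The ARNs and account ID are placeholder assumptions.
+//
+//	dest := &s3.Destination{
+//		Bucket:  aws.String("arn:aws:s3:::destination-bucket"),
+//		Account: aws.String("111122223333"),
+//		AccessControlTranslation: &s3.AccessControlTranslation{
+//			Owner: aws.String(s3.OwnerOverrideDestination),
+//		},
+//		EncryptionConfiguration: &s3.EncryptionConfiguration{
+//			ReplicaKmsKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
+//		},
+//	}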
+
+// Describes the server-side encryption that will be applied to the restore
+// results.
+type Encryption struct {
+ _ struct{} `type:"structure"`
+
+ // The server-side encryption algorithm used when storing job results in Amazon
+ // S3 (e.g., AES256, aws:kms).
+ //
+ // EncryptionType is a required field
+ EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"`
+
+ // If the encryption type is aws:kms, this optional value can be used to specify
+ // the encryption context for the restore results.
+ KMSContext *string `type:"string"`
+
+ // If the encryption type is aws:kms, this optional value specifies the AWS
+ // KMS key ID to use for encryption of job results.
+ KMSKeyId *string `type:"string" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s Encryption) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Encryption) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Encryption) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Encryption"}
+ if s.EncryptionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("EncryptionType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEncryptionType sets the EncryptionType field's value.
+func (s *Encryption) SetEncryptionType(v string) *Encryption {
+ s.EncryptionType = &v
+ return s
+}
+
+// SetKMSContext sets the KMSContext field's value.
+func (s *Encryption) SetKMSContext(v string) *Encryption {
+ s.KMSContext = &v
+ return s
+}
+
+// SetKMSKeyId sets the KMSKeyId field's value.
+func (s *Encryption) SetKMSKeyId(v string) *Encryption {
+ s.KMSKeyId = &v
+ return s
+}
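+
+// Usage sketch (not part of the generated SDK): describing aws:kms encryption
+// for restore results, typically nested under a RestoreRequest's OutputLocation.
+// The key ARN is a placeholder assumption.
+//
+//	enc := &s3.Encryption{
+//		EncryptionType: aws.String(s3.ServerSideEncryptionAwsKms),
+//		KMSKeyId:       aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
+//	}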
+
+// Specifies encryption-related information for an Amazon S3 bucket that is
+// a destination for replicated objects.
+type EncryptionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket.
+ // Amazon S3 uses this key to encrypt replica objects.
+ ReplicaKmsKeyID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EncryptionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EncryptionConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+ s.ReplicaKmsKeyID = &v
+ return s
+}
+
+type EndEvent struct {
+ _ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s EndEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EndEvent) GoString() string {
+ return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ return nil
+}
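+
+// Usage sketch (not part of the generated SDK): EndEvent marks the normal end
+// of a SelectObjectContent event stream; resp is assumed to be a
+// *s3.SelectObjectContentOutput.
+//
+//	defer resp.EventStream.Close()
+//	for event := range resp.EventStream.Events() {
+//		switch e := event.(type) {
+//		case *s3.RecordsEvent:
+//			fmt.Print(string(e.Payload))
+//		case *s3.EndEvent:
+//			fmt.Println("stream complete")
+//		}
+//	}
+//	err := resp.EventStream.Err() // surfaces any stream-level error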
+
+type Error struct {
+ _ struct{} `type:"structure"`
+
+ Code *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Message *string `type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+ s.Code = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+ s.Key = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+ s.Message = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+ s.VersionId = &v
+ return s
+}
+
+type ErrorDocument struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+ s.Key = &v
+ return s
+}
+
+// Specifies the Amazon S3 object key name to filter on and whether to filter
+// on the suffix or prefix of the key name.
+type FilterRule struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+ // prefixes and suffixes are not supported. For more information, see Configuring
+ // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Name *string `type:"string" enum:"FilterRuleName"`
+
+ // The value that the filter searches for in object key names.
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FilterRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterRule) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *FilterRule) SetName(v string) *FilterRule {
+ s.Name = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *FilterRule) SetValue(v string) *FilterRule {
+ s.Value = &v
+ return s
+}
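+
+// Usage sketch (not part of the generated SDK): a prefix filter rule as used
+// inside a notification configuration's key filter. The prefix value is a
+// placeholder assumption.
+//
+//	filter := &s3.NotificationConfigurationFilter{
+//		Key: &s3.KeyFilter{
+//			FilterRules: []*s3.FilterRule{{
+//				Name:  aws.String(s3.FilterRuleNamePrefix),
+//				Value: aws.String("logs/"),
+//			}},
+//		},
+//	}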
+
+type GetBucketAccelerateConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"`
+
+ // Name of the bucket for which the accelerate configuration is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The accelerate configuration of the bucket.
+ Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput {
+ s.Status = &v
+ return s
+}
+
+type GetBucketAclInput struct {
+ _ struct{} `locationName:"GetBucketAclRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
+ s.Owner = v
+ return s
+}
+
+type GetBucketAnalyticsConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"`
+
+ // The name of the bucket from which an analytics configuration is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID that identifies the analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type GetBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+ // The configuration and any analyses for the analytics filter.
+ AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput {
+ s.AnalyticsConfiguration = v
+ return s
+}
+
+type GetBucketCorsInput struct {
+ _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
+ s.CORSRules = v
+ return s
+}
+
+type GetBucketEncryptionInput struct {
+ _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"`
+
+ // The name of the bucket from which the server-side encryption configuration
+ // is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketEncryptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketEncryptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketEncryptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketEncryptionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketEncryptionOutput struct {
+ _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
+
+ // Specifies the default server-side-encryption configuration.
+ ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketEncryptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketEncryptionOutput) GoString() string {
+ return s.String()
+}
+
+// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value.
+func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput {
+ s.ServerSideEncryptionConfiguration = v
+ return s
+}
+
+type GetBucketInventoryConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"`
+
+ // The name of the bucket containing the inventory configuration to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type GetBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+ // Specifies the inventory configuration.
+ InventoryConfiguration *InventoryConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput {
+ s.InventoryConfiguration = v
+ return s
+}
+
+type GetBucketLifecycleConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
+ s.Rules = v
+ return s
+}
+
+type GetBucketLifecycleInput struct {
+ _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput {
+ s.Rules = v
+ return s
+}
+
+type GetBucketLocationInput struct {
+ _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLocationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLocationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketLocationOutput struct {
+ _ struct{} `type:"structure"`
+
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
+ s.LocationConstraint = &v
+ return s
+}
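+
+// Usage sketch (not part of the generated SDK): LocationConstraint is empty
+// for buckets in us-east-1, so the NormalizeBucketLocation helper in this
+// package can map the response to a usable region name. The bucket name is
+// a placeholder assumption.
+//
+//	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		region := s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+//		fmt.Println(region)
+//	}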
+
+type GetBucketLoggingInput struct {
+ _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketLoggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to
+ // all log object keys for a bucket. For more information, see PUT Bucket logging
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+ // in the Amazon Simple Storage Service API Reference.
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
+ s.LoggingEnabled = v
+ return s
+}
+
+type GetBucketMetricsConfigurationInput struct {
+ _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"`
+
+ // The name of the bucket containing the metrics configuration to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type GetBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+ // Specifies the metrics configuration.
+ MetricsConfiguration *MetricsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput {
+ s.MetricsConfiguration = v
+ return s
+}
+
+type GetBucketNotificationConfigurationRequest struct {
+ _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`
+
+ // Name of the bucket to get the notification configuration for.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketNotificationConfigurationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketNotificationConfigurationRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketNotificationConfigurationRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketPolicyInput struct {
+ _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketPolicyOutput struct {
+ _ struct{} `type:"structure" payload:"Policy"`
+
+ // The bucket policy as a JSON document.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
+ s.Policy = &v
+ return s
+}
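+
+// Usage sketch (not part of the generated SDK): the policy comes back as a raw
+// JSON string and can be decoded with encoding/json when structured access is
+// needed. The bucket name and svc client are placeholder assumptions.
+//
+//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		var doc map[string]interface{}
+//		err = json.Unmarshal([]byte(aws.StringValue(out.Policy)), &doc)
+//	}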
+
+type GetBucketPolicyStatusInput struct {
+ _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"`
+
+ // The name of the Amazon S3 bucket whose policy status you want to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyStatusInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyStatusInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketPolicyStatusInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketPolicyStatusInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketPolicyStatusOutput struct {
+ _ struct{} `type:"structure" payload:"PolicyStatus"`
+
+ // The policy status for the specified bucket.
+ PolicyStatus *PolicyStatus `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyStatusOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyStatusOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyStatus sets the PolicyStatus field's value.
+func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput {
+ s.PolicyStatus = v
+ return s
+}
+
+type GetBucketReplicationInput struct {
+ _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketReplicationOutput struct {
+ _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
+type GetBucketRequestPaymentInput struct {
+ _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketRequestPaymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketRequestPaymentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies who pays for the download and request fees.
+ Payer *string `type:"string" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+// SetPayer sets the Payer field's value.
+func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput {
+ s.Payer = &v
+ return s
+}
+
+type GetBucketTaggingInput struct {
+ _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
+ s.TagSet = v
+ return s
+}
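+
+// Usage sketch, assuming an initialized client svc and a placeholder bucket
+// name; tag.Key and tag.Value are *string, so aws.StringValue guards nil:
+//
+//   tout, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil {
+//       for _, tag := range tout.TagSet {
+//           fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//       }
+//   }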
+
+type GetBucketVersioningInput struct {
+ _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketVersioningInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
+ s.Status = &v
+ return s
+}
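+
+// Usage sketch, assuming an initialized client svc; Status carries the
+// BucketVersioningStatus enum values ("Enabled", "Suspended"):
+//
+//   vout, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil && aws.StringValue(vout.Status) == s3.BucketVersioningStatusEnabled {
+//       // versioning is enabled; the MFA delete state is in vout.MFADelete
+//   }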
+
+type GetBucketWebsiteInput struct {
+ _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ // Specifies the redirect behavior of all requests to a website endpoint of
+ // an Amazon S3 bucket.
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput {
+ s.RoutingRules = v
+ return s
+}
+
+type GetObjectAclInput struct {
+ _ struct{} `locationName:"GetObjectAclRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
+type GetObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput {
+ s.Owner = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
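+
+// Usage sketch, assuming an initialized client svc and placeholder names;
+// each Grant pairs a Grantee with a Permission enum value:
+//
+//   aout, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//   })
+//   if err == nil {
+//       for _, g := range aout.Grants {
+//           if g.Grantee != nil {
+//               fmt.Println(aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
+//           }
+//       }
+//   }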
+
+type GetObjectInput struct {
+ _ struct{} `locationName:"GetObjectRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetResponseCacheControl sets the ResponseCacheControl field's value.
+func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
+ s.ResponseCacheControl = &v
+ return s
+}
+
+// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
+func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
+ s.ResponseContentDisposition = &v
+ return s
+}
+
+// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
+func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
+ s.ResponseContentEncoding = &v
+ return s
+}
+
+// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
+func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
+ s.ResponseContentLanguage = &v
+ return s
+}
+
+// SetResponseContentType sets the ResponseContentType field's value.
+func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
+ s.ResponseContentType = &v
+ return s
+}
+
+// SetResponseExpires sets the ResponseExpires field's value.
+func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
+ s.ResponseExpires = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *GetObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
+ s.VersionId = &v
+ return s
+}
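+
+// Usage sketch, assuming an initialized client svc and placeholder names; the
+// Range header performs a partial download (here, the first KiB only):
+//
+//   out, err := svc.GetObject(&s3.GetObjectInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//       Range:  aws.String("bytes=0-1023"),
+//   })
+//   if err != nil {
+//       return err
+//   }
+//   defer out.Body.Close() // the caller must close the response body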
+
+type GetObjectLegalHoldInput struct {
+ _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"`
+
+ // The bucket containing the object whose Legal Hold status you want to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The key name for the object whose Legal Hold status you want to retrieve.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The version ID of the object whose Legal Hold status you want to retrieve.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectLegalHoldInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput {
+ s.VersionId = &v
+ return s
+}
+
+type GetObjectLegalHoldOutput struct {
+ _ struct{} `type:"structure" payload:"LegalHold"`
+
+ // The current Legal Hold status for the specified object.
+ LegalHold *ObjectLockLegalHold `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetObjectLegalHoldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectLegalHoldOutput) GoString() string {
+ return s.String()
+}
+
+// SetLegalHold sets the LegalHold field's value.
+func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput {
+ s.LegalHold = v
+ return s
+}
+
+type GetObjectLockConfigurationInput struct {
+ _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"`
+
+ // The bucket whose object lock configuration you want to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetObjectLockConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectLockConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectLockConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectLockConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetObjectLockConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
+
+ // The specified bucket's object lock configuration.
+ ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetObjectLockConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectLockConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value.
+func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput {
+ s.ObjectLockConfiguration = v
+ return s
+}
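+
+// Usage sketch, assuming an initialized client svc; ObjectLockEnabled and the
+// nested Rule describe the bucket-level object lock defaults:
+//
+//   lout, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil && lout.ObjectLockConfiguration != nil {
+//       fmt.Println(aws.StringValue(lout.ObjectLockConfiguration.ObjectLockEnabled))
+//   }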
+
+type GetObjectOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Object data.
+ Body io.ReadCloser `type:"blob"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // Indicates whether this object has an active legal hold. This field is only
+ // returned if you have permission to view an object's legal hold status.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The object lock mode currently in place for this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when this object's object lock will expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about the object restoration operation and the expiration
+ // time of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The number of tags, if any, on the object.
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
+ s.Body = v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentRange sets the ContentRange field's value.
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
+ s.ContentRange = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput {
+ s.ObjectLockLegalHoldStatus = &v
+ return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput {
+ s.ObjectLockMode = &v
+ return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput {
+ s.ObjectLockRetainUntilDate = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagCount sets the TagCount field's value.
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
+ s.TagCount = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
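+
+// Usage sketch: streaming the Body of a GetObjectOutput (out, from the
+// GetObject sketch above) to a local file; io and os imports assumed:
+//
+//   f, err := os.Create("example-key")
+//   if err != nil {
+//       return err
+//   }
+//   defer f.Close()
+//   defer out.Body.Close()
+//   if _, err := io.Copy(f, out.Body); err != nil {
+//       return err
+//   }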
+
+type GetObjectRetentionInput struct {
+ _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"`
+
+ // The bucket containing the object whose retention settings you want to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The key name for the object whose retention settings you want to retrieve.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The version ID for the object whose retention settings you want to retrieve.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectRetentionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectRetentionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectRetentionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectRetentionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput {
+ s.VersionId = &v
+ return s
+}
+
+type GetObjectRetentionOutput struct {
+ _ struct{} `type:"structure" payload:"Retention"`
+
+ // The container element for an object's retention settings.
+ Retention *ObjectLockRetention `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetObjectRetentionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectRetentionOutput) GoString() string {
+ return s.String()
+}
+
+// SetRetention sets the Retention field's value.
+func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput {
+ s.Retention = v
+ return s
+}
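+
+// Usage sketch, assuming an initialized client svc and placeholder names;
+// Retention carries the lock Mode and RetainUntilDate:
+//
+//   rout, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//   })
+//   if err == nil && rout.Retention != nil {
+//       fmt.Println(aws.StringValue(rout.Retention.Mode),
+//           aws.TimeValue(rout.Retention.RetainUntilDate))
+//   }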
+
+type GetObjectTaggingInput struct {
+ _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+type GetObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+type GetObjectTorrentInput struct {
+ _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTorrentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetObjectTorrentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput {
+ s.RequestPayer = &v
+ return s
+}
+
+type GetObjectTorrentOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ Body io.ReadCloser `type:"blob"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentOutput) GoString() string {
+ return s.String()
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput {
+ s.Body = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type GetPublicAccessBlockInput struct {
+ _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"`
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
+ // want to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetPublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetPublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *GetPublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type GetPublicAccessBlockOutput struct {
+ _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
+
+ // The PublicAccessBlock configuration currently in effect for this Amazon S3
+ // bucket.
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetPublicAccessBlockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetPublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value.
+func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput {
+ s.PublicAccessBlockConfiguration = v
+ return s
+}
+
+type GlacierJobParameters struct {
+ _ struct{} `type:"structure"`
+
+ // Glacier retrieval tier at which the restore will be processed.
+ //
+ // Tier is a required field
+ Tier *string `type:"string" required:"true" enum:"Tier"`
+}
+
+// String returns the string representation
+func (s GlacierJobParameters) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlacierJobParameters) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlacierJobParameters) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"}
+ if s.Tier == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTier sets the Tier field's value.
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
+ s.Tier = &v
+ return s
+}
+
+type Grant struct {
+ _ struct{} `type:"structure"`
+
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Specifies the permission given to the grantee.
+ Permission *string `type:"string" enum:"Permission"`
+}
+
+// String returns the string representation
+func (s Grant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *Grant) SetGrantee(v *Grantee) *Grant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *Grant) SetPermission(v string) *Grant {
+ s.Permission = &v
+ return s
+}
+
+type Grantee struct {
+ _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Screen name of the grantee.
+ DisplayName *string `type:"string"`
+
+ // Email address of the grantee.
+ EmailAddress *string `type:"string"`
+
+ // The canonical user ID of the grantee.
+ ID *string `type:"string"`
+
+ // Type of grantee
+ //
+ // Type is a required field
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
+
+ // URI of the grantee group.
+ URI *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Grantee) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grantee) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grantee) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grantee"}
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Grantee) SetDisplayName(v string) *Grantee {
+ s.DisplayName = &v
+ return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+ s.EmailAddress = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+ s.ID = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+ s.Type = &v
+ return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+ s.URI = &v
+ return s
+}
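+
+// Usage sketch showing the fluent Set* pattern these generated setters enable,
+// from a caller's perspective; the ID value is a placeholder:
+//
+//   grant := (&s3.Grant{}).
+//       SetGrantee((&s3.Grantee{}).
+//           SetType(s3.TypeCanonicalUser).
+//           SetID("example-canonical-user-id")).
+//       SetPermission(s3.PermissionRead)
+//   _ = grant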
+
+type HeadBucketInput struct {
+ _ struct{} `locationName:"HeadBucketRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HeadBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+ return s.String()
+}
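+
+// Example (editor's sketch): a minimal bucket-existence check using the
+// HeadBucket operation defined elsewhere in this package. Assumes an *s3.S3
+// client `svc` and the aws helper package; the bucket name is illustrative.
+//
+//	_, err := svc.HeadBucket(&s3.HeadBucketInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		// a missing or forbidden bucket surfaces here as an awserr.Error
+//	}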
+
+type HeadObjectInput struct {
+ _ struct{} `locationName:"HeadObjectRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the specified
+ // part. Useful for querying the size of the part and the number of parts in
+ // this object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified byte range of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. Documentation
+ // on downloading objects from Requester Pays buckets can be found at
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *HeadObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
+ s.VersionId = &v
+ return s
+}
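+
+// Example (editor's sketch): issuing a conditional HEAD request against the
+// HeadObject operation defined elsewhere in this package. Assumes an *s3.S3
+// client `svc`; the bucket, key, and timestamp are illustrative.
+//
+//	out, err := svc.HeadObject(&s3.HeadObjectInput{
+//		Bucket:          aws.String("example-bucket"),
+//		Key:             aws.String("example/key.txt"),
+//		IfModifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
+//	})
+//	// An unmodified object (HTTP 304) surfaces as an error, not as output.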
+
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // The Legal Hold status for the specified object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The object lock mode currently in place for this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when this object's object lock expires.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about object restoration operation and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput {
+ s.ObjectLockLegalHoldStatus = &v
+ return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput {
+ s.ObjectLockMode = &v
+ return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput {
+ s.ObjectLockRetainUntilDate = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
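+
+// Example (editor's sketch): reading common HeadObject response fields. The
+// variable `out` is a *s3.HeadObjectOutput from a prior call; fmt is assumed
+// to be imported by the caller.
+//
+//	size := aws.Int64Value(out.ContentLength)
+//	etag := aws.StringValue(out.ETag)
+//	fmt.Printf("%s (%d bytes)\n", etag, size)
+//	for k, v := range out.Metadata {
+//		fmt.Printf("x-amz-meta-%s: %s\n", k, aws.StringValue(v))
+//	}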
+
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint. For example, if the suffix is index.html and you make a request to
+ // samplebucket/images/, the data returned will be for the object with the key
+ // name images/index.html. The suffix must not be empty and must not include a
+ // slash character.
+ //
+ // Suffix is a required field
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+ if s.Suffix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Suffix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+ s.Suffix = &v
+ return s
+}
+
+type Initiator struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Principal.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If
+ // the principal is an IAM User, it provides a user ARN value.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Initiator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Initiator) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+ s.ID = &v
+ return s
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput `type:"structure"`
+
+ // Specifies the object's compression format. Valid values: NONE, GZIP, BZIP2.
+ // Default Value: NONE.
+ CompressionType *string `type:"string" enum:"CompressionType"`
+
+ // Specifies JSON as the object's input serialization format.
+ JSON *JSONInput `type:"structure"`
+
+ // Specifies Parquet as the object's input serialization format.
+ Parquet *ParquetInput `type:"structure"`
+}
+
+// String returns the string representation
+func (s InputSerialization) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InputSerialization) GoString() string {
+ return s.String()
+}
+
+// SetCSV sets the CSV field's value.
+func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization {
+ s.CSV = v
+ return s
+}
+
+// SetCompressionType sets the CompressionType field's value.
+func (s *InputSerialization) SetCompressionType(v string) *InputSerialization {
+ s.CompressionType = &v
+ return s
+}
+
+// SetJSON sets the JSON field's value.
+func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization {
+ s.JSON = v
+ return s
+}
+
+// SetParquet sets the Parquet field's value.
+func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
+ s.Parquet = v
+ return s
+}
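+
+// Example (editor's sketch): an InputSerialization for a GZIP-compressed CSV
+// object, as it might be passed to the SelectObjectContent operation defined
+// elsewhere in this package.
+//
+//	input := &s3.InputSerialization{
+//		CSV: &s3.CSVInput{
+//			FileHeaderInfo: aws.String(s3.FileHeaderInfoUse),
+//		},
+//		CompressionType: aws.String(s3.CompressionTypeGzip),
+//	}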
+
+// Specifies the inventory configuration for an Amazon S3 bucket. For more information,
+// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
+// in the Amazon Simple Storage Service API Reference.
+type InventoryConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Contains information about where to publish the inventory results.
+ //
+ // Destination is a required field
+ Destination *InventoryDestination `type:"structure" required:"true"`
+
+ // Specifies an inventory filter. The inventory only includes objects that meet
+ // the filter's criteria.
+ Filter *InventoryFilter `type:"structure"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // Object versions to include in the inventory list. If set to All, the list
+ // includes all the object versions, which adds the version-related fields VersionId,
+ // IsLatest, and DeleteMarker to the list. If set to Current, the list does
+ // not contain these version-related fields.
+ //
+ // IncludedObjectVersions is a required field
+ IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+ // Specifies whether the inventory is enabled or disabled. If set to True, an
+ // inventory list is generated. If set to False, no inventory list is generated.
+ //
+ // IsEnabled is a required field
+ IsEnabled *bool `type:"boolean" required:"true"`
+
+ // Contains the optional fields that are included in the inventory results.
+ OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+ // Specifies the schedule for generating inventory results.
+ //
+ // Schedule is a required field
+ Schedule *InventorySchedule `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.IncludedObjectVersions == nil {
+ invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+ }
+ if s.IsEnabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+ }
+ if s.Schedule == nil {
+ invalidParams.Add(request.NewErrParamRequired("Schedule"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Schedule != nil {
+ if err := s.Schedule.Validate(); err != nil {
+ invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+ s.Destination = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+ s.IncludedObjectVersions = &v
+ return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+ s.IsEnabled = &v
+ return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+ s.OptionalFields = v
+ return s
+}
+
+// SetSchedule sets the Schedule field's value.
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
+ s.Schedule = v
+ return s
+}
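+
+// Example (editor's sketch): a weekly CSV inventory of current object versions,
+// satisfying every required field checked by Validate above. The configuration
+// ID and destination bucket ARN are illustrative.
+//
+//	cfg := &s3.InventoryConfiguration{
+//		Id:                     aws.String("weekly-inventory"),
+//		IsEnabled:              aws.Bool(true),
+//		IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//		Schedule: &s3.InventorySchedule{
+//			Frequency: aws.String(s3.InventoryFrequencyWeekly),
+//		},
+//		Destination: &s3.InventoryDestination{
+//			S3BucketDestination: &s3.InventoryS3BucketDestination{
+//				Bucket: aws.String("arn:aws:s3:::example-inventory-bucket"),
+//				Format: aws.String(s3.InventoryFormatCsv),
+//			},
+//		},
+//	}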
+
+type InventoryDestination struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the bucket name, file format, bucket owner (optional), and prefix
+ // (optional) where inventory results are published.
+ //
+ // S3BucketDestination is a required field
+ S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
+ if s.S3BucketDestination == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+ }
+ if s.S3BucketDestination != nil {
+ if err := s.S3BucketDestination.Validate(); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
+ s.S3BucketDestination = v
+ return s
+}
+
+// Contains the type of server-side encryption used to encrypt the inventory
+// results.
+type InventoryEncryption struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
+ SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"`
+
+ // Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
+ SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"`
+}
+
+// String returns the string representation
+func (s InventoryEncryption) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryEncryption) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryEncryption) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"}
+ if s.SSEKMS != nil {
+ if err := s.SSEKMS.Validate(); err != nil {
+ invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSSEKMS sets the SSEKMS field's value.
+func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption {
+ s.SSEKMS = v
+ return s
+}
+
+// SetSSES3 sets the SSES3 field's value.
+func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption {
+ s.SSES3 = v
+ return s
+}
+
+type InventoryFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix that an object must have to be included in the inventory results.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
+ s.Prefix = &v
+ return s
+}
+
+type InventoryS3BucketDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the account that owns the destination bucket.
+ AccountId *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the bucket where inventory results will
+ // be published.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // Contains the type of server-side encryption used to encrypt the inventory
+ // results.
+ Encryption *InventoryEncryption `type:"structure"`
+
+ // Specifies the output format of the inventory results.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"InventoryFormat"`
+
+ // The prefix that is prepended to all inventory results.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryS3BucketDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryS3BucketDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryS3BucketDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+ if s.Encryption != nil {
+ if err := s.Encryption.Validate(); err != nil {
+ invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination {
+ s.AccountId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination {
+ s.Bucket = &v
+ return s
+}
+
+func (s *InventoryS3BucketDestination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetEncryption sets the Encryption field's value.
+func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination {
+ s.Encryption = v
+ return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
+ s.Format = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination {
+ s.Prefix = &v
+ return s
+}
+
+type InventorySchedule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how frequently inventory results are produced.
+ //
+ // Frequency is a required field
+ Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"`
+}
+
+// String returns the string representation
+func (s InventorySchedule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventorySchedule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventorySchedule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
+ if s.Frequency == nil {
+ invalidParams.Add(request.NewErrParamRequired("Frequency"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFrequency sets the Frequency field's value.
+func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
+ s.Frequency = &v
+ return s
+}
+
+type JSONInput struct {
+ _ struct{} `type:"structure"`
+
+ // The type of JSON. Valid values: Document, Lines.
+ Type *string `type:"string" enum:"JSONType"`
+}
+
+// String returns the string representation
+func (s JSONInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JSONInput) GoString() string {
+ return s.String()
+}
+
+// SetType sets the Type field's value.
+func (s *JSONInput) SetType(v string) *JSONInput {
+ s.Type = &v
+ return s
+}
+
+type JSONOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The value used to separate individual records in the output.
+ RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s JSONOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s JSONOutput) GoString() string {
+ return s.String()
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput {
+ s.RecordDelimiter = &v
+ return s
+}
+
+// A container for object key name prefix and suffix filtering rules.
+type KeyFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A list of containers for the key-value pairs that define the criteria for
+ // the filter rule.
+ FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s KeyFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyFilter) GoString() string {
+ return s.String()
+}
+
+// SetFilterRules sets the FilterRules field's value.
+func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
+ s.FilterRules = v
+ return s
+}
+
+// A container for specifying the configuration for AWS Lambda notifications.
+type LambdaFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
+ // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
+ // invokes when the specified event type occurs.
+ //
+ // LambdaFunctionArn is a required field
+ LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LambdaFunctionConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.LambdaFunctionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
+func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
+ s.LambdaFunctionArn = &v
+ return s
+}
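+
+// Example (editor's sketch): invoking a Lambda function for every created
+// object under a given key prefix. The function ARN and prefix are
+// illustrative.
+//
+//	cfg := &s3.LambdaFunctionConfiguration{
+//		Events:            []*string{aws.String(s3.EventS3ObjectCreated)},
+//		LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:example"),
+//		Filter: &s3.NotificationConfigurationFilter{
+//			Key: &s3.KeyFilter{
+//				FilterRules: []*s3.FilterRule{
+//					{Name: aws.String(s3.FilterRuleNamePrefix), Value: aws.String("uploads/")},
+//				},
+//			},
+//		},
+//	}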
+
+type LifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Rules is a required field
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+type LifecycleExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the date on which the object is to be moved or deleted. The date
+ // must be in GMT ISO 8601 format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+ // versions. If set to true, the delete marker will be expired; if set to false,
+ // the policy takes no action. This cannot be specified with Days or Date in
+ // a Lifecycle Expiration Policy.
+ ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s LifecycleExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleExpiration) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+ s.Days = &v
+ return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+ s.ExpiredObjectDeleteMarker = &v
+ return s
+}
+
+type LifecycleRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an incomplete multipart upload
+ // that Amazon S3 will wait before permanently removing all parts of the upload.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon Simple Storage Service Developer Guide.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // The Filter is used to identify objects that a Lifecycle Rule applies to.
+ // A Filter must have exactly one of Prefix, Tag, or And specified.
+ Filter *LifecycleRuleFilter `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
+
+ // Prefix identifying one or more objects to which the rule applies. This is
+ // no longer used; use Filter instead.
+ //
+ // Deprecated: Prefix has been deprecated
+ Prefix *string `deprecated:"true" type:"string"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+ s.Expiration = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+ s.Filter = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
+func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
+ s.NoncurrentVersionTransitions = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+ s.Status = &v
+ return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+ s.Transitions = v
+ return s
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+// more predicates. The Lifecycle Rule will apply to any object matching all
+// of the predicates configured inside the And operator.
+type LifecycleRuleAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+
+ // All of these tags must exist in the object's tag set in order for the rule
+ // to apply.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
+ s.Tags = v
+ return s
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to.
+// A Filter must have exactly one of Prefix, Tag, or And specified.
+type LifecycleRuleFilter struct {
+ _ struct{} `type:"structure"`
+
+ // This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+ // more predicates. The Lifecycle Rule will apply to any object matching all
+ // of the predicates configured inside the And operator.
+ And *LifecycleRuleAndOperator `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string `type:"string"`
+
+ // This tag must exist in the object's tag set in order for the rule to apply.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
+ s.Tag = v
+ return s
+}
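+
+// Example (editor's sketch): a rule that expires objects under logs/ after 30
+// days, using a prefix-only Filter (exactly one of Prefix, Tag, or And, as
+// noted above). All values are illustrative.
+//
+//	rule := &s3.LifecycleRule{
+//		ID:         aws.String("expire-logs"),
+//		Status:     aws.String(s3.ExpirationStatusEnabled),
+//		Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//		Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//	}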
+
+type ListBucketAnalyticsConfigurationsInput struct {
+ _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"`
+
+ // The name of the bucket from which analytics configurations are retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketAnalyticsConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+type ListBucketAnalyticsConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of analytics configurations for a bucket.
+ AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"`
+
+ // The ContinuationToken that represents where this request began.
+ ContinuationToken *string `type:"string"`
+
+ // Indicates whether the returned list of analytics configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // will be provided for a subsequent request.
+ IsTruncated *bool `type:"boolean"`
+
+ // NextContinuationToken is sent when isTruncated is true, which indicates that
+ // there are more analytics configurations to list. The next request must include
+ // this NextContinuationToken. The token is obfuscated and is not a usable value.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput {
+ s.AnalyticsConfigurationList = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
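+
+// Example (editor's sketch): draining a truncated listing by feeding each
+// NextContinuationToken back as the next request's ContinuationToken. Assumes
+// an *s3.S3 client `svc`; the bucket name is illustrative.
+//
+//	var token *string
+//	for {
+//		out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
+//			Bucket:            aws.String("example-bucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			break
+//		}
+//		// consume out.AnalyticsConfigurationList ...
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}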
+
+type ListBucketInventoryConfigurationsInput struct {
+ _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"`
+
+ // The name of the bucket containing the inventory configurations to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The marker used to continue an inventory configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketInventoryConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+type ListBucketInventoryConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If sent in the request, the marker that is used as a starting point for this
+ // inventory configuration list response.
+ ContinuationToken *string `type:"string"`
+
+ // The list of inventory configurations for a bucket.
+ InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`
+
+ // Indicates whether the returned list of inventory configurations is truncated
+ // in this response. A value of true indicates that the list is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetInventoryConfigurationList sets the InventoryConfigurationList field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput {
+ s.InventoryConfigurationList = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
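+
+// exampleListAllInventoryConfigurations is an illustrative sketch, not part
+// of the generated API: it shows how a truncated inventory-configuration
+// listing is continued by feeding NextContinuationToken back in as
+// ContinuationToken. The *S3 client "svc" is assumed to exist.
+func exampleListAllInventoryConfigurations(svc *S3, bucket string) ([]*InventoryConfiguration, error) {
+ var configs []*InventoryConfiguration
+ input := &ListBucketInventoryConfigurationsInput{}
+ input.SetBucket(bucket)
+ for {
+  out, err := svc.ListBucketInventoryConfigurations(input)
+  if err != nil {
+   return nil, err
+  }
+  configs = append(configs, out.InventoryConfigurationList...)
+  // NextContinuationToken is only meaningful while the listing is
+  // truncated; once IsTruncated is false, the listing is complete.
+  if out.IsTruncated == nil || !*out.IsTruncated || out.NextContinuationToken == nil {
+   return configs, nil
+  }
+  input.SetContinuationToken(*out.NextContinuationToken)
+ }
+}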
+
+type ListBucketMetricsConfigurationsInput struct {
+ _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"`
+
+ // The name of the bucket containing the metrics configurations to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The marker that is used to continue a metrics configuration listing that
+ // has been truncated. Use the NextContinuationToken from a previously truncated
+ // list response to continue the listing. The continuation token is an opaque
+ // value that Amazon S3 understands.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketMetricsConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+type ListBucketMetricsConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The marker that is used as a starting point for this metrics configuration
+ // list response. This value is present if it was sent in the request.
+ ContinuationToken *string `type:"string"`
+
+ // Indicates whether the returned list of metrics configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // will be provided for a subsequent request.
+ IsTruncated *bool `type:"boolean"`
+
+ // The list of metrics configurations for a bucket.
+ MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`
+
+ // The marker used to continue a metrics configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+ s.MetricsConfigurationList = v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
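+
+// exampleCountMetricsConfigurations is an illustrative sketch, not part of
+// the generated API: it walks every page of metrics configurations, using
+// IsTruncated (as documented above) to decide whether another request is
+// needed. The *S3 client "svc" is assumed to exist.
+func exampleCountMetricsConfigurations(svc *S3, bucket string) (int, error) {
+ count := 0
+ input := &ListBucketMetricsConfigurationsInput{}
+ input.SetBucket(bucket)
+ for {
+  out, err := svc.ListBucketMetricsConfigurations(input)
+  if err != nil {
+   return 0, err
+  }
+  count += len(out.MetricsConfigurationList)
+  if out.IsTruncated == nil || !*out.IsTruncated || out.NextContinuationToken == nil {
+   return count, nil
+  }
+  input.SetContinuationToken(*out.NextContinuationToken)
+ }
+}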
+
+type ListBucketsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+ return s.String()
+}
+
+type ListBucketsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
+ s.Owner = v
+ return s
+}
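+
+// exampleListBucketNames is an illustrative sketch, not part of the generated
+// API: ListBucketsInput carries no parameters, so a single call returns every
+// bucket owned by the authenticated sender. The *S3 client "svc" is assumed
+// to exist.
+func exampleListBucketNames(svc *S3) ([]string, error) {
+ out, err := svc.ListBuckets(&ListBucketsInput{})
+ if err != nil {
+  return nil, err
+ }
+ names := make([]string, 0, len(out.Buckets))
+ for _, b := range out.Buckets {
+  if b.Name != nil {
+   names = append(names, *b.Name)
+  }
+ }
+ return names, nil
+}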
+
+type ListMultipartUploadsInput struct {
+ _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+ // in the response body. 1,000 is the maximum number of uploads that can be
+ // returned in a response.
+ MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored.
+ UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListMultipartUploadsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+type ListMultipartUploadsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Indicates whether the returned list of multipart uploads is truncated. A
+ // value of true indicates that the list was truncated. The list can be truncated
+ // if the number of multipart uploads exceeds the limit allowed or specified
+ // by max uploads.
+ IsTruncated *bool `type:"boolean"`
+
+ // The key at or after which the listing began.
+ KeyMarker *string `type:"string"`
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the upload-id-marker request parameter in a subsequent request.
+ NextUploadIdMarker *string `type:"string"`
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // Upload ID after which listing began.
+ UploadIdMarker *string `type:"string"`
+
+ Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListMultipartUploadsOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.NextUploadIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+// SetUploads sets the Uploads field's value.
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+ s.Uploads = v
+ return s
+}
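+
+// exampleListAllMultipartUploads is an illustrative sketch, not part of the
+// generated API: when a listing is truncated, NextKeyMarker and
+// NextUploadIdMarker are fed back together as the key-marker and
+// upload-id-marker parameters. The *S3 client "svc" is assumed to exist.
+func exampleListAllMultipartUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
+ var uploads []*MultipartUpload
+ input := &ListMultipartUploadsInput{}
+ input.SetBucket(bucket)
+ for {
+  out, err := svc.ListMultipartUploads(input)
+  if err != nil {
+   return nil, err
+  }
+  uploads = append(uploads, out.Uploads...)
+  if out.IsTruncated == nil || !*out.IsTruncated {
+   return uploads, nil
+  }
+  if out.NextKeyMarker != nil {
+   input.SetKeyMarker(*out.NextKeyMarker)
+  }
+  if out.NextUploadIdMarker != nil {
+   input.SetUploadIdMarker(*out.NextUploadIdMarker)
+  }
+ }
+}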
+
+type ListObjectVersionsInput struct {
+ _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectVersionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectVersionsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+type ListObjectVersionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria. If your results were truncated, you can
+ // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the
+ // rest of the results.
+ IsTruncated *bool `type:"boolean"`
+
+ // Marks the last Key returned in a truncated response.
+ KeyMarker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // Use this value for the key marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // Use this value for the next version id marker parameter in a subsequent request.
+ NextVersionIdMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+
+ VersionIdMarker *string `type:"string"`
+
+ Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDeleteMarkers sets the DeleteMarkers field's value.
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
+ s.DeleteMarkers = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.NextVersionIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+// SetVersions sets the Versions field's value.
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
+ s.Versions = v
+ return s
+}
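+
+// exampleCountObjectVersions is an illustrative sketch, not part of the
+// generated API: it pages through a versioned bucket with NextKeyMarker and
+// NextVersionIdMarker, tallying object versions and delete markers
+// separately. The *S3 client "svc" is assumed to exist.
+func exampleCountObjectVersions(svc *S3, bucket string) (int, int, error) {
+ var versions, deleteMarkers int
+ input := &ListObjectVersionsInput{}
+ input.SetBucket(bucket)
+ for {
+  out, err := svc.ListObjectVersions(input)
+  if err != nil {
+   return 0, 0, err
+  }
+  versions += len(out.Versions)
+  deleteMarkers += len(out.DeleteMarkers)
+  if out.IsTruncated == nil || !*out.IsTruncated {
+   return versions, deleteMarkers, nil
+  }
+  if out.NextKeyMarker != nil {
+   input.SetKeyMarker(*out.NextKeyMarker)
+  }
+  if out.NextVersionIdMarker != nil {
+   input.SetVersionIdMarker(*out.NextVersionIdMarker)
+  }
+ }
+}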
+
+type ListObjectsInput struct {
+ _ struct{} `locationName:"ListObjectsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request. Bucket owners need not specify this parameter in their
+ // requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in the subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: This element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include the NextMarker and it
+ // is truncated, you can use the value of the last Key in the response as the
+ // marker in the subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+ s.Contents = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+ s.Prefix = &v
+ return s
+}
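+
+// exampleListAllObjects is an illustrative sketch, not part of the generated
+// API: it pages with the V1 marker scheme. Because NextMarker is only
+// returned when a delimiter is specified, a truncated response without
+// NextMarker falls back to the last returned Key, as documented above. The
+// *S3 client "svc" is assumed to exist.
+func exampleListAllObjects(svc *S3, bucket string) ([]*Object, error) {
+ var objects []*Object
+ input := &ListObjectsInput{}
+ input.SetBucket(bucket)
+ for {
+  out, err := svc.ListObjects(input)
+  if err != nil {
+   return nil, err
+  }
+  objects = append(objects, out.Contents...)
+  if out.IsTruncated == nil || !*out.IsTruncated {
+   return objects, nil
+  }
+  switch {
+  case out.NextMarker != nil:
+   input.SetMarker(*out.NextMarker)
+  case len(out.Contents) > 0:
+   // Fall back to the last returned key when NextMarker is absent.
+   input.SetMarker(*out.Contents[len(out.Contents)-1].Key)
+  default:
+   return objects, nil
+  }
+ }
+}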
+
+type ListObjectsV2Input struct {
+ _ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
+
+ // Name of the bucket to list.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The owner field is not present in ListObjectsV2 results by default. If you
+ // want the owner field returned with each key in the result, set FetchOwner
+ // to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // list objects request in V2 style. Bucket owners need not specify this parameter
+ // in their requests.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListObjectsV2Input) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+ s.EncodingType = &v
+ return s
+}
+
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+ s.FetchOwner = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+ s.StartAfter = &v
+ return s
+}
+
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by the delimiter.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key.
+ ContinuationToken *string `type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include 50 keys or fewer.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // Name of the bucket to list.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list request to
+ // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+ // is obfuscated and is not a real key.
+ NextContinuationToken *string `type:"string"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+ s.Contents = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+ s.KeyCount = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+ s.Name = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+ s.Prefix = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+ s.StartAfter = &v
+ return s
+}
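+
+// exampleListAllObjectsV2 is an illustrative sketch, not part of the
+// generated API: V2 listings chain NextContinuationToken from each response
+// into ContinuationToken on the next request until the listing is no longer
+// truncated. The *S3 client "svc" is assumed to exist.
+func exampleListAllObjectsV2(svc *S3, bucket, prefix string) ([]*Object, error) {
+ var objects []*Object
+ input := &ListObjectsV2Input{}
+ input.SetBucket(bucket).SetPrefix(prefix)
+ for {
+  out, err := svc.ListObjectsV2(input)
+  if err != nil {
+   return nil, err
+  }
+  objects = append(objects, out.Contents...)
+  // NextContinuationToken is only sent when IsTruncated is true.
+  if out.NextContinuationToken == nil {
+   return objects, nil
+  }
+  input.SetContinuationToken(*out.NextContinuationToken)
+ }
+}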
+
+type ListPartsInput struct {
+ _ struct{} `locationName:"ListPartsRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListPartsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+ s.UploadId = &v
+ return s
+}
+
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for an abort operation under a lifecycle rule.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+ // ID of the lifecycle rule that makes a multipart upload eligible for the
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ Owner *Owner `type:"structure"`
+
+ // Part number after which listing begins.
+ PartNumberMarker *int64 `type:"integer"`
+
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListPartsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *ListPartsOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
+ s.Initiator = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput {
+ s.NextPartNumberMarker = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput {
+ s.Owner = v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput {
+ s.Parts = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
+ s.UploadId = &v
+ return s
+}
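+
+// exampleListAllParts is an illustrative sketch, not part of the generated
+// API: it pages through the parts of a single multipart upload, feeding
+// NextPartNumberMarker back in as part-number-marker. The *S3 client "svc"
+// is assumed to exist.
+func exampleListAllParts(svc *S3, bucket, key, uploadID string) ([]*Part, error) {
+ var parts []*Part
+ input := &ListPartsInput{}
+ input.SetBucket(bucket)
+ input.SetKey(key)
+ input.SetUploadId(uploadID)
+ for {
+  out, err := svc.ListParts(input)
+  if err != nil {
+   return nil, err
+  }
+  parts = append(parts, out.Parts...)
+  if out.IsTruncated == nil || !*out.IsTruncated || out.NextPartNumberMarker == nil {
+   return parts, nil
+  }
+  input.SetPartNumberMarker(*out.NextPartNumberMarker)
+ }
+}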
+
+// Describes an S3 location that will receive the results of the restore request.
+type Location struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants that control access to the staged results.
+ AccessControlList []*Grant `locationNameList:"Grant" type:"list"`
+
+ // The name of the bucket where the restore results will be placed.
+ //
+ // BucketName is a required field
+ BucketName *string `type:"string" required:"true"`
+
+ // The canned ACL to apply to the restore results.
+ CannedACL *string `type:"string" enum:"ObjectCannedACL"`
+
+ // Describes the server-side encryption that will be applied to the restore
+ // results.
+ Encryption *Encryption `type:"structure"`
+
+ // The prefix that is prepended to the restore results for this request.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+
+ // The class of storage used to store the restore results.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // The tag-set that is applied to the restore results.
+ Tagging *Tagging `type:"structure"`
+
+ // A list of metadata to store with the restore results in S3.
+ UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"`
+}
+
+// String returns the string representation
+func (s Location) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Location) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Location) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Location"}
+ if s.BucketName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketName"))
+ }
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.AccessControlList != nil {
+ for i, v := range s.AccessControlList {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Encryption != nil {
+ if err := s.Encryption.Validate(); err != nil {
+ invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessControlList sets the AccessControlList field's value.
+func (s *Location) SetAccessControlList(v []*Grant) *Location {
+ s.AccessControlList = v
+ return s
+}
+
+// SetBucketName sets the BucketName field's value.
+func (s *Location) SetBucketName(v string) *Location {
+ s.BucketName = &v
+ return s
+}
+
+// SetCannedACL sets the CannedACL field's value.
+func (s *Location) SetCannedACL(v string) *Location {
+ s.CannedACL = &v
+ return s
+}
+
+// SetEncryption sets the Encryption field's value.
+func (s *Location) SetEncryption(v *Encryption) *Location {
+ s.Encryption = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *Location) SetPrefix(v string) *Location {
+ s.Prefix = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Location) SetStorageClass(v string) *Location {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *Location) SetTagging(v *Tagging) *Location {
+ s.Tagging = v
+ return s
+}
+
+// SetUserMetadata sets the UserMetadata field's value.
+func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location {
+ s.UserMetadata = v
+ return s
+}
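+
+// exampleRestoreLocation is an illustrative sketch, not part of the generated
+// API: it builds a minimal valid Location (BucketName and Prefix are the only
+// required fields) for a restore request's output and validates it. The
+// bucket name and prefix shown are placeholders.
+func exampleRestoreLocation() (*Location, error) {
+ loc := &Location{}
+ loc.SetBucketName("restore-results-bucket").
+  SetPrefix("restored/").
+  SetStorageClass("STANDARD")
+ if err := loc.Validate(); err != nil {
+  return nil, err
+ }
+ return loc, nil
+}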
+
+// Describes where logs are stored and the prefix that Amazon S3 assigns to
+// all log object keys for a bucket. For more information, see PUT Bucket logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+// in the Amazon Simple Storage Service API Reference.
+type LoggingEnabled struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the bucket where you want Amazon S3 to store server access logs.
+ // You can have your logs delivered to any bucket that you own, including the
+ // same bucket that is being logged. You can also configure multiple buckets
+ // to deliver their logs to the same target bucket. In this case you should
+ // choose a different TargetPrefix for each source bucket so that the delivered
+ // log files can be distinguished by key.
+ //
+ // TargetBucket is a required field
+ TargetBucket *string `type:"string" required:"true"`
+
+ TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+
+ // A prefix for all log object keys. If you store log files from multiple Amazon
+ // S3 buckets in a single bucket, you can use a prefix to distinguish which
+ // log files came from which bucket.
+ //
+ // TargetPrefix is a required field
+ TargetPrefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LoggingEnabled) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoggingEnabled) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingEnabled) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
+ if s.TargetBucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetBucket"))
+ }
+ if s.TargetPrefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetPrefix"))
+ }
+ if s.TargetGrants != nil {
+ for i, v := range s.TargetGrants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTargetBucket sets the TargetBucket field's value.
+func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled {
+ s.TargetBucket = &v
+ return s
+}
+
+// SetTargetGrants sets the TargetGrants field's value.
+func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
+ s.TargetGrants = v
+ return s
+}
+
+// SetTargetPrefix sets the TargetPrefix field's value.
+func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
+ s.TargetPrefix = &v
+ return s
+}
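+
+// exampleLoggingEnabled is an illustrative sketch, not part of the generated
+// API: TargetBucket and TargetPrefix are both required, and giving each
+// source bucket its own prefix keeps delivered log keys distinguishable when
+// several buckets share one target. The target bucket name is a placeholder.
+func exampleLoggingEnabled(sourceBucket string) (*LoggingEnabled, error) {
+ le := &LoggingEnabled{}
+ le.SetTargetBucket("central-log-bucket").
+  SetTargetPrefix("logs/" + sourceBucket + "/")
+ if err := le.Validate(); err != nil {
+  return nil, err
+ }
+ return le, nil
+}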
+
+// A metadata key-value pair to store with an object.
+type MetadataEntry struct {
+ _ struct{} `type:"structure"`
+
+ Name *string `type:"string"`
+
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MetadataEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetadataEntry) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *MetadataEntry) SetName(v string) *MetadataEntry {
+ s.Name = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *MetadataEntry) SetValue(v string) *MetadataEntry {
+ s.Value = &v
+ return s
+}
+
+type MetricsAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix used when evaluating an AND predicate.
+ Prefix *string `type:"string"`
+
+ // The list of tags used when evaluating an AND predicate.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s MetricsAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
+ s.Tags = v
+ return s
+}
+
+// Specifies a metrics configuration for the CloudWatch request metrics (specified
+// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating
+// an existing metrics configuration, note that this is a full replacement of
+// the existing metrics configuration. If you don't include the elements you
+// want to keep, they are erased. For more information, see PUT Bucket metrics
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
+// in the Amazon Simple Storage Service API Reference.
+type MetricsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies a metrics configuration filter. The metrics configuration will
+ // only include objects that meet the filter's criteria. A filter must be a
+ // prefix, a tag, or a conjunction (MetricsAndOperator).
+ Filter *MetricsFilter `type:"structure"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
+ s.Id = &v
+ return s
+}
+
+type MetricsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating a
+ // metrics filter. The operator must have at least two predicates, and an object
+ // must match all of the predicates in order for the filter to apply.
+ And *MetricsAndOperator `type:"structure"`
+
+ // The prefix used when evaluating a metrics filter.
+ Prefix *string `type:"string"`
+
+ // The tag used when evaluating a metrics filter.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s MetricsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
+ s.Tag = v
+ return s
+}
+
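+// Illustrative sketch (not part of the generated API): because putting a
+// metrics configuration fully replaces any existing one, every element to
+// keep must be restated when updating. The Id and prefix below are
+// hypothetical.
+func exampleMetricsConfiguration() *MetricsConfiguration {
+	return (&MetricsConfiguration{}).
+		SetId("EntireBucket").
+		SetFilter((&MetricsFilter{}).SetPrefix("documents/"))
+}
+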
+type MultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time `type:"timestamp"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ Owner *Owner `type:"structure"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID that identifies the multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetInitiated sets the Initiated field's value.
+func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload {
+ s.Initiated = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload {
+ s.Initiator = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *MultipartUpload) SetKey(v string) *MultipartUpload {
+ s.Key = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload {
+ s.Owner = v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
+ s.UploadId = &v
+ return s
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended)
+// to request that Amazon S3 delete noncurrent object versions at a specific
+// period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // in the Amazon Simple Storage Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s NoncurrentVersionExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionExpiration) GoString() string {
+ return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration {
+ s.NoncurrentDays = &v
+ return s
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER,
+// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
+// is suspended), you can set this action to request that Amazon S3 transition
+// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
+// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
+// lifetime.
+type NoncurrentVersionTransition struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // in the Amazon Simple Storage Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s NoncurrentVersionTransition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionTransition) GoString() string {
+ return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition {
+ s.NoncurrentDays = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition {
+ s.StorageClass = &v
+ return s
+}
+
+// A container for specifying the notification configuration of the bucket.
+// If this element is empty, notifications are turned off for the bucket.
+type NotificationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the AWS Lambda functions to invoke and the events for which to
+ // invoke them.
+ LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
+
+ // The Amazon Simple Queue Service queues to publish messages to and the events
+ // for which to publish messages.
+ QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
+
+ // The topic to which notifications are sent and the events for which notifications
+ // are generated.
+ TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s NotificationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NotificationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"}
+ if s.LambdaFunctionConfigurations != nil {
+ for i, v := range s.LambdaFunctionConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.QueueConfigurations != nil {
+ for i, v := range s.QueueConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.TopicConfigurations != nil {
+ for i, v := range s.TopicConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value.
+func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration {
+ s.LambdaFunctionConfigurations = v
+ return s
+}
+
+// SetQueueConfigurations sets the QueueConfigurations field's value.
+func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration {
+ s.QueueConfigurations = v
+ return s
+}
+
+// SetTopicConfigurations sets the TopicConfigurations field's value.
+func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration {
+ s.TopicConfigurations = v
+ return s
+}
+
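+// Illustrative sketch (not part of the generated API): per the documentation
+// above, an empty NotificationConfiguration turns notifications off, so a
+// zero-value struct can be sent to clear all notification settings. The
+// bucket name is hypothetical.
+func exampleDisableNotifications() *PutBucketNotificationConfigurationInput {
+	return (&PutBucketNotificationConfigurationInput{}).
+		SetBucket("my-example-bucket").
+		SetNotificationConfiguration(&NotificationConfiguration{})
+}
+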
+type NotificationConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
+
+ QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
+
+ TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated {
+ s.CloudFunctionConfiguration = v
+ return s
+}
+
+// SetQueueConfiguration sets the QueueConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated {
+ s.QueueConfiguration = v
+ return s
+}
+
+// SetTopicConfiguration sets the TopicConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated {
+ s.TopicConfiguration = v
+ return s
+}
+
+// Specifies object key name filtering rules. For information about key name
+// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon Simple Storage Service Developer Guide.
+type NotificationConfigurationFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A container for object key name prefix and suffix filtering rules.
+ Key *KeyFilter `locationName:"S3Key" type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationFilter) GoString() string {
+ return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter {
+ s.Key = v
+ return s
+}
+
+type Object struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ LastModified *time.Time `type:"timestamp"`
+
+ Owner *Owner `type:"structure"`
+
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation
+func (s Object) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Object) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+ s.ETag = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+ s.StorageClass = &v
+ return s
+}
+
+type ObjectIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Key name of the object to delete.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+ s.VersionId = &v
+ return s
+}
+
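+// Illustrative sketch (not part of the generated API): building an identifier
+// for a versioned delete. Key is required and must be non-empty; VersionId is
+// optional and targets a specific version. Both values below are hypothetical.
+func exampleObjectIdentifier() *ObjectIdentifier {
+	return (&ObjectIdentifier{}).
+		SetKey("reports/2019/summary.csv").
+		SetVersionId("3HL4kqtJlcpXroDTDmhICxf")
+}
+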
+// The container element for object lock configuration parameters.
+type ObjectLockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether this bucket has an object lock configuration enabled.
+ ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
+
+ // The object lock rule in place for the specified object.
+ Rule *ObjectLockRule `type:"structure"`
+}
+
+// String returns the string representation
+func (s ObjectLockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectLockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetObjectLockEnabled sets the ObjectLockEnabled field's value.
+func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration {
+ s.ObjectLockEnabled = &v
+ return s
+}
+
+// SetRule sets the Rule field's value.
+func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration {
+ s.Rule = v
+ return s
+}
+
+// A Legal Hold configuration for an object.
+type ObjectLockLegalHold struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the specified object has a Legal Hold in place.
+ Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"`
+}
+
+// String returns the string representation
+func (s ObjectLockLegalHold) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectLockLegalHold) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold {
+ s.Status = &v
+ return s
+}
+
+// A Retention configuration for an object.
+type ObjectLockRetention struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the Retention mode for the specified object.
+ Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
+
+ // The date on which this object lock retention expires.
+ RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s ObjectLockRetention) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectLockRetention) GoString() string {
+ return s.String()
+}
+
+// SetMode sets the Mode field's value.
+func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention {
+ s.Mode = &v
+ return s
+}
+
+// SetRetainUntilDate sets the RetainUntilDate field's value.
+func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention {
+ s.RetainUntilDate = &v
+ return s
+}
+
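+// Illustrative sketch (not part of the generated API): a retention setting
+// that keeps an object locked for 30 days from now. "GOVERNANCE" is one of
+// the ObjectLockRetentionMode enum values; the duration is hypothetical.
+func exampleObjectLockRetention() *ObjectLockRetention {
+	return (&ObjectLockRetention{}).
+		SetMode("GOVERNANCE").
+		SetRetainUntilDate(time.Now().AddDate(0, 0, 30))
+}
+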
+// The container element for an object lock rule.
+type ObjectLockRule struct {
+ _ struct{} `type:"structure"`
+
+ // The default retention period that you want to apply to new objects placed
+ // in the specified bucket.
+ DefaultRetention *DefaultRetention `type:"structure"`
+}
+
+// String returns the string representation
+func (s ObjectLockRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectLockRule) GoString() string {
+ return s.String()
+}
+
+// SetDefaultRetention sets the DefaultRetention field's value.
+func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule {
+ s.DefaultRetention = v
+ return s
+}
+
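+// Illustrative sketch (not part of the generated API): an object lock
+// configuration with a 30-day GOVERNANCE default retention. The
+// DefaultRetention setters SetMode and SetDays are assumed from the SDK's
+// generated-setter convention and are not shown in this hunk; "Enabled" is
+// the ObjectLockEnabled enum value.
+func exampleObjectLockConfiguration() *ObjectLockConfiguration {
+	rule := (&ObjectLockRule{}).SetDefaultRetention(
+		(&DefaultRetention{}).SetMode("GOVERNANCE").SetDays(30),
+	)
+	return (&ObjectLockConfiguration{}).
+		SetObjectLockEnabled("Enabled").
+		SetRule(rule)
+}
+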
+type ObjectVersion struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of the object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectVersion) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectVersion) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *ObjectVersion) SetETag(v string) *ObjectVersion {
+ s.ETag = &v
+ return s
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectVersion) SetKey(v string) *ObjectVersion {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *ObjectVersion) SetSize(v int64) *ObjectVersion {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
+ s.VersionId = &v
+ return s
+}
+
+// Describes the location where the restore job's output is stored.
+type OutputLocation struct {
+ _ struct{} `type:"structure"`
+
+ // Describes an S3 location that will receive the results of the restore request.
+ S3 *Location `type:"structure"`
+}
+
+// String returns the string representation
+func (s OutputLocation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputLocation) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OutputLocation) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "OutputLocation"}
+ if s.S3 != nil {
+ if err := s.S3.Validate(); err != nil {
+ invalidParams.AddNested("S3", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3 sets the S3 field's value.
+func (s *OutputLocation) SetS3(v *Location) *OutputLocation {
+ s.S3 = v
+ return s
+}
+
+// Describes how results of the Select job are serialized.
+type OutputSerialization struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the serialization of CSV-encoded Select results.
+ CSV *CSVOutput `type:"structure"`
+
+	// Specifies JSON as the request's output serialization format.
+ JSON *JSONOutput `type:"structure"`
+}
+
+// String returns the string representation
+func (s OutputSerialization) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OutputSerialization) GoString() string {
+ return s.String()
+}
+
+// SetCSV sets the CSV field's value.
+func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization {
+ s.CSV = v
+ return s
+}
+
+// SetJSON sets the JSON field's value.
+func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization {
+ s.JSON = v
+ return s
+}
+
+type Owner struct {
+ _ struct{} `type:"structure"`
+
+ DisplayName *string `type:"string"`
+
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Owner) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Owner) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Owner) SetDisplayName(v string) *Owner {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Owner) SetID(v string) *Owner {
+ s.ID = &v
+ return s
+}
+
+type ParquetInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ParquetInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ParquetInput) GoString() string {
+ return s.String()
+}
+
+type Part struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time `type:"timestamp"`
+
+ // Part number identifying the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber *int64 `type:"integer"`
+
+ // Size in bytes of the uploaded part data.
+ Size *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Part) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Part) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Part) SetETag(v string) *Part {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Part) SetLastModified(v time.Time) *Part {
+ s.LastModified = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *Part) SetPartNumber(v int64) *Part {
+ s.PartNumber = &v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Part) SetSize(v int64) *Part {
+ s.Size = &v
+ return s
+}
+
+// The container element for a bucket's policy status.
+type PolicyStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The policy status for this bucket. TRUE indicates that this bucket is public.
+ // FALSE indicates that the bucket is not public.
+ IsPublic *bool `locationName:"IsPublic" type:"boolean"`
+}
+
+// String returns the string representation
+func (s PolicyStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyStatus) GoString() string {
+ return s.String()
+}
+
+// SetIsPublic sets the IsPublic field's value.
+func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus {
+ s.IsPublic = &v
+ return s
+}
+
+type Progress struct {
+ _ struct{} `type:"structure"`
+
+ // The current number of uncompressed object bytes processed.
+ BytesProcessed *int64 `type:"long"`
+
+ // The current number of bytes of records payload data returned.
+ BytesReturned *int64 `type:"long"`
+
+ // The current number of object bytes scanned.
+ BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s Progress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Progress) GoString() string {
+ return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Progress) SetBytesProcessed(v int64) *Progress {
+ s.BytesProcessed = &v
+ return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Progress) SetBytesReturned(v int64) *Progress {
+ s.BytesReturned = &v
+ return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Progress) SetBytesScanned(v int64) *Progress {
+ s.BytesScanned = &v
+ return s
+}
+
+type ProgressEvent struct {
+ _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
+
+ // The Progress event details.
+ Details *Progress `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s ProgressEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProgressEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
+ s.Details = v
+ return s
+}
+
+// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Specifies the Block Public Access configuration for an Amazon S3 bucket.
+type PublicAccessBlockConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs)
+ // for this bucket and objects in this bucket. Setting this element to TRUE
+ // causes the following behavior:
+ //
+ // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
+ // public.
+ //
+ // * PUT Object calls fail if the request includes a public ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should block public bucket policies for this
+ // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
+ // PUT Bucket policy if the specified bucket policy allows public access.
+ //
+ // Enabling this setting doesn't affect existing bucket policies.
+ BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"`
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to
+ // ignore all public ACLs on this bucket and objects in this bucket.
+ //
+ // Enabling this setting doesn't affect the persistence of any existing ACLs
+ // and doesn't prevent new public ACLs from being set.
+ IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"`
+
+ // Specifies whether Amazon S3 should restrict public bucket policies for this
+ // bucket. Setting this element to TRUE restricts access to this bucket to only
+ // AWS services and authorized users within this account if the bucket has a
+ // public policy.
+ //
+ // Enabling this setting doesn't affect previously stored bucket policies, except
+ // that public and cross-account access within any public bucket policy, including
+ // non-public delegation to specific accounts, is blocked.
+ RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"`
+}
+
+// String returns the string representation
+func (s PublicAccessBlockConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PublicAccessBlockConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetBlockPublicAcls sets the BlockPublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicAcls = &v
+ return s
+}
+
+// SetBlockPublicPolicy sets the BlockPublicPolicy field's value.
+func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration {
+ s.BlockPublicPolicy = &v
+ return s
+}
+
+// SetIgnorePublicAcls sets the IgnorePublicAcls field's value.
+func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration {
+ s.IgnorePublicAcls = &v
+ return s
+}
+
+// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value.
+func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration {
+ s.RestrictPublicBuckets = &v
+ return s
+}
+
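+// Illustrative sketch (not part of the generated API): the most restrictive
+// Block Public Access configuration, enabling all four protections described
+// above.
+func exampleBlockAllPublicAccess() *PublicAccessBlockConfiguration {
+	return (&PublicAccessBlockConfiguration{}).
+		SetBlockPublicAcls(true).
+		SetBlockPublicPolicy(true).
+		SetIgnorePublicAcls(true).
+		SetRestrictPublicBuckets(true)
+}
+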
+type PutBucketAccelerateConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`
+
+ // Specifies the Accelerate Configuration you want to set for the bucket.
+ //
+ // AccelerateConfiguration is a required field
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Name of the bucket for which the accelerate configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"}
+ if s.AccelerateConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration"))
+ }
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccelerateConfiguration sets the AccelerateConfiguration field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput {
+ s.AccelerateConfiguration = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
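+// Illustrative sketch (not part of the generated API): enabling Transfer
+// Acceleration on a bucket. "Enabled" is one of the BucketAccelerateStatus
+// enum values; the bucket name is hypothetical, and AccelerateConfiguration's
+// SetStatus setter is assumed from the SDK's generated-setter convention.
+func examplePutBucketAccelerateConfigurationInput() *PutBucketAccelerateConfigurationInput {
+	return (&PutBucketAccelerateConfigurationInput{}).
+		SetBucket("my-example-bucket").
+		SetAccelerateConfiguration((&AccelerateConfiguration{}).SetStatus("Enabled"))
+}
+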
+type PutBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketAclInput struct {
+ _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s PutBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
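+// Illustrative sketch (not part of the generated API): applying a canned ACL
+// via the x-amz-acl header rather than an explicit AccessControlPolicy. The
+// bucket name is hypothetical; "private" is one of the BucketCannedACL enum
+// values.
+func examplePutBucketAclInput() *PutBucketAclInput {
+	return (&PutBucketAclInput{}).
+		SetBucket("my-example-bucket").
+		SetACL("private")
+}
+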
+type PutBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketAnalyticsConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"`
+
+ // The configuration and any analyses for the analytics filter.
+ //
+ // AnalyticsConfiguration is a required field
+ AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The name of the bucket in which the analytics configuration is stored.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID that identifies the analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"}
+ if s.AnalyticsConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration"))
+ }
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.AnalyticsConfiguration != nil {
+ if err := s.AnalyticsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput {
+ s.AnalyticsConfiguration = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+type PutBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketCorsInput struct {
+ _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Describes the cross-origin access configuration for objects in an Amazon
+ // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+ // Simple Storage Service Developer Guide.
+ //
+ // CORSConfiguration is a required field
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CORSConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
+ }
+ if s.CORSConfiguration != nil {
+ if err := s.CORSConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCORSConfiguration sets the CORSConfiguration field's value.
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
+ s.CORSConfiguration = v
+ return s
+}
+
+type PutBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketEncryptionInput struct {
+ _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"`
+
+ // Specifies default encryption for a bucket using server-side encryption with
+ // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
+ // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket
+ // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies the default server-side-encryption configuration.
+ //
+ // ServerSideEncryptionConfiguration is a required field
+ ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketEncryptionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketEncryptionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketEncryptionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.ServerSideEncryptionConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration"))
+ }
+ if s.ServerSideEncryptionConfiguration != nil {
+ if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketEncryptionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value.
+func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput {
+ s.ServerSideEncryptionConfiguration = v
+ return s
+}
+
+type PutBucketEncryptionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketEncryptionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketEncryptionOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketInventoryConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"`
+
+ // The name of the bucket where the inventory configuration will be stored.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ // Specifies the inventory configuration.
+ //
+ // InventoryConfiguration is a required field
+ InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.InventoryConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration"))
+ }
+ if s.InventoryConfiguration != nil {
+ if err := s.InventoryConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput {
+ s.InventoryConfiguration = v
+ return s
+}
+
+type PutBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
+ // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
+type PutBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleInput struct {
+ _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
+type PutBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLoggingInput struct {
+ _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // BucketLoggingStatus is a required field
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.BucketLoggingStatus == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+ }
+ if s.BucketLoggingStatus != nil {
+ if err := s.BucketLoggingStatus.Validate(); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketLoggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
+func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
+ s.BucketLoggingStatus = v
+ return s
+}
+
+type PutBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketMetricsConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"`
+
+ // The name of the bucket for which the metrics configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ // Specifies the metrics configuration.
+ //
+ // MetricsConfiguration is a required field
+ MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.MetricsConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration"))
+ }
+ if s.MetricsConfiguration != nil {
+ if err := s.MetricsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput {
+ s.MetricsConfiguration = v
+ return s
+}
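+
+// A hedged usage sketch (svc and all names are placeholders, not part of
+// this file): configuring bucket-wide metrics with the required Id.
+//
+//	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		Id:     aws.String("EntireBucket"),
+//		MetricsConfiguration: &s3.MetricsConfiguration{
+//			Id: aws.String("EntireBucket"),
+//		},
+//	})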
+
+type PutBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketNotificationConfigurationInput struct {
+ _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A container for specifying the notification configuration of the bucket.
+ // If this element is empty, notifications are turned off for the bucket.
+ //
+ // NotificationConfiguration is a required field
+ NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.NotificationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+ }
+ if s.NotificationConfiguration != nil {
+ if err := s.NotificationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
+ s.NotificationConfiguration = v
+ return s
+}
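+
+// Illustrative sketch only: per the doc comment above, sending an empty
+// NotificationConfiguration turns notifications off for the bucket. svc is
+// an assumed, already-configured client.
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket:                    aws.String("examplebucket"),
+//		NotificationConfiguration: &s3.NotificationConfiguration{},
+//	})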
+
+type PutBucketNotificationConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketNotificationInput struct {
+ _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // NotificationConfiguration is a required field
+ NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.NotificationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketNotificationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
+ s.NotificationConfiguration = v
+ return s
+}
+
+type PutBucketNotificationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketPolicyInput struct {
+ _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Set this parameter to true to confirm that you want to remove your permissions
+ // to change this bucket policy in the future.
+ ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"`
+
+ // The bucket policy as a JSON document.
+ //
+ // Policy is a required field
+ Policy *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Policy == nil {
+ invalidParams.Add(request.NewErrParamRequired("Policy"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value.
+func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput {
+ s.ConfirmRemoveSelfBucketAccess = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
+ s.Policy = &v
+ return s
+}
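+
+// Editor's sketch (placeholder names, error handling elided): Policy is the
+// bucket policy as a raw JSON string, so it is passed with aws.String.
+//
+//	policy := `{"Version":"2012-10-17","Statement":[]}`
+//	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//		Bucket: aws.String("examplebucket"),
+//		Policy: aws.String(policy),
+//	})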
+
+type PutBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketReplicationInput struct {
+ _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ //
+ // ReplicationConfiguration is a required field
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // A token that allows Amazon S3 object lock to be enabled for an existing bucket.
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.ReplicationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
+ }
+ if s.ReplicationConfiguration != nil {
+ if err := s.ReplicationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
+// SetToken sets the Token field's value.
+func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput {
+ s.Token = &v
+ return s
+}
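+
+// A minimal, hedged sketch of a replication configuration; the role ARN,
+// destination bucket ARN, and svc are placeholders. Real configurations need
+// an IAM role that S3 can assume and versioning enabled on both buckets.
+//
+//	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//		Bucket: aws.String("examplebucket"),
+//		ReplicationConfiguration: &s3.ReplicationConfiguration{
+//			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//			Rules: []*s3.ReplicationRule{{
+//				Status:      aws.String("Enabled"),
+//				Prefix:      aws.String(""),
+//				Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::exampledestbucket")},
+//			}},
+//		},
+//	})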
+
+type PutBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketRequestPaymentInput struct {
+ _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // RequestPaymentConfiguration is a required field
+ RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketRequestPaymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.RequestPaymentConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration"))
+ }
+ if s.RequestPaymentConfiguration != nil {
+ if err := s.RequestPaymentConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketRequestPaymentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
+ s.RequestPaymentConfiguration = v
+ return s
+}
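+
+// Sketch only (assumed svc, placeholder bucket): making the bucket
+// requester-pays. Payer accepts "Requester" or "BucketOwner".
+//
+//	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//		Bucket: aws.String("examplebucket"),
+//		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//			Payer: aws.String("Requester"),
+//		},
+//	})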
+
+type PutBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketTaggingInput struct {
+ _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
+ s.Tagging = v
+ return s
+}
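+
+// Hedged example (names are placeholders): Tagging wraps a TagSet of
+// key/value pairs; the set replaces any existing bucket tags.
+//
+//	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//		Bucket: aws.String("examplebucket"),
+//		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//			{Key: aws.String("team"), Value: aws.String("storage")},
+//		}},
+//	})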
+
+type PutBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketVersioningInput struct {
+ _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Describes the versioning state of an Amazon S3 bucket. For more information,
+ // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+ // in the Amazon Simple Storage Service API Reference.
+ //
+ // VersioningConfiguration is a required field
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.VersioningConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketVersioningInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetMFA sets the MFA field's value.
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
+ s.MFA = &v
+ return s
+}
+
+// SetVersioningConfiguration sets the VersioningConfiguration field's value.
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
+ s.VersioningConfiguration = v
+ return s
+}
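+
+// Illustrative sketch (assumed svc): enabling versioning. Status may be
+// "Enabled" or "Suspended"; MFA is only needed when MFA Delete is in play.
+//
+//	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//		Bucket: aws.String("examplebucket"),
+//		VersioningConfiguration: &s3.VersioningConfiguration{
+//			Status: aws.String(s3.BucketVersioningStatusEnabled),
+//		},
+//	})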
+
+type PutBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketWebsiteInput struct {
+ _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies website configuration parameters for an Amazon S3 bucket.
+ //
+ // WebsiteConfiguration is a required field
+ WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.WebsiteConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
+ }
+ if s.WebsiteConfiguration != nil {
+ if err := s.WebsiteConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
+ s.WebsiteConfiguration = v
+ return s
+}
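+
+// Editor's sketch with placeholder names: a minimal static-website
+// configuration with an index document.
+//
+//	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//		Bucket: aws.String("examplebucket"),
+//		WebsiteConfiguration: &s3.WebsiteConfiguration{
+//			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//		},
+//	})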
+
+type PutBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type PutObjectAclInput struct {
+ _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
+ s.VersionId = &v
+ return s
+}
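+
+// A hedged sketch (svc, bucket, and key are placeholders): applying a canned
+// ACL to one object; AccessControlPolicy is the alternative for explicit grants.
+//
+//	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
+//		Bucket: aws.String("examplebucket"),
+//		Key:    aws.String("example.txt"),
+//	})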
+
+type PutObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectInput struct {
+ _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket to which the PUT operation was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+	// auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT operation was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // The Legal Hold status that you want to apply to the specified object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The object lock mode that you want to apply to this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want this object's object lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput {
+ s.ContentMD5 = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput {
+ s.ObjectLockLegalHoldStatus = &v
+ return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput {
+ s.ObjectLockMode = &v
+ return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput {
+ s.ObjectLockRetainUntilDate = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *PutObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
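+
+// Editor's sketch, not part of the generated API: the common PutObject call.
+// Body takes any io.ReadSeeker (bytes.NewReader here); svc and the names are
+// placeholders and error handling is elided.
+//
+//	_, err := svc.PutObject(&s3.PutObjectInput{
+//		Bucket:      aws.String("examplebucket"),
+//		Key:         aws.String("example.txt"),
+//		Body:        bytes.NewReader([]byte("hello world")),
+//		ContentType: aws.String("text/plain"),
+//	})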
+
+type PutObjectLegalHoldInput struct {
+ _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`
+
+ // The bucket containing the object that you want to place a Legal Hold on.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The key name for the object that you want to place a Legal Hold on.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Container element for the Legal Hold configuration you want to apply to the
+ // specified object.
+ LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The version ID of the object that you want to place a Legal Hold on.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectLegalHoldInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectLegalHoldInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectLegalHoldInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectLegalHoldInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput {
+ s.Key = &v
+ return s
+}
+
+// SetLegalHold sets the LegalHold field's value.
+func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput {
+ s.LegalHold = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput {
+ s.VersionId = &v
+ return s
+}
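+
+// Sketch under the usual placeholder assumptions: placing a Legal Hold on an
+// object. Status toggles between "ON" and "OFF".
+//
+//	_, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
+//		Bucket:    aws.String("examplebucket"),
+//		Key:       aws.String("example.txt"),
+//		LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("ON")},
+//	})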
+
+type PutObjectLegalHoldOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectLegalHoldOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectLegalHoldOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectLockConfigurationInput struct {
+ _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"`
+
+ // The bucket whose object lock configuration you want to create or replace.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The object lock configuration that you want to apply to the specified bucket.
+ ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // A token to allow Amazon S3 object lock to be enabled for an existing bucket.
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectLockConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectLockConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectLockConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectLockConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value.
+func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput {
+ s.ObjectLockConfiguration = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetToken sets the Token field's value.
+func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput {
+ s.Token = &v
+ return s
+}
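+
+// Hedged illustration (placeholder names): a default GOVERNANCE retention of
+// 30 days for new objects in a lock-enabled bucket.
+//
+//	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//			ObjectLockEnabled: aws.String("Enabled"),
+//			Rule: &s3.ObjectLockRule{
+//				DefaultRetention: &s3.DefaultRetention{
+//					Mode: aws.String("GOVERNANCE"),
+//					Days: aws.Int64(30),
+//				},
+//			},
+//		},
+//	})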
+
+type PutObjectLockConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectLockConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectLockConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the AWS KMS Encryption Context to use for object encryption.
+ // The value of this header is a base64-encoded UTF-8 string holding JSON with
+ // the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+type PutObjectRetentionInput struct {
+ _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"`
+
+ // The bucket that contains the object you want to apply this Object Retention
+ // configuration to.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates whether this operation should bypass Governance-mode restrictions.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // The key name for the object that you want to apply this Object Retention
+ // configuration to.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // The container element for the Object Retention configuration.
+ Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The version ID for the object that you want to apply this Object Retention
+ // configuration to.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectRetentionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectRetentionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectRetentionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectRetentionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value.
+func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput {
+ s.BypassGovernanceRetention = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRetention sets the Retention field's value.
+func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput {
+ s.Retention = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput {
+ s.VersionId = &v
+ return s
+}
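+
+// Sketch only (svc, names, and the date are placeholders): setting a
+// GOVERNANCE retention period on a single object version.
+//
+//	until := time.Date(2030, time.January, 1, 0, 0, 0, 0, time.UTC)
+//	_, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
+//		Bucket: aws.String("examplebucket"),
+//		Key:    aws.String("example.txt"),
+//		Retention: &s3.ObjectLockRetention{
+//			Mode:            aws.String("GOVERNANCE"),
+//			RetainUntilDate: aws.Time(until),
+//		},
+//	})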
+
+type PutObjectRetentionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectRetentionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectRetentionOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type PutObjectTaggingInput struct {
+ _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+ s.Tagging = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
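+
+// A minimal usage sketch (not part of the generated API): replacing an
+// object's tag set. Assumes an *S3 client named svc created elsewhere; the
+// bucket, key, and tag values are placeholders.
+//
+//    out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{
+//                {Key: aws.String("env"), Value: aws.String("dev")},
+//            },
+//        },
+//    })
+//    // On success, out.VersionId holds the tagged object version, if any.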
+
+type PutObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+type PutPublicAccessBlockInput struct {
+ _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
+ // want to set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The PublicAccessBlock configuration that you want to apply to this Amazon
+ // S3 bucket. You can enable the configuration options in any combination. For
+ // more information about when Amazon S3 considers a bucket or object public,
+ // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // PublicAccessBlockConfiguration is a required field
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation
+func (s PutPublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutPublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutPublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.PublicAccessBlockConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutPublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value.
+func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput {
+ s.PublicAccessBlockConfiguration = v
+ return s
+}
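+
+// A minimal usage sketch (not part of the generated API): enabling all four
+// PublicAccessBlock options; any combination is valid. Assumes an *S3 client
+// named svc created elsewhere; the bucket name is a placeholder.
+//
+//    _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//        Bucket: aws.String("example-bucket"),
+//        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//            BlockPublicAcls:       aws.Bool(true),
+//            BlockPublicPolicy:     aws.Bool(true),
+//            IgnorePublicAcls:      aws.Bool(true),
+//            RestrictPublicBuckets: aws.Bool(true),
+//        },
+//    })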
+
+type PutPublicAccessBlockOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutPublicAccessBlockOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutPublicAccessBlockOutput) GoString() string {
+ return s.String()
+}
+
+// Specifies the configuration for publishing messages to an Amazon Simple Queue
+// Service (Amazon SQS) queue when Amazon S3 detects specified events.
+type QueueConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+ // publishes a message when it detects events of the specified type.
+ //
+ // QueueArn is a required field
+ QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.QueueArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetQueueArn sets the QueueArn field's value.
+func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
+ s.QueueArn = &v
+ return s
+}
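+
+// A minimal usage sketch (not part of the generated API): wiring a
+// QueueConfiguration into a bucket notification configuration. Assumes an
+// *S3 client named svc created elsewhere; the bucket name and queue ARN are
+// placeholders.
+//
+//    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            QueueConfigurations: []*s3.QueueConfiguration{{
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//                QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
+//            }},
+//        },
+//    })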
+
+type QueueConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // The bucket event for which to send notifications.
+ //
+ // Deprecated: Event has been deprecated
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+ s.Queue = &v
+ return s
+}
+
+type RecordsEvent struct {
+ _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+ // A byte array containing one or more partial result records.
+ //
+ // Payload is automatically base64 encoded/decoded by the SDK.
+ Payload []byte `type:"blob"`
+}
+
+// String returns the string representation
+func (s RecordsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordsEvent) GoString() string {
+ return s.String()
+}
+
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+ s.Payload = v
+ return s
+}
+
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ s.Payload = make([]byte, len(msg.Payload))
+ copy(s.Payload, msg.Payload)
+ return nil
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use when redirecting requests. The default is the protocol that
+ // is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ ReplaceKeyPrefixWith *string `type:"string"`
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // request to error.html. Not required if one of the siblings is present. Can
+ // be present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+ return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+ s.HostName = &v
+ return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+ s.HttpRedirectCode = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+ s.Protocol = &v
+ return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+ s.ReplaceKeyPrefixWith = &v
+ return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+ s.ReplaceKeyWith = &v
+ return s
+}
+
+// Specifies the redirect behavior of all requests to a website endpoint of
+// an Amazon S3 bucket.
+type RedirectAllRequestsTo struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the host where requests are redirected.
+ //
+ // HostName is a required field
+ HostName *string `type:"string" required:"true"`
+
+ // Protocol to use when redirecting requests. The default is the protocol that
+ // is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+ if s.HostName == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+ s.HostName = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+ s.Protocol = &v
+ return s
+}
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the AWS Identity and Access Management
+ // (IAM) role that Amazon S3 assumes when replicating objects. For more information,
+ // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Role is a required field
+ Role *string `type:"string" required:"true"`
+
+ // A container for one or more replication rules. A replication configuration
+ // must have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // Rules is a required field
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+ if s.Role == nil {
+ invalidParams.Add(request.NewErrParamRequired("Role"))
+ }
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+ s.Role = &v
+ return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+ s.Rules = v
+ return s
+}
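+
+// A minimal usage sketch (not part of the generated API): a single filtered
+// replication rule. Assumes an *S3 client named svc created elsewhere; the
+// role ARN and bucket names are placeholders, and the service may require
+// additional fields (such as DeleteMarkerReplication, included here) for
+// rules that use Filter.
+//
+//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("source-bucket"),
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//            Rules: []*s3.ReplicationRule{{
+//                Status:                  aws.String("Enabled"),
+//                Priority:                aws.Int64(1),
+//                Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
+//                DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
+//                Destination:             &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+//            }},
+//        },
+//    })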
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 should replicate delete markers.
+ DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
+
+ // A container for information about the replication destination.
+ //
+ // Destination is a required field
+ Destination *Destination `type:"structure" required:"true"`
+
+ // A filter that identifies the subset of objects to which the replication rule
+ // applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+ Filter *ReplicationRuleFilter `type:"structure"`
+
+ // A unique identifier for the rule. The maximum value is 255 characters.
+ ID *string `type:"string"`
+
+ // An object key name prefix that identifies the object or objects to which
+ // the rule applies. The maximum prefix length is 1,024 characters. To include
+ // all objects in a bucket, specify an empty string.
+ //
+ // Deprecated: Prefix has been deprecated
+ Prefix *string `deprecated:"true" type:"string"`
+
+ // The priority associated with the rule. If you specify multiple rules in a
+ // replication configuration, Amazon S3 prioritizes the rules to prevent conflicts
+ // when filtering. If two or more rules identify the same object based on a
+ // specified filter, the rule with higher priority takes precedence. For example:
+ //
+ //    * The same object qualifies for prefix-based filter criteria if the
+ //    prefixes you specified in multiple rules overlap.
+ //
+ //    * The same object qualifies for tag-based filter criteria specified
+ //    in multiple rules.
+ //
+ // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+ // in the Amazon S3 Developer Guide.
+ Priority *int64 `type:"integer"`
+
+ // A container that describes additional filters for identifying the source
+ // objects that you want to replicate. You can choose to enable or disable the
+ // replication of these objects. Currently, Amazon S3 supports only the filter
+ // that you can specify for objects created with server-side encryption using
+ // an AWS KMS-Managed Key (SSE-KMS).
+ SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
+
+ // Specifies whether the rule is enabled.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+}
+
+// String returns the string representation
+func (s ReplicationRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.SourceSelectionCriteria != nil {
+ if err := s.SourceSelectionCriteria.Validate(); err != nil {
+ invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value.
+func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule {
+ s.DeleteMarkerReplication = v
+ return s
+}
+
+// SetDestination sets the Destination field's value.
+func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
+ s.Destination = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
+ s.Filter = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *ReplicationRule) SetID(v string) *ReplicationRule {
+ s.ID = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetPriority sets the Priority field's value.
+func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule {
+ s.Priority = &v
+ return s
+}
+
+// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value.
+func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule {
+ s.SourceSelectionCriteria = v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
+ s.Status = &v
+ return s
+}
+
+type ReplicationRuleAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationRuleAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRuleAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRuleAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator {
+ s.Tags = v
+ return s
+}
+
+// A filter that identifies the subset of objects to which the replication rule
+// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+type ReplicationRuleFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A container for specifying rule filters. The filters determine the subset
+ // of objects to which the rule applies. This element is required only if you
+ // specify more than one filter. For example:
+ //
+ // * If you specify both a Prefix and a Tag filter, wrap these filters in
+ // an And tag.
+ //
+ // * If you specify a filter based on multiple tags, wrap the Tag elements
+ // in an And tag.
+ And *ReplicationRuleAndOperator `type:"structure"`
+
+ // An object key name prefix that identifies the subset of objects to which
+ // the rule applies.
+ Prefix *string `type:"string"`
+
+ // A container for specifying a tag key and value.
+ //
+ // The rule applies only to objects that have the tag in their tag set.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReplicationRuleFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRuleFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRuleFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
+ s.Tag = v
+ return s
+}
+
+type RequestPaymentConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies who pays for the download and request fees.
+ //
+ // Payer is a required field
+ Payer *string `type:"string" required:"true" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s RequestPaymentConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestPaymentConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestPaymentConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"}
+ if s.Payer == nil {
+ invalidParams.Add(request.NewErrParamRequired("Payer"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPayer sets the Payer field's value.
+func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
+ s.Payer = &v
+ return s
+}
+
+type RequestProgress struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether periodic QueryProgress frames should be sent. Valid values:
+ // TRUE, FALSE. Default value: FALSE.
+ Enabled *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s RequestProgress) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestProgress) GoString() string {
+ return s.String()
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
+ s.Enabled = &v
+ return s
+}
+
+type RestoreObjectInput struct {
+ _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For documentation
+ // on downloading objects from Requester Pays buckets, see
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Container for restore job parameters.
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RestoreRequest != nil {
+ if err := s.RestoreRequest.Validate(); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *RestoreObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+ s.RestoreRequest = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+type RestoreObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Indicates the path in the provided S3 output location where Select results
+ // will be restored to.
+ RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestoreOutputPath sets the RestoreOutputPath field's value.
+func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput {
+ s.RestoreOutputPath = &v
+ return s
+}
+
+// Container for restore job parameters.
+type RestoreRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Lifetime of the active copy in days. Do not use with restores that specify
+ // OutputLocation.
+ Days *int64 `type:"integer"`
+
+ // The optional description for the job.
+ Description *string `type:"string"`
+
+ // Glacier-related parameters pertaining to this job. Do not use with restores
+ // that specify OutputLocation.
+ GlacierJobParameters *GlacierJobParameters `type:"structure"`
+
+ // Describes the location where the restore job's output is stored.
+ OutputLocation *OutputLocation `type:"structure"`
+
+ // Describes the parameters for Select job types.
+ SelectParameters *SelectParameters `type:"structure"`
+
+ // Glacier retrieval tier at which the restore will be processed.
+ Tier *string `type:"string" enum:"Tier"`
+
+ // Type of restore request.
+ Type *string `type:"string" enum:"RestoreRequestType"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+ if s.GlacierJobParameters != nil {
+ if err := s.GlacierJobParameters.Validate(); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.OutputLocation != nil {
+ if err := s.OutputLocation.Validate(); err != nil {
+ invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.SelectParameters != nil {
+ if err := s.SelectParameters.Validate(); err != nil {
+ invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+ s.Days = &v
+ return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *RestoreRequest) SetDescription(v string) *RestoreRequest {
+ s.Description = &v
+ return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+ s.GlacierJobParameters = v
+ return s
+}
+
+// SetOutputLocation sets the OutputLocation field's value.
+func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest {
+ s.OutputLocation = v
+ return s
+}
+
+// SetSelectParameters sets the SelectParameters field's value.
+func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest {
+ s.SelectParameters = v
+ return s
+}
+
+// SetTier sets the Tier field's value.
+func (s *RestoreRequest) SetTier(v string) *RestoreRequest {
+ s.Tier = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *RestoreRequest) SetType(v string) *RestoreRequest {
+ s.Type = &v
+ return s
+}
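+
+// A minimal usage sketch (not part of the generated API): restoring an
+// archived object for two days at the Standard retrieval tier. Assumes an
+// *S3 client named svc created elsewhere; the bucket and key are placeholders.
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("archived-key"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(2),
+//            GlacierJobParameters: &s3.GlacierJobParameters{
+//                Tier: aws.String(s3.TierStandard),
+//            },
+//        },
+//    })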
+
+// Specifies the redirect behavior and when a redirect is applied.
+type RoutingRule struct {
+ _ struct{} `type:"structure"`
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example: 1. If the request is for pages in the /docs
+ // folder, redirect to the /documents folder. 2. If the request results in an
+ // HTTP 4xx error, redirect the request to another host where you might process
+ // the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can specify a different error code to return.
+ //
+ // Redirect is a required field
+ Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+ if s.Redirect == nil {
+ invalidParams.Add(request.NewErrParamRequired("Redirect"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+ s.Condition = v
+ return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+ s.Redirect = v
+ return s
+}
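+
+// A minimal usage sketch (not part of the generated API): applying the
+// /docs -> /documents example from the Condition documentation above as a
+// website routing rule. Assumes an *S3 client named svc created elsewhere;
+// the bucket name is a placeholder.
+//
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"),
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            RoutingRules: []*s3.RoutingRule{{
+//                Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
+//                Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
+//            }},
+//        },
+//    })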
+
+// Specifies lifecycle rules for an Amazon S3 bucket. For more information,
+// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html)
+// in the Amazon Simple Storage Service API Reference.
+type Rule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an incomplete multipart upload
+ // that Amazon S3 will wait before permanently removing all parts of the upload.
+ // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon Simple Storage Service Developer Guide.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // Unique identifier for the rule. The value can't be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ // Container for the transition rule that describes when noncurrent objects
+ // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER,
+ // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
+ // is suspended), you can set this action to request that Amazon S3 transition
+ // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
+ // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
+ // lifetime.
+ NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+ // Object key prefix that identifies one or more objects to which this rule
+ // applies.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+
+ // If Enabled, the rule is currently being applied. If Disabled, the rule is
+ // not currently being applied.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ // Specifies when an object transitions to a specified storage class.
+ Transition *Transition `type:"structure"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Rule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Rule"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
+ s.Expiration = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Rule) SetID(v string) *Rule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
+func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
+ s.NoncurrentVersionTransition = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *Rule) SetPrefix(v string) *Rule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Rule) SetStatus(v string) *Rule {
+ s.Status = &v
+ return s
+}
+
+// SetTransition sets the Transition field's value.
+func (s *Rule) SetTransition(v *Transition) *Rule {
+ s.Transition = v
+ return s
+}
+
+// Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
+type SSEKMS struct {
+ _ struct{} `locationName:"SSE-KMS" type:"structure"`
+
+ // Specifies the ID of the AWS Key Management Service (KMS) master encryption
+ // key to use for encrypting Inventory reports.
+ //
+ // KeyId is a required field
+ KeyId *string `type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s SSEKMS) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSEKMS) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SSEKMS) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SSEKMS"}
+ if s.KeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *SSEKMS) SetKeyId(v string) *SSEKMS {
+ s.KeyId = &v
+ return s
+}
+
+// Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
+type SSES3 struct {
+ _ struct{} `locationName:"SSE-S3" type:"structure"`
+}
+
+// String returns the string representation
+func (s SSES3) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSES3) GoString() string {
+ return s.String()
+}
+
+// SelectObjectContentEventStream provides handling of EventStreams for
+// the SelectObjectContent API.
+//
+// Use this type to receive SelectObjectContentEventStream events. The events
+// can be read from the Events channel member.
+//
+// The events that can be received are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStream struct {
+ // Reader is the EventStream reader for the SelectObjectContentEventStream
+ // events. This value is automatically set by the SDK when the API call is
+ // made. Use this member when unit testing your code with the SDK to mock
+ // out the EventStream Reader.
+ //
+ // Must not be nil.
+ Reader SelectObjectContentEventStreamReader
+
+ // StreamCloser is the io.Closer for the EventStream connection. For HTTP
+ // EventStream this is the response Body. The stream will be closed when
+ // the Close method of the EventStream is called.
+ StreamCloser io.Closer
+}
+
+// Close closes the EventStream. This will also cause the Events channel to be
+// closed. You can use the closing of the Events channel to terminate your
+// application's read from the API's EventStream.
+//
+// Will close the underlying EventStream reader. For EventStream over HTTP
+// connection this will also close the HTTP connection.
+//
+// Close must be called when done using the EventStream API. Not calling Close
+// may result in resource leaks.
+func (es *SelectObjectContentEventStream) Close() (err error) {
+ es.Reader.Close()
+ return es.Err()
+}
+
+// Err returns any error that occurred while reading EventStream Events from
+// the service API's response. Returns nil if there were no errors.
+func (es *SelectObjectContentEventStream) Err() error {
+ if err := es.Reader.Err(); err != nil {
+ return err
+ }
+ es.StreamCloser.Close()
+
+ return nil
+}
+
+// Events returns a channel to read EventStream Events from the
+// SelectObjectContent API.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+ return es.Reader.Events()
+}
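+
+// A minimal consumption sketch (not part of the generated API): resp is
+// assumed to be the output of a successful SelectObjectContent call.
+//
+//    defer resp.EventStream.Close()
+//    for event := range resp.EventStream.Events() {
+//        switch e := event.(type) {
+//        case *s3.RecordsEvent:
+//            fmt.Print(string(e.Payload)) // partial result records
+//        case *s3.EndEvent:
+//            // the service finished sending results
+//        }
+//    }
+//    if err := resp.EventStream.Err(); err != nil {
+//        // handle a read error from the stream
+//    }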
+
+// SelectObjectContentEventStreamEvent groups together all EventStream
+// events read from the SelectObjectContent API.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStreamEvent interface {
+ eventSelectObjectContentEventStream()
+}
+
+// SelectObjectContentEventStreamReader provides the interface for reading EventStream
+// Events from the SelectObjectContent API. The
+// default implementation for this interface will be SelectObjectContentEventStream.
+//
+// The reader's Close method must allow multiple concurrent calls.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStreamReader interface {
+ // Returns a channel of events as they are read from the event stream.
+ Events() <-chan SelectObjectContentEventStreamEvent
+
+ // Close will close the underlying event stream reader. For event stream over
+ // HTTP this will also close the HTTP connection.
+ Close() error
+
+ // Returns any error that has occurred while reading from the event stream.
+ Err() error
+}
+
+type readSelectObjectContentEventStream struct {
+ eventReader *eventstreamapi.EventReader
+ stream chan SelectObjectContentEventStreamEvent
+ errVal atomic.Value
+
+ done chan struct{}
+ closeOnce sync.Once
+}
+
+func newReadSelectObjectContentEventStream(
+ reader io.ReadCloser,
+ unmarshalers request.HandlerList,
+ logger aws.Logger,
+ logLevel aws.LogLevelType,
+) *readSelectObjectContentEventStream {
+ r := &readSelectObjectContentEventStream{
+ stream: make(chan SelectObjectContentEventStreamEvent),
+ done: make(chan struct{}),
+ }
+
+ r.eventReader = eventstreamapi.NewEventReader(
+ reader,
+ protocol.HandlerPayloadUnmarshal{
+ Unmarshalers: unmarshalers,
+ },
+ r.unmarshalerForEventType,
+ )
+ r.eventReader.UseLogger(logger, logLevel)
+
+ return r
+}
+
+// Close will close the underlying event stream reader. For EventStream over
+// HTTP this will also close the HTTP connection.
+func (r *readSelectObjectContentEventStream) Close() error {
+ r.closeOnce.Do(r.safeClose)
+
+ return r.Err()
+}
+
+func (r *readSelectObjectContentEventStream) safeClose() {
+ close(r.done)
+ err := r.eventReader.Close()
+ if err != nil {
+ r.errVal.Store(err)
+ }
+}
+
+func (r *readSelectObjectContentEventStream) Err() error {
+ if v := r.errVal.Load(); v != nil {
+ return v.(error)
+ }
+
+ return nil
+}
+
+func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+ return r.stream
+}
+
+func (r *readSelectObjectContentEventStream) readEventStream() {
+ defer close(r.stream)
+
+ for {
+ event, err := r.eventReader.ReadEvent()
+ if err != nil {
+ if err == io.EOF {
+ return
+ }
+ select {
+ case <-r.done:
+ // If closed already ignore the error
+ return
+ default:
+ }
+ r.errVal.Store(err)
+ return
+ }
+
+ select {
+ case r.stream <- event.(SelectObjectContentEventStreamEvent):
+ case <-r.done:
+ return
+ }
+ }
+}
+
+func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
+ eventType string,
+) (eventstreamapi.Unmarshaler, error) {
+ switch eventType {
+ case "Cont":
+ return &ContinuationEvent{}, nil
+
+ case "End":
+ return &EndEvent{}, nil
+
+ case "Progress":
+ return &ProgressEvent{}, nil
+
+ case "Records":
+ return &RecordsEvent{}, nil
+
+ case "Stats":
+ return &StatsEvent{}, nil
+ default:
+ return nil, awserr.New(
+ request.ErrCodeSerialization,
+ fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType),
+ nil,
+ )
+ }
+}
+
+// Request to filter the contents of an Amazon S3 object based on a simple Structured
+// Query Language (SQL) statement. In the request, along with the SQL expression,
+// you must specify a data serialization format (JSON or CSV) of the object.
+// Amazon S3 uses this to parse object data into records. It returns only records
+// that match the specified SQL expression. You must also specify the data serialization
+// format for the response. For more information, see the S3 Select API Documentation
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
+type SelectObjectContentInput struct {
+ _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+ // The S3 bucket.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The expression that is used to query the object.
+ //
+ // Expression is a required field
+ Expression *string `type:"string" required:"true"`
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // ExpressionType is a required field
+ ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
+
+ // Describes the format of the data in the object that is being queried.
+ //
+ // InputSerialization is a required field
+ InputSerialization *InputSerialization `type:"structure" required:"true"`
+
+ // The object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Describes the format of the data that you want Amazon S3 to return in response.
+ //
+ // OutputSerialization is a required field
+ OutputSerialization *OutputSerialization `type:"structure" required:"true"`
+
+ // Specifies if periodic request progress information should be enabled.
+ RequestProgress *RequestProgress `type:"structure"`
+
+ // The SSE Algorithm used to encrypt the object. For more information, see Server-Side
+ // Encryption (Using Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // The SSE Customer Key. For more information, see Server-Side Encryption (Using
+ // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // The SSE Customer Key MD5. For more information, see Server-Side Encryption
+ // (Using Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+}
+
+// String returns the string representation
+func (s SelectObjectContentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SelectObjectContentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectObjectContentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Expression == nil {
+ invalidParams.Add(request.NewErrParamRequired("Expression"))
+ }
+ if s.ExpressionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
+ }
+ if s.InputSerialization == nil {
+ invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.OutputSerialization == nil {
+ invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *SelectObjectContentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpression sets the Expression field's value.
+func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput {
+ s.Expression = &v
+ return s
+}
+
+// SetExpressionType sets the ExpressionType field's value.
+func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput {
+ s.ExpressionType = &v
+ return s
+}
+
+// SetInputSerialization sets the InputSerialization field's value.
+func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput {
+ s.InputSerialization = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput {
+ s.Key = &v
+ return s
+}
+
+// SetOutputSerialization sets the OutputSerialization field's value.
+func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput {
+ s.OutputSerialization = v
+ return s
+}
+
+// SetRequestProgress sets the RequestProgress field's value.
+func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput {
+ s.RequestProgress = v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *SelectObjectContentInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
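+
+// A minimal usage sketch (not part of the generated API): selecting rows
+// from a CSV object. Assumes an *S3 client named svc created elsewhere; the
+// bucket, key, and query are placeholders.
+//
+//    resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//        Bucket:              aws.String("example-bucket"),
+//        Key:                 aws.String("data.csv"),
+//        Expression:          aws.String("SELECT * FROM S3Object"),
+//        ExpressionType:      aws.String(s3.ExpressionTypeSql),
+//        InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
+//        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+//    })
+//    // On success, read resp.EventStream as shown in the sketch above.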
+
+type SelectObjectContentOutput struct {
+ _ struct{} `type:"structure" payload:"Payload"`
+
+ // Use EventStream to read the API's event stream.
+ EventStream *SelectObjectContentEventStream `type:"structure"`
+}
+
+// String returns the string representation
+func (s SelectObjectContentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SelectObjectContentOutput) GoString() string {
+ return s.String()
+}
+
+// SetEventStream sets the EventStream field's value.
+func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput {
+ s.EventStream = v
+ return s
+}
+
+func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) {
+ if r.Error != nil {
+ return
+ }
+ reader := newReadSelectObjectContentEventStream(
+ r.HTTPResponse.Body,
+ r.Handlers.UnmarshalStream,
+ r.Config.Logger,
+ r.Config.LogLevel.Value(),
+ )
+ go reader.readEventStream()
+
+ eventStream := &SelectObjectContentEventStream{
+ StreamCloser: r.HTTPResponse.Body,
+ Reader: reader,
+ }
+ s.EventStream = eventStream
+}
+
+// Describes the parameters for Select job types.
+type SelectParameters struct {
+ _ struct{} `type:"structure"`
+
+ // The expression that is used to query the object.
+ //
+ // Expression is a required field
+ Expression *string `type:"string" required:"true"`
+
+ // The type of the provided expression (e.g., SQL).
+ //
+ // ExpressionType is a required field
+ ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
+
+ // Describes the serialization format of the object.
+ //
+ // InputSerialization is a required field
+ InputSerialization *InputSerialization `type:"structure" required:"true"`
+
+ // Describes how the results of the Select job are serialized.
+ //
+ // OutputSerialization is a required field
+ OutputSerialization *OutputSerialization `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s SelectParameters) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SelectParameters) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectParameters) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SelectParameters"}
+ if s.Expression == nil {
+ invalidParams.Add(request.NewErrParamRequired("Expression"))
+ }
+ if s.ExpressionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
+ }
+ if s.InputSerialization == nil {
+ invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
+ }
+ if s.OutputSerialization == nil {
+ invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetExpression sets the Expression field's value.
+func (s *SelectParameters) SetExpression(v string) *SelectParameters {
+ s.Expression = &v
+ return s
+}
+
+// SetExpressionType sets the ExpressionType field's value.
+func (s *SelectParameters) SetExpressionType(v string) *SelectParameters {
+ s.ExpressionType = &v
+ return s
+}
+
+// SetInputSerialization sets the InputSerialization field's value.
+func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters {
+ s.InputSerialization = v
+ return s
+}
+
+// SetOutputSerialization sets the OutputSerialization field's value.
+func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters {
+ s.OutputSerialization = v
+ return s
+}
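+
+// Illustrative sketch (not part of the generated API): building a
+// SelectParameters value with the fluent setters above, e.g. for the SELECT
+// restore type of a RestoreRequest. The SQL text is an arbitrary example;
+// CSVInput and CSVOutput are the serialization shapes defined elsewhere in
+// this package.
+//
+//    params := (&s3.SelectParameters{}).
+//        SetExpressionType(s3.ExpressionTypeSql).
+//        SetExpression("SELECT s.Id FROM S3Object s").
+//        SetInputSerialization(&s3.InputSerialization{CSV: &s3.CSVInput{}}).
+//        SetOutputSerialization(&s3.OutputSerialization{CSV: &s3.CSVOutput{}})
+//    if err := params.Validate(); err != nil {
+//        return err
+//    }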
+
+// Describes the default server-side encryption to apply to new objects in the
+// bucket. If a PUT Object request doesn't specify any server-side encryption,
+// this default encryption will be applied. For more information, see PUT Bucket
+// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html)
+// in the Amazon Simple Storage Service API Reference.
+type ServerSideEncryptionByDefault struct {
+ _ struct{} `type:"structure"`
+
+ // KMS master key ID to use for the default encryption. This parameter is allowed
+ // if and only if SSEAlgorithm is set to aws:kms.
+ KMSMasterKeyID *string `type:"string" sensitive:"true"`
+
+ // Server-side encryption algorithm to use for the default encryption.
+ //
+ // SSEAlgorithm is a required field
+ SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s ServerSideEncryptionByDefault) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServerSideEncryptionByDefault) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ServerSideEncryptionByDefault) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"}
+ if s.SSEAlgorithm == nil {
+ invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKMSMasterKeyID sets the KMSMasterKeyID field's value.
+func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault {
+ s.KMSMasterKeyID = &v
+ return s
+}
+
+// SetSSEAlgorithm sets the SSEAlgorithm field's value.
+func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault {
+ s.SSEAlgorithm = &v
+ return s
+}
+
+// Specifies the default server-side encryption configuration.
+type ServerSideEncryptionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Container for information about a particular server-side encryption configuration
+ // rule.
+ //
+ // Rules is a required field
+ Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ServerSideEncryptionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServerSideEncryptionConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ServerSideEncryptionConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Specifies the default server-side encryption configuration.
+type ServerSideEncryptionRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the default server-side encryption to apply to new objects in the
+ // bucket. If a PUT Object request doesn't specify any server-side encryption,
+ // this default encryption will be applied.
+ ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"`
+}
+
+// String returns the string representation
+func (s ServerSideEncryptionRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServerSideEncryptionRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ServerSideEncryptionRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"}
+ if s.ApplyServerSideEncryptionByDefault != nil {
+ if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil {
+ invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value.
+func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule {
+ s.ApplyServerSideEncryptionByDefault = v
+ return s
+}
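+
+// Illustrative sketch (not part of the generated API): composing the types
+// above into a default-encryption configuration. ServerSideEncryptionAwsKms
+// is assumed from the ServerSideEncryption enum; the KMS key ARN is a
+// placeholder.
+//
+//    cfg := &s3.ServerSideEncryptionConfiguration{
+//        Rules: []*s3.ServerSideEncryptionRule{{
+//            ApplyServerSideEncryptionByDefault: (&s3.ServerSideEncryptionByDefault{}).
+//                SetSSEAlgorithm(s3.ServerSideEncryptionAwsKms).
+//                SetKMSMasterKeyID("arn:aws:kms:us-east-1:111122223333:key/example"),
+//        }},
+//    }
+//    if err := cfg.Validate(); err != nil {
+//        return err
+//    }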
+
+// A container that describes additional filters for identifying the source
+// objects that you want to replicate. You can choose to enable or disable the
+// replication of these objects. Currently, Amazon S3 supports only the filter
+// that you can specify for objects created with server-side encryption using
+// an AWS KMS-Managed Key (SSE-KMS).
+type SourceSelectionCriteria struct {
+ _ struct{} `type:"structure"`
+
+ // A container for filter information for the selection of Amazon S3 objects
+ // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication
+ // configuration, this element is required.
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
+}
+
+// String returns the string representation
+func (s SourceSelectionCriteria) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceSelectionCriteria) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SourceSelectionCriteria) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"}
+ if s.SseKmsEncryptedObjects != nil {
+ if err := s.SseKmsEncryptedObjects.Validate(); err != nil {
+ invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value.
+func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria {
+ s.SseKmsEncryptedObjects = v
+ return s
+}
+
+// A container for filter information for the selection of S3 objects encrypted
+// with AWS KMS.
+type SseKmsEncryptedObjects struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 replicates objects created with server-side encryption
+ // using an AWS KMS-managed key.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"`
+}
+
+// String returns the string representation
+func (s SseKmsEncryptedObjects) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SseKmsEncryptedObjects) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SseKmsEncryptedObjects) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects {
+ s.Status = &v
+ return s
+}
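+
+// Illustrative sketch (not part of the generated API): replication source
+// selection that matches only SSE-KMS-encrypted objects, using the two types
+// above.
+//
+//    criteria := (&s3.SourceSelectionCriteria{}).SetSseKmsEncryptedObjects(
+//        (&s3.SseKmsEncryptedObjects{}).
+//            SetStatus(s3.SseKmsEncryptedObjectsStatusEnabled))
+//    if err := criteria.Validate(); err != nil {
+//        return err
+//    }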
+
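+// Stats describes the bytes scanned, processed, and returned by an Amazon
+// S3 Select operation.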
+type Stats struct {
+ _ struct{} `type:"structure"`
+
+ // The total number of uncompressed object bytes processed.
+ BytesProcessed *int64 `type:"long"`
+
+ // The total number of bytes of records payload data returned.
+ BytesReturned *int64 `type:"long"`
+
+ // The total number of object bytes scanned.
+ BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s Stats) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Stats) GoString() string {
+ return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+ s.BytesProcessed = &v
+ return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+ s.BytesReturned = &v
+ return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+ s.BytesScanned = &v
+ return s
+}
+
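+// StatsEvent wraps the Stats details delivered on the
+// SelectObjectContentEventStream.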
+type StatsEvent struct {
+ _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+ // The Stats event details.
+ Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s StatsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StatsEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+ s.Details = v
+ return s
+}
+
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.
+type StorageClassAnalysis struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how data related to the storage class analysis for an Amazon S3
+ // bucket should be exported.
+ DataExport *StorageClassAnalysisDataExport `type:"structure"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysis) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysis) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysis) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"}
+ if s.DataExport != nil {
+ if err := s.DataExport.Validate(); err != nil {
+ invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDataExport sets the DataExport field's value.
+func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis {
+ s.DataExport = v
+ return s
+}
+
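+// StorageClassAnalysisDataExport describes where, and in which schema
+// version, data from a storage class analysis is exported.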
+type StorageClassAnalysisDataExport struct {
+ _ struct{} `type:"structure"`
+
+ // The place to store the data for an analysis.
+ //
+ // Destination is a required field
+ Destination *AnalyticsExportDestination `type:"structure" required:"true"`
+
+ // The version of the output schema to use when exporting data. Must be V_1.
+ //
+ // OutputSchemaVersion is a required field
+ OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysisDataExport) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysisDataExport) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysisDataExport) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.OutputSchemaVersion == nil {
+ invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
+ s.Destination = v
+ return s
+}
+
+// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
+func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
+ s.OutputSchemaVersion = &v
+ return s
+}
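+
+// Illustrative sketch (not part of the generated API): wiring a data export
+// into a StorageClassAnalysis. AnalyticsExportDestination and
+// AnalyticsS3BucketDestination are defined elsewhere in this package; the
+// bucket ARN is a placeholder.
+//
+//    sca := (&s3.StorageClassAnalysis{}).SetDataExport(
+//        (&s3.StorageClassAnalysisDataExport{}).
+//            SetOutputSchemaVersion(s3.StorageClassAnalysisSchemaVersionV1).
+//            SetDestination(&s3.AnalyticsExportDestination{
+//                S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//                    Bucket: aws.String("arn:aws:s3:::analysis-bucket"),
+//                    Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//                },
+//            }))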
+
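+// Tag is a single key/value pair applied to a bucket or object.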
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the tag.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Value of the tag.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
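+// Tagging contains a collection of tags in a TagSet.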
+type Tagging struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s Tagging) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tagging) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tagging) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tagging"}
+ if s.TagSet == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagSet"))
+ }
+ if s.TagSet != nil {
+ for i, v := range s.TagSet {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
+ s.TagSet = v
+ return s
+}
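+
+// Illustrative sketch (not part of the generated API): building and
+// validating a Tagging value, e.g. for a PutObjectTagging request. The tag
+// key and value are arbitrary examples.
+//
+//    tagging := &s3.Tagging{
+//        TagSet: []*s3.Tag{
+//            (&s3.Tag{}).SetKey("env").SetValue("prod"),
+//        },
+//    }
+//    if err := tagging.Validate(); err != nil {
+//        return err
+//    }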
+
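+// TargetGrant assigns bucket-logging permissions to a grantee.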
+type TargetGrant struct {
+ _ struct{} `type:"structure"`
+
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Logging permissions assigned to the Grantee for the bucket.
+ Permission *string `type:"string" enum:"BucketLogsPermission"`
+}
+
+// String returns the string representation
+func (s TargetGrant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TargetGrant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetGrant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *TargetGrant) SetPermission(v string) *TargetGrant {
+ s.Permission = &v
+ return s
+}
+
+// A container for specifying the configuration for publication of messages
+// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3
+// detects specified events.
+type TopicConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon S3 bucket event about which to send notifications. For more information,
+ // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ //
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3
+ // publishes a message when it detects events of the specified type.
+ //
+ // TopicArn is a required field
+ TopicArn *string `locationName:"Topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TopicConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.TopicArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("TopicArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetTopicArn sets the TopicArn field's value.
+func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
+ s.TopicArn = &v
+ return s
+}
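+
+// Illustrative sketch (not part of the generated API): a TopicConfiguration
+// that publishes all object-created events to an SNS topic. The topic ARN is
+// a placeholder; EventS3ObjectCreated is defined in the Event enum below.
+//
+//    tc := (&s3.TopicConfiguration{}).
+//        SetTopicArn("arn:aws:sns:us-east-1:111122223333:my-topic").
+//        SetEvents([]*string{aws.String(s3.EventS3ObjectCreated)})
+//    if err := tc.Validate(); err != nil {
+//        return err
+//    }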
+
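+// TopicConfigurationDeprecated is the deprecated form of TopicConfiguration.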
+type TopicConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ //
+ // Deprecated: Event has been deprecated
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // An optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic to which Amazon S3 will publish a message to report the
+ // specified events for the bucket.
+ Topic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TopicConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetTopic sets the Topic field's value.
+func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated {
+ s.Topic = &v
+ return s
+}
+
+// Specifies when an object transitions to a specified storage class.
+type Transition struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates when objects are transitioned to the specified storage class. The
+ // date value must be in ISO 8601 format. The time is always midnight UTC.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the number of days after creation when objects are transitioned
+ // to the specified storage class. The value must be a positive integer.
+ Days *int64 `type:"integer"`
+
+ // The storage class to which you want the object to transition.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s Transition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Transition) GoString() string {
+ return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *Transition) SetDate(v time.Time) *Transition {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *Transition) SetDays(v int64) *Transition {
+ s.Days = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Transition) SetStorageClass(v string) *Transition {
+ s.StorageClass = &v
+ return s
+}
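+
+// Illustrative sketch (not part of the generated API): a lifecycle Transition
+// that moves objects to GLACIER 30 days after creation.
+// TransitionStorageClassGlacier is assumed from the TransitionStorageClass
+// enum this field references.
+//
+//    t := (&s3.Transition{}).
+//        SetDays(30).
+//        SetStorageClass(s3.TransitionStorageClassGlacier)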
+
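+// UploadPartCopyInput contains the parameters for an UploadPartCopy request.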
+type UploadPartCopyInput struct {
+ _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different from the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
+
+ // The range of bytes to copy from the source object. The range value must use
+ // the form bytes=first-last, where the first and last are the zero-based byte
+ // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+ // first ten bytes of the source. You can copy a range only if the source object
+ // is greater than 5 MB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceRange sets the CopySourceRange field's value.
+func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
+ s.CopySourceRange = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) {
+ if s.CopySourceSSECustomerKey == nil {
+ return v
+ }
+ return *s.CopySourceSSECustomerKey
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *UploadPartCopyInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
+ s.UploadId = &v
+ return s
+}
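+
+// Illustrative sketch (not part of the generated API): copying the first
+// 5 MiB of an existing object as part 1 of a multipart upload. The svc
+// client, bucket and key names, and uploadID are hypothetical placeholders.
+//
+//    out, err := svc.UploadPartCopy((&s3.UploadPartCopyInput{}).
+//        SetBucket("dest-bucket").
+//        SetKey("dest-key").
+//        SetCopySource("src-bucket/src-key").
+//        SetCopySourceRange("bytes=0-5242879").
+//        SetPartNumber(1).
+//        SetUploadId(uploadID))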
+
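+// UploadPartCopyOutput contains the response from an UploadPartCopy request.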
+type UploadPartCopyOutput struct {
+ _ struct{} `type:"structure" payload:"CopyPartResult"`
+
+ CopyPartResult *CopyPartResult `type:"structure"`
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+ return s.String()
+}
+
+// SetCopyPartResult sets the CopyPartResult field's value.
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput {
+ s.CopyPartResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
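+// UploadPartInput contains the parameters for an UploadPart request.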
+type UploadPartInput struct {
+ _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the part being uploaded. This is a positive integer between 1
+ // and 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // UploadId is a required field
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBody sets the Body field's value.
+func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *UploadPartInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput {
+ s.ContentMD5 = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+func (s *UploadPartInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
+ s.UploadId = &v
+ return s
+}
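+
+// Illustrative sketch (not part of the generated API): uploading one part of
+// a multipart upload from an in-memory buffer; bytes.NewReader satisfies the
+// io.ReadSeeker that Body requires. The svc client, names, partData, and
+// uploadID are hypothetical placeholders.
+//
+//    out, err := svc.UploadPart((&s3.UploadPartInput{}).
+//        SetBucket("my-bucket").
+//        SetKey("my-key").
+//        SetUploadId(uploadID).
+//        SetPartNumber(1).
+//        SetBody(bytes.NewReader(partData)))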
+
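+// UploadPartOutput contains the response from an UploadPart request.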
+type UploadPartOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartOutput) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Describes the versioning state of an Amazon S3 bucket. For more information,
+// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+// in the Amazon Simple Storage Service API Reference.
+type VersioningConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s VersioningConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VersioningConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
+ s.Status = &v
+ return s
+}
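+
+// Illustrative sketch (not part of the generated API): enabling versioning on
+// a bucket with this configuration. PutBucketVersioningInput and the svc
+// client are assumed from the surrounding package; the bucket name is a
+// placeholder.
+//
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//        VersioningConfiguration: (&s3.VersioningConfiguration{}).
+//            SetStatus(s3.BucketVersioningStatusEnabled),
+//    })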
+
+// Specifies website configuration parameters for an Amazon S3 bucket.
+type WebsiteConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the error document for the website.
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ // The name of the index document for the website.
+ IndexDocument *IndexDocument `type:"structure"`
+
+ // The redirect behavior for every request to this bucket's website endpoint.
+ //
+ // If you specify this property, you can't specify any other property.
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s WebsiteConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WebsiteConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
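+
+// Illustrative sketch (not part of the generated API): a minimal static
+// website configuration. IndexDocument's Suffix and ErrorDocument's Key
+// fields are assumed from their definitions elsewhere in this package.
+//
+//    cfg := &s3.WebsiteConfiguration{
+//        IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//        ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//    }
+//    if err := cfg.Validate(); err != nil {
+//        return err
+//    }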
+
+const (
+ // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value
+ AnalyticsS3ExportFileFormatCsv = "CSV"
+)
+
+const (
+ // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value
+ BucketAccelerateStatusEnabled = "Enabled"
+
+ // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value
+ BucketAccelerateStatusSuspended = "Suspended"
+)
+
+const (
+ // BucketCannedACLPrivate is a BucketCannedACL enum value
+ BucketCannedACLPrivate = "private"
+
+ // BucketCannedACLPublicRead is a BucketCannedACL enum value
+ BucketCannedACLPublicRead = "public-read"
+
+ // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value
+ BucketCannedACLPublicReadWrite = "public-read-write"
+
+ // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value
+ BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+const (
+ // BucketLocationConstraintEu is a BucketLocationConstraint enum value
+ BucketLocationConstraintEu = "EU"
+
+ // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuWest1 = "eu-west-1"
+
+ // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest1 = "us-west-1"
+
+ // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintUsWest2 = "us-west-2"
+
+ // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSouth1 = "ap-south-1"
+
+ // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+
+ // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+
+ // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+
+ // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintSaEast1 = "sa-east-1"
+
+ // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintCnNorth1 = "cn-north-1"
+
+ // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+ // BucketLogsPermissionFullControl is a BucketLogsPermission enum value
+ BucketLogsPermissionFullControl = "FULL_CONTROL"
+
+ // BucketLogsPermissionRead is a BucketLogsPermission enum value
+ BucketLogsPermissionRead = "READ"
+
+ // BucketLogsPermissionWrite is a BucketLogsPermission enum value
+ BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+ // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
+ BucketVersioningStatusEnabled = "Enabled"
+
+ // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
+ BucketVersioningStatusSuspended = "Suspended"
+)
+
+const (
+ // CompressionTypeNone is a CompressionType enum value
+ CompressionTypeNone = "NONE"
+
+ // CompressionTypeGzip is a CompressionType enum value
+ CompressionTypeGzip = "GZIP"
+
+ // CompressionTypeBzip2 is a CompressionType enum value
+ CompressionTypeBzip2 = "BZIP2"
+)
+
+const (
+ // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
+ DeleteMarkerReplicationStatusEnabled = "Enabled"
+
+ // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value
+ DeleteMarkerReplicationStatusDisabled = "Disabled"
+)
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, an XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+ // EncodingTypeUrl is a EncodingType enum value
+ EncodingTypeUrl = "url"
+)
+
+// The bucket event for which to send notifications.
+const (
+ // EventS3ReducedRedundancyLostObject is a Event enum value
+ EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+
+ // EventS3ObjectCreated is a Event enum value
+ EventS3ObjectCreated = "s3:ObjectCreated:*"
+
+ // EventS3ObjectCreatedPut is a Event enum value
+ EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
+
+ // EventS3ObjectCreatedPost is a Event enum value
+ EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
+
+ // EventS3ObjectCreatedCopy is a Event enum value
+ EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+
+ // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value
+ EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+
+ // EventS3ObjectRemoved is a Event enum value
+ EventS3ObjectRemoved = "s3:ObjectRemoved:*"
+
+ // EventS3ObjectRemovedDelete is a Event enum value
+ EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+
+ // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value
+ EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+
+ // EventS3ObjectRestorePost is a Event enum value
+ EventS3ObjectRestorePost = "s3:ObjectRestore:Post"
+
+ // EventS3ObjectRestoreCompleted is a Event enum value
+ EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed"
+)
+
+const (
+ // ExpirationStatusEnabled is a ExpirationStatus enum value
+ ExpirationStatusEnabled = "Enabled"
+
+ // ExpirationStatusDisabled is a ExpirationStatus enum value
+ ExpirationStatusDisabled = "Disabled"
+)
+
+const (
+ // ExpressionTypeSql is a ExpressionType enum value
+ ExpressionTypeSql = "SQL"
+)
+
+const (
+ // FileHeaderInfoUse is a FileHeaderInfo enum value
+ FileHeaderInfoUse = "USE"
+
+ // FileHeaderInfoIgnore is a FileHeaderInfo enum value
+ FileHeaderInfoIgnore = "IGNORE"
+
+ // FileHeaderInfoNone is a FileHeaderInfo enum value
+ FileHeaderInfoNone = "NONE"
+)
+
+const (
+ // FilterRuleNamePrefix is a FilterRuleName enum value
+ FilterRuleNamePrefix = "prefix"
+
+ // FilterRuleNameSuffix is a FilterRuleName enum value
+ FilterRuleNameSuffix = "suffix"
+)
+
+const (
+ // InventoryFormatCsv is a InventoryFormat enum value
+ InventoryFormatCsv = "CSV"
+
+ // InventoryFormatOrc is a InventoryFormat enum value
+ InventoryFormatOrc = "ORC"
+
+ // InventoryFormatParquet is a InventoryFormat enum value
+ InventoryFormatParquet = "Parquet"
+)
+
+const (
+ // InventoryFrequencyDaily is a InventoryFrequency enum value
+ InventoryFrequencyDaily = "Daily"
+
+ // InventoryFrequencyWeekly is a InventoryFrequency enum value
+ InventoryFrequencyWeekly = "Weekly"
+)
+
+const (
+ // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value
+ InventoryIncludedObjectVersionsAll = "All"
+
+ // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value
+ InventoryIncludedObjectVersionsCurrent = "Current"
+)
+
+const (
+ // InventoryOptionalFieldSize is an InventoryOptionalField enum value
+ InventoryOptionalFieldSize = "Size"
+
+ // InventoryOptionalFieldLastModifiedDate is an InventoryOptionalField enum value
+ InventoryOptionalFieldLastModifiedDate = "LastModifiedDate"
+
+ // InventoryOptionalFieldStorageClass is an InventoryOptionalField enum value
+ InventoryOptionalFieldStorageClass = "StorageClass"
+
+ // InventoryOptionalFieldEtag is an InventoryOptionalField enum value
+ InventoryOptionalFieldEtag = "ETag"
+
+ // InventoryOptionalFieldIsMultipartUploaded is an InventoryOptionalField enum value
+ InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded"
+
+ // InventoryOptionalFieldReplicationStatus is an InventoryOptionalField enum value
+ InventoryOptionalFieldReplicationStatus = "ReplicationStatus"
+
+ // InventoryOptionalFieldEncryptionStatus is an InventoryOptionalField enum value
+ InventoryOptionalFieldEncryptionStatus = "EncryptionStatus"
+
+ // InventoryOptionalFieldObjectLockRetainUntilDate is an InventoryOptionalField enum value
+ InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate"
+
+ // InventoryOptionalFieldObjectLockMode is an InventoryOptionalField enum value
+ InventoryOptionalFieldObjectLockMode = "ObjectLockMode"
+
+ // InventoryOptionalFieldObjectLockLegalHoldStatus is an InventoryOptionalField enum value
+ InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus"
+)
+
+const (
+ // JSONTypeDocument is a JSONType enum value
+ JSONTypeDocument = "DOCUMENT"
+
+ // JSONTypeLines is a JSONType enum value
+ JSONTypeLines = "LINES"
+)
+
+const (
+ // MFADeleteEnabled is an MFADelete enum value
+ MFADeleteEnabled = "Enabled"
+
+ // MFADeleteDisabled is an MFADelete enum value
+ MFADeleteDisabled = "Disabled"
+)
+
+const (
+ // MFADeleteStatusEnabled is an MFADeleteStatus enum value
+ MFADeleteStatusEnabled = "Enabled"
+
+ // MFADeleteStatusDisabled is an MFADeleteStatus enum value
+ MFADeleteStatusDisabled = "Disabled"
+)
+
+const (
+ // MetadataDirectiveCopy is a MetadataDirective enum value
+ MetadataDirectiveCopy = "COPY"
+
+ // MetadataDirectiveReplace is a MetadataDirective enum value
+ MetadataDirectiveReplace = "REPLACE"
+)
+
+const (
+ // ObjectCannedACLPrivate is an ObjectCannedACL enum value
+ ObjectCannedACLPrivate = "private"
+
+ // ObjectCannedACLPublicRead is an ObjectCannedACL enum value
+ ObjectCannedACLPublicRead = "public-read"
+
+ // ObjectCannedACLPublicReadWrite is an ObjectCannedACL enum value
+ ObjectCannedACLPublicReadWrite = "public-read-write"
+
+ // ObjectCannedACLAuthenticatedRead is an ObjectCannedACL enum value
+ ObjectCannedACLAuthenticatedRead = "authenticated-read"
+
+ // ObjectCannedACLAwsExecRead is an ObjectCannedACL enum value
+ ObjectCannedACLAwsExecRead = "aws-exec-read"
+
+ // ObjectCannedACLBucketOwnerRead is an ObjectCannedACL enum value
+ ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+
+ // ObjectCannedACLBucketOwnerFullControl is an ObjectCannedACL enum value
+ ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+const (
+ // ObjectLockEnabledEnabled is an ObjectLockEnabled enum value
+ ObjectLockEnabledEnabled = "Enabled"
+)
+
+const (
+ // ObjectLockLegalHoldStatusOn is an ObjectLockLegalHoldStatus enum value
+ ObjectLockLegalHoldStatusOn = "ON"
+
+ // ObjectLockLegalHoldStatusOff is an ObjectLockLegalHoldStatus enum value
+ ObjectLockLegalHoldStatusOff = "OFF"
+)
+
+const (
+ // ObjectLockModeGovernance is an ObjectLockMode enum value
+ ObjectLockModeGovernance = "GOVERNANCE"
+
+ // ObjectLockModeCompliance is an ObjectLockMode enum value
+ ObjectLockModeCompliance = "COMPLIANCE"
+)
+
+const (
+ // ObjectLockRetentionModeGovernance is an ObjectLockRetentionMode enum value
+ ObjectLockRetentionModeGovernance = "GOVERNANCE"
+
+ // ObjectLockRetentionModeCompliance is an ObjectLockRetentionMode enum value
+ ObjectLockRetentionModeCompliance = "COMPLIANCE"
+)
+
+const (
+ // ObjectStorageClassStandard is an ObjectStorageClass enum value
+ ObjectStorageClassStandard = "STANDARD"
+
+ // ObjectStorageClassReducedRedundancy is an ObjectStorageClass enum value
+ ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+ // ObjectStorageClassGlacier is an ObjectStorageClass enum value
+ ObjectStorageClassGlacier = "GLACIER"
+
+ // ObjectStorageClassStandardIa is an ObjectStorageClass enum value
+ ObjectStorageClassStandardIa = "STANDARD_IA"
+
+ // ObjectStorageClassOnezoneIa is an ObjectStorageClass enum value
+ ObjectStorageClassOnezoneIa = "ONEZONE_IA"
+
+ // ObjectStorageClassIntelligentTiering is an ObjectStorageClass enum value
+ ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+ // ObjectStorageClassDeepArchive is an ObjectStorageClass enum value
+ ObjectStorageClassDeepArchive = "DEEP_ARCHIVE"
+)
+
+const (
+ // ObjectVersionStorageClassStandard is an ObjectVersionStorageClass enum value
+ ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+const (
+ // OwnerOverrideDestination is an OwnerOverride enum value
+ OwnerOverrideDestination = "Destination"
+)
+
+const (
+ // PayerRequester is a Payer enum value
+ PayerRequester = "Requester"
+
+ // PayerBucketOwner is a Payer enum value
+ PayerBucketOwner = "BucketOwner"
+)
+
+const (
+ // PermissionFullControl is a Permission enum value
+ PermissionFullControl = "FULL_CONTROL"
+
+ // PermissionWrite is a Permission enum value
+ PermissionWrite = "WRITE"
+
+ // PermissionWriteAcp is a Permission enum value
+ PermissionWriteAcp = "WRITE_ACP"
+
+ // PermissionRead is a Permission enum value
+ PermissionRead = "READ"
+
+ // PermissionReadAcp is a Permission enum value
+ PermissionReadAcp = "READ_ACP"
+)
+
+const (
+ // ProtocolHttp is a Protocol enum value
+ ProtocolHttp = "http"
+
+ // ProtocolHttps is a Protocol enum value
+ ProtocolHttps = "https"
+)
+
+const (
+ // QuoteFieldsAlways is a QuoteFields enum value
+ QuoteFieldsAlways = "ALWAYS"
+
+ // QuoteFieldsAsneeded is a QuoteFields enum value
+ QuoteFieldsAsneeded = "ASNEEDED"
+)
+
+const (
+ // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
+ ReplicationRuleStatusEnabled = "Enabled"
+
+ // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value
+ ReplicationRuleStatusDisabled = "Disabled"
+)
+
+const (
+ // ReplicationStatusComplete is a ReplicationStatus enum value
+ ReplicationStatusComplete = "COMPLETE"
+
+ // ReplicationStatusPending is a ReplicationStatus enum value
+ ReplicationStatusPending = "PENDING"
+
+ // ReplicationStatusFailed is a ReplicationStatus enum value
+ ReplicationStatusFailed = "FAILED"
+
+ // ReplicationStatusReplica is a ReplicationStatus enum value
+ ReplicationStatusReplica = "REPLICA"
+)
+
+// If present, indicates that the requester was successfully charged for the
+// request.
+const (
+ // RequestChargedRequester is a RequestCharged enum value
+ RequestChargedRequester = "requester"
+)
+
+// Confirms that the requester knows that she or he will be charged for the
+// request. Bucket owners need not specify this parameter in their requests.
+// Documentation on downloading objects from requester pays buckets can be found
+// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+const (
+ // RequestPayerRequester is a RequestPayer enum value
+ RequestPayerRequester = "requester"
+)
+
+const (
+ // RestoreRequestTypeSelect is a RestoreRequestType enum value
+ RestoreRequestTypeSelect = "SELECT"
+)
+
+const (
+ // ServerSideEncryptionAes256 is a ServerSideEncryption enum value
+ ServerSideEncryptionAes256 = "AES256"
+
+ // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value
+ ServerSideEncryptionAwsKms = "aws:kms"
+)
+
+const (
+ // SseKmsEncryptedObjectsStatusEnabled is an SseKmsEncryptedObjectsStatus enum value
+ SseKmsEncryptedObjectsStatusEnabled = "Enabled"
+
+ // SseKmsEncryptedObjectsStatusDisabled is an SseKmsEncryptedObjectsStatus enum value
+ SseKmsEncryptedObjectsStatusDisabled = "Disabled"
+)
+
+const (
+ // StorageClassStandard is a StorageClass enum value
+ StorageClassStandard = "STANDARD"
+
+ // StorageClassReducedRedundancy is a StorageClass enum value
+ StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+ // StorageClassStandardIa is a StorageClass enum value
+ StorageClassStandardIa = "STANDARD_IA"
+
+ // StorageClassOnezoneIa is a StorageClass enum value
+ StorageClassOnezoneIa = "ONEZONE_IA"
+
+ // StorageClassIntelligentTiering is a StorageClass enum value
+ StorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+ // StorageClassGlacier is a StorageClass enum value
+ StorageClassGlacier = "GLACIER"
+
+ // StorageClassDeepArchive is a StorageClass enum value
+ StorageClassDeepArchive = "DEEP_ARCHIVE"
+)
+
+const (
+ // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value
+ StorageClassAnalysisSchemaVersionV1 = "V_1"
+)
+
+const (
+ // TaggingDirectiveCopy is a TaggingDirective enum value
+ TaggingDirectiveCopy = "COPY"
+
+ // TaggingDirectiveReplace is a TaggingDirective enum value
+ TaggingDirectiveReplace = "REPLACE"
+)
+
+const (
+ // TierStandard is a Tier enum value
+ TierStandard = "Standard"
+
+ // TierBulk is a Tier enum value
+ TierBulk = "Bulk"
+
+ // TierExpedited is a Tier enum value
+ TierExpedited = "Expedited"
+)
+
+const (
+ // TransitionStorageClassGlacier is a TransitionStorageClass enum value
+ TransitionStorageClassGlacier = "GLACIER"
+
+ // TransitionStorageClassStandardIa is a TransitionStorageClass enum value
+ TransitionStorageClassStandardIa = "STANDARD_IA"
+
+ // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value
+ TransitionStorageClassOnezoneIa = "ONEZONE_IA"
+
+ // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value
+ TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING"
+
+ // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value
+ TransitionStorageClassDeepArchive = "DEEP_ARCHIVE"
+)
+
+const (
+ // TypeCanonicalUser is a Type enum value
+ TypeCanonicalUser = "CanonicalUser"
+
+ // TypeAmazonCustomerByEmail is a Type enum value
+ TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
+
+ // TypeGroup is a Type enum value
+ TypeGroup = "Group"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
new file mode 100644
index 00000000..5c8ce5cc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,249 @@
+package s3
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+ contentMD5Header = "Content-Md5"
+ contentSha256Header = "X-Amz-Content-Sha256"
+ amzTeHeader = "X-Amz-Te"
+ amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+ appendMD5TxEncoding = "append-md5"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+ h := md5.New()
+
+ if !aws.IsReaderSeekable(r.Body) {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log(fmt.Sprintf(
+ "Unable to compute Content-MD5 for unseekable body, S3.%s",
+ r.Operation.Name))
+ }
+ return
+ }
+
+ if _, err := copySeekableBody(h, r.Body); err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
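+
+// For illustration only (not part of the SDK): the Content-Md5 value set
+// above is the standard base64 encoding of the body's raw MD5 digest. A
+// minimal sketch, assuming a small in-memory body:
+//
+// h := md5.New()
+// io.Copy(h, strings.NewReader("hello"))
+// v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+// // v == "XUFAKrxLKna5cZ2REBfFkg=="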
+
+// computeBodyHashes will add Content-MD5 and Content-Sha256 hashes to the
+// request. If the body is not seekable or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+ if r.IsPresigned() {
+ return
+ }
+ if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+ return
+ }
+
+ var md5Hash, sha256Hash hash.Hash
+ hashers := make([]io.Writer, 0, 2)
+
+ // Determine upfront which hashes can be set without overriding
+ // user-provided header data.
+ if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+ md5Hash = md5.New()
+ hashers = append(hashers, md5Hash)
+ }
+
+ if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+ sha256Hash = sha256.New()
+ hashers = append(hashers, sha256Hash)
+ }
+
+ // Create the destination writer based on the hashes that are not already
+ // provided by the user.
+ var dst io.Writer
+ switch len(hashers) {
+ case 0:
+ return
+ case 1:
+ dst = hashers[0]
+ default:
+ dst = io.MultiWriter(hashers...)
+ }
+
+ if _, err := copySeekableBody(dst, r.Body); err != nil {
+ r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+ return
+ }
+
+ // For the hashes created, set the associated headers that the user did not
+ // already provide.
+ if md5Hash != nil {
+ sum := make([]byte, md5.Size)
+ encoded := make([]byte, md5Base64EncLen)
+
+ base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+ r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+ }
+
+ if sha256Hash != nil {
+ encoded := make([]byte, sha256HexEncLen)
+ sum := make([]byte, sha256.Size)
+
+ hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+ r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+ }
+}
+
+const (
+ md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+ sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
+)
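+
+// A worked check of the formulas above: md5.Size is 16 bytes, so the base64
+// length is (16+2)/3*4 = 24 bytes; sha256.Size is 32 bytes, so the hex
+// length is 32*2 = 64 bytes.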
+
+func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+ curPos, err := src.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ // Hash the body, then seek back to the starting position so the body is
+ // reset for transmission. Copy errors may be assumed to be from the
+ // body.
+ n, err := io.Copy(dst, src)
+ if err != nil {
+ return n, err
+ }
+
+ _, err = src.Seek(curPos, sdkio.SeekStart)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
+// Adds the x-amz-te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if it is disabled, if the request is presigned,
+// or if the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+ if r.IsPresigned() {
+ return
+ }
+ r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+ if r.Error != nil {
+ return
+ }
+
+ if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+ return
+ }
+
+ var bodyReader *io.ReadCloser
+ var contentLen int64
+ switch tv := r.Data.(type) {
+ case *GetObjectOutput:
+ bodyReader = &tv.Body
+ contentLen = aws.Int64Value(tv.ContentLength)
+ // Update ContentLength to hide the trailing MD5 checksum.
+ tv.ContentLength = aws.Int64(contentLen - md5.Size)
+ tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
+ default:
+ r.Error = awserr.New("ChecksumValidationError",
+ fmt.Sprintf("%s: %s header received on unsupported API, %s",
+ amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
+ ), nil)
+ return
+ }
+
+ if contentLen < md5.Size {
+ r.Error = awserr.New("ChecksumValidationError",
+ fmt.Sprintf("invalid Content-Length %d for %s %s",
+ contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
+ ), nil)
+ return
+ }
+
+ // Wrap and swap the response body reader with the validation reader.
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
+}
+
+type md5ValidationReader struct {
+ rawReader io.ReadCloser
+ payload io.Reader
+ hash hash.Hash
+
+ payloadLen int64
+ read int64
+}
+
+func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
+ h := md5.New()
+ return &md5ValidationReader{
+ rawReader: reader,
+ payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
+ hash: h,
+ payloadLen: payloadLen,
+ }
+}
+
+func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
+ n, err = v.payload.Read(p)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+
+ v.read += int64(n)
+
+ if err == io.EOF {
+ if v.read != v.payloadLen {
+ return n, io.ErrUnexpectedEOF
+ }
+ expectSum := make([]byte, md5.Size)
+ actualSum := make([]byte, md5.Size)
+ if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
+ return n, sumReadErr
+ }
+ actualSum = v.hash.Sum(actualSum[0:0])
+ if !bytes.Equal(expectSum, actualSum) {
+ return n, awserr.New("InvalidChecksum",
+ fmt.Sprintf("expected MD5 checksum %s, got %s",
+ hex.EncodeToString(expectSum),
+ hex.EncodeToString(actualSum),
+ ),
+ nil)
+ }
+ }
+
+ return n, err
+}
+
+func (v *md5ValidationReader) Close() error {
+ return v.rawReader.Close()
+}
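+
+// Example (illustrative only, not part of the SDK): wrapping a response body
+// so the payload is verified against its trailing MD5 digest as it is read.
+// Assumes body carries payloadLen content bytes followed by a 16-byte digest.
+//
+// r := newMD5ValidationReader(body, payloadLen)
+// if _, err := io.Copy(ioutil.Discard, r); err != nil {
+// // err is an "InvalidChecksum" awserr.Error on digest mismatch
+// }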
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 00000000..9ba8a788
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,107 @@
+package s3
+
+import (
+ "io/ioutil"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+func NormalizeBucketLocation(loc string) string {
+ switch loc {
+ case "":
+ loc = "us-east-1"
+ case "EU":
+ loc = "eu-west-1"
+ }
+
+ return loc
+}
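+
+// For example, normalizing typical GetBucketLocation results:
+//
+// NormalizeBucketLocation("") // "us-east-1"
+// NormalizeBucketLocation("EU") // "eu-west-1"
+// NormalizeBucketLocation("us-west-2") // "us-west-2" (unchanged)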
+
+// NormalizeBucketLocationHandler is a request handler which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// })
+// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+// err := req.Send()
+var NormalizeBucketLocationHandler = request.NamedHandler{
+ Name: "awssdk.s3.NormalizeBucketLocation",
+ Fn: func(req *request.Request) {
+ if req.Error != nil {
+ return
+ }
+
+ out := req.Data.(*GetBucketLocationOutput)
+ loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+ out.LocationConstraint = aws.String(loc)
+ },
+}
+
+// WithNormalizeBucketLocation is a request option which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// result, err := svc.GetBucketLocationWithContext(ctx,
+// &s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// },
+// s3.WithNormalizeBucketLocation,
+// )
+func WithNormalizeBucketLocation(r *request.Request) {
+ r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
+func buildGetBucketLocation(r *request.Request) {
+ if r.DataFilled() {
+ out := r.Data.(*GetBucketLocationOutput)
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed reading response body", err)
+ return
+ }
+
+ match := reBucketLocation.FindSubmatch(b)
+ if len(match) > 1 {
+ loc := string(match[1])
+ out.LocationConstraint = aws.String(loc)
+ }
+ }
+}
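+
+// For example (illustrative): given a response body of
+//
+// <LocationConstraint xmlns="...">eu-west-1</LocationConstraint>
+//
+// reBucketLocation captures "eu-west-1" into match[1] above.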
+
+func populateLocationConstraint(r *request.Request) {
+ if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+ in := r.Params.(*CreateBucketInput)
+ if in.CreateBucketConfiguration == nil {
+ r.Params = awsutil.CopyOf(r.Params)
+ in = r.Params.(*CreateBucketInput)
+ in.CreateBucketConfiguration = &CreateBucketConfiguration{
+ LocationConstraint: r.Config.Region,
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 00000000..23d386b1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,75 @@
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/s3err"
+)
+
+func init() {
+ initClient = defaultInitClientFn
+ initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+ // Support building custom endpoints based on config
+ c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+ // Require SSL when using SSE keys
+ c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ c.Handlers.Build.PushBack(computeSSEKeyMD5)
+ c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
+
+ // S3 uses custom error unmarshaling logic
+ c.Handlers.UnmarshalError.Clear()
+ c.Handlers.UnmarshalError.PushBack(unmarshalError)
+ c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+ // Add request handlers for specific platforms.
+ // e.g. 100-continue support for PUT requests using Go 1.6
+ platformRequestHandlers(r)
+
+ switch r.Operation.Name {
+ case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+ opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+ opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
+ opPutBucketReplication:
+ // These S3 operations require Content-MD5 to be set
+ r.Handlers.Build.PushBack(contentMD5)
+ case opGetBucketLocation:
+ // GetBucketLocation has custom parsing logic
+ r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+ case opCreateBucket:
+ // Auto-populate LocationConstraint with current region
+ r.Handlers.Validate.PushFront(populateLocationConstraint)
+ case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+ r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+ r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+ case opPutObject, opUploadPart:
+ r.Handlers.Build.PushBack(computeBodyHashes)
+ // Disabled until #1837 root issue is resolved.
+ // case opGetObject:
+ // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+ // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+ }
+}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+ getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+ getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+ getCopySourceSSECustomerKey() string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 00000000..0def0225
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 00000000..4b65f715
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,123 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader supports io.Reader
+// for streaming uploads, and will take advantage of io.ReadSeeker for
+// optimizations if the Body satisfies that interface. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// f, err := os.Open(filename)
+// if err != nil {
+// return fmt.Errorf("failed to open file %q, %v", filename, err)
+// }
+//
+// // Upload the file to S3.
+// result, err := uploader.Upload(&s3manager.UploadInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// Body: f,
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a file to write the S3 Object contents to.
+// f, err := os.Create(filename)
+// if err != nil {
+// return fmt.Errorf("failed to create file %q, %v", filename, err)
+// }
+//
+// // Write the contents of S3 Object to the file
+// n, err := downloader.Download(f, &s3.GetObjectInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// })
+// if err != nil {
+// return fmt.Errorf("failed to download file, %v", err)
+// }
+// fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Automatic URI cleaning
+//
+// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
+// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
+// used by the service client.
+//
+// svc := s3.New(sess, &aws.Config{
+// DisableRestProtocolURICleaning: aws.Bool(true),
+// })
+// out, err := svc.GetObject(&s3.GetObjectInput {
+// Bucket: aws.String("bucketname"),
+// Key: aws.String("//foo//bar//moo"),
+// })
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// sess := session.Must(session.NewSession())
+//
+// // Create the decryption client.
+// svc := s3crypto.NewDecryptionClient(sess)
+//
+// // The object will be downloaded from S3 and decrypted locally. The metadata
+// // about the object's encryption will instruct the decryption client how to
+// // decrypt the content of the object. By default KMS is used for keys.
+// result, err := svc.GetObject(&s3.GetObjectInput {
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myKey),
+// })
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 00000000..931cb17b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,48 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+ // ErrCodeBucketAlreadyExists for service response error code
+ // "BucketAlreadyExists".
+ //
+ // The requested bucket name is not available. The bucket namespace is shared
+ // by all users of the system. Please select a different name and try again.
+ ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+ // ErrCodeBucketAlreadyOwnedByYou for service response error code
+ // "BucketAlreadyOwnedByYou".
+ ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+ // ErrCodeNoSuchBucket for service response error code
+ // "NoSuchBucket".
+ //
+ // The specified bucket does not exist.
+ ErrCodeNoSuchBucket = "NoSuchBucket"
+
+ // ErrCodeNoSuchKey for service response error code
+ // "NoSuchKey".
+ //
+ // The specified key does not exist.
+ ErrCodeNoSuchKey = "NoSuchKey"
+
+ // ErrCodeNoSuchUpload for service response error code
+ // "NoSuchUpload".
+ //
+ // The specified multipart upload does not exist.
+ ErrCodeNoSuchUpload = "NoSuchUpload"
+
+ // ErrCodeObjectAlreadyInActiveTierError for service response error code
+ // "ObjectAlreadyInActiveTierError".
+ //
+ // This operation is not allowed against this storage tier.
+ ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+ // ErrCodeObjectNotInActiveTierError for service response error code
+ // "ObjectNotInActiveTierError".
+ //
+ // The source object of the COPY operation is not in the active tier and is
+ // only stored in Amazon Glacier.
+ ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000..a7fbc2de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,155 @@
+package s3
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// an operationBlacklist is a list of operation names that a request
+// handler should not be executed with.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+ for i := 0; i < len(b); i++ {
+ if b[i] == r.Operation.Name {
+ return false
+ }
+ }
+ return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+ opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of bucket is valid for all bucket names which are
+// DNS compatible and do not contain "."
+func updateEndpointForS3Config(r *request.Request) {
+ forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+ accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+ if accelerate && accelerateOpBlacklist.Continue(r) {
+ if forceHostStyle {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+ }
+ }
+ updateEndpointForAccelerate(r)
+ } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+ updateEndpointForHostStyle(r)
+ }
+}
+
+func updateEndpointForHostStyle(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore operation requests if the bucket name was not provided.
+ // If this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ // bucket name must be valid to put into the host
+ return
+ }
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+var (
+ accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore operation requests if the bucket name was not provided.
+ // If this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+ nil)
+ return
+ }
+
+ parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+ if len(parts) < 3 {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+ r.HTTPRequest.URL.Host), nil)
+ return
+ }
+
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+ for i := 1; i+1 < len(parts); i++ {
+ if parts[i] == aws.StringValue(r.Config.Region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
+ }
+ }
+
+ r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
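+
+// For example (illustrative), with aws.Config.Region set to "us-west-2" and a
+// hypothetical bucket named "my-bucket", the rewrite above turns the endpoint
+// host "s3.us-west-2.amazonaws.com" into
+// "my-bucket.s3-accelerate.amazonaws.com".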
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+ if iface, ok := params.(bucketGetter); ok {
+ b := iface.getBucket()
+ return b, len(b) > 0
+ }
+
+ return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ return reDomain.MatchString(bucket) &&
+ !reIPAddress.MatchString(bucket) &&
+ !strings.Contains(bucket, "..")
+}
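+
+// For example (illustrative):
+//
+// dnsCompatibleBucketName("my-bucket") // true
+// dnsCompatibleBucketName("MyBucket") // false: uppercase characters
+// dnsCompatibleBucketName("192.168.0.1") // false: matches the IP pattern
+// dnsCompatibleBucketName("my..bucket") // false: adjacent dots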
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+}
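+
+// Example (illustrative only): a virtual-hosted-style rewrite, assuming the
+// operation's path template contains the "/{Bucket}" placeholder:
+//
+// u, _ := url.Parse("https://s3.us-west-2.amazonaws.com/{Bucket}/my-key")
+// moveBucketToHost(u, "my-bucket")
+// // u.Host == "my-bucket.s3.us-west-2.amazonaws.com"
+// // u.Path == "/my-key"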
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 00000000..8e6f3307
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 00000000..14d05f7b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+ if r.Operation.HTTPMethod == "PUT" {
+ // 100-Continue should only be used on put requests.
+ r.Handlers.Sign.PushBack(add100Continue)
+ }
+}
+
+func add100Continue(r *request.Request) {
+ if aws.BoolValue(r.Config.S3Disable100Continue) {
+ return
+ }
+ if r.HTTPRequest.ContentLength < 1024*1024*2 {
+ // Ignore requests smaller than 2MB. This helps prevent delaying
+ // requests unnecessarily.
+ return
+ }
+
+ r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
new file mode 100644
index 00000000..2646a427
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -0,0 +1,443 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package s3iface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3API provides an interface to enable mocking the
+// s3.S3 service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // Amazon Simple Storage Service.
+// func myFunc(svc s3iface.S3API) bool {
+// // Make svc.AbortMultipartUpload request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := s3.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockS3Client struct {
+// s3iface.S3API
+// }
+// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockS3Client{}
+//
+// myfunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type S3API interface {
+ AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+ AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
+ AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
+
+ CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+ CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
+ CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
+
+ CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+ CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
+ CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
+
+ CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
+ CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
+ CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
+
+ CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+ CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
+ CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
+
+ DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
+ DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
+ DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
+
+ DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
+ DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
+ DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)
+
+ DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
+ DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
+ DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
+
+ DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error)
+ DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error)
+ DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput)
+
+ DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error)
+ DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error)
+ DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput)
+
+ DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
+ DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error)
+ DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
+
+ DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error)
+ DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error)
+ DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)
+
+ DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
+ DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error)
+ DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
+
+ DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
+ DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error)
+ DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)
+
+ DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
+ DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error)
+ DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)
+
+ DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)
+
+ DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
+ DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error)
+ DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)
+
+ DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
+ DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error)
+ DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)
+
+ DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
+ DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error)
+ DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
+
+ DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error)
+ DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error)
+ DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput)
+
+ GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error)
+ GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error)
+ GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput)
+
+ GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
+ GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error)
+ GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
+
+ GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error)
+ GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error)
+ GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput)
+
+ GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
+ GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error)
+ GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
+
+ GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error)
+ GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error)
+ GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput)
+
+ GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error)
+ GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error)
+ GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput)
+
+ GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
+ GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error)
+ GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)
+
+ GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
+ GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error)
+ GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)
+
+ GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
+ GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error)
+ GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)
+
+ GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
+ GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error)
+ GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)
+
+ GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error)
+ GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error)
+ GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput)
+
+ GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
+ GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error)
+ GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)
+
+ GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
+ GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error)
+ GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)
+
+ GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
+ GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error)
+ GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)
+
+ GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error)
+ GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error)
+ GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput)
+
+ GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
+ GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error)
+ GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)
+
+ GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
+ GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error)
+ GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)
+
+ GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
+ GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error)
+ GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)
+
+ GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
+ GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error)
+ GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)
+
+ GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)
+
+ GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
+ GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error)
+ GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)
+
+ GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
+ GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
+ GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
+
+ GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
+ GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
+ GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)
+
+ GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error)
+ GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error)
+ GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput)
+
+ GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error)
+ GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error)
+ GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput)
+
+ GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
+ GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
+ GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
+
+ GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
+ GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error)
+ GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)
+
+ GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error)
+ GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error)
+ GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput)
+
+ HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
+ HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error)
+ HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)
+
+ HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
+ HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error)
+ HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)
+
+ ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
+ ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
+ ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput)
+
+ ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error)
+ ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error)
+ ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput)
+
+ ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error)
+ ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error)
+ ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput)
+
+ ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
+ ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
+ ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
+
+ ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
+ ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
+ ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
+
+ ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
+ ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error
+
+ ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
+ ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error)
+ ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)
+
+ ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
+ ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error
+
+ ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
+ ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error)
+ ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)
+
+ ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
+ ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error
+
+ ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
+ ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error)
+ ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)
+
+ ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
+ ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error
+
+ ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
+ ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error)
+ ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
+
+ ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
+ ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error
+
+ PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error)
+ PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error)
+ PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput)
+
+ PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
+ PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error)
+ PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
+
+ PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error)
+ PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error)
+ PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput)
+
+ PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
+ PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error)
+ PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
+
+ PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error)
+ PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error)
+ PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput)
+
+ PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error)
+ PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error)
+ PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput)
+
+ PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
+ PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error)
+ PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)
+
+ PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
+ PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error)
+ PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)
+
+ PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
+ PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error)
+ PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)
+
+ PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error)
+ PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error)
+ PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput)
+
+ PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
+ PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error)
+ PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
+
+ PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
+ PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error)
+ PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
+
+ PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
+ PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error)
+ PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
+
+ PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
+ PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error)
+ PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
+
+ PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
+ PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error)
+ PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)
+
+ PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
+ PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error)
+ PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)
+
+ PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
+ PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error)
+ PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
+
+ PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
+
+ PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
+ PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error)
+ PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
+
+ PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
+ PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
+ PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
+
+ PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error)
+ PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error)
+ PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput)
+
+ PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error)
+ PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error)
+ PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput)
+
+ PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error)
+ PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error)
+ PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput)
+
+ PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
+ PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
+ PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
+
+ PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error)
+ PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error)
+ PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput)
+
+ RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
+ RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error)
+ RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
+
+ SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error)
+ SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error)
+ SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput)
+
+ UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
+ UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error)
+ UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
+
+ UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
+ UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
+ UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
+
+ WaitUntilBucketExists(*s3.HeadBucketInput) error
+ WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
+
+ WaitUntilBucketNotExists(*s3.HeadBucketInput) error
+ WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
+
+ WaitUntilObjectExists(*s3.HeadObjectInput) error
+ WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
+
+ WaitUntilObjectNotExists(*s3.HeadObjectInput) error
+ WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
+}
+
+var _ S3API = (*s3.S3)(nil)
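+
+// The compile-time assertion above guarantees *s3.S3 satisfies S3API. A common
+// reason to depend on S3API instead of *s3.S3 is unit testing; a minimal
+// sketch (illustrative only, not part of this package):
+//
+//    type mockS3 struct {
+//        s3iface.S3API // embed to satisfy the full interface
+//    }
+//
+//    func (m *mockS3) GetObject(in *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
+//        // return canned data for tests instead of calling S3
+//        return &s3.GetObjectOutput{}, nil
+//    }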
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
new file mode 100644
index 00000000..22bd0b7c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
@@ -0,0 +1,529 @@
+package s3manager
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+const (
+	// DefaultBatchSize is the default batch size used when constructing a batch
+	// delete client. It controls how many objects are deleted per DeleteObjects
+	// call.
+ DefaultBatchSize = 100
+)
+
+// BatchError contains the errors for the objects that failed during a batch
+// upload, download, or delete operation.
+type BatchError struct {
+ Errors Errors
+ code string
+ message string
+}
+
+// Errors is a typed alias for a slice of errors to satisfy the error
+// interface.
+type Errors []Error
+
+func (errs Errors) Error() string {
+ buf := bytes.NewBuffer(nil)
+ for i, err := range errs {
+ buf.WriteString(err.Error())
+ if i+1 < len(errs) {
+ buf.WriteString("\n")
+ }
+ }
+ return buf.String()
+}
+
+// Error will contain the original error, bucket, and key of the operation that failed
+// during batch operations.
+type Error struct {
+ OrigErr error
+ Bucket *string
+ Key *string
+}
+
+func newError(err error, bucket, key *string) Error {
+ return Error{
+ err,
+ bucket,
+ key,
+ }
+}
+
+func (err *Error) Error() string {
+ origErr := ""
+ if err.OrigErr != nil {
+ origErr = ":\n" + err.OrigErr.Error()
+ }
+ return fmt.Sprintf("failed to perform batch operation on %q to %q%s",
+ aws.StringValue(err.Key),
+ aws.StringValue(err.Bucket),
+ origErr,
+ )
+}
+
+// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
+func NewBatchError(code, message string, err []Error) awserr.Error {
+ return &BatchError{
+ Errors: err,
+ code: code,
+ message: message,
+ }
+}
+
+// Code will return the code associated with the batch error.
+func (err *BatchError) Code() string {
+ return err.code
+}
+
+// Message will return the message associated with the batch error.
+func (err *BatchError) Message() string {
+ return err.message
+}
+
+func (err *BatchError) Error() string {
+ return awserr.SprintError(err.Code(), err.Message(), "", err.Errors)
+}
+
+// OrigErr will return the original error. For batch operations this is the
+// Errors value containing the individual failures.
+func (err *BatchError) OrigErr() error {
+ return err.Errors
+}
+
+// BatchDeleteIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be deleted.
+type BatchDeleteIterator interface {
+ Next() bool
+ Err() error
+ DeleteObject() BatchDeleteObject
+}
+
+// DeleteListIterator is an alternative iterator for the BatchDelete client. This will
+// iterate through a list of objects and delete the objects.
+//
+// Example:
+// iter := &s3manager.DeleteListIterator{
+// Client: svc,
+// Input: &s3.ListObjectsInput{
+// Bucket: aws.String("bucket"),
+// MaxKeys: aws.Int64(5),
+// },
+// Paginator: request.Pagination{
+// NewRequest: func() (*request.Request, error) {
+// var inCpy *ListObjectsInput
+// if input != nil {
+// tmp := *input
+// inCpy = &tmp
+// }
+// req, _ := c.ListObjectsRequest(inCpy)
+// return req, nil
+// },
+// },
+// }
+//
+// batcher := s3manager.NewBatchDeleteWithClient(svc)
+// if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
+// return err
+// }
+type DeleteListIterator struct {
+ Bucket *string
+ Paginator request.Pagination
+ objects []*s3.Object
+}
+
+// NewDeleteListIterator will return a new DeleteListIterator.
+func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
+ iter := &DeleteListIterator{
+ Bucket: input.Bucket,
+ Paginator: request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *s3.ListObjectsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := svc.ListObjectsRequest(inCpy)
+ return req, nil
+ },
+ },
+ }
+
+ for _, opt := range opts {
+ opt(iter)
+ }
+ return iter
+}
+
+// Next will use the S3API client to iterate through a list of objects.
+func (iter *DeleteListIterator) Next() bool {
+ if len(iter.objects) > 0 {
+ iter.objects = iter.objects[1:]
+ }
+
+ if len(iter.objects) == 0 && iter.Paginator.Next() {
+ iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
+ }
+
+ return len(iter.objects) > 0
+}
+
+// Err will return the last known error from Next.
+func (iter *DeleteListIterator) Err() error {
+ return iter.Paginator.Err()
+}
+
+// DeleteObject will return the current object to be deleted.
+func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
+ return BatchDeleteObject{
+ Object: &s3.DeleteObjectInput{
+ Bucket: iter.Bucket,
+ Key: iter.objects[0].Key,
+ },
+ }
+}
+
+// BatchDelete will use the s3 package's service client to perform a batch
+// delete.
+type BatchDelete struct {
+ Client s3iface.S3API
+ BatchSize int
+}
+
+// NewBatchDeleteWithClient will return a new delete client that can delete
+// batches of objects.
+//
+// Example:
+// batcher := s3manager.NewBatchDeleteWithClient(client, size)
+//
+// objects := []BatchDeleteObject{
+// {
+// Object: &s3.DeleteObjectInput {
+// Key: aws.String("key"),
+// Bucket: aws.String("bucket"),
+// },
+// },
+// }
+//
+// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+// Objects: objects,
+// }); err != nil {
+// return err
+// }
+func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
+ svc := &BatchDelete{
+ Client: client,
+ BatchSize: DefaultBatchSize,
+ }
+
+ for _, opt := range options {
+ opt(svc)
+ }
+
+ return svc
+}
+
+// NewBatchDelete will return a new delete client that can delete batches of
+// objects.
+//
+// Example:
+// batcher := s3manager.NewBatchDelete(sess, size)
+//
+// objects := []BatchDeleteObject{
+// {
+// Object: &s3.DeleteObjectInput {
+// Key: aws.String("key"),
+// Bucket: aws.String("bucket"),
+// },
+// },
+// }
+//
+// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+// Objects: objects,
+// }); err != nil {
+// return err
+// }
+func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
+ client := s3.New(c)
+ return NewBatchDeleteWithClient(client, options...)
+}
+
+// BatchDeleteObject is a wrapper object for calling the batch delete operation.
+type BatchDeleteObject struct {
+ Object *s3.DeleteObjectInput
+ // After will run after each iteration during the batch process. This function will
+ // be executed whether or not the request was successful.
+ After func() error
+}
+
+// DeleteObjectsIterator is an interface that uses the scanner pattern to iterate
+// through a series of objects to be deleted.
+type DeleteObjectsIterator struct {
+ Objects []BatchDeleteObject
+ index int
+ inc bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (iter *DeleteObjectsIterator) Next() bool {
+ if iter.inc {
+ iter.index++
+ } else {
+ iter.inc = true
+ }
+ return iter.index < len(iter.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchDeleteIterator interface, this will only return nil.
+func (iter *DeleteObjectsIterator) Err() error {
+ return nil
+}
+
+// DeleteObject will return the BatchDeleteObject at the current batched index.
+func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
+ object := iter.Objects[iter.index]
+ return object
+}
+
+// Delete will use the iterator to queue up objects that need to be deleted.
+// Once the batch size is met, this will call the deleteBatch function.
+func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
+ var errs []Error
+ objects := []BatchDeleteObject{}
+ var input *s3.DeleteObjectsInput
+
+ for iter.Next() {
+ o := iter.DeleteObject()
+
+ if input == nil {
+ input = initDeleteObjectsInput(o.Object)
+ }
+
+ parity := hasParity(input, o)
+ if parity {
+ input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+ Key: o.Object.Key,
+ VersionId: o.Object.VersionId,
+ })
+ objects = append(objects, o)
+ }
+
+ if len(input.Delete.Objects) == d.BatchSize || !parity {
+ if err := deleteBatch(ctx, d, input, objects); err != nil {
+ errs = append(errs, err...)
+ }
+
+ objects = objects[:0]
+ input = nil
+
+ if !parity {
+ objects = append(objects, o)
+ input = initDeleteObjectsInput(o.Object)
+ input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+ Key: o.Object.Key,
+ VersionId: o.Object.VersionId,
+ })
+ }
+ }
+ }
+
+	// iter.Next() could return false (above) while also populating iter.Err()
+ if iter.Err() != nil {
+ errs = append(errs, newError(iter.Err(), nil, nil))
+ }
+
+ if input != nil && len(input.Delete.Objects) > 0 {
+ if err := deleteBatch(ctx, d, input, objects); err != nil {
+ errs = append(errs, err...)
+ }
+ }
+
+ if len(errs) > 0 {
+ return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
+ }
+ return nil
+}
+
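+// initDeleteObjectsInput seeds a DeleteObjectsInput with the bucket, MFA, and
+// RequestPayer values of the given DeleteObjectInput, leaving the object list
+// empty for the caller to fill.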
+func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
+ return &s3.DeleteObjectsInput{
+ Bucket: o.Bucket,
+ MFA: o.MFA,
+ RequestPayer: o.RequestPayer,
+ Delete: &s3.Delete{},
+ }
+}
+
+const (
+ // ErrDeleteBatchFailCode represents an error code which will be returned
+ // only when DeleteObjects.Errors has an error that does not contain a code.
+ ErrDeleteBatchFailCode = "DeleteBatchError"
+ errDefaultDeleteBatchMessage = "failed to delete"
+)
+
+// deleteBatch will delete a batch of items in the objects parameters.
+func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
+ errs := []Error{}
+
+ if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
+ for i := 0; i < len(input.Delete.Objects); i++ {
+ errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
+ }
+ } else if len(result.Errors) > 0 {
+ for i := 0; i < len(result.Errors); i++ {
+ code := ErrDeleteBatchFailCode
+ msg := errDefaultDeleteBatchMessage
+ if result.Errors[i].Message != nil {
+ msg = *result.Errors[i].Message
+ }
+ if result.Errors[i].Code != nil {
+ code = *result.Errors[i].Code
+ }
+
+ errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
+ }
+ }
+ for _, object := range objects {
+ if object.After == nil {
+ continue
+ }
+ if err := object.After(); err != nil {
+ errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+ }
+ }
+
+ return errs
+}
+
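+// hasParity reports whether the pending DeleteObjectsInput and the next
+// BatchDeleteObject share the same Bucket, MFA, and RequestPayer values, and
+// therefore can be deleted in the same DeleteObjects call.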
+func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
+ if o1.Bucket != nil && o2.Object.Bucket != nil {
+ if *o1.Bucket != *o2.Object.Bucket {
+ return false
+ }
+ } else if o1.Bucket != o2.Object.Bucket {
+ return false
+ }
+
+ if o1.MFA != nil && o2.Object.MFA != nil {
+ if *o1.MFA != *o2.Object.MFA {
+ return false
+ }
+ } else if o1.MFA != o2.Object.MFA {
+ return false
+ }
+
+ if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
+ if *o1.RequestPayer != *o2.Object.RequestPayer {
+ return false
+ }
+ } else if o1.RequestPayer != o2.Object.RequestPayer {
+ return false
+ }
+
+ return true
+}
+
+// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
+// through a series of objects to be downloaded.
+type BatchDownloadIterator interface {
+ Next() bool
+ Err() error
+ DownloadObject() BatchDownloadObject
+}
+
+// BatchDownloadObject contains all necessary information to run a batch operation once.
+type BatchDownloadObject struct {
+ Object *s3.GetObjectInput
+ Writer io.WriterAt
+ // After will run after each iteration during the batch process. This function will
+ // be executed whether or not the request was successful.
+ After func() error
+}
+
+// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
+// download of objects.
+type DownloadObjectsIterator struct {
+ Objects []BatchDownloadObject
+ index int
+ inc bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *DownloadObjectsIterator) Next() bool {
+ if batcher.inc {
+ batcher.index++
+ } else {
+ batcher.inc = true
+ }
+ return batcher.index < len(batcher.Objects)
+}
+
+// DownloadObject will return the BatchDownloadObject at the current batched index.
+func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
+ object := batcher.Objects[batcher.index]
+ return object
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchDownloadIterator interface, this will only return nil.
+func (batcher *DownloadObjectsIterator) Err() error {
+ return nil
+}
+
+// BatchUploadIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be uploaded.
+type BatchUploadIterator interface {
+ Next() bool
+ Err() error
+ UploadObject() BatchUploadObject
+}
+
+// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
+// upload of objects.
+type UploadObjectsIterator struct {
+ Objects []BatchUploadObject
+ index int
+ inc bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *UploadObjectsIterator) Next() bool {
+ if batcher.inc {
+ batcher.index++
+ } else {
+ batcher.inc = true
+ }
+ return batcher.index < len(batcher.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchUploadIterator interface, this will only return nil.
+func (batcher *UploadObjectsIterator) Err() error {
+ return nil
+}
+
+// UploadObject will return the BatchUploadObject at the current batched index.
+func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
+ object := batcher.Objects[batcher.index]
+ return object
+}
+
+// BatchUploadObject contains all necessary information to run a batch operation once.
+type BatchUploadObject struct {
+ Object *UploadInput
+ // After will run after each iteration during the batch process. This function will
+ // be executed whether or not the request was successful.
+ After func() error
+}
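+
+// A minimal sketch of driving a batch upload with UploadObjectsIterator
+// (illustrative only; it assumes an *s3manager.Uploader value named uploader
+// created elsewhere, whose UploadWithIterator method consumes the iterator):
+//
+//    iter := &s3manager.UploadObjectsIterator{Objects: []s3manager.BatchUploadObject{
+//        {Object: &s3manager.UploadInput{
+//            Bucket: aws.String("bucket"),
+//            Key:    aws.String("key"),
+//            Body:   bytes.NewReader([]byte("object body")),
+//        }},
+//    }}
+//
+//    if err := uploader.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
+//        return err
+//    }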
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
new file mode 100644
index 00000000..f61665a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
@@ -0,0 +1,88 @@
+package s3manager
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// regionHint to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A "NotFound" error code will be returned if the bucket does not exist in the
+// AWS partition the regionHint belongs to. If the regionHint parameter is an
+// empty string GetBucketRegion will fallback to the ConfigProvider's region
+// config. If the regionHint is empty, and the ConfigProvider does not have a
+// region value, an error will be returned..
+//
+// For example, to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//             fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
+ var cfg aws.Config
+ if len(regionHint) != 0 {
+ cfg.Region = aws.String(regionHint)
+ }
+ svc := s3.New(c, &cfg)
+ return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
+}
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
+// that it takes an S3 service client instead of a Session. The regionHint is
+// derived from the region the S3 service client was created in.
+//
+// See GetBucketRegion for more information.
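+//
+// A minimal sketch (illustrative only; the ctx, sess, and bucket values are
+// assumed to exist):
+//
+//    svc := s3.New(sess)
+//    region, err := s3manager.GetBucketRegionWithClient(ctx, svc, "my-bucket")
+//    if err != nil {
+//        return err
+//    }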
+func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
+ req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
+ Bucket: aws.String(bucket),
+ })
+ req.Config.S3ForcePathStyle = aws.Bool(true)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ req.SetContext(ctx)
+
+	// Disable HTTP redirects to prevent an invalid 301 from eating the response
+	// because Go's HTTP client will fail, and drop the response if a 301 is
+	// received without a location header. S3 will return a 301 without the
+	// location header for HeadObject API calls.
+ req.DisableFollowRedirects = true
+
+ var bucketRegion string
+ req.Handlers.Send.PushBack(func(r *request.Request) {
+ bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
+ if len(bucketRegion) == 0 {
+ return
+ }
+ r.HTTPResponse.StatusCode = 200
+ r.HTTPResponse.Status = "OK"
+ r.Error = nil
+ })
+
+ req.ApplyOptions(opts...)
+
+ if err := req.Send(); err != nil {
+ return "", err
+ }
+
+ bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
+
+ return bucketRegion, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
new file mode 100644
index 00000000..f1d9e85c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
@@ -0,0 +1,81 @@
+package s3manager
+
+import (
+ "io"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker.
+type BufferedReadSeeker struct {
+ r io.ReadSeeker
+ buffer []byte
+ readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker.
+// If len(b) == 0 then the buffer will be initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+ if len(b) == 0 {
+ b = make([]byte, 64*1024)
+ }
+ return &BufferedReadSeeker{r: r, buffer: b}
+}
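+
+// A minimal usage sketch for BufferedReadSeeker (illustrative only; the file
+// path is an assumption):
+//
+//    f, err := os.Open("/tmp/object.bin")
+//    if err != nil {
+//        return err
+//    }
+//    // Passing a nil buffer lets NewBufferedReadSeeker allocate the 64 KiB default.
+//    brs := s3manager.NewBufferedReadSeeker(f, nil)
+//    p := make([]byte, 512)
+//    n, err := brs.Read(p)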
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+ b.r = r
+ b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If len(p) is greater than the buffer size then a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will at most perform a single Read to the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return n, err
+ }
+
+ if b.readIdx == b.writeIdx {
+ if len(p) >= len(b.buffer) {
+ n, err = b.r.Read(p)
+ return n, err
+ }
+ b.readIdx, b.writeIdx = 0, 0
+
+ n, err = b.r.Read(b.buffer)
+ if n == 0 {
+ return n, err
+ }
+
+ b.writeIdx += n
+ }
+
+ n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+ b.readIdx += n
+
+ return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker at the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ n, err := b.r.Seek(offset, whence)
+
+ b.reset(b.r)
+
+ return n, err
+}
+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
+ _, err := b.Seek(off, sdkio.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return b.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
new file mode 100644
index 00000000..42276530
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package s3manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
new file mode 100644
index 00000000..687082c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
@@ -0,0 +1,5 @@
+package s3manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+ return NewBufferedReadSeekerWriteToPool(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
new file mode 100644
index 00000000..ada50c24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package s3manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
new file mode 100644
index 00000000..7e9d9579
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
@@ -0,0 +1,5 @@
+package s3manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+ return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
new file mode 100644
index 00000000..229c0d63
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
@@ -0,0 +1,3 @@
+// Package s3manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful when working with large objects.
+package s3manager
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
new file mode 100644
index 00000000..4b54b7c0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -0,0 +1,597 @@
+package s3manager
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
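+// errReadingBody wraps an error encountered while copying a part's HTTP
+// response body, so the download can distinguish body-read failures (which
+// are retried) from request failures (which are not).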
+type errReadingBody struct {
+ err error
+}
+
+func (e *errReadingBody) Error() string {
+ return fmt.Sprintf("failed to read part body: %v", e.err)
+}
+
+func (e *errReadingBody) Unwrap() error {
+ return e.err
+}
+
+// Downloader is the structure used to call Download(). It is safe to call
+// Download() on this structure for multiple objects and across concurrent
+// goroutines. Mutating the Downloader's properties is not safe to be done
+// concurrently.
+type Downloader struct {
+ // The size (in bytes) to request from S3 for each part.
+ // The minimum allowed part size is 5MB, and if this value is set to zero,
+ // the DefaultDownloadPartSize value will be used.
+ //
+ // PartSize is ignored if the Range input parameter is provided.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel when sending parts.
+ // If this is set to zero, the DefaultDownloadConcurrency value will be used.
+ //
+ // Concurrency of 1 will download the parts sequentially.
+ //
+ // Concurrency is ignored if the Range input parameter is provided.
+ Concurrency int
+
+ // An S3 client to use when performing downloads.
+ S3 s3iface.S3API
+
+ // List of request options that will be passed down to individual API
+ // operation requests made by the downloader.
+ RequestOptions []request.Option
+
+ // Defines the buffer strategy used when downloading a part.
+ //
+ // If a WriterReadFromProvider is given the Download manager
+ // will pass the io.WriterAt of the Download request to the provider
+ // and will use the returned WriterReadFrom from the provider as the
+	// destination writer when copying from the HTTP response body.
+ BufferProvider WriterReadFromProvider
+}
+
+// WithDownloaderRequestOptions appends to the Downloader's API request options.
+func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
+ return func(d *Downloader) {
+ d.RequestOptions = append(d.RequestOptions, opts...)
+ }
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// a S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
+//
+// Example:
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a downloader with the session and custom options
+// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
+ return newDownloader(s3.New(c), options...)
+}
+
+func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
+ d := &Downloader{
+ S3: client,
+ PartSize: DefaultDownloadPartSize,
+ Concurrency: DefaultDownloadConcurrency,
+ BufferProvider: defaultDownloadBufferProvider(),
+ }
+ for _, option := range options {
+ option(d)
+ }
+
+ return d
+}
+
+// NewDownloaderWithClient creates a new Downloader instance to download
+// objects from S3 in concurrent chunks. Pass in additional functional
+// options to customize the downloader behavior. Requires a S3 service client
+// to make S3 API calls.
+//
+// Example:
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // The S3 client the S3 Downloader will use
+// s3Svc := s3.New(sess)
+//
+// // Create a downloader with the s3 client and default options
+// downloader := s3manager.NewDownloaderWithClient(s3Svc)
+//
+// // Create a downloader with the s3 client and custom options
+// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
+ return newDownloader(svc, options...)
+}
+
+type maxRetrier interface {
+ MaxRetries() int
+}
+
+// Download downloads an object in S3 and writes the payload into w using
+// concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is called from.
+// Modifying the options will not impact the original Downloader instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or an in-memory []byte wrapper using aws.WriteAtBuffer.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// If the GetObjectInput's Range value is provided that will cause the downloader
+// to perform a single GetObject request for that object's range. This will
+// cause the part size and concurrency configurations to be ignored.
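+//
+// Example (a minimal sketch; the downloader value and file path are
+// illustrative):
+//
+//    f, err := os.Create("/tmp/object.bin")
+//    if err != nil {
+//        return err
+//    }
+//    n, err := downloader.Download(f, &s3.GetObjectInput{
+//        Bucket: aws.String("bucket"),
+//        Key:    aws.String("key"),
+//    })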
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+ return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
+}
+
+// DownloadWithContext downloads an object in S3 and writes the payload into w
+// using concurrent GET requests. The n int64 returned is the size of the object downloaded
+// in bytes.
+//
+// DownloadWithContext is the same as Download with the additional support for
+// Context input parameters. The Context must not be nil. A nil Context will
+// cause a panic. Use the Context to add deadlining, timeouts, etc. The
+// DownloadWithContext may create sub-contexts for individual underlying
+// requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or an in-memory []byte wrapper using aws.WriteAtBuffer.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided that will cause the downloader
+// to perform a single GetObject request for that object's range. This will
+// cause the part size and concurrency configurations to be ignored.
+func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+ impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+ for _, option := range options {
+ option(&impl.cfg)
+ }
+ impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+ if s, ok := d.S3.(maxRetrier); ok {
+ impl.partBodyMaxRetries = s.MaxRetries()
+ }
+
+ impl.totalBytes = -1
+ if impl.cfg.Concurrency == 0 {
+ impl.cfg.Concurrency = DefaultDownloadConcurrency
+ }
+
+ if impl.cfg.PartSize == 0 {
+ impl.cfg.PartSize = DefaultDownloadPartSize
+ }
+
+ return impl.download()
+}
+
+// DownloadWithIterator will download a batch of objects from S3 and write them
+// to the io.WriterAt specified in the iterator.
+//
+// Example:
+// svc := s3manager.NewDownloader(session)
+//
+// fooFile, err := os.Open("/tmp/foo.file")
+// if err != nil {
+// return err
+// }
+//
+// barFile, err := os.Open("/tmp/bar.file")
+// if err != nil {
+// return err
+// }
+//
+// objects := []s3manager.BatchDownloadObject {
+// {
+// Object: &s3.GetObjectInput {
+// Bucket: aws.String("bucket"),
+// Key: aws.String("foo"),
+// },
+// Writer: fooFile,
+// },
+// {
+// Object: &s3.GetObjectInput {
+// Bucket: aws.String("bucket"),
+// Key: aws.String("bar"),
+// },
+// Writer: barFile,
+// },
+// }
+//
+// iter := &s3manager.DownloadObjectsIterator{Objects: objects}
+// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
+// return err
+// }
+func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
+ var errs []Error
+ for iter.Next() {
+ object := iter.DownloadObject()
+ if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
+ errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+ }
+
+ if object.After == nil {
+ continue
+ }
+
+ if err := object.After(); err != nil {
+ errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+ }
+ }
+
+ if len(errs) > 0 {
+ return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
+ }
+ return nil
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+ ctx aws.Context
+ cfg Downloader
+
+ in *s3.GetObjectInput
+ w io.WriterAt
+
+ wg sync.WaitGroup
+ m sync.Mutex
+
+ pos int64
+ totalBytes int64
+ written int64
+ err error
+
+ partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If a range is specified, fall back to a single download of that range.
+	// This enables ranged gets with the downloader, but at the cost of no
+	// multipart downloads.
+ if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
+ d.downloadRange(rng)
+ return d.written, d.err
+ }
+
+ // Spin off first worker to check additional header information
+ d.getChunk()
+
+ if total := d.getTotalBytes(); total >= 0 {
+ // Spin up workers
+ ch := make(chan dlchunk, d.cfg.Concurrency)
+
+ for i := 0; i < d.cfg.Concurrency; i++ {
+ d.wg.Add(1)
+ go d.downloadPart(ch)
+ }
+
+ // Assign work
+ for d.getErr() == nil {
+ if d.pos >= total {
+ break // We're finished queuing chunks
+ }
+
+ // Queue the next range of bytes to read.
+ ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+ d.pos += d.cfg.PartSize
+ }
+
+ // Wait for completion
+ close(ch)
+ d.wg.Wait()
+ } else {
+ // Checking if we read anything new
+ for d.err == nil {
+ d.getChunk()
+ }
+
+ // We expect a 416 error letting us know we are done downloading the
+ // total bytes. Since we do not know the content's length, this will
+ // keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+ // 416 should occur.
+ e, ok := d.err.(awserr.RequestFailure)
+ if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
+ d.err = nil
+ }
+ }
+
+ // Return error
+ return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+ defer d.wg.Done()
+ for {
+ chunk, ok := <-ch
+ if !ok {
+ break
+ }
+ if d.getErr() != nil {
+ // Drain the channel if there is an error, to prevent deadlocking
+ // of download producer.
+ continue
+ }
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+ }
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+ d.pos += d.cfg.PartSize
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos}
+ // Ranges specified will short circuit the multipart download
+ chunk.withRange = rng
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+
+ // Update the position based on the amount of data received.
+ d.pos = d.written
+}
+
+// downloadChunk downloads the chunk from S3.
+func (d *downloader) downloadChunk(chunk dlchunk) error {
+ in := &s3.GetObjectInput{}
+ awsutil.Copy(in, d.in)
+
+ // Get the next byte range of data
+ in.Range = aws.String(chunk.ByteRange())
+
+ var n int64
+ var err error
+ for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+ n, err = d.tryDownloadChunk(in, &chunk)
+ if err == nil {
+ break
+ }
+ // Check if the returned error is an errReadingBody.
+ // If err is errReadingBody this indicates that an error
+ // occurred while copying the http response body.
+ // If this occurs we unwrap the err to set the underlying error
+ // and attempt any remaining retries.
+ if bodyErr, ok := err.(*errReadingBody); ok {
+ err = bodyErr.Unwrap()
+ } else {
+ return err
+ }
+
+ chunk.cur = 0
+ logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
+ fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
+ aws.StringValue(in.Key), err, retry))
+ }
+
+ d.incrWritten(n)
+
+ return err
+}
+
+func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
+ cleanup := func() {}
+ if d.cfg.BufferProvider != nil {
+ w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
+ }
+ defer cleanup()
+
+ resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
+ if err != nil {
+ return 0, err
+ }
+ d.setTotalBytes(resp) // Set total if not yet set.
+
+ n, err := io.Copy(w, resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return n, &errReadingBody{err: err}
+ }
+
+ return n, nil
+}
+
+func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
+ s, ok := svc.(*s3.S3)
+ if !ok {
+ return
+ }
+
+ if s.Config.Logger == nil {
+ return
+ }
+
+ if s.Config.LogLevel.Matches(level) {
+ s.Config.Logger.Log(msg)
+ }
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// Will extract the object's total bytes from the Content-Range if the file
+// will be chunked, or from the Content-Length otherwise. Content-Length is
+// used when the response does not include a Content-Range, meaning the object
+// was not chunked. This occurs when the full file fits within the PartSize
+// directive.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ if d.totalBytes >= 0 {
+ return
+ }
+
+		// ContentRange is nil when the full file contents are provided, and
+		// the response is not chunked. Use ContentLength instead.
+ // is not chunked. Use ContentLength instead.
+ if resp.ContentLength != nil {
+ d.totalBytes = *resp.ContentLength
+ return
+ }
+ } else {
+ parts := strings.Split(*resp.ContentRange, "/")
+
+ total := int64(-1)
+ var err error
+ // Checking for whether or not a numbered total exists
+ // If one does not exist, we will assume the total to be -1, undefined,
+ // and sequentially download each chunk until hitting a 416 error
+ totalStr := parts[len(parts)-1]
+ if totalStr != "*" {
+ total, err = strconv.ParseInt(totalStr, 10, 64)
+ if err != nil {
+ d.err = err
+ return
+ }
+ }
+
+ d.totalBytes = total
+ }
+}
+
+func (d *downloader) incrWritten(n int64) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+ w io.WriterAt
+ start int64
+ size int64
+ cur int64
+
+ // specifies the byte range the chunk should be downloaded with.
+ withRange string
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+//
+// If a range is specified on the dlchunk the size will be ignored when writing,
+// as the total size may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+ if c.cur >= c.size && len(c.withRange) == 0 {
+ return 0, io.EOF
+ }
+
+ n, err = c.w.WriteAt(p, c.start+c.cur)
+ c.cur += int64(n)
+
+ return
+}
+
+// ByteRange returns an HTTP Byte-Range header value that should be used by the
+// client to request the chunk's range.
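+//
+// For example, a chunk with start 0 and size 5242880 (the default part size)
+// yields "bytes=0-5242879".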
+func (c *dlchunk) ByteRange() string {
+ if len(c.withRange) != 0 {
+ return c.withRange
+ }
+
+ return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
+}
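
For reference, a minimal usage sketch of the Downloader built on this chunking; the bucket and key names are hypothetical, and aws.WriteAtBuffer is used because chunks may complete out of order:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	// Each worker requests a dlchunk whose ByteRange() yields headers
    	// such as "bytes=0-5242879" for the 5MB parts configured here.
    	d := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
    		d.PartSize = 5 * 1024 * 1024
    		d.Concurrency = 3
    	})

    	// aws.WriteAtBuffer satisfies io.WriterAt, so chunks can land out of order.
    	buf := aws.NewWriteAtBuffer([]byte{})
    	n, err := d.Download(buf, &s3.GetObjectInput{
    		Bucket: aws.String("example-bucket"), // hypothetical bucket
    		Key:    aws.String("example-key"),    // hypothetical key
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("downloaded %d bytes\n", n)
    }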
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
new file mode 100644
index 00000000..f62e1a45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
@@ -0,0 +1,65 @@
+package s3manager
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker
+type ReadSeekerWriteTo interface {
+ io.ReadSeeker
+ io.WriterTo
+}
+
+// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
+// implementation.
+type BufferedReadSeekerWriteTo struct {
+ *BufferedReadSeeker
+}
+
+// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
+// an error occurs. Returns the number of bytes written and any error encountered during the write.
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+ return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriterTo for an io.ReadSeeker
+type ReadSeekerWriteToProvider interface {
+ GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory
+type BufferedReadSeekerWriteToPool struct {
+ pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool
+// that will create a pool of reusable buffers. If size is less than 64 KiB, the
+// buffer will default to 64 KiB. Reason: io.Copy from writers or readers that
+// don't support io.WriteTo or io.ReadFrom respectively will default to copying
+// 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+ if size < 65536 {
+ size = 65536
+ }
+
+ return &BufferedReadSeekerWriteToPool{
+ pool: sync.Pool{New: func() interface{} {
+ return make([]byte, size)
+ }},
+ }
+}
+
+// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
+// The provided cleanup must be called after operations have been completed on the
+// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+ buffer := p.pool.Get().([]byte)
+
+ r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
+ cleanup = func() {
+ p.pool.Put(buffer)
+ }
+
+ return r, cleanup
+}
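
A short sketch of wiring this pool into the Uploader defined later in this change; the bucket and key names are hypothetical:

    package main

    import (
    	"strings"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	// Reuse 1 MiB copy buffers across parts; the constructor raises any
    	// size under 64 KiB up to 64 KiB.
    	pool := s3manager.NewBufferedReadSeekerWriteToPool(1024 * 1024)

    	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
    		u.BufferProvider = pool
    	})

    	if _, err := uploader.Upload(&s3manager.UploadInput{
    		Bucket: aws.String("example-bucket"), // hypothetical
    		Key:    aws.String("example-key"),    // hypothetical
    		Body:   strings.NewReader("hello"),
    	}); err != nil {
    		panic(err)
    	}
    }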
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
new file mode 100644
index 00000000..8debfcd0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -0,0 +1,774 @@
+package s3manager
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
+// on Amazon S3.
+const MaxUploadParts = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multipart upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+// u := s3manager.NewUploader(opts)
+// output, err := u.upload(input)
+// if err != nil {
+// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
+// // Process error and its associated uploadID
+// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
+// } else {
+// // Process error generically
+// fmt.Println("Error:", err.Error())
+// }
+// }
+//
+type MultiUploadFailure interface {
+ awserr.Error
+
+ // Returns the upload id for the S3 multipart upload that failed.
+ UploadID() string
+}
+
+// awsError aliases awserr.Error so that the interface type can be included as
+// an anonymous field in the multiUploadError struct without conflicting with
+// the error.Error() method.
+type awsError awserr.Error
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error.
+//
+// Should be used for an error that occurred failing a S3 multipart upload,
+// when an upload ID is available. If an upload ID is not available, a more
+// relevant error type should be used instead.
+type multiUploadError struct {
+ awsError
+
+ // ID for multipart upload which failed.
+ uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// See apierr.BaseError ErrorWithExtra for output format
+//
+// Satisfies the error interface.
+func (m multiUploadError) Error() string {
+ extra := fmt.Sprintf("upload id: %s", m.uploadID)
+ return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (m multiUploadError) String() string {
+ return m.Error()
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m multiUploadError) UploadID() string {
+ return m.uploadID
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+ // The URL where the object was uploaded to.
+ Location string
+
+ // The version of the object that was uploaded. Will only be populated if
+ // the S3 Bucket is versioned. If the bucket is not versioned this field
+ // will not be set.
+ VersionID *string
+
+ // The ID for a multipart upload to S3. In the case of an error the error
+ // can be cast to the MultiUploadFailure interface to extract the upload ID.
+ UploadID string
+}
+
+// WithUploaderRequestOptions appends to the Uploader's API request options.
+func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
+ return func(u *Uploader) {
+ u.RequestOptions = append(u.RequestOptions, opts...)
+ }
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Uploader's properties is not safe to do concurrently.
+type Uploader struct {
+ // The buffer size (in bytes) to use when buffering data into chunks and
+ // sending them as parts to S3. The minimum allowed part size is 5MB, and
+ // if this value is set to zero, the DefaultUploadPartSize value will be used.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel per call to Upload when
+ // sending parts. If this is set to zero, the DefaultUploadConcurrency value
+ // will be used.
+ //
+ // The concurrency pool is not shared between calls to Upload.
+ Concurrency int
+
+ // Setting this value to true will cause the SDK to avoid calling
+ // AbortMultipartUpload on a failure, leaving all successfully uploaded
+ // parts on S3 for manual recovery.
+ //
+ // Note that storing parts of an incomplete multipart upload counts towards
+ // space usage on S3 and will add additional costs if not cleaned up.
+ LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// Will be used to calculate the part size of the object to be uploaded.
+	// E.g: a 5GB file, with MaxUploadParts set to 100, will be uploaded as
+	// 100 parts of 50MB each, subject to the S3 limit of MaxUploadParts
+	// (10,000 parts).
+	//
+	// MaxUploadParts must not be used to limit the total number of bytes
+	// uploaded. Use a type like io.LimitReader
+	// (https://golang.org/pkg/io/#LimitedReader) instead. An io.LimitReader
+	// is helpful when uploading an unbounded reader to S3, and you know its
+	// maximum size. Otherwise the reader's io.EOF returned error must be used
+	// to signal end of stream.
+ //
+ // Defaults to package const's MaxUploadParts value.
+ MaxUploadParts int
+
+ // The client to use when uploading to S3.
+ S3 s3iface.S3API
+
+ // List of request options that will be passed down to individual API
+ // operation requests made by the uploader.
+ RequestOptions []request.Option
+
+ // Defines the buffer strategy used when uploading a part
+ BufferProvider ReadSeekerWriteToProvider
+
+	// partPool allows for the reuse of streaming payload part buffers between upload calls
+ partPool *partPool
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires a
+// client.ConfigProvider in order to create a S3 service client. The session.Session
+// satisfies the client.ConfigProvider interface.
+//
+// Example:
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// // Create an uploader with the session and custom options
+//	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+ return newUploader(s3.New(c), options...)
+}
+
+func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
+ u := &Uploader{
+ S3: client,
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ MaxUploadParts: MaxUploadParts,
+ BufferProvider: defaultUploadBufferProvider(),
+ }
+
+ for _, option := range options {
+ option(u)
+ }
+
+ u.partPool = newPartPool(u.PartSize)
+
+ return u
+}
+
+// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// a S3 service client to make S3 API calls.
+//
+// Example:
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // S3 service client the Upload manager will use.
+// s3Svc := s3.New(sess)
+//
+// // Create an uploader with S3 client and default options
+// uploader := s3manager.NewUploaderWithClient(s3Svc)
+//
+// // Create an uploader with S3 client and custom options
+// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
+ return newUploader(svc, options...)
+}
+
+// Upload uploads an object to S3, intelligently buffering large files into
+// smaller chunks and sending them in parallel across multiple goroutines. You
+// can configure the buffer size and concurrency through the Uploader's parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// Example:
+// // Upload input parameters
+// upParams := &s3manager.UploadInput{
+// Bucket: &bucketName,
+// Key: &keyName,
+// Body: file,
+// }
+//
+// // Perform an upload.
+// result, err := uploader.Upload(upParams)
+//
+// // Perform upload with options different than the those in the Uploader.
+// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
+// u.PartSize = 10 * 1024 * 1024 // 10MB part size
+// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
+// })
+func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
+ return u.UploadWithContext(aws.BackgroundContext(), input, options...)
+}
+
+// UploadWithContext uploads an object to S3, intelligently buffering large
+// files into smaller chunks and sending them in parallel across multiple
+// goroutines. You can configure the buffer size and concurrency through the
+// Uploader's parameters.
+//
+// UploadWithContext is the same as Upload with the additional support for
+// Context input parameters. The Context must not be nil. A nil Context will
+// cause a panic. Use the context to add deadlining, timeouts, etc. The
+// UploadWithContext may create sub-contexts for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
+ i := uploader{in: input, cfg: u, ctx: ctx}
+
+ for _, opt := range opts {
+ opt(&i.cfg)
+ }
+
+ i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+ return i.upload()
+}
+
+// UploadWithIterator will upload a batch of objects to S3. This operation uses
+// the iterator pattern to know which object to upload next. Since the iterator
+// is an interface, custom-defined functionality is possible.
+//
+// Example:
+//	svc := s3manager.NewUploader(sess)
+//
+// objects := []BatchUploadObject{
+// {
+// Object: &s3manager.UploadInput {
+// Key: aws.String("key"),
+// Bucket: aws.String("bucket"),
+// },
+// },
+// }
+//
+// iter := &s3manager.UploadObjectsIterator{Objects: objects}
+// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
+// return err
+// }
+func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
+ var errs []Error
+ for iter.Next() {
+ object := iter.UploadObject()
+ if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
+ s3Err := Error{
+ OrigErr: err,
+ Bucket: object.Object.Bucket,
+ Key: object.Object.Key,
+ }
+
+ errs = append(errs, s3Err)
+ }
+
+ if object.After == nil {
+ continue
+ }
+
+ if err := object.After(); err != nil {
+ s3Err := Error{
+ OrigErr: err,
+ Bucket: object.Object.Bucket,
+ Key: object.Object.Key,
+ }
+
+ errs = append(errs, s3Err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
+ }
+ return nil
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+ ctx aws.Context
+ cfg Uploader
+
+ in *UploadInput
+
+ readerPos int64 // current reader position
+ totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+ if err := u.init(); err != nil {
+ return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
+ }
+
+ if u.cfg.PartSize < MinUploadPartSize {
+ msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
+ return nil, awserr.New("ConfigError", msg, nil)
+ }
+
+ // Do one read to determine if we have more than one part
+ reader, _, cleanup, err := u.nextReader()
+ if err == io.EOF { // single part
+ return u.singlePart(reader, cleanup)
+ } else if err != nil {
+ cleanup()
+ return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
+ }
+
+ mu := multiuploader{uploader: u}
+ return mu.upload(reader, cleanup)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() error {
+ if u.cfg.Concurrency == 0 {
+ u.cfg.Concurrency = DefaultUploadConcurrency
+ }
+ if u.cfg.PartSize == 0 {
+ u.cfg.PartSize = DefaultUploadPartSize
+ }
+ if u.cfg.MaxUploadParts == 0 {
+ u.cfg.MaxUploadParts = MaxUploadParts
+ }
+
+	// If PartSize was changed or partPool was never set up, then we need to
+	// allocate a new pool so that we return []byte slices of the correct size.
+ if u.cfg.partPool == nil || u.cfg.partPool.partSize != u.cfg.PartSize {
+ u.cfg.partPool = newPartPool(u.cfg.PartSize)
+ }
+
+ // Try to get the total size for some optimizations
+ return u.initSize()
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() error {
+ u.totalSize = -1
+
+ switch r := u.in.Body.(type) {
+ case io.Seeker:
+ n, err := aws.SeekerLen(r)
+ if err != nil {
+ return err
+ }
+ u.totalSize = n
+
+ // Try to adjust partSize if it is too small and account for
+ // integer division truncation.
+ if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
+ // Add one to the part size to account for remainders
+ // during the size calculation. e.g odd number of bytes.
+ u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
+ }
+ }
+
+ return nil
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main goroutine.
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
+ type readerAtSeeker interface {
+ io.ReaderAt
+ io.ReadSeeker
+ }
+ switch r := u.in.Body.(type) {
+ case readerAtSeeker:
+ var err error
+
+ n := u.cfg.PartSize
+ if u.totalSize >= 0 {
+ bytesLeft := u.totalSize - u.readerPos
+
+ if bytesLeft <= u.cfg.PartSize {
+ err = io.EOF
+ n = bytesLeft
+ }
+ }
+
+ var (
+ reader io.ReadSeeker
+ cleanup func()
+ )
+
+ reader = io.NewSectionReader(r, u.readerPos, n)
+ if u.cfg.BufferProvider != nil {
+ reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
+ } else {
+ cleanup = func() {}
+ }
+
+ u.readerPos += n
+
+ return reader, int(n), cleanup, err
+
+ default:
+ part := u.cfg.partPool.Get().([]byte)
+ n, err := readFillBuf(r, part)
+ u.readerPos += int64(n)
+
+ cleanup := func() {
+ u.cfg.partPool.Put(part)
+ }
+
+ return bytes.NewReader(part[0:n]), n, cleanup, err
+ }
+}
+
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+ for offset < len(b) && err == nil {
+ var n int
+ n, err = r.Read(b[offset:])
+ offset += n
+ }
+
+ return offset, err
+}
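
The helper above behaves like io.ReadFull except that a short read surfaces the reader's own error (typically io.EOF) rather than io.ErrUnexpectedEOF, which is how upload() detects a single-part body. A standalone sketch of the same loop:

    package main

    import (
    	"fmt"
    	"io"
    	"strings"
    )

    // fillBuf mirrors the unexported readFillBuf above: it keeps reading until
    // the buffer is full or the reader errors, returning the bytes read along
    // with the error (io.EOF on a short read, unlike io.ReadFull's
    // io.ErrUnexpectedEOF).
    func fillBuf(r io.Reader, b []byte) (int, error) {
    	var offset int
    	var err error
    	for offset < len(b) && err == nil {
    		var n int
    		n, err = r.Read(b[offset:])
    		offset += n
    	}
    	return offset, err
    }

    func main() {
    	buf := make([]byte, 8)
    	n, err := fillBuf(strings.NewReader("hello"), buf)
    	fmt.Println(n, err) // 5 EOF: a short final read signals a single-part upload
    }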
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+ defer cleanup()
+
+ params := &s3.PutObjectInput{}
+ awsutil.Copy(params, u.in)
+ params.Body = r
+
+	// Need to use the request form because the URL generated in the request
+	// is used in the return value.
+ req, out := u.cfg.S3.PutObjectRequest(params)
+ req.SetContext(u.ctx)
+ req.ApplyOptions(u.cfg.RequestOptions...)
+ if err := req.Send(); err != nil {
+ return nil, err
+ }
+
+ url := req.HTTPRequest.URL.String()
+ return &UploadOutput{
+ Location: url,
+ VersionID: out.VersionId,
+ }, nil
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+ *uploader
+ wg sync.WaitGroup
+ m sync.Mutex
+ err error
+ uploadID string
+ parts completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+ buf io.ReadSeeker
+ num int64
+ cleanup func()
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+ params := &s3.CreateMultipartUploadInput{}
+ awsutil.Copy(params, u.in)
+
+	// Create the multipart upload
+ resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ if err != nil {
+ return nil, err
+ }
+ u.uploadID = *resp.UploadId
+
+ // Create the workers
+ ch := make(chan chunk, u.cfg.Concurrency)
+ for i := 0; i < u.cfg.Concurrency; i++ {
+ u.wg.Add(1)
+ go u.readChunk(ch)
+ }
+
+ // Send part 1 to the workers
+ var num int64 = 1
+ ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
+
+ // Read and queue the rest of the parts
+ for u.geterr() == nil && err == nil {
+ var (
+ reader io.ReadSeeker
+ nextChunkLen int
+ ok bool
+ )
+
+ reader, nextChunkLen, cleanup, err = u.nextReader()
+ ok, err = u.shouldContinue(num, nextChunkLen, err)
+ if !ok {
+ cleanup()
+ if err != nil {
+ u.seterr(err)
+ }
+ break
+ }
+
+ num++
+
+ ch <- chunk{buf: reader, num: num, cleanup: cleanup}
+ }
+
+ // Close the channel, wait for workers, and complete upload
+ close(ch)
+ u.wg.Wait()
+ complete := u.complete()
+
+ if err := u.geterr(); err != nil {
+ return nil, &multiUploadError{
+ awsError: awserr.New(
+ "MultipartUpload",
+ "upload multipart failed",
+ err),
+ uploadID: u.uploadID,
+ }
+ }
+
+ // Create a presigned URL of the S3 Get Object in order to have parity with
+ // single part upload.
+ getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ })
+ getReq.Config.Credentials = credentials.AnonymousCredentials
+ uploadLocation, _, _ := getReq.PresignRequest(1)
+
+ return &UploadOutput{
+ Location: uploadLocation,
+ VersionID: complete.VersionId,
+ UploadID: u.uploadID,
+ }, nil
+}
+
+func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
+ if err != nil && err != io.EOF {
+ return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
+ }
+
+ if nextChunkLen == 0 {
+		// No need to upload an empty part. If the file was empty to start
+		// with, an empty single part would have been created and the
+		// multipart upload would never have started.
+ return false, nil
+ }
+
+ part++
+	// The upload exceeded the maximum number of supported parts; error now.
+ if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
+ var msg string
+ if part > int64(u.cfg.MaxUploadParts) {
+ msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ u.cfg.MaxUploadParts)
+ } else {
+ msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ MaxUploadParts)
+ }
+ return false, awserr.New("TotalPartsExceeded", msg, nil)
+ }
+
+ return true, err
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+ defer u.wg.Done()
+ for {
+ data, ok := <-ch
+
+ if !ok {
+ break
+ }
+
+ if u.geterr() == nil {
+ if err := u.send(data); err != nil {
+ u.seterr(err)
+ }
+ }
+ }
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+ params := &s3.UploadPartInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ Body: c.buf,
+ UploadId: &u.uploadID,
+ SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
+ SSECustomerKey: u.in.SSECustomerKey,
+ PartNumber: &c.num,
+ }
+
+ resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ c.cleanup()
+ if err != nil {
+ return err
+ }
+
+ n := c.num
+ completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
+
+ u.m.Lock()
+ u.parts = append(u.parts, completed)
+ u.m.Unlock()
+
+ return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ u.err = e
+}
+
+// fail will abort the multipart upload unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+ if u.cfg.LeavePartsOnError {
+ return
+ }
+
+ params := &s3.AbortMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ }
+ _, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ if err != nil {
+ logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
+ }
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+ if u.geterr() != nil {
+ u.fail()
+ return nil
+ }
+
+ // Parts must be sorted in PartNumber order.
+ sort.Sort(u.parts)
+
+ params := &s3.CompleteMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
+ }
+ resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ if err != nil {
+ u.seterr(err)
+ u.fail()
+ }
+
+ return resp
+}
+
+type partPool struct {
+ partSize int64
+ sync.Pool
+}
+
+func newPartPool(partSize int64) *partPool {
+ p := &partPool{partSize: partSize}
+
+ p.New = func() interface{} {
+ return make([]byte, p.partSize)
+ }
+
+ return p
+}
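
A minimal standalone sketch of the pattern partPool uses, since the type itself is unexported: embedding sync.Pool with a recorded part size lets init() detect a PartSize change and allocate a fresh pool of correctly sized buffers.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // sizedPool mirrors the unexported partPool above.
    type sizedPool struct {
    	partSize int64
    	sync.Pool
    }

    func newSizedPool(partSize int64) *sizedPool {
    	p := &sizedPool{partSize: partSize}
    	p.New = func() interface{} {
    		return make([]byte, p.partSize)
    	}
    	return p
    }

    func main() {
    	pool := newSizedPool(5 * 1024 * 1024)
    	part := pool.Get().([]byte) // a recycled buffer, or a fresh 5MB slice
    	fmt.Println(len(part))
    	pool.Put(part) // return it so later uploads can reuse the allocation
    }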
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
new file mode 100644
index 00000000..459c7a31
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
@@ -0,0 +1,129 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3manager
+
+import (
+ "io"
+ "time"
+)
+
+// UploadInput provides the input parameters for uploading a stream or buffer
+// to an object in an Amazon S3 bucket. This type is similar to the s3
+// package's PutObjectInput with the exception that the Body member is an
+// io.Reader instead of an io.ReadSeeker.
+type UploadInput struct {
+ _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // The readable body payload to send to S3.
+ Body io.Reader
+
+ // Name of the bucket to which the PUT operation was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+	// auto-populated when using the command from the CLI. This parameter is
+	// required if object lock parameters are specified.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT operation was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // The Legal Hold status that you want to apply to the specified object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The object lock mode that you want to apply to this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want this object's object lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
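
A hedged sketch of filling UploadInput from a local file; the bucket, key, and file path are hypothetical:

    package main

    import (
    	"os"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	uploader := s3manager.NewUploader(sess)

    	f, err := os.Open("testdata/example.txt") // hypothetical local file
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	// Body only needs to be an io.Reader; files, pipes, and network
    	// streams all work, with seekable bodies enabling size-based tuning.
    	_, err = uploader.Upload(&s3manager.UploadInput{
    		Bucket:      aws.String("example-bucket"), // hypothetical
    		Key:         aws.String("docs/example.txt"),
    		Body:        f,
    		ContentType: aws.String("text/plain"),
    	})
    	if err != nil {
    		panic(err)
    	}
    }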
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go
new file mode 100644
index 00000000..765dc07c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go
@@ -0,0 +1,75 @@
+package s3manager
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
+type WriterReadFrom interface {
+ io.Writer
+ io.ReaderFrom
+}
+
+// WriterReadFromProvider provides an implementation of io.ReaderFrom for the given io.Writer
+type WriterReadFromProvider interface {
+ GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
+}
+
+type bufferedWriter interface {
+ WriterReadFrom
+ Flush() error
+ Reset(io.Writer)
+}
+
+type bufferedReadFrom struct {
+ bufferedWriter
+}
+
+func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
+ n, err := b.bufferedWriter.ReadFrom(r)
+ if flushErr := b.Flush(); flushErr != nil && err == nil {
+ err = flushErr
+ }
+ return n, err
+}
+
+// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
+// to manage allocation and reuse of *bufio.Writer structures.
+type PooledBufferedReadFromProvider struct {
+ pool sync.Pool
+}
+
+// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider.
+// Size is used to control the size of the underlying *bufio.Writer created for
+// calls to GetReadFrom.
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
+ if size < int(32*sdkio.KibiByte) {
+ size = int(64 * sdkio.KibiByte)
+ }
+
+ return &PooledBufferedReadFromProvider{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
+ },
+ },
+ }
+}
+
+// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the
+// WriterReadFrom interface. Additionally, a cleanup function is provided which
+// must be called after usage of the WriterReadFrom has been completed in order
+// to allow the reuse of the *bufio.Writer.
+func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
+ buffer := p.pool.Get().(*bufferedReadFrom)
+ buffer.Reset(writer)
+ r = buffer
+ cleanup = func() {
+ buffer.Reset(nil) // Reset to nil writer to release reference
+ p.pool.Put(buffer)
+ }
+ return r, cleanup
+}
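
A short sketch of handing this provider to the Downloader from this change, assuming the BufferProvider field added in download.go; the bucket and key names are hypothetical:

    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	// Buffer each chunk through pooled 1 MiB bufio.Writers before the
    	// WriteAt, trading memory for fewer small writes to the destination.
    	provider := s3manager.NewPooledBufferedWriterReadFromProvider(1024 * 1024)

    	d := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
    		d.BufferProvider = provider
    	})

    	buf := aws.NewWriteAtBuffer([]byte{})
    	_, _ = d.Download(buf, &s3.GetObjectInput{
    		Bucket: aws.String("example-bucket"), // hypothetical
    		Key:    aws.String("example-key"),    // hypothetical
    	})
    }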
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 00000000..d17dcc9d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,99 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "s3" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession)
+//
+// // Create a S3 client with additional configuration
+// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
+ svc := &S3{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2006-03-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
+ s.DisableURIPathEscaping = true
+ }))
+ svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+ svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 00000000..b71c835d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,84 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+ if r.HTTPRequest.URL.Scheme == "https" {
+ return
+ }
+
+ if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+ if len(iface.getSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+
+ if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+ if len(iface.getCopySourceSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+}
+
+const (
+ sseKeyHeader = "x-amz-server-side-encryption-customer-key"
+ sseKeyMD5Header = sseKeyHeader + "-md5"
+)
+
+func computeSSEKeyMD5(r *request.Request) {
+ var key string
+ if g, ok := r.Params.(sseCustomerKeyGetter); ok {
+ key = g.getSSECustomerKey()
+ }
+
+ computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
+}
+
+const (
+ copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key"
+ copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
+)
+
+func computeCopySourceSSEKeyMD5(r *request.Request) {
+ var key string
+ if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+ key = g.getCopySourceSSECustomerKey()
+ }
+
+ computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
+}
+
+func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
+ if len(key) == 0 {
+		// Backwards compatibility where the user set the header value
+		// directly instead of using the API parameter, or set the header
+		// value for an operation without the parameter modeled.
+ key = r.Header.Get(keyHeader)
+ if len(key) == 0 {
+ return
+ }
+
+		// In the backwards-compatible case, the header's value is not base64
+		// encoded, and needs to be encoded and updated by the SDK's customizations.
+ b64Key := base64.StdEncoding.EncodeToString([]byte(key))
+ r.Header.Set(keyHeader, b64Key)
+ }
+
+ // Only update Key's MD5 if not already set.
+ if len(r.Header.Get(keyMD5Header)) == 0 {
+ sum := md5.Sum([]byte(key))
+ keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+ r.Header.Set(keyMD5Header, keyMD5)
+ }
+}
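
For illustration, the two header values computeKeyMD5 derives can be reproduced directly; the key below is a made-up 32-byte example:

    package main

    import (
    	"crypto/md5"
    	"encoding/base64"
    	"fmt"
    )

    func main() {
    	// A 32-byte customer key, as required for AES256 SSE-C.
    	key := []byte("0123456789abcdef0123456789abcdef")

    	// The key header carries the base64-encoded key...
    	fmt.Println(base64.StdEncoding.EncodeToString(key))

    	// ...and the -md5 header carries the base64-encoded MD5 of the raw
    	// key, matching what computeKeyMD5 derives when the header is unset.
    	sum := md5.Sum(key)
    	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
    }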
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100644
index 00000000..f6a69aed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,40 @@
+package s3
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ body := bytes.NewReader(b)
+ r.HTTPResponse.Body = ioutil.NopCloser(body)
+ defer body.Seek(0, sdkio.SeekStart)
+
+ if body.Len() == 0 {
+		// If there is no body, don't attempt to parse it.
+ return
+ }
+
+ unmarshalError(r)
+ if err, ok := r.Error.(awserr.Error); ok && err != nil {
+ if err.Code() == request.ErrCodeSerialization {
+ r.Error = nil
+ return
+ }
+ r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 00000000..5b63fac7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,88 @@
+package s3
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+ // Bucket exists in a different region, and request needs
+ // to be made to the correct region.
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+ msg := fmt.Sprintf(
+ "incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+ aws.StringValue(r.Config.Region),
+ aws.StringValue(r.Config.Endpoint),
+ )
+ if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+ msg += fmt.Sprintf(", bucket is in '%s' region", v)
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("BucketRegionError", msg, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Attempt to parse error from body if it is known
+ var errResp xmlErrorResponse
+ err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+ if err == io.EOF {
+		// Only capture the error if an unmarshal error occurs that is not EOF,
+		// because S3 might send an error without an error message, which causes
+		// the XML unmarshal to fail with EOF.
+ err = nil
+ }
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Fallback to status code converted to message if still no error code
+ if len(errResp.Code) == 0 {
+ statusText := http.StatusText(r.HTTPResponse.StatusCode)
+ errResp.Code = strings.Replace(statusText, " ", "", -1)
+ errResp.Message = statusText
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(errResp.Code, errResp.Message, err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+ awserr.RequestFailure
+
+ // Host ID is the S3 Host ID needed for debug, and contacting support
+ HostID() string
+}
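
A standalone sketch of the XML shape unmarshalError parses, using a local mirror of the unexported xmlErrorResponse type:

    package main

    import (
    	"encoding/xml"
    	"fmt"
    	"strings"
    )

    // errorResponse mirrors the unexported xmlErrorResponse above.
    type errorResponse struct {
    	XMLName xml.Name `xml:"Error"`
    	Code    string   `xml:"Code"`
    	Message string   `xml:"Message"`
    }

    func main() {
    	// A representative S3 error body, as returned with a non-2xx status
    	// (or, for some copy/complete operations, inside a 200 OK response).
    	body := `<Error><Code>NoSuchKey</Code><Message>The specified key does not exist.</Message></Error>`

    	var e errorResponse
    	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&e); err != nil {
    		panic(err)
    	}
    	fmt.Println(e.Code, "-", e.Message) // NoSuchKey - The specified key does not exist.
    }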
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 00000000..2596c694
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 301,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 403,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
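
A typical use of these waiters, polling until an object appears; the bucket and key names are hypothetical, and the attempt override uses the request package's waiter options:

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := s3.New(sess)

    	// Poll HeadObject until the key exists, overriding the default
    	// 20 attempts at 5-second intervals via waiter options.
    	err := svc.WaitUntilObjectExistsWithContext(aws.BackgroundContext(),
    		&s3.HeadObjectInput{
    			Bucket: aws.String("example-bucket"), // hypothetical
    			Key:    aws.String("example-key"),    // hypothetical
    		},
    		request.WithWaiterMaxAttempts(60),
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    }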
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000..0f646931
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000..3cd3249f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
+ if in.IsNil() {
+ return src
+ }
+ out := reflect.New(in.Type().Elem())
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method would conflict with
+// many existing protos that already have a Merge data field defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+ }
+ if in.IsNil() {
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
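
A minimal sketch of the Clone/Merge semantics defined in clone.go above, using the generated well-known Duration type from the same vendored module (the import path is an assumption):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durpb.Duration{Seconds: 3, Nanos: 500}

	dst := proto.Clone(src).(*durpb.Duration) // deep copy with the same dynamic type
	dst.Seconds = 7                           // mutating the copy leaves src intact
	fmt.Println(src.Seconds, dst.Seconds)     // 3 7

	proto.Merge(dst, src)    // scalar fields set in src overwrite those in dst
	fmt.Println(dst.Seconds) // 3
}
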
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000..63b0f08b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether we can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
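
A small sketch contrasting the reset semantics of Unmarshal with UnmarshalMerge above, again using the well-known Duration type as a stand-in message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	b, _ := proto.Marshal(&durpb.Duration{Seconds: 5})

	dst := &durpb.Duration{Nanos: 999}
	_ = proto.Unmarshal(b, dst)         // Reset runs first: Nanos -> 0, Seconds -> 5
	fmt.Println(dst.Seconds, dst.Nanos) // 5 0

	dst2 := &durpb.Duration{Nanos: 999}
	_ = proto.UnmarshalMerge(b, dst2)     // merges into existing data: Nanos is kept
	fmt.Println(dst2.Seconds, dst2.Nanos) // 5 999
}
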
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
+ }
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+}
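
A tiny worked example of the varint wire format that decode.go handles; the values can be verified by hand:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b10_0101100: 0xAC carries the low 7 bits with the continuation
	// bit set, 0x02 contributes 2<<7 = 256, and 44+256 = 300.
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n) // 300 2
}
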
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 00000000..35b882c0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000..dea2617c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
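
A small sketch of DiscardUnknown in action; the byte slice is hand-built so that field 15, which the Duration schema does not declare, lands in XXX_unrecognized:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Field 1 (seconds) = 3, plus unknown field 15 (varint wire type) = 1.
	data := []byte{0x08, 0x03, 0x78, 0x01}

	var msg durpb.Duration
	_ = proto.Unmarshal(data, &msg)

	b1, _ := proto.Marshal(&msg) // the unknown field round-trips: 4 bytes
	proto.DiscardUnknown(&msg)
	b2, _ := proto.Marshal(&msg)  // the unknown field is dropped: 2 bytes
	fmt.Println(len(b1), len(b2)) // 4 2
}
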
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 00000000..3abfed2c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "reflect"
+)
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
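
A short sketch of the encoders above; the expected bytes follow directly from the varint and zigzag definitions:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
	fmt.Println(proto.SizeVarint(300))           // 2

	p := proto.NewBuffer(nil)
	v := int64(-2)
	_ = p.EncodeZigzag64(uint64(v)) // zigzag(-2) == 3: a single varint byte
	fmt.Printf("% x\n", p.Bytes())  // 03
}
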
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 00000000..f9b6e41b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,301 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1 := extensionAsLegacyType(e1.value)
+ m2 := extensionAsLegacyType(e2.value)
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ return false
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
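
A minimal sketch of Equal versus plain pointer identity, using the well-known Duration type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a := &durpb.Duration{Seconds: 1}
	b := &durpb.Duration{Seconds: 1}
	fmt.Println(proto.Equal(a, b)) // true: same type, fields pairwise equal
	fmt.Println(a == b)            // false: distinct pointers
}
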
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..fa88add3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,607 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
+ }
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+
+ // value is a concrete value for the extension field. Let the type of
+ // desc.ExtensionType be the "API type" and the type of Extension.value
+ // be the "storage type". The API type and storage type are the same except:
+ // * For scalars (except []byte), the API type uses *T,
+ // while the storage type uses T.
+ // * For repeated fields, the API type uses []T, while the storage type
+ // uses *[]T.
+ //
+ // The reason for the divergence is so that the storage type more naturally
+ // matches what is expected when retrieving the values through the
+ // protobuf reflection APIs.
+ //
+ // The value may only be populated if desc is also populated.
+ value interface{}
+
+ // enc is the raw bytes for the extension field.
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, err := extendable(base)
+ if err != nil {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, err := extendable(pb)
+ if err != nil {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok := extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the extension field.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return extensionAsLegacyType(e.value), nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = extensionAsStorageType(v)
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return extensionAsLegacyType(e.value), nil
+}
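+
+// Illustrative sketch (not part of the upstream API surface): reading an
+// extension through a generated descriptor. pb.E_Ext and msg are hypothetical
+// names, following the E_ naming convention described in lib.go; the
+// descriptor's ExtensionType is assumed to be *string.
+//
+//	v, err := proto.GetExtension(msg, pb.E_Ext)
+//	switch err {
+//	case nil:
+//		s := *(v.(*string)) // assert to the extension's API type
+//		_ = s
+//	case proto.ErrMissingExtension:
+//		// extension not set, and no default declared
+//	default:
+//		// bad descriptor or undecodable payload
+//	}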
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr; we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate space to store the pointer/slice.
+ value := reflect.New(t).Elem()
+
+ var err error
+ for {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+ return nil
+}
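+
+// Illustrative sketch, mirroring the read path above; pb.E_Ext is the same
+// hypothetical descriptor with ExtensionType *string:
+//
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("x")); err != nil {
+//		// wrong value type, nil value, or msg does not declare the range
+//	}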
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, err := extendable(pb)
+ if err != nil {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
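+
+// Illustrative sketch: generated .pb.go files register their descriptors at
+// init time, e.g. (E_Ext is a hypothetical generated descriptor):
+//
+//	func init() {
+//		proto.RegisterExtension(E_Ext)
+//	}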
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
+
+// extensionAsLegacyType converts a value from the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ // Represent primitive types as a pointer to the value.
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ case reflect.Ptr:
+ // Represent slice types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Slice:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ }
+ return v
+}
+
+// extensionAsStorageType converts a value from the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Ptr:
+ // Represent pointer-to-scalar types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ case reflect.Slice:
+ // Represent slice types as a pointer to the value.
+ if rv.Type().Elem().Kind() != reflect.Uint8 {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ }
+ return v
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000..fdd328bb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from snake_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+  method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return "proto: required field not set"
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or a InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the error can be ignored as
+// non-fatal. It returns true for nil and non-fatal errors, and false for
+// fatal errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
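+
+// Illustrative sketch of the intended call pattern: internal loops thread a
+// nonFatal through per-field work so that one RequiredNotSet or InvalidUTF8
+// error does not abort the rest (marshalField is a hypothetical operation):
+//
+//	var nf nonFatal
+//	for _, f := range fields {
+//		err := marshalField(f)
+//		if !nf.Merge(err) {
+//			return err // fatal: stop immediately
+//		}
+//	}
+//	return nf.E // nil, or the first non-fatal error encountered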
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
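+
+// Illustrative sketch: deterministic output must be requested through a
+// Buffer, as the package-level Marshal exposes no such option:
+//
+//	var buf proto.Buffer
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(msg); err != nil {
+//		// handle error
+//	}
+//	data := buf.Bytes()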
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
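+
+// Illustrative use, with the Test message from the package documentation:
+// proto2 optional scalars are pointers, so literal values need these wrappers.
+//
+//	t := &pb.Test{
+//		Label: proto.String("hello"),
+//		Type:  proto.Int32(17),
+//	}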
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns the mapped name for the
+// value, or its decimal representation if the value has no name.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
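+
+// Illustrative sketch, with the FOO enum from the package documentation;
+// both the symbolic and the numeric encodings yield 17:
+//
+//	v1, _ := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO")
+//	v2, _ := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`17`), "FOO")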
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
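+
+// Illustrative sketch: DebugPrint writes a field-by-field dump of raw wire
+// data to stdout without disturbing the Buffer's own contents:
+//
+//	var buf proto.Buffer
+//	buf.DebugPrint("incoming message", data)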
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
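+
+// Illustrative sketch, with the Test message from the package documentation,
+// whose type field declares [default=77]:
+//
+//	t := &pb.Test{}
+//	proto.SetDefaults(t)
+//	// *t.Type == 77; t.Label remains nil, since it declares no default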
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to its default information:
+ // which scalar fields carry proto-declared non-zero default values, and
+ // which fields hold nested messages.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{vs: vs}
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+ }
+
+ return s
+}
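+
+// Illustrative sketch of how an encoder can use this to emit map entries in
+// a stable order (v is assumed to be a reflect.Value of map kind):
+//
+//	keys := v.MapKeys()
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		// encode the entry for v.MapIndex(k)
+//	}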
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion3 = true
+
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion2 = true
+
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000..f48a7567
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000..94fa9194
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "reflect"
+ "sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ if deref {
+ u = u.Elem()
+ }
+ return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
+}
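+
+// Illustrative sketch: for a concrete slice s of type []T, grow is roughly
+//
+//	s = append(s, zeroT) // may reallocate
+//	return &s[len(s)-1]  // address of the fresh element
+//
+// expressed through reflect so the caller's slice header is updated in place.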
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
+}
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
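+
+// Illustrative sketch of the conversion rule relied on above, for a
+// hypothetical enum type E with underlying type int32:
+//
+//	type E int32
+//	v := int32(1)
+//	p := (*E)(&v) // *int32 converts to *E: identical underlying types
+//	_ = p
+//	// but **int32 does not convert to **E, hence the dance through Set.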
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+ return
+ }
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
+ }
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000..dbfffe07
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+ }
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
+ }
+ return p
+}
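+
+// Illustrative sketch (not upstream code): toPointer and toAddrPointer
+// rely on the runtime representing an interface value as two machine
+// words, {type, data}. Reinterpreting the interface as
+// [2]unsafe.Pointer and reading word 1 therefore yields the data word:
+//
+//	var m Message = (*SomeMessage)(nil) // hypothetical message type
+//	words := (*[2]unsafe.Pointer)(unsafe.Pointer(&m))
+//	_ = words[0] // type word (itab)
+//	_ = words[1] // data word: the *SomeMessage pointer itself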
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid; however, calling panic
+ // causes this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+ return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..79668ff5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,545 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
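+
+// Usage sketch (illustrative, not part of the upstream source): tags
+// below tagMapFastLimit are indexed directly in the fastTags slice;
+// larger tags fall back to the slowTags map, so lookups for typical
+// small tag numbers avoid map overhead entirely:
+//
+//	var m tagMap
+//	m.put(3, 0)    // stored in fastTags[3]
+//	m.put(5000, 1) // 5000 >= tagMapFastLimit, stored in slowTags
+//	fi, ok := m.get(3) // fi == 0, ok == true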
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ case "zigzag64":
+ p.WireType = WireVarint
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+outer:
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break outer
+ }
+ }
+ }
+}
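+
+// Worked example (illustrative, not upstream code): parsing a tag like
+// the one in the comment above fills in the corresponding fields, and
+// an unescaped comma inside the default value is rejoined because def=
+// is always last:
+//
+//	var p Properties
+//	p.Parse("bytes,49,opt,name=foo,def=hello,world")
+//	// p.Wire == "bytes", p.Tag == 49, p.Optional == true,
+//	// p.OrigName == "foo", p.Default == "hello,world"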
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ switch t1 := typ; t1.Kind() {
+ case reflect.Ptr:
+ if t1.Elem().Kind() == reflect.Struct {
+ p.stype = t1.Elem()
+ }
+
+ case reflect.Slice:
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
+ }
+
+ case reflect.Map:
+ p.mtype = t1
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
+
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ return prop
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
+ }
+ if len(oots) > 0 {
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
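+
+// Usage sketch (illustrative; the names are hypothetical, but the call
+// shape matches what generated code emits from init functions):
+//
+//	func init() {
+//		RegisterEnum("example.Color",
+//			map[int32]string{0: "RED", 1: "BLUE"},
+//			map[string]int32{"RED": 0, "BLUE": 1})
+//	}
+//	// EnumValueMap("example.Color")["BLUE"] == 1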
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypedNils[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 00000000..5cb11fa9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the
+// size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
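+
+// Note with sketch (illustrative, not upstream text): the info returned
+// here may have only typ set; size and marshal below complete it lazily
+// with the usual double-checked pattern, so the reflection work runs at
+// most once per type:
+//
+//	if atomic.LoadInt32(&u.initialized) == 0 {
+//		u.computeMarshalInfo() // takes u.Lock and re-checks initialized
+//	}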
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+ // The old marshaler encodes extensions at beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // parse protobuf tag of the field.
+ // the tag has the format "bytes,49,opt,name=foo,def=hello!"
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
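+
+// Worked example (illustrative, not upstream code): for a field with
+// tag 49 and wire type "bytes" (WireBytes == 2), the key written to
+// the wire is tag<<3 | wiretype:
+//
+//	wiretag := uint64(49)<<3 | WireBytes   // 392 | 2 == 394
+//	tagsize := SizeVarint(uint64(49) << 3) // 2, since 392 needs two varint bytes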
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // A oneof bytes field may also carry the "proto3" tag.
+ // We want to marshal it as a oneof field, so do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
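+
+// A worked example of the packed-bool sizing above: three elements with a
+// one-byte tag cost 3 (payload) + 1 (varint length for 3) + 1 (tag) = 5
+// bytes, since every bool encodes as exactly one byte.
+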
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
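+
+// All of the length-delimited sizers above share one shape: payload length,
+// plus the varint encoding of that length, plus the tag. For instance the
+// string "abc" with a one-byte tag costs 3 + 1 + 1 = 5 bytes.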
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
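+
+// Fixed-width values are laid out little-endian, least significant byte
+// first. A quick sketch:
+//
+//	b := appendFixed64(nil, 0x0102030405060708)
+//	// b == []byte{0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01}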
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
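+
+// To make the varint layout concrete: 300 (0b1_0010_1100) encodes as two
+// bytes, the low 7 bits with the continuation bit set followed by the rest.
+//
+//	b := appendVarint(nil, 300) // b == []byte{0xac, 0x02}
+//	n := SizeVarint(300)        // n == 2 == len(b)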
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
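+
+// The zigzag transform used above interleaves signed values onto small
+// unsigned ones (0->0, -1->1, 1->2, -2->3, ...), so small negative sint32
+// values cost one varint byte instead of ten. A quick sanity check:
+//
+//	zz := func(v int32) uint64 { return uint64((uint32(v) << 1) ^ uint32(v>>31)) }
+//	_ = zz(-1) // == 1
+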
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
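+
+// Groups are delimited by matching start/end tags rather than a length
+// prefix, and for a given field number the two wiretags differ by exactly
+// WireEndGroup-WireStartGroup, which is why the end tag is derived
+// arithmetically above. For field 1 that is 1<<3|WireStartGroup (0x0b)
+// followed by 1<<3|WireEndGroup (0x0c).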
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
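+
+// Embedded messages are framed as tag, varint byte length, then body, which
+// is why the sizer above adds SizeVarint(siz) and the marshaler writes the
+// cached size before recursing. A minimal sketch for field 1 with an empty
+// inner message:
+//
+//	b := appendVarint(nil, 1<<3|WireBytes) // tag: field 1, wire type 2
+//	b = appendVarint(b, 0)                 // zero-length body
+//	// b == []byte{0x0a, 0x00}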
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is the pointer to
+	// the value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic. We should use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, we don't have a size cache,
+	// but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
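+
+// Each map entry is marshaled as if it were a standalone message of the form
+//
+//	message Entry {
+//		KeyType   key   = 1;
+//		ValueType value = 2;
+//	}
+//
+// which is why the key and value wiretags above are hard-coded to field
+// numbers 1 and 2.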
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// A oneof field is an interface, so we need to determine the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+	// It is unclear whether sorting is strictly required here, but the old code did it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
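+
+// A minimal usage sketch, assuming a generated message type pb.T (a
+// hypothetical name) with the XXX_Size/XXX_Marshal wrappers:
+//
+//	msg := &pb.T{}
+//	b, err := Marshal(msg)
+//	// on success, len(b) == Size(msg) and b holds the wire-format bytes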
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
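+
+// grow doubles the capacity when it must reallocate, so repeated appends
+// through a Buffer amortize to O(1) copies per byte: for example, with
+// len(p.buf) == 8 and cap(p.buf) == 8, grow(1) allocates capacity 16, not 9.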
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 00000000..5525def6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
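+
+// The per-field merge functions installed by computeMergeInfo below follow
+// the usual proto semantics: repeated fields and byte slices append, maps
+// merge entry by entry, and scalars overwrite the destination only when the
+// source value is set (a non-zero value in proto3, a non-nil pointer in
+// proto2).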
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
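
An aside on the merge semantics implemented above: proto3 scalar fields carry no presence bit, so the mergers treat the zero value as "unset" and leave the destination untouched. A minimal, self-contained sketch of that rule (hypothetical helper, not part of this package):

```go
package main

import "fmt"

// mergeString mirrors the scalar merge rule above: a zero source
// value is treated as unset and does not overwrite dst.
func mergeString(dst *string, src string) {
	if src != "" {
		*dst = src
	}
}

func main() {
	d := "keep"
	mergeString(&d, "") // zero value: no-op
	fmt.Println(d)      // keep
	mergeString(&d, "replace")
	fmt.Println(d) // replace
}
```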
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 00000000..acee2fc5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
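
The method above is a double-checked lazy cache: one atomic load on the hot path, with the lock-protected computation deferred until first use. A rough, self-contained sketch of the same pattern using the modern sync/atomic API (the vendored code predates atomic.Pointer and uses unsafe-pointer shims instead; all names here are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type info struct{ fields []string } // stand-in for per-type metadata

var (
	cached atomic.Pointer[info] // hot path: a single atomic load
	mu     sync.Mutex           // cold path: guards the first build
)

func getInfo() *info {
	if p := cached.Load(); p != nil {
		return p
	}
	mu.Lock()
	defer mu.Unlock()
	if p := cached.Load(); p != nil {
		return p // another goroutine built it while we waited
	}
	p := &info{fields: []string{"computed once"}}
	cached.Store(p)
	return p
}

func main() { fmt.Println(getInfo().fields[0]) }
```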
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by low tag #s
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by high tag #s
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
+ extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
+ isMessageSet bool // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+		// Keep unrecognized data around,
+		// either in the extensions map or in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
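
For orientation, each key read by the loop above packs the field number and wire type into one varint: tag = key >> 3, wire = key & 7. The reqMask bookkeeping works the same way in miniature, with one bit per required field that must all be set by the end of a full parse. A tiny worked example of the key unpacking (plain Go, independent of this package):

```go
package main

import "fmt"

// 0x08 -> field 1, wire 0 (varint); 0x12 -> field 2, wire 2 (bytes);
// 0x1a -> field 3, wire 2 (bytes).
func main() {
	for _, key := range []uint64{0x08, 0x12, 0x1a} {
		fmt.Printf("key 0x%02x -> tag %d, wire %d\n", key, key>>3, key&7)
	}
}
```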
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+			panic("protobuf tag has too few fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
+ }
+
+ // Get extension ranges, if any.
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
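
The dense/sparse split above trades a little memory for O(1) dispatch on common, low-numbered tags, with rare high tags falling back to a map. A stripped-down sketch of the same structure (hypothetical, string payloads instead of field info):

```go
package main

import "fmt"

type table struct {
	dense  []string          // direct index for small tags
	sparse map[uint64]string // fallback for large tags
}

func (t *table) set(tag int, v string) {
	if tag < 16 { // cheap heuristic, in the spirit of setTag above
		for len(t.dense) <= tag {
			t.dense = append(t.dense, "")
		}
		t.dense[tag] = v
		return
	}
	if t.sparse == nil {
		t.sparse = map[uint64]string{}
	}
	t.sparse[uint64(tag)] = v
}

func (t *table) get(tag uint64) string {
	if tag < uint64(len(t.dense)) {
		return t.dense[tag]
	}
	return t.sparse[tag]
}

func main() {
	var t table
	t.set(1, "name")
	t.set(1000, "rare")
	fmt.Println(t.get(1), t.get(1000)) // name rare
}
```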
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
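
The expression int64(x>>1) ^ int64(x)<<63>>63 undoes zigzag encoding: the low bit of the wire value selects the sign, the remaining bits the magnitude, so small negative numbers stay small on the wire. A quick standalone check:

```go
package main

import "fmt"

func zigzag64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63 // same expression as above
}

func main() {
	// wire values 0,1,2,3,4 decode to 0,-1,1,-2,2
	for x := uint64(0); x < 5; x++ {
		fmt.Printf("%d -> %d\n", x, zigzag64(x))
	}
}
```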
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
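
The shift-and-or chain above is an inlined little-endian load, equivalent to encoding/binary's helper but written out by hand to keep the hot path free of extra calls. A quick equivalence check:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0xef, 0xcd, 0xab, 0x89, 0x67, 0x45, 0x23, 0x01}
	manual := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
	fmt.Println(manual == binary.LittleEndian.Uint64(b)) // true
	fmt.Printf("%#x\n", manual)                          // 0x123456789abcdef
}
```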
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
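
The emptyBuf trick is worth spelling out: appending to a zero-length, zero-capacity array slice copies non-empty data into fresh storage (so the result never aliases the input buffer b) and, unlike appending to a nil slice, yields a non-nil result even for a length-0 field:

```go
package main

import "fmt"

var emptyBuf [0]byte

func main() {
	src := []byte{} // a present-but-empty bytes field
	v := append(emptyBuf[:], src...)
	w := append([]byte(nil), src...)
	fmt.Println(v != nil, w != nil) // true false
}
```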
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
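
Map fields have no dedicated wire format: each entry is an ordinary nested message whose field 1 is the key and field 2 is the value, which is exactly what the inner loop above parses. A hand-decoded example for a single map<string,int32> entry {"a": 1}, with single-byte varints assumed for brevity:

```go
package main

import "fmt"

func main() {
	entry := []byte{0x0a, 0x01, 'a', 0x10, 0x01} // field1:"a", field2:1
	var key string
	var val uint64
	for i := 0; i < len(entry); {
		tag, wire := entry[i]>>3, entry[i]&7
		i++
		switch {
		case tag == 1 && wire == 2: // length-delimited key
			n := int(entry[i])
			i++
			key = string(entry[i : i+n])
			i += n
		case tag == 2 && wire == 0: // varint value
			val = uint64(entry[i])
			i++
		}
	}
	fmt.Printf("%q -> %d\n", key, val) // "a" -> 1
}
```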
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+//     double Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
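
For context, generated oneof code wraps each case in a single-field struct implementing a private interface; the unmarshaler above fills that field and then stores the wrapper into the message. A hypothetical mirror of the generated shape:

```go
package main

import "fmt"

type isMsg_F interface{ isMsg_F() }

type Msg_X struct{ X int64 }
type Msg_Y struct{ Y float64 }

func (*Msg_X) isMsg_F() {}
func (*Msg_Y) isMsg_F() {}

type Msg struct{ F isMsg_F }

func main() {
	m := Msg{F: &Msg_X{X: 7}}
	switch v := m.F.(type) {
	case *Msg_X:
		fmt.Println("X =", v.X)
	case *Msg_Y:
		fmt.Println("Y =", v.Y)
	}
}
```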
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
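
A worked example of the 7-bits-per-byte scheme above: 300 is 0b1_0010_1100, so the low seven bits (0101100) go out first with the continuation bit set, followed by the remaining 0b10:

```go
package main

import "fmt"

func encodeVarint(b []byte, x uint64) []byte { // copy of the function above
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

func main() {
	fmt.Printf("% x\n", encodeVarint(nil, 300)) // ac 02
}
```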
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) == 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000..1aaee725
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+func requiresQuotes(u string) bool {
+// When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message.
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, err := extendable(pv.Interface()); err == nil {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
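[Editor's note: a minimal usage sketch of the package-level text-format
helpers defined above; it is not part of the vendored file. The only message
type it assumes is the Any message vendored later in this change, and the
TypeUrl/Value contents are illustrative.]

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/ptypes/any"
    )

    func main() {
        // Non-printable bytes in Value are rendered as octal escapes,
        // per writeString above.
        msg := &any.Any{
            TypeUrl: "type.googleapis.com/google.protobuf.Duration",
            Value:   []byte{0x08, 0x01},
        }

        // Multi-line text form: one "name: value" pair per line.
        fmt.Print(proto.MarshalTextString(msg))

        // Compact, single-line form of the same message.
        fmt.Println(proto.CompactTextString(msg))
    }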
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..bb55a3af
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.MapValProp); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // true/1/t/True or false/0/f/False.
+ switch tok.value {
+ case "true", "1", "t", "True":
+ fv.SetBool(true)
+ return nil
+ case "false", "0", "f", "False":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ return um.UnmarshalText([]byte(s))
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ return newTextParser(s).readStruct(v.Elem(), "")
+}
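[Editor's note: a small round-trip sketch for the parser above, not part of
the vendored file; it assumes only the Any type vendored in this change.]

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/ptypes/any"
    )

    func main() {
        in := `type_url: "type.googleapis.com/google.protobuf.Empty"`

        // UnmarshalText resets msg and parses the text form into it.
        var msg any.Any
        if err := proto.UnmarshalText(in, &msg); err != nil {
            fmt.Println("parse error:", err)
            return
        }
        fmt.Println(msg.GetTypeUrl())
    }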
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 00000000..70276e8f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// the google.protobuf.Any message.
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+ if any == nil {
+ return "", fmt.Errorf("message is nil")
+ }
+ slash := strings.LastIndex(any.TypeUrl, "/")
+ if slash < 0 {
+ return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+ }
+ return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+ value, err := proto.Marshal(pb)
+ if err != nil {
+ return nil, err
+ }
+ return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+ proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding
+// message type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return nil, err
+ }
+
+ t := proto.MessageType(aname)
+ if t == nil {
+ return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+ }
+ return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the
+// type of the Any message's contents does not match the type of pb.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+ if d, ok := pb.(*DynamicAny); ok {
+ if d.Message == nil {
+ var err error
+ d.Message, err = Empty(any)
+ if err != nil {
+ return err
+ }
+ }
+ return UnmarshalAny(any, d.Message)
+ }
+
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return err
+ }
+
+ mname := proto.MessageName(pb)
+ if aname != mname {
+ return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+ }
+ return proto.Unmarshal(any.Value, pb)
+}
+
+// Is reports whether the Any value contains the given message type.
+func Is(any *any.Any, pb proto.Message) bool {
+ // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+ // but it avoids scanning TypeUrl for the slash.
+ if any == nil {
+ return false
+ }
+ name := proto.MessageName(pb)
+ prefix := len(any.TypeUrl) - len(name)
+ return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
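[Editor's note: a sketch of the pack/unpack helpers above, not part of the
vendored file. An Any message is used as its own payload purely so the
example stays within the types vendored in this change; in real code the
payload would be any generated message type.]

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/ptypes"
        "github.com/golang/protobuf/ptypes/any"
    )

    func main() {
        inner := &any.Any{TypeUrl: "example.com/x", Value: []byte("payload")}

        // Pack: the resulting TypeUrl is
        // "type.googleapis.com/google.protobuf.Any".
        a, err := ptypes.MarshalAny(inner)
        if err != nil {
            fmt.Println(err)
            return
        }

        // Unpack without naming the concrete type up front; DynamicAny
        // allocates the right message type from the type URL.
        var dyn ptypes.DynamicAny
        if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(ptypes.Is(a, dyn.Message)) // true
    }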
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 00000000..78ee5233
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package any
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+type Any struct {
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
+ //
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+ // used with implementation specific semantics.
+ //
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ // Must be a valid serialized protocol buffer of the above specified type.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Any) Reset() { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage() {}
+func (*Any) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b53526c13ae22eb4, []int{0}
+}
+
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (m *Any) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Any.Merge(m, src)
+}
+func (m *Any) XXX_Size() int {
+ return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+ xxx_messageInfo_Any.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Any proto.InternalMessageInfo
+
+func (m *Any) GetTypeUrl() string {
+ if m != nil {
+ return m.TypeUrl
+ }
+ return ""
+}
+
+func (m *Any) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+
+var fileDescriptor_b53526c13ae22eb4 = []byte{
+ // 185 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+ 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+ 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+ 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+ 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+ 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+ 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+ 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+ 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+ 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+ 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+}
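[Editor's note: the generated getters above are nil-receiver safe, so a
possibly-nil *Any can be read without a nil check; a quick sketch, not part
of the generated file.]

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/ptypes/any"
    )

    func main() {
        var a *any.Any // deliberately nil
        fmt.Printf("%q %v\n", a.GetTypeUrl(), a.GetValue()) // "" []
    }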
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 00000000..49329425
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,154 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
+//
+message Any {
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
+ //
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
+ //
+ // * If no scheme is provided, `https` is assumed.
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
+ //
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
+ // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation-specific semantics.
+ //
+ string type_url = 1;
+
+ // Must be a valid serialized protocol buffer of the above specified type.
+ bytes value = 2;
+}
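
A minimal Go sketch of the pack/unpack flow documented above, using the
ptypes helpers vendored in this change (the Duration payload is only an
illustrative choice):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/ptypes"
        durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
        // Pack: MarshalAny serializes the message and records its type URL.
        d := &durpb.Duration{Seconds: 1, Nanos: 212000000}
        any, err := ptypes.MarshalAny(d)
        if err != nil {
            panic(err)
        }
        fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.Duration

        // Unpack: UnmarshalAny checks the type URL before deserializing.
        out := &durpb.Duration{}
        if err := ptypes.UnmarshalAny(any, out); err != nil {
            panic(err)
        }
        fmt.Println(out.Seconds, out.Nanos) // 1 212000000
    }
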
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 00000000..c0d595da
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 00000000..26d1ca2f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+ // Range of a durpb.Duration in seconds, as specified in
+ // google/protobuf/duration.proto. This is about 10,000 years in seconds.
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+ if d == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", d)
+ }
+ if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", d)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+ }
+ return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+ if err := validateDuration(p); err != nil {
+ return 0, err
+ }
+ d := time.Duration(p.Seconds) * time.Second
+ if int64(d/time.Second) != p.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ if p.Nanos != 0 {
+ d += time.Duration(p.Nanos) * time.Nanosecond
+ if (d < 0) != (p.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &durpb.Duration{
+ Seconds: secs,
+ Nanos: int32(nanos),
+ }
+}
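
A short sketch of round-tripping a time.Duration through the two
converters above:

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Duration -> durpb.Duration: whole seconds plus leftover nanos.
        p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
        fmt.Println(p.Seconds, p.Nanos) // 90 500000000

        // durpb.Duration -> time.Duration: range and sign rules are validated.
        d, err := ptypes.Duration(p)
        if err != nil {
            panic(err)
        }
        fmt.Println(d) // 1m30.5s
    }
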
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 00000000..0d681ee2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package duration
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+}
+
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (m *Duration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Duration.Merge(m, src)
+}
+func (m *Duration) XXX_Size() int {
+ return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+ xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
+ // 190 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+ 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+ 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+ 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+ 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+ 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+ 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+ 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+ 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+ 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+ 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
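
A minimal sketch of round-tripping the generated type through the binary
wire format with the proto package (the field values are illustrative):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
        in := &durpb.Duration{Seconds: 3, Nanos: 1}
        b, err := proto.Marshal(in)
        if err != nil {
            panic(err)
        }
        out := &durpb.Duration{}
        if err := proto.Unmarshal(b, out); err != nil {
            panic(err)
        }
        // The generated getters above are nil-safe accessors.
        fmt.Println(out.GetSeconds(), out.GetNanos()) // 3 1
    }
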
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 00000000..975fce41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
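
The sign-normalization pseudo code above translates directly to Go; a
sketch, where durationBetween is a hypothetical helper rather than part
of this package:

    package main

    import (
        "fmt"

        durpb "github.com/golang/protobuf/ptypes/duration"
        tspb "github.com/golang/protobuf/ptypes/timestamp"
    )

    // durationBetween computes end - start, keeping seconds and nanos on
    // the same sign as duration.proto requires.
    func durationBetween(start, end *tspb.Timestamp) *durpb.Duration {
        secs := end.Seconds - start.Seconds
        nanos := end.Nanos - start.Nanos
        if secs < 0 && nanos > 0 {
            secs++
            nanos -= 1e9
        } else if secs > 0 && nanos < 0 {
            secs--
            nanos += 1e9
        }
        return &durpb.Duration{Seconds: secs, Nanos: nanos}
    }

    func main() {
        d := durationBetween(
            &tspb.Timestamp{Seconds: 10, Nanos: 900000000},
            &tspb.Timestamp{Seconds: 11, Nanos: 100000000},
        )
        fmt.Println(d.Seconds, d.Nanos) // 0 200000000
    }
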
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 00000000..8da0df01
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,132 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+ ts := &tspb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
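
A small sketch tying the helpers above together:

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Time -> Timestamp proto; errors outside [0001-01-01, 10000-01-01).
        ts, err := ptypes.TimestampProto(
            time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
        if err != nil {
            panic(err)
        }
        fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

        // Timestamp proto -> time.Time; the result is always in UTC.
        t, err := ptypes.Timestamp(ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(t.Location() == time.UTC) // true
    }
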
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 00000000..31cd846d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+ return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+ xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+ // 191 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+ 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+ 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+ 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+ 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+ 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+ 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+ 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+ 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+ 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+ 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 00000000..eafb3fa0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
+//
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+// long millis = System.currentTimeMillis();
+//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
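
For the JSON mapping described above, a sketch using the jsonpb package
from github.com/golang/protobuf (an assumption: jsonpb is not part of
this diff, though it ships with the same module):

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/jsonpb"
        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        ts, err := ptypes.TimestampProto(
            time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
        if err != nil {
            panic(err)
        }
        m := &jsonpb.Marshaler{}
        s, err := m.MarshalToString(ts)
        if err != nil {
            panic(err)
        }
        // jsonpb emits 0, 3, 6, or 9 fractional digits, Z-normalized.
        fmt.Println(s) // "2017-01-15T01:30:15.010Z"
    }
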
diff --git a/vendor/github.com/google/wire/.codecov.yml b/vendor/github.com/google/wire/.codecov.yml
new file mode 100644
index 00000000..5ae6b835
--- /dev/null
+++ b/vendor/github.com/google/wire/.codecov.yml
@@ -0,0 +1,13 @@
+comment: off
+coverage:
+ status:
+ project:
+ default:
+ target: 0
+ threshold: null
+ base: auto
+ patch:
+ default:
+ target: 0
+ threshold: null
+ base: auto
diff --git a/vendor/github.com/google/wire/.contributebot b/vendor/github.com/google/wire/.contributebot
new file mode 100644
index 00000000..9a66b3ba
--- /dev/null
+++ b/vendor/github.com/google/wire/.contributebot
@@ -0,0 +1,4 @@
+{
+ "issue_title_pattern": "^.*$",
+ "pull_request_title_response": "Please edit the title of this pull request with the name of the affected component, or \"all\", followed by a colon, followed by a short summary of the change."
+}
diff --git a/vendor/github.com/google/wire/.travis.yml b/vendor/github.com/google/wire/.travis.yml
new file mode 100644
index 00000000..34beb03d
--- /dev/null
+++ b/vendor/github.com/google/wire/.travis.yml
@@ -0,0 +1,55 @@
+# Copyright 2018 The Wire Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+language: go
+go_import_path: github.com/google/wire
+
+before_install:
+ # The Bash that comes with OS X is ancient.
+ # grep is similar: it's not GNU grep, which means commands aren't portable.
+ # Homebrew installs grep as ggrep if you don't build from source, so it needs
+  # to be moved so that it takes precedence in the PATH.
+ - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+ HOMEBREW_NO_AUTO_UPDATE=1 brew install bash grep;
+ mv $(brew --prefix)/bin/ggrep $(brew --prefix)/bin/grep;
+ fi
+
+install:
+ # Re-checkout files preserving line feeds. This prevents Windows builds from
+ # converting \n to \r\n.
+ - "git config --global core.autocrlf input"
+ - "git checkout -- ."
+
+script:
+ - 'internal/runtests.sh'
+
+env:
+ global:
+ - GO111MODULE=on
+ - GOPROXY=https://proxy.golang.org
+
+# When updating Go versions:
+# In addition to changing the "go:" versions below, edit the version
+# test in internal/runtests.sh.
+
+jobs:
+ include:
+ - go: "1.11.x"
+ os: linux
+ - go: "1.12.x"
+ os: linux
+ - go: "1.12.x"
+ os: osx
+ - go: "1.12.x"
+ os: windows
diff --git a/vendor/github.com/google/wire/AUTHORS b/vendor/github.com/google/wire/AUTHORS
new file mode 100644
index 00000000..4d8d4b31
--- /dev/null
+++ b/vendor/github.com/google/wire/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Wire authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as one of
+# Organization's name
+# Individual's name <submission email address>
+# Individual's name <submission email address> <email2> <emailN>
+# See CONTRIBUTORS for the meaning of multiple email addresses.
+
+# Please keep the list sorted.
+
+Google LLC
+ktr <ktr@syfm.me>
+Kumbirai Tanekha <kumbirai.tanekha@gmail.com>
+Oleg Kovalov <iamolegkovalov@gmail.com>
+Yoichiro Shimizu <budougumi0617@gmail.com>
+Zachary Romero <zacromero3@gmail.com>
diff --git a/vendor/github.com/google/wire/CODE_OF_CONDUCT.md b/vendor/github.com/google/wire/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..3a8545ec
--- /dev/null
+++ b/vendor/github.com/google/wire/CODE_OF_CONDUCT.md
@@ -0,0 +1,10 @@
+# Code of Conduct
+
+This project is covered under the [Go Code of Conduct][]. In summary:
+
+- Treat everyone with respect and kindness.
+- Be thoughtful in how you communicate.
+- Don’t be destructive or inflammatory.
+- If you encounter an issue, please mail conduct@golang.org.
+
+[Go Code of Conduct]: https://golang.org/conduct
diff --git a/vendor/github.com/google/wire/CONTRIBUTING.md b/vendor/github.com/google/wire/CONTRIBUTING.md
new file mode 100644
index 00000000..68445fc4
--- /dev/null
+++ b/vendor/github.com/google/wire/CONTRIBUTING.md
@@ -0,0 +1,152 @@
+# How to Contribute
+
+We would love to accept your patches and contributions to this project. Here is
+how you can help.
+
+## Filing issues
+
+Filing issues is an important way you can contribute to the Wire Project. We
+want your feedback on things like bugs, desired API changes, or just anything
+that isn't working for you.
+
+### Bugs
+
+If your issue is a bug, open one
+[here](https://github.com/google/wire/issues/new). The easiest way to file an
+issue with all the right information is to run `go bug`. `go bug` will print out
+a handy template of questions and system information that will help us get to
+the root of the issue quicker.
+
+### Changes
+
+Unlike the core Go project, we do not have a formal proposal process for
+changes. If you have a change you would like to see in Wire, please file an
+issue with the necessary details.
+
+### Triaging
+
+The Go Cloud team triages issues at least every two weeks, but usually within
+two business days. Bugs or feature requests are placed into a **Sprint**
+milestone, which means the issue is intended to be worked on. Issues that we
+would like to address but do not have time for are placed into the [Unplanned][]
+milestone.
+
+[Unplanned]: https://github.com/google/wire/milestone/1
+
+## Contributing Code
+
+We love accepting contributions! If your change is minor, please feel free to
+submit a [pull request](https://help.github.com/articles/about-pull-requests/).
+If your change is larger, or adds a feature, please file an issue beforehand so
+that we can discuss the change. You're welcome to file an implementation pull
+request immediately as well, although we generally lean towards discussing the
+change and then reviewing the implementation separately.
+
+### Finding something to work on
+
+If you want to write some code, but don't know where to start or what you might
+want to do, take a look at our [Unplanned][] milestone. This is where you can
+find issues we would like to address but can't currently find time for. See if
+any of the latest ones look interesting! If you need help before you can start
+work, you can comment on the issue and we will try to help as best we can.
+
+### Contributor License Agreement
+
+Contributions to this project can only be made by those who have signed Google's
+Contributor License Agreement. You (or your employer) retain the copyright to
+your contribution; this simply gives us permission to use and redistribute your
+contributions as part of the project. Head over to
+<https://cla.developers.google.com/> to see your current agreements on file or
+to sign a new one.
+
+As a personal contributor, you only need to sign the Google CLA once across all
+Google projects. If you've already signed the CLA, there is no need to do it
+again. If you are submitting code on behalf of your employer, there's
+[a separate corporate CLA that your employer manages for you](https://opensource.google.com/docs/cla/#external-contributors).
+
+## Making a pull request
+
+* Follow the normal
+ [pull request flow](https://help.github.com/articles/creating-a-pull-request/)
+* Build your changes using Go 1.11 with Go modules enabled. Wire's continuous
+ integration uses Go modules in order to ensure
+ [reproducible builds](https://research.swtch.com/vgo-repro).
+* Test your changes using `go test ./...`. Please add tests that show the
+ change does what it says it does, even if there wasn't a test in the first
+ place.
+* Feel free to make as many commits as you want; we will squash them all into
+ a single commit before merging your change.
+* Check the diffs, write a useful description (including something like
+ `Fixes #123` if it's fixing a bug) and send the PR out.
+* [Travis CI](http://travis-ci.com) will run tests against the PR. This should
+  happen within 10 minutes or so. If a test fails, go back to the coding stage,
+  fix the test, and push the same branch again. You won't need to make a new
+  pull request; the changes will be rolled directly into the PR you already
+  opened. Wait for Travis again. There is no need to assign a reviewer to the
+  PR; the project team will assign someone for review during the standard
+  [triage](#triaging) process.
+
+## Code review
+
+All submissions, including submissions by project members, require review. It is
+almost never the case that a pull request is accepted without some changes
+requested, so please do not be offended!
+
+When you have finished making requested changes to your pull request, please
+make a comment containing "PTAL" (Please Take Another Look) on your pull
+request. GitHub notifications can be noisy, and it is unfortunately easy for
+things to be lost in the shuffle.
+
+Once your PR is approved (hooray!) the reviewer will squash your commits into a
+single commit, and then merge the commit onto the Wire master branch. Thank you!
+
+## GitHub code review workflow conventions
+
+(For project members and frequent contributors.)
+
+As a contributor:
+
+- Try hard to make each Pull Request as small and focused as possible. In
+ particular, this means that if a reviewer asks you to do something that is
+ beyond the scope of the Pull Request, the best practice is to file another
+ issue and reference it from the Pull Request rather than just adding more
+ commits to the existing PR.
+- Adding someone as a Reviewer means "please feel free to look and comment";
+ the review is optional. Choose as many Reviewers as you'd like.
+- Adding someone as an Assignee means that the Pull Request should not be
+ submitted until they approve. If you choose multiple Assignees, wait until
+ all of them approve. It is fine to ask someone if they are OK with being
+ removed as an Assignee.
+ - Note that if you don't select any assignees, ContributeBot will turn all
+ of your Reviewers into Assignees.
+- Make as many commits as you want locally, but try not to push them to Github
+ until you've addressed comments; this allows the email notification about
+ the push to be a signal to reviewers that the PR is ready to be looked at
+ again.
+- When there may be confusion about what should happen next for a PR, be
+ explicit; add a "PTAL" comment if it is ready for review again, or a "Please
+ hold off on reviewing for now" if you are still working on addressing
+ comments.
+- "Resolve" comments that you are sure you've addressed; let your reviewers
+ resolve ones that you're not sure about.
+- Do not use `git push --force`; this can cause comments from your reviewers
+ that are associated with a specific commit to be lost. This implies that
+ once you've sent a Pull Request, you should use `git merge` instead of `git
+ rebase` to incorporate commits from the master branch.
+
+As a reviewer:
+
+- Be timely in your review process, especially if you are an Assignee.
+- Try to use `Start a Review` instead of single comments, to reduce email
+ spam.
+- "Resolve" your own comments if they have been addressed.
+- If you want your review to be blocking, and are not currently an Assignee,
+ add yourself as an Assignee.
+
+When squashing-and-merging:
+
+- Ensure that **all** of the Assignees have approved.
+- Do a final review of the one-line PR summary, ensuring that it accurately
+ describes the change.
+- Delete the automatically added commit lines; these are generally not
+ interesting and make commit history harder to read.
diff --git a/vendor/github.com/google/wire/CONTRIBUTORS b/vendor/github.com/google/wire/CONTRIBUTORS
new file mode 100644
index 00000000..00a94f89
--- /dev/null
+++ b/vendor/github.com/google/wire/CONTRIBUTORS
@@ -0,0 +1,43 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Wire repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Individual's name <submission email address>
+# Individual's name <submission email address> <email2> <emailN>
+#
+# An entry with multiple email addresses specifies that the
+# first address should be used in the submit logs and
+# that the other addresses should be recognized as the
+# same person when interacting with Git.
+
+# Please keep the list sorted.
+
+Chris Lewis <cflewis@google.com> <cflewis@golang.org> <c@chris.to>
+Christina Austin <4240737+clausti@users.noreply.github.com>
+Eno Compton <enocom@google.com>
+Issac Trotts <issactrotts@google.com> <issac.trotts@gmail.com>
+ktr <ktr@syfm.me>
+Kumbirai Tanekha <kumbirai.tanekha@gmail.com>
+Oleg Kovalov <iamolegkovalov@gmail.com>
+Robert van Gent <rvangent@google.com> <vangent@gmail.com>
+Ross Light <light@google.com> <ross@zombiezen.com>
+Tuo Shan <shantuo@google.com> <sturbo89@gmail.com>
+Yoichiro Shimizu <budougumi0617@gmail.com>
+Zachary Romero <zacromero3@gmail.com>
diff --git a/vendor/github.com/google/wire/LICENSE b/vendor/github.com/google/wire/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/google/wire/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/wire/README.md b/vendor/github.com/google/wire/README.md
new file mode 100644
index 00000000..eef45d8b
--- /dev/null
+++ b/vendor/github.com/google/wire/README.md
@@ -0,0 +1,57 @@
+# Wire: Automated Initialization in Go
+
+[![Build Status](https://travis-ci.com/google/wire.svg?branch=master)][travis]
+[![godoc](https://godoc.org/github.com/google/wire?status.svg)][godoc]
+[![Coverage](https://codecov.io/gh/google/wire/branch/master/graph/badge.svg)](https://codecov.io/gh/google/wire)
+
+
+Wire is a code generation tool that automates connecting components using
+[dependency injection][]. Dependencies between components are represented in
+Wire as function parameters, encouraging explicit initialization instead of
+global variables. Because Wire operates without runtime state or reflection,
+code written to be used with Wire is useful even for hand-written
+initialization.
+
+For an overview, see the [introductory blog post][].
+
+[dependency injection]: https://en.wikipedia.org/wiki/Dependency_injection
+[introductory blog post]: https://blog.golang.org/wire
+[godoc]: https://godoc.org/github.com/google/wire
+[travis]: https://travis-ci.com/google/wire
+
+## Installing
+
+Install Wire by running:
+
+```shell
+go get github.com/google/wire/cmd/wire
+```
+
+and ensuring that `$GOPATH/bin` is added to your `$PATH`.
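+
+To give a feel for the workflow, here is a minimal sketch (the names `Message`,
+`Greeter`, and `InitGreeter` are illustrative, not part of the Wire API;
+`github.com/google/wire` is assumed to be imported):
+
+```go
+type Message string
+
+func NewMessage() Message { return "hello" }
+
+type Greeter struct{ M Message }
+
+func NewGreeter(m Message) Greeter { return Greeter{M: m} }
+
+// InitGreeter is an injector template; running the wire tool generates
+// its real implementation from the providers passed to Build.
+func InitGreeter() Greeter {
+	wire.Build(NewMessage, NewGreeter)
+	return Greeter{}
+}
+```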
+
+## Documentation
+
+- [Tutorial][]
+- [User Guide][]
+- [Best Practices][]
+- [FAQ][]
+
+[Tutorial]: ./_tutorial/README.md
+[Best Practices]: ./docs/best-practices.md
+[FAQ]: ./docs/faq.md
+[User Guide]: ./docs/guide.md
+
+## Project status
+
+**This project is in alpha and is not yet suitable for production.**
+
+While in alpha, the API is subject to breaking changes.
+
+## Community
+
+You can contact us on the [go-cloud mailing list][].
+
+This project is covered by the Go [Code of Conduct][].
+
+[Code of Conduct]: ./CODE_OF_CONDUCT.md
+[go-cloud mailing list]: https://groups.google.com/forum/#!forum/go-cloud
diff --git a/vendor/github.com/google/wire/go.mod b/vendor/github.com/google/wire/go.mod
new file mode 100644
index 00000000..cb540ac5
--- /dev/null
+++ b/vendor/github.com/google/wire/go.mod
@@ -0,0 +1,10 @@
+module github.com/google/wire
+
+require (
+ github.com/fatih/color v1.7.0
+ github.com/google/go-cmp v0.2.0
+ github.com/google/subcommands v1.0.1
+ github.com/mattn/go-colorable v0.1.2 // indirect
+ github.com/pmezard/go-difflib v1.0.0
+ golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b
+)
diff --git a/vendor/github.com/google/wire/go.sum b/vendor/github.com/google/wire/go.sum
new file mode 100644
index 00000000..83dc6644
--- /dev/null
+++ b/vendor/github.com/google/wire/go.sum
@@ -0,0 +1,20 @@
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/subcommands v1.0.1 h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k=
+github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b h1:NVD8gBK33xpdqCaZVVtd6OFJp+3dxkXuz7+U7KaVN6s=
+golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
diff --git a/vendor/github.com/google/wire/wire.go b/vendor/github.com/google/wire/wire.go
new file mode 100644
index 00000000..941d6c65
--- /dev/null
+++ b/vendor/github.com/google/wire/wire.go
@@ -0,0 +1,191 @@
+// Copyright 2018 The Wire Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package wire contains directives for Wire code generation.
+// For an overview of working with Wire, see the user guide at
+// https://github.com/google/wire/blob/master/docs/guide.md
+//
+// The directives in this package are used as input to the Wire code generation
+// tool. The entry points of Wire's analysis are injector functions: function
+// templates denoted by containing only a call to Build. The arguments to Build
+// describe a set of providers, and the Wire code generation tool builds a
+// directed acyclic graph of the providers' output types. The generated code
+// fills in the function template by using the providers from the provider set
+// to instantiate any needed types.
+package wire
+
+// ProviderSet is a marker type that collects a group of providers.
+type ProviderSet struct{}
+
+// NewSet creates a new provider set that includes the providers in its
+// arguments. Each argument is a function value, a provider set, a call to
+// Struct, a call to Bind, a call to Value, a call to InterfaceValue or a call
+// to FieldsOf.
+//
+// Passing a function value to NewSet declares that the function's first
+// return value type will be provided by calling the function. The arguments
+// to the function will come from the providers for their types. As such, all
+// the function's parameters must be of distinct types. The function may
+// optionally return an error as its last return value and a cleanup function
+// as its second return value. A cleanup function must be of type func() and is
+// guaranteed to be called before the cleanup function of any of the
+// provider's inputs. If any provider returns an error, the injector function
+// will call all the appropriate cleanup functions and return the error from
+// the injector function.
+//
+// Passing a ProviderSet to NewSet is the same as if the set's contents
+// were passed as arguments to NewSet directly.
+//
+// The behavior of passing the result of a call to the other functions in this
+// package is described in their respective doc comments.
+//
+// For compatibility with older versions of Wire, passing a struct value of type
+// S to NewSet declares that both S and *S will be provided by creating a new
+// value of the appropriate type by filling in each field of S using the
+// provider of the field's type. This form is deprecated and will be removed in
+// a future version of Wire: new provider sets should use wire.Struct.
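+//
+// For illustration only (a sketch, not part of the original docs), a provider
+// returning a cleanup function and an error might look like:
+//
+//	func provideFile(path string) (*os.File, func(), error) {
+//		f, err := os.Open(path)
+//		if err != nil {
+//			return nil, nil, err
+//		}
+//		return f, func() { f.Close() }, nil
+//	}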
+func NewSet(...interface{}) ProviderSet {
+ return ProviderSet{}
+}
+
+// Build is placed in the body of an injector function template to declare the
+// providers to use. The Wire code generation tool will fill in an
+// implementation of the function. The arguments to Build are interpreted the
+// same as NewSet: they determine the provider set presented to Wire's
+// dependency graph. Build returns an error message that can be sent to a call
+// to panic().
+//
+// The parameters of the injector function are used as inputs in the dependency
+// graph.
+//
+// Similar to provider functions passed into NewSet, the first return value is
+// the output of the injector function, the optional second return value is a
+// cleanup function, and the optional last return value is an error. If any of
+// the provider functions in the injector function's provider set return errors
+// or cleanup functions, the corresponding return value must be present in the
+// injector function template.
+//
+// Examples:
+//
+// func injector(ctx context.Context) (*sql.DB, error) {
+// wire.Build(otherpkg.FooSet, myProviderFunc)
+// return nil, nil
+// }
+//
+// func injector(ctx context.Context) (*sql.DB, error) {
+// panic(wire.Build(otherpkg.FooSet, myProviderFunc))
+// }
+func Build(...interface{}) string {
+ return "implementation not generated, run wire"
+}
+
+// A Binding maps an interface to a concrete type.
+type Binding struct{}
+
+// Bind declares that a concrete type should be used to satisfy a dependency on
+// the type of iface. iface must be a pointer to an interface type, to must be a
+// pointer to a concrete type.
+//
+// Example:
+//
+// type Fooer interface {
+// Foo()
+// }
+//
+// type MyFoo struct{}
+//
+// func (MyFoo) Foo() {}
+//
+// var MySet = wire.NewSet(
+// wire.Struct(new(MyFoo)),
+// wire.Bind(new(Fooer), new(MyFoo)))
+func Bind(iface, to interface{}) Binding {
+ return Binding{}
+}
+
+// bindToUsePointer is detected by the wire tool to indicate that Bind's second argument should take a pointer.
+// See https://github.com/google/wire/issues/120 for details.
+const bindToUsePointer = true
+
+// A ProvidedValue is an expression that is copied to the generated injector.
+type ProvidedValue struct{}
+
+// Value binds an expression to provide the type of the expression.
+// The expression may not be an interface value; use InterfaceValue for that.
+//
+// Example:
+//
+// var MySet = wire.NewSet(wire.Value([]string(nil)))
+func Value(interface{}) ProvidedValue {
+ return ProvidedValue{}
+}
+
+// InterfaceValue binds an expression to provide a specific interface type.
+// The first argument is a pointer to the interface that the user wants to provide.
+// The second argument is the actual variable value whose type implements the
+// interface.
+//
+// Example:
+//
+// var MySet = wire.NewSet(wire.InterfaceValue(new(io.Reader), os.Stdin))
+func InterfaceValue(typ interface{}, x interface{}) ProvidedValue {
+ return ProvidedValue{}
+}
+
+// A StructProvider represents a named struct.
+type StructProvider struct{}
+
+// Struct specifies that the given struct type will be provided by filling in the fields in the struct that have the names given.
+//
+// The first argument must be a pointer to the struct type. For a struct type
+// Foo, Wire will use field-filling to provide both Foo and *Foo. The remaining
+// arguments are field names to fill in. As a special case, if a single name "*"
+// is given, then all of the fields in the struct will be filled in.
+//
+// For example:
+//
+// type S struct {
+// MyFoo *Foo
+// MyBar *Bar
+// }
+// var Set = wire.NewSet(wire.Struct(new(S), "MyFoo")) -> inject only S.MyFoo
+// var Set = wire.NewSet(wire.Struct(new(S), "*")) -> inject all fields
+func Struct(structType interface{}, fieldNames ...string) StructProvider {
+ return StructProvider{}
+}
+
+// StructFields is a collection of the fields from a struct.
+type StructFields struct{}
+
+// FieldsOf declares that the named fields of the given struct type will be used
+// to provide the types of those fields. The structType argument must be a
+// pointer to the struct to reference, or a pointer to a pointer to that struct.
+//
+// The following example would provide *Foo and *Bar using S.MyFoo and S.MyBar respectively:
+//
+// type S struct {
+// MyFoo *Foo
+// MyBar *Bar
+// }
+//
+// func NewStruct() S { /* ... */ }
+// var Set = wire.NewSet(wire.FieldsOf(new(S), "MyFoo", "MyBar"))
+//
+// or
+//
+// func NewStruct() *S { /* ... */ }
+// var Set = wire.NewSet(wire.FieldsOf(new(*S), "MyFoo", "MyBar"))
+func FieldsOf(structType interface{}, fieldNames ...string) StructFields {
+ return StructFields{}
+}
diff --git a/vendor/github.com/googleapis/gax-go/.gitignore b/vendor/github.com/googleapis/gax-go/.gitignore
new file mode 100644
index 00000000..289bf1eb
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/.gitignore
@@ -0,0 +1 @@
+*.cover
diff --git a/vendor/github.com/googleapis/gax-go/.travis.yml b/vendor/github.com/googleapis/gax-go/.travis.yml
new file mode 100644
index 00000000..cc0a91e1
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+language: go
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+script:
+ - gofmt -l .
+ - go tool vet .
+ - go test -coverprofile=coverage.txt -covermode=atomic
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md b/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..46b2a08e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md
@@ -0,0 +1,43 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
new file mode 100644
index 00000000..2827b7d3
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+Want to contribute? Great! First, read this page (including the small print at the end).
+
+### Before you contribute
+Before we can use your code, you must sign the
+[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
+(CLA), which you can do online. The CLA is necessary mainly because you own the
+copyright to your changes, even after your contribution becomes part of our
+codebase, so we need your permission to use and distribute your code. We also
+need to be sure of various other things—for instance that you'll tell us if you
+know that your code infringes on other people's patents. You don't have to sign
+the CLA until after you've submitted your code for review and a member has
+approved it, but you must do it before we can put your code into our codebase.
+Before you start working on a larger contribution, you should get in touch with
+us first through the issue tracker with your idea so that we can help out and
+possibly guide you. Coordinating up front makes it much easier to avoid
+frustration later on.
+
+### Code reviews
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose.
+
+### The small print
+Contributions made by corporations are covered by a different agreement than
+the one above, the
+[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).
diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE
new file mode 100644
index 00000000..6d16b657
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md
new file mode 100644
index 00000000..d6e214ef
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/README.md
@@ -0,0 +1,29 @@
+Google API Extensions for Go
+============================
+
+[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go)
+[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go)
+[![GoDoc](https://godoc.org/github.com/googleapis/gax-go?status.svg)](https://godoc.org/github.com/googleapis/gax-go)
+
+Google API Extensions for Go (gax-go) is a set of modules which aids the
+development of APIs for clients and servers based on `gRPC` and Google API
+conventions.
+
+To install the API extensions, use:
+
+```
+go get -u github.com/googleapis/gax-go
+```
+
+**Note:** Application code will rarely need to use this library directly,
+but code generated automatically from API definition files can use it
+to simplify code generation and to provide a more convenient and idiomatic API surface.
+
+Go Versions
+===========
+This library requires Go 1.6 or above.
+
+License
+=======
+BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE)
+for more information.
diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go
new file mode 100644
index 00000000..7b621643
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/call_option.go
@@ -0,0 +1,157 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "math/rand"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+ // Resolve applies the option by modifying cs.
+ Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+	// Retry reports whether a request should be retried and how long to pause before retrying
+	// if the previous attempt returned with err. Invoke never calls Retry with a nil error.
+ Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+ s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+ return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
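+//
+// For example (an illustrative sketch):
+//
+//	retryer := gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//		Initial:    100 * time.Millisecond,
+//		Max:        5 * time.Second,
+//		Multiplier: 2,
+//	})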
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+ return &boRetryer{
+ backoff: bo,
+ codes: append([]codes.Code(nil), cc...),
+ }
+}
+
+type boRetryer struct {
+ backoff Backoff
+ codes []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+ st, ok := status.FromError(err)
+ if !ok {
+ return 0, false
+ }
+ c := st.Code()
+ for _, rc := range r.codes {
+ if c == rc {
+ return r.backoff.Pause(), true
+ }
+ }
+ return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
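+//
+// For example (illustrative): with Initial = 1s, Multiplier = 2, and Max = 30s,
+// the successive envelopes are 1s, 2s, 4s, ..., capped at 30s; each call to
+// Pause returns a uniformly random duration within the current envelope.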
+type Backoff struct {
+ // Initial is the initial value of the retry envelope, defaults to 1 second.
+ Initial time.Duration
+
+ // Max is the maximum value of the retry envelope, defaults to 30 seconds.
+ Max time.Duration
+
+ // Multiplier is the factor by which the retry envelope increases.
+ // It should be greater than 1 and defaults to 2.
+ Multiplier float64
+
+ // cur is the current retry envelope
+ cur time.Duration
+}
+
+func (bo *Backoff) Pause() time.Duration {
+ if bo.Initial == 0 {
+ bo.Initial = time.Second
+ }
+ if bo.cur == 0 {
+ bo.cur = bo.Initial
+ }
+ if bo.Max == 0 {
+ bo.Max = 30 * time.Second
+ }
+ if bo.Multiplier < 1 {
+ bo.Multiplier = 2
+ }
+ // Select a duration between zero and the current max. It might seem counterintuitive to
+ // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
+ // argues that that is the best strategy.
+ d := time.Duration(rand.Int63n(int64(bo.cur)))
+ bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
+ if bo.cur > bo.Max {
+ bo.cur = bo.Max
+ }
+ return d
+}
+
+type grpcOpt []grpc.CallOption
+
+func (o grpcOpt) Resolve(s *CallSettings) {
+ s.GRPC = o
+}
+
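+// WithGRPCOptions sets CallSettings.GRPC to a copy of opt; the options are
+// forwarded to the underlying gRPC call.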
+func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
+ return grpcOpt(append([]grpc.CallOption(nil), opt...))
+}
+
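+// CallSettings allow fine-grained control over how calls are made.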
+type CallSettings struct {
+ // Retry returns a Retryer to be used to control retry logic of a method call.
+ // If Retry is nil or the returned Retryer is nil, the call will not be retried.
+ Retry func() Retryer
+
+ // CallOptions to be forwarded to GRPC.
+ GRPC []grpc.CallOption
+}
diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go
new file mode 100644
index 00000000..8b2900e7
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/gax.go
@@ -0,0 +1,38 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gax contains a set of modules which aid the development of APIs
+// for clients and servers based on gRPC and Google API conventions.
+//
+// Application code will rarely need to use this library directly.
+// However, code generated automatically from API definition files can use it
+// to simplify code generation and to provide more convenient and idiomatic API surfaces.
+package gax
+
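+// Version specifies the gax-go version being used.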
+const Version = "2.0.0"
diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go
new file mode 100644
index 00000000..d81455ec
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/header.go
@@ -0,0 +1,24 @@
+package gax
+
+import "bytes"
+
+// XGoogHeader is for use by the Google Cloud Libraries only.
+//
+// XGoogHeader formats key-value pairs.
+// The resulting string is suitable for the x-goog-api-client header.
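+//
+// For example, XGoogHeader("gl-go", "1.11.1", "gax", Version) returns
+// "gl-go/1.11.1 gax/2.0.0".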
+func XGoogHeader(keyval ...string) string {
+ if len(keyval) == 0 {
+ return ""
+ }
+ if len(keyval)%2 != 0 {
+ panic("gax.Header: odd argument count")
+ }
+ var buf bytes.Buffer
+ for i := 0; i < len(keyval); i += 2 {
+ buf.WriteByte(' ')
+ buf.WriteString(keyval[i])
+ buf.WriteByte('/')
+ buf.WriteString(keyval[i+1])
+ }
+ return buf.String()[1:]
+}
diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go
new file mode 100644
index 00000000..cb5cd2a9
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/invoke.go
@@ -0,0 +1,89 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "context"
+ "time"
+)
+
+// APICall is a user-defined call stub.
+type APICall func(context.Context, CallSettings) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
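+//
+// A typical use looks like this (a sketch; call is a hypothetical APICall
+// closure):
+//
+//	err := gax.Invoke(ctx, call, gax.WithRetry(func() gax.Retryer {
+//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{})
+//	}))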
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+ var settings CallSettings
+ for _, opt := range opts {
+ opt.Resolve(&settings)
+ }
+ return invoke(ctx, call, settings, Sleep)
+}
+
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
+// If interrupted, Sleep returns ctx.Err().
+func Sleep(ctx context.Context, d time.Duration) error {
+ t := time.NewTimer(d)
+ select {
+ case <-ctx.Done():
+ t.Stop()
+ return ctx.Err()
+ case <-t.C:
+ return nil
+ }
+}
+
+type sleeper func(ctx context.Context, d time.Duration) error
+
+// invoke implements Invoke, taking an additional sleeper argument for testing.
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+ var retryer Retryer
+ for {
+ err := call(ctx, settings)
+ if err == nil {
+ return nil
+ }
+ if settings.Retry == nil {
+ return err
+ }
+ if retryer == nil {
+ if r := settings.Retry(); r != nil {
+ retryer = r
+ } else {
+ return err
+ }
+ }
+ if d, ok := retryer.Retry(err); !ok {
+ return err
+ } else if err = sp(ctx, d); err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/.gitignore b/vendor/github.com/johannesboyne/gofakes3/.gitignore
new file mode 100644
index 00000000..66a8eef3
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/.gitignore
@@ -0,0 +1,9 @@
+*.DS_Store
+DS_Store
+node_modules/
+logs/
+*.db
+s3f_*
+*.test
+*.out
+build/
diff --git a/vendor/github.com/johannesboyne/gofakes3/.gitmodules b/vendor/github.com/johannesboyne/gofakes3/.gitmodules
new file mode 100644
index 00000000..8f12d232
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "s3test/s3-tests"]
+ path = s3test/s3-tests
+ url = https://github.com/ceph/s3-tests
diff --git a/vendor/github.com/johannesboyne/gofakes3/Dockerfile b/vendor/github.com/johannesboyne/gofakes3/Dockerfile
new file mode 100644
index 00000000..615603d5
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/Dockerfile
@@ -0,0 +1,5 @@
+FROM scratch
+ADD ca-certificates.crt /etc/ssl/certs/
+ADD build/main /
+EXPOSE 9000
+CMD ["/main", "-backend", "mem"]
diff --git a/vendor/github.com/johannesboyne/gofakes3/GoFakeS3.png b/vendor/github.com/johannesboyne/gofakes3/GoFakeS3.png
new file mode 100644
index 00000000..38e02695
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/GoFakeS3.png
Binary files differ
diff --git a/vendor/github.com/johannesboyne/gofakes3/LICENSE b/vendor/github.com/johannesboyne/gofakes3/LICENSE
new file mode 100644
index 00000000..7f374cef
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Johannes Boyne
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/johannesboyne/gofakes3/README.md b/vendor/github.com/johannesboyne/gofakes3/README.md
new file mode 100644
index 00000000..863f3d68
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/README.md
@@ -0,0 +1,163 @@
+[![CircleCI](https://circleci.com/gh/johannesboyne/gofakes3.svg?style=svg)](https://circleci.com/gh/johannesboyne/gofakes3)
+[![Codecov](https://codecov.io/gh/johannesboyne/gofakes3/branch/master/graph/badge.svg)](https://codecov.io/gh/johannesboyne/gofakes3)
+
+![Logo](/GoFakeS3.png)
+# AWS (GOFAKE)S3
+
+AWS S3 fake server and testing library for extensive S3 test integrations.
+Use it either by running a test server, e.g. for testing AWS Lambda functions
+that access S3, or as a simple and convenient S3 mock and test server.
+
+## What to use it for?
+
+We're using it for the local development of S3-dependent Lambda functions,
+to test AWS S3 Go implementations and access, and
+to test browser-based direct uploads to S3 locally.
+
+
+## What not to use it for?
+
+Please don't use gofakes3 as a production service. The intended use case for
+gofakes3 is currently to facilitate testing. It's not meant to be used for
+safe, persistent access to production data at the moment.
+
+There's no reason we couldn't set that as a stretch goal at a later date, but
+it's a long way down the road, especially while we have so much of the API left
+to implement; breaking changes are inevitable.
+
+In the meantime, there are more battle-hardened solutions for production
+workloads out there, some of which are listed in the "Similar Notable Projects"
+section below.
+
+
+## How to use it?
+
+### Example
+
+```golang
+// fake s3
+backend := s3mem.New()
+faker := gofakes3.New(backend)
+ts := httptest.NewServer(faker.Server())
+defer ts.Close()
+
+// configure S3 client
+s3Config := &aws.Config{
+ Credentials: credentials.NewStaticCredentials("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""),
+ Endpoint: aws.String(ts.URL),
+ Region: aws.String("eu-central-1"),
+ DisableSSL: aws.Bool(true),
+ S3ForcePathStyle: aws.Bool(true),
+}
+newSession := session.New(s3Config)
+
+s3Client := s3.New(newSession)
+cparams := &s3.CreateBucketInput{
+ Bucket: aws.String("newbucket"),
+}
+
+// Create a new bucket using the CreateBucket call.
+_, err := s3Client.CreateBucket(cparams)
+if err != nil {
+ // Message from an error.
+ fmt.Println(err.Error())
+ return
+}
+
+// Upload a new object "testobject" with the string "Hello World!" to our "newbucket".
+_, err = s3Client.PutObject(&s3.PutObjectInput{
+ Body: strings.NewReader(`{"configuration": {"main_color": "#333"}, "screens": []}`),
+ Bucket: aws.String("newbucket"),
+ Key: aws.String("test.txt"),
+})
+
+// ... accessing of test.txt through any S3 client would now be possible
+```
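+
+For completeness, reading the object back might look like this (a sketch using
+the same `s3Client` as above; imports such as `ioutil` are assumed):
+
+```golang
+out, err := s3Client.GetObject(&s3.GetObjectInput{
+	Bucket: aws.String("newbucket"),
+	Key:    aws.String("test.txt"),
+})
+if err != nil {
+	fmt.Println(err.Error())
+	return
+}
+defer out.Body.Close()
+body, _ := ioutil.ReadAll(out.Body)
+fmt.Println(string(body))
+```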
+
+Please feel free to check it out and to provide useful feedback (using GitHub
+issues), but be aware that this software is used internally and for local
+development only. Thus, it makes no guarantees of correctness, performance, or
+security.
+
+There are two ways to run locally: using DNS, or using S3 path mode.
+
+S3 path mode is the most flexible and least restrictive, but it does require that you
+are able to modify your client code. In Go, the modification looks like this:
+
+ config := aws.Config{}
+ config.WithS3ForcePathStyle(true)
+
+S3 path mode works over the network by default for all bucket names.
+
+If you are unable to modify the code, DNS mode can be used, but it comes with further
+restrictions and requires you to be able to modify your local DNS resolution.
+
+If using `localhost` as your endpoint, you will need to add the following to
+`/etc/hosts` for *every bucket you want to fake*:
+
+ 127.0.0.1 <bucket-name>.localhost
+
+It is trickier if you want other machines to be able to use your fake S3 server,
+as you need to be able to modify their DNS resolution as well.
+
+
+## Further usage examples
+
+### Lambda Example
+
+```javascript
+var AWS = require('aws-sdk')
+
+var ep = new AWS.Endpoint('http://localhost:9000');
+var s3 = new AWS.S3({endpoint: ep});
+
+exports.handle = function (e, ctx) {
+ s3.createBucket({
+ Bucket: '<bucket-name>',
+ }, function(err, data) {
+ if (err) return console.log(err, err.stack);
+ ctx.succeed(data)
+ });
+}
+```
+
+### Upload Example
+
+```html
+<html>
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+ </head>
+ <body>
+
+ <form action="http://localhost:9000/<bucket-name>/" method="post" enctype="multipart/form-data">
+ Key to upload:
+ <input type="input" name="key" value="user/user1/test/<filename>" /><br />
+ <input type="hidden" name="acl" value="public-read" />
+ <input type="hidden" name="x-amz-meta-uuid" value="14365123651274" />
+ <input type="hidden" name="x-amz-server-side-encryption" value="AES256" />
+ <input type="text" name="X-Amz-Credential" value="AKIAIOSFODNN7EXAMPLE/20151229/us-east-1/s3/aws4_request" />
+ <input type="text" name="X-Amz-Algorithm" value="AWS4-HMAC-SHA256" />
+ <input type="text" name="X-Amz-Date" value="20151229T000000Z" />
+
+ Tags for File:
+ <input type="input" name="x-amz-meta-tag" value="" /><br />
+ <input type="hidden" name="Policy" value='<Base64-encoded policy string>' />
+ <input type="hidden" name="X-Amz-Signature" value="<signature-value>" />
+ File:
+ <input type="file" name="file" /> <br />
+ <!-- The elements after this will be ignored -->
+ <input type="submit" name="submit" value="Upload to Amazon S3" />
+ </form>
+</html>
+```
+
+## Similar notable projects
+
+- https://github.com/minio/minio **not similar, but powerful ;-)**
+- https://github.com/andrewgaul/s3proxy by @andrewgaul
+
+## Contributors
+
+- [Johannes Boyne @johannesboyne](https://github.com/johannesboyne)
+- [Blake Williams @shabbyrobe](https://github.com/shabbyrobe)
diff --git a/vendor/github.com/johannesboyne/gofakes3/backend.go b/vendor/github.com/johannesboyne/gofakes3/backend.go
new file mode 100644
index 00000000..f3505600
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/backend.go
@@ -0,0 +1,307 @@
+package gofakes3
+
+import (
+ "io"
+)
+
+const (
+ DefaultBucketVersionKeys = 1000
+)
+
+// Object contains the data retrieved from a backend for the specified bucket
+// and object key.
+//
+// You MUST always call Contents.Close(), otherwise you may leak resources.
+type Object struct {
+ Name string
+ Metadata map[string]string
+ Size int64
+ Contents io.ReadCloser
+ Hash []byte
+ Range *ObjectRange
+
+ // VersionID will be empty if bucket versioning has not been enabled.
+ VersionID VersionID
+
+ // If versioning is enabled for the bucket, this is true if this object version
+ // is a delete marker.
+ IsDeleteMarker bool
+}
+
+type ObjectList struct {
+ CommonPrefixes []CommonPrefix
+ Contents []*Content
+ IsTruncated bool
+ NextMarker string
+
+ // prefixes maintains an index of prefixes that have already been seen.
+ // This is a convenience for backend implementers like s3bolt and s3mem,
+ // which operate on a full, flat list of keys.
+ prefixes map[string]bool
+}
+
+func NewObjectList() *ObjectList {
+ return &ObjectList{}
+}
+
+func (b *ObjectList) Add(item *Content) {
+ b.Contents = append(b.Contents, item)
+}
+
+func (b *ObjectList) AddPrefix(prefix string) {
+ if b.prefixes == nil {
+ b.prefixes = map[string]bool{}
+ } else if b.prefixes[prefix] {
+ return
+ }
+ b.prefixes[prefix] = true
+ b.CommonPrefixes = append(b.CommonPrefixes, CommonPrefix{Prefix: prefix})
+}
+
+type ObjectDeleteResult struct {
+ // Specifies whether the versioned object that was permanently deleted was
+ // (true) or was not (false) a delete marker. In a simple DELETE, this
+ // header indicates whether (true) or not (false) a delete marker was
+ // created.
+ IsDeleteMarker bool
+
+ // Returns the version ID of the delete marker created as a result of the
+ // DELETE operation. If you delete a specific object version, the value
+ // returned by this header is the version ID of the object version deleted.
+ VersionID VersionID
+}
+
+type ListBucketVersionsPage struct {
+ // Specifies the key in the bucket that you want to start listing from.
+ // If HasKeyMarker is true, this must be non-empty.
+ KeyMarker string
+ HasKeyMarker bool
+
+ // Specifies the object version you want to start listing from. If
+ // HasVersionIDMarker is true, this must be non-empty.
+ VersionIDMarker VersionID
+ HasVersionIDMarker bool
+
+ // Sets the maximum number of keys returned in the response body. The
+ // response might contain fewer keys, but will never contain more. If
+ // additional keys satisfy the search criteria, but were not returned
+ // because max-keys was exceeded, the response contains
+ // <isTruncated>true</isTruncated>. To return the additional keys, see
+ // key-marker and version-id-marker.
+ //
+ // MaxKeys MUST be > 0, otherwise it is ignored.
+ MaxKeys int64
+}
+
+type ListBucketPage struct {
+ // Specifies the key in the bucket that represents the last item in
+ // the previous page. The first key in the returned page will be the
+ // next lexicographically (UTF-8 binary) sorted key after Marker.
+ // If HasMarker is true, this must be non-empty.
+ Marker string
+ HasMarker bool
+
+ // Sets the maximum number of keys returned in the response body. The
+ // response might contain fewer keys, but will never contain more. If
+ // additional keys satisfy the search criteria, but were not returned
+ // because max-keys was exceeded, the response contains
+ // <isTruncated>true</isTruncated>. To return the additional keys, see
+ // key-marker and version-id-marker.
+ //
+ // MaxKeys MUST be > 0, otherwise it is ignored.
+ MaxKeys int64
+}
+
+func (p ListBucketPage) IsEmpty() bool {
+ return p == ListBucketPage{}
+}
+
+type PutObjectResult struct {
+ // If versioning is enabled on the bucket, this should be set to the
+ // created version ID. If versioning is not enabled, this should be
+ // empty.
+ VersionID VersionID
+}
+
+// Backend provides a set of operations to be implemented in order to support
+// gofakes3.
+//
+// The Backend API is not yet stable; if you create your own Backend, breakage
+// is likely until this notice is removed.
+//
+type Backend interface {
+ // ListBuckets returns a list of all buckets owned by the authenticated
+ // sender of the request.
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html
+ ListBuckets() ([]BucketInfo, error)
+
+ // ListBucket returns the contents of a bucket. Backends should use the
+ // supplied prefix to limit the contents of the bucket and to sort the
+ // matched items into the Contents and CommonPrefixes fields.
+ //
+ // ListBucket must return a gofakes3.ErrNoSuchBucket error if the bucket
+ // does not exist. See gofakes3.BucketNotFound() for a convenient way to create one.
+ //
+ // The prefix MUST be correctly handled for the backend to be valid. Each
+ // item you consider returning should be checked using prefix.Match(name),
+ // even if the prefix is empty. The Backend MUST treat a nil prefix
+ // identically to a zero prefix.
+ //
+ // At this stage, implementers MAY return gofakes3.ErrInternalPageNotImplemented
+ // if the page argument is non-empty. In this case, gofakes3 may or may
+ // not, depending on how it was configured, retry the same request with no page.
+ // We have observed (though not yet confirmed) that simple clients tend to
+ // work fine if you ignore the pagination request, but this may not suit
+ // your application. Not all backends bundled with gofakes3 correctly
+ // support this pagination yet, but that will change.
+ ListBucket(name string, prefix *Prefix, page ListBucketPage) (*ObjectList, error)
+
+ // CreateBucket creates the bucket if it does not already exist. The name
+ // should be assumed to be a valid name.
+ //
+ // If the bucket already exists, a gofakes3.ResourceError with
+ // gofakes3.ErrBucketAlreadyExists MUST be returned.
+ CreateBucket(name string) error
+
+ // BucketExists should return a boolean indicating the bucket existence, or
+ // an error if the backend was unable to determine existence.
+ BucketExists(name string) (exists bool, err error)
+
+ // DeleteBucket deletes a bucket if and only if it is empty.
+ //
+ // If the bucket is not empty, gofakes3.ResourceError with
+ // gofakes3.ErrBucketNotEmpty MUST be returned.
+ //
+ // If the bucket does not exist, gofakes3.ErrNoSuchBucket MUST be returned.
+ //
+ // AWS does not validate the bucket's name for anything other than existence.
+ DeleteBucket(name string) error
+
+ // GetObject must return a gofakes3.ErrNoSuchKey error if the object does
+ // not exist. See gofakes3.KeyNotFound() for a convenient way to create
+ // one.
+ //
+ // If the returned Object is not nil, you MUST call Object.Contents.Close(),
+ // otherwise you will leak resources. Implementers should return a no-op
+ // implementation of io.ReadCloser.
+ //
+ // If rnge is nil, it is assumed you want the entire object. If rnge is not
+ // nil, but the underlying backend does not support range requests,
+ // implementers MUST return ErrNotImplemented.
+ //
+ // If the backend is a VersionedBackend, GetObject retrieves the latest version.
+ GetObject(bucketName, objectName string, rangeRequest *ObjectRangeRequest) (*Object, error)
+
+ // HeadObject fetches the Object from the backend, but reading the Contents
+ // will return io.EOF immediately.
+ //
+ // If the returned Object is not nil, you MUST call Object.Contents.Close(),
+ // otherwise you will leak resources. Implementers should return a no-op
+ // implementation of io.ReadCloser.
+ //
+ // HeadObject should return a NotFound() error if the object does not
+ // exist.
+ HeadObject(bucketName, objectName string) (*Object, error)
+
+ // DeleteObject deletes an object from the bucket.
+ //
+ // If the backend is a VersionedBackend and versioning is enabled, this
+ // should introduce a delete marker rather than actually delete the object.
+ //
+ // DeleteObject must return a gofakes3.ErrNoSuchBucket error if the bucket
+ // does not exist. See gofakes3.BucketNotFound() for a convenient way to create one.
+ // FIXME: confirm with S3 whether this is the correct behaviour.
+ //
+ // DeleteObject must not return an error if the object does not exist. Source:
+ // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#S3.DeleteObject:
+ //
+ // Removes the null version (if there is one) of an object and inserts a
+ // delete marker, which becomes the latest version of the object. If there
+ // isn't a null version, Amazon S3 does not remove any objects.
+ //
+ DeleteObject(bucketName, objectName string) (ObjectDeleteResult, error)
+
+ // PutObject should assume that the key is valid. The map containing meta
+ // may be nil.
+ //
+ // The size can be used if the backend needs to read the whole reader; use
+ // gofakes3.ReadAll() for this job rather than ioutil.ReadAll().
+ PutObject(bucketName, key string, meta map[string]string, input io.Reader, size int64) (PutObjectResult, error)
+
+ DeleteMulti(bucketName string, objects ...string) (MultiDeleteResult, error)
+}
+
+// VersionedBackend may be optionally implemented by a Backend in order to support
+// operations on S3 object versions.
+//
+// If you don't implement VersionedBackend, requests to GoFakeS3 that attempt to
+// make use of versions will return ErrNotImplemented if GoFakesS3 is unable to
+// find another way to satisfy the request.
+//
+type VersionedBackend interface {
+ // VersioningConfiguration must return a gofakes3.ErrNoSuchBucket error if the bucket
+ // does not exist. See gofakes3.BucketNotFound() for a convenient way to create one.
+ //
+ // If the bucket has never had versioning enabled, VersioningConfiguration MUST return
+ // empty strings (S300001).
+ VersioningConfiguration(bucket string) (VersioningConfiguration, error)
+
+ // SetVersioningConfiguration must return a gofakes3.ErrNoSuchBucket error if the bucket
+ // does not exist. See gofakes3.BucketNotFound() for a convenient way to create one.
+ SetVersioningConfiguration(bucket string, v VersioningConfiguration) error
+
+ // GetObject must return a gofakes3.ErrNoSuchKey error if the object does
+ // not exist. See gofakes3.KeyNotFound() for a convenient way to create
+ // one.
+ //
+ // If the returned Object is not nil, you MUST call Object.Contents.Close(),
+ // otherwise you will leak resources. Implementers should return a no-op
+ // implementation of io.ReadCloser.
+ //
+ // GetObject must return gofakes3.ErrNoSuchVersion if the version does not
+ // exist.
+ //
+ // If versioning has been enabled on a bucket, but subsequently suspended,
+ // GetObjectVersion should still return the object version (S300001).
+ //
+ // FIXME: s3assumer test; what happens when versionID is empty? Does it
+ // return the latest?
+ GetObjectVersion(
+ bucketName, objectName string,
+ versionID VersionID,
+ rangeRequest *ObjectRangeRequest) (*Object, error)
+
+ // HeadObjectVersion fetches the Object version from the backend, but the Contents will be
+ // a no-op ReadCloser.
+ //
+ // If the returned Object is not nil, you MUST call Object.Contents.Close(),
+ // otherwise you will leak resources. Implementers should return a no-op
+ // implementation of io.ReadCloser.
+ //
+ // HeadObjectVersion should return a gofakes3.ErrNoSuchKey error if the
+ // object does not exist; see gofakes3.KeyNotFound() for a convenient way
+ // to create one.
+ HeadObjectVersion(bucketName, objectName string, versionID VersionID) (*Object, error)
+
+ // DeleteObjectVersion permanently deletes a specific object version.
+ //
+ // DeleteObjectVersion must return a gofakes3.ErrNoSuchBucket error if the bucket
+ // does not exist. See gofakes3.BucketNotFound() for a convenient way to create one.
+ //
+ // If the bucket exists and either the object does not exist (S300003) or
+ // the version does not exist (S300002), you MUST return an empty
+ // ObjectDeleteResult and a nil error.
+ DeleteObjectVersion(bucketName, objectName string, versionID VersionID) (ObjectDeleteResult, error)
+
+ // Backend implementers can assume the ListBucketVersionsPage is valid:
+ // KeyMarker and VersionIDMarker will either both be set, or both be unset. No
+ // other combination will be present (S300004).
+ //
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html
+ //
+ // This MUST return the list of current versions with an empty VersionID
+ // even if versioning has never been enabled for the bucket (S300005).
+ //
+ // The Backend MUST treat a nil prefix identically to a zero prefix, and a
+ // nil page identically to a zero page.
+ ListBucketVersions(bucketName string, prefix *Prefix, page *ListBucketVersionsPage) (*ListBucketVersionsResult, error)
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/backend.go b/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/backend.go
new file mode 100644
index 00000000..dd1643e5
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/backend.go
@@ -0,0 +1,501 @@
+package s3mem
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "io"
+ "sync"
+
+ "github.com/johannesboyne/gofakes3"
+ "github.com/johannesboyne/gofakes3/internal/goskipiter"
+)
+
+var (
+ emptyPrefix = &gofakes3.Prefix{}
+ emptyVersionsPage = &gofakes3.ListBucketVersionsPage{}
+)
+
+type Backend struct {
+ buckets map[string]*bucket
+ timeSource gofakes3.TimeSource
+ versionGenerator *versionGenerator
+ versionSeed int64
+ versionSeedSet bool
+ versionScratch []byte
+ lock sync.RWMutex
+}
+
+var _ gofakes3.Backend = &Backend{}
+var _ gofakes3.VersionedBackend = &Backend{}
+
+type Option func(b *Backend)
+
+func WithTimeSource(timeSource gofakes3.TimeSource) Option {
+ return func(b *Backend) { b.timeSource = timeSource }
+}
+
+func WithVersionSeed(seed int64) Option {
+ return func(b *Backend) { b.versionSeed = seed; b.versionSeedSet = true }
+}
+
+func New(opts ...Option) *Backend {
+ b := &Backend{
+ buckets: make(map[string]*bucket),
+ }
+ for _, opt := range opts {
+ opt(b)
+ }
+ if b.timeSource == nil {
+ b.timeSource = gofakes3.DefaultTimeSource()
+ }
+ if b.versionGenerator == nil {
+ if b.versionSeedSet {
+ b.versionGenerator = newVersionGenerator(uint64(b.versionSeed), 0)
+ } else {
+ b.versionGenerator = newVersionGenerator(uint64(b.timeSource.Now().UnixNano()), 0)
+ }
+ }
+ return b
+}
+
+func (db *Backend) ListBuckets() ([]gofakes3.BucketInfo, error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ var buckets = make([]gofakes3.BucketInfo, 0, len(db.buckets))
+ for _, bucket := range db.buckets {
+ buckets = append(buckets, gofakes3.BucketInfo{
+ Name: bucket.name,
+ CreationDate: bucket.creationDate,
+ })
+ }
+
+ return buckets, nil
+}
+
+func (db *Backend) ListBucket(name string, prefix *gofakes3.Prefix, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
+ if prefix == nil {
+ prefix = emptyPrefix
+ }
+
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ storedBucket := db.buckets[name]
+ if storedBucket == nil {
+ return nil, gofakes3.BucketNotFound(name)
+ }
+
+ var response = gofakes3.NewObjectList()
+ var iter = goskipiter.New(storedBucket.objects.Iterator())
+ var match gofakes3.PrefixMatch
+
+ if page.Marker != "" {
+ iter.Seek(page.Marker)
+ iter.Next() // Move to the next item after the Marker
+ }
+
+ var cnt int64 = 0
+
+ var lastMatchedPart string
+
+ for iter.Next() {
+ item := iter.Value().(*bucketObject)
+
+ if !prefix.Match(item.data.name, &match) {
+ continue
+
+ } else if match.CommonPrefix {
+ if match.MatchedPart == lastMatchedPart {
+ continue // Should not count towards keys
+ }
+ response.AddPrefix(match.MatchedPart)
+ lastMatchedPart = match.MatchedPart
+
+ } else {
+ response.Add(&gofakes3.Content{
+ Key: item.data.name,
+ LastModified: gofakes3.NewContentTime(item.data.lastModified),
+ ETag: `"` + hex.EncodeToString(item.data.hash) + `"`,
+ Size: int64(len(item.data.body)),
+ })
+ }
+
+ cnt++
+ if page.MaxKeys > 0 && cnt >= page.MaxKeys {
+ response.NextMarker = item.data.name
+ response.IsTruncated = iter.Next()
+ break
+ }
+ }
+
+ return response, nil
+}
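+
+// Illustrative sketch only (not part of the upstream file): how a caller
+// might drive the Marker/MaxKeys pagination implemented above. The bucket
+// name and page size are hypothetical.
+//
+//	var marker string
+//	for {
+//		res, err := db.ListBucket("bucket", nil, gofakes3.ListBucketPage{Marker: marker, MaxKeys: 100})
+//		if err != nil {
+//			return err
+//		}
+//		// ... consume the returned objects and common prefixes ...
+//		if !res.IsTruncated {
+//			break
+//		}
+//		marker = res.NextMarker // resume just after the last returned key
+//	}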
+
+func (db *Backend) CreateBucket(name string) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ if db.buckets[name] != nil {
+ return gofakes3.ResourceError(gofakes3.ErrBucketAlreadyExists, name)
+ }
+
+ db.buckets[name] = newBucket(name, db.timeSource.Now(), db.nextVersion)
+ return nil
+}
+
+func (db *Backend) DeleteBucket(name string) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ if db.buckets[name] == nil {
+ return gofakes3.ErrNoSuchBucket
+ }
+
+ if db.buckets[name].objects.Len() > 0 {
+ return gofakes3.ResourceError(gofakes3.ErrBucketNotEmpty, name)
+ }
+
+ delete(db.buckets, name)
+
+ return nil
+}
+
+func (db *Backend) BucketExists(name string) (exists bool, err error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+ return db.buckets[name] != nil, nil
+}
+
+func (db *Backend) HeadObject(bucketName, objectName string) (*gofakes3.Object, error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return nil, gofakes3.BucketNotFound(bucketName)
+ }
+
+ obj := bucket.object(objectName)
+ if obj == nil || obj.data.deleteMarker {
+ return nil, gofakes3.KeyNotFound(objectName)
+ }
+
+ return obj.data.toObject(nil, false)
+}
+
+func (db *Backend) GetObject(bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return nil, gofakes3.BucketNotFound(bucketName)
+ }
+
+ obj := bucket.object(objectName)
+ if obj == nil || obj.data.deleteMarker {
+ // FIXME: If the current version of the object is a delete marker,
+ // Amazon S3 behaves as if the object was deleted and includes
+ // x-amz-delete-marker: true in the response.
+ //
+ // The solution may be to return an object but no error if the object is
+ // a delete marker, and let the main GoFakeS3 class decide what to do.
+ return nil, gofakes3.KeyNotFound(objectName)
+ }
+
+ result, err := obj.data.toObject(rangeRequest, true)
+ if err != nil {
+ return nil, err
+ }
+
+ if bucket.versioning != gofakes3.VersioningEnabled {
+ result.VersionID = ""
+ }
+
+ return result, nil
+}
+
+func (db *Backend) PutObject(bucketName, objectName string, meta map[string]string, input io.Reader, size int64) (result gofakes3.PutObjectResult, err error) {
+ // No need to lock the backend while we read the data into memory; locking
+ // here would hold the write lock open unnecessarily, and it could be blocked
+ // for an unreasonably long time by a connection timing out:
+ bts, err := gofakes3.ReadAll(input, size)
+ if err != nil {
+ return result, err
+ }
+
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return result, gofakes3.BucketNotFound(bucketName)
+ }
+
+ hash := md5.Sum(bts)
+
+ item := &bucketData{
+ name: objectName,
+ body: bts,
+ hash: hash[:],
+ etag: `"` + hex.EncodeToString(hash[:]) + `"`,
+ metadata: meta,
+ lastModified: db.timeSource.Now(),
+ }
+ bucket.put(objectName, item)
+
+ if bucket.versioning == gofakes3.VersioningEnabled {
+ // versionID is assigned in bucket.put()
+ result.VersionID = item.versionID
+ }
+
+ return result, nil
+}
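+
+// Illustrative sketch only (not part of the upstream file): the etag stored
+// above is the hex-encoded MD5 sum of the body wrapped in double quotes,
+// which matches S3's ETag format for non-multipart uploads:
+//
+//	sum := md5.Sum([]byte("hello"))
+//	etag := `"` + hex.EncodeToString(sum[:]) + `"`
+//	// etag == `"5d41402abc4b2a76b9719d911017c592"`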
+
+func (db *Backend) DeleteObject(bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return result, gofakes3.BucketNotFound(bucketName)
+ }
+
+ return bucket.rm(objectName, db.timeSource.Now())
+}
+
+func (db *Backend) DeleteMulti(bucketName string, objects ...string) (result gofakes3.MultiDeleteResult, err error) {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return result, gofakes3.BucketNotFound(bucketName)
+ }
+
+ now := db.timeSource.Now()
+
+ for _, object := range objects {
+ dresult, err := bucket.rm(object, now)
+ _ = dresult // FIXME: what to do with rm result in multi delete?
+
+ if err != nil {
+ errres := gofakes3.ErrorResultFromError(err)
+ if errres.Code == gofakes3.ErrInternal {
+ // FIXME: log
+ }
+
+ result.Error = append(result.Error, errres)
+
+ } else {
+ result.Deleted = append(result.Deleted, gofakes3.ObjectID{
+ Key: object,
+ })
+ }
+ }
+
+ return result, nil
+}
+
+func (db *Backend) VersioningConfiguration(bucketName string) (versioning gofakes3.VersioningConfiguration, rerr error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return versioning, gofakes3.BucketNotFound(bucketName)
+ }
+
+ versioning.Status = bucket.versioning
+
+ return versioning, nil
+}
+
+func (db *Backend) SetVersioningConfiguration(bucketName string, v gofakes3.VersioningConfiguration) error {
+ if v.MFADelete.Enabled() {
+ return gofakes3.ErrNotImplemented
+ }
+
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return gofakes3.BucketNotFound(bucketName)
+ }
+
+ bucket.setVersioning(v.Enabled())
+
+ return nil
+}
+
+func (db *Backend) GetObjectVersion(
+ bucketName, objectName string,
+ versionID gofakes3.VersionID,
+ rangeRequest *gofakes3.ObjectRangeRequest) (*gofakes3.Object, error) {
+
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return nil, gofakes3.BucketNotFound(bucketName)
+ }
+
+ ver, err := bucket.objectVersion(objectName, versionID)
+ if err != nil {
+ return nil, err
+ }
+
+ return ver.toObject(rangeRequest, true)
+}
+
+func (db *Backend) HeadObjectVersion(bucketName, objectName string, versionID gofakes3.VersionID) (*gofakes3.Object, error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return nil, gofakes3.BucketNotFound(bucketName)
+ }
+
+ ver, err := bucket.objectVersion(objectName, versionID)
+ if err != nil {
+ return nil, err
+ }
+
+ return ver.toObject(nil, false)
+}
+
+func (db *Backend) DeleteObjectVersion(bucketName, objectName string, versionID gofakes3.VersionID) (result gofakes3.ObjectDeleteResult, rerr error) {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return result, gofakes3.BucketNotFound(bucketName)
+ }
+
+ return bucket.rmVersion(objectName, versionID, db.timeSource.Now())
+}
+
+func (db *Backend) ListBucketVersions(
+ bucketName string,
+ prefix *gofakes3.Prefix,
+ page *gofakes3.ListBucketVersionsPage,
+) (*gofakes3.ListBucketVersionsResult, error) {
+ if prefix == nil {
+ prefix = emptyPrefix
+ }
+ if page == nil {
+ page = emptyVersionsPage
+ }
+
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ result := gofakes3.NewListBucketVersionsResult(bucketName, prefix, page)
+
+ bucket := db.buckets[bucketName]
+ if bucket == nil {
+ return result, gofakes3.BucketNotFound(bucketName)
+ }
+
+ var iter = goskipiter.New(bucket.objects.Iterator())
+ var match gofakes3.PrefixMatch
+
+ if page.KeyMarker != "" {
+ if !prefix.Match(page.KeyMarker, &match) {
+ // FIXME: NO idea what S3 would do here.
+ return result, gofakes3.ErrInternal
+ }
+ iter.Seek(page.KeyMarker)
+ }
+
+ var truncated = false
+ var first = true
+ var cnt int64 = 0
+
+ // FIXME: The S3 docs have this to say on the topic of result ordering:
+ // "The following request returns objects in the order they were stored,
+ // returning the most recently stored object first starting with the value
+ // for key-marker."
+ //
+ // OK so this method....
+ // - Returns objects in the order they were stored
+ // - Returning the most recently stored object first
+ //
+ // This makes no sense at all!
+
+ for iter.Next() {
+ object := iter.Value().(*bucketObject)
+
+ if !prefix.Match(object.name, &match) {
+ continue
+ }
+
+ if match.CommonPrefix {
+ result.AddPrefix(match.MatchedPart)
+ continue
+ }
+
+ versions := object.Iterator()
+ if first {
+ if page.VersionIDMarker != "" {
+ if !versions.Seek(page.VersionIDMarker) {
+ // FIXME: log
+ return result, gofakes3.ErrInternal
+ }
+ }
+ first = false
+ }
+
+ for versions.Next() {
+ version := versions.Value()
+
+ if version.deleteMarker {
+ marker := &gofakes3.DeleteMarker{
+ Key: version.name,
+ IsLatest: version == object.data,
+ LastModified: gofakes3.NewContentTime(version.lastModified),
+ }
+ if bucket.versioning != gofakes3.VersioningNone { // S300005
+ marker.VersionID = version.versionID
+ }
+ result.Versions = append(result.Versions, marker)
+
+ } else {
+ resultVer := &gofakes3.Version{
+ Key: version.name,
+ IsLatest: version == object.data,
+ LastModified: gofakes3.NewContentTime(version.lastModified),
+ Size: int64(len(version.body)),
+ ETag: version.etag,
+ }
+ if bucket.versioning != gofakes3.VersioningNone { // S300005
+ resultVer.VersionID = version.versionID
+ }
+ result.Versions = append(result.Versions, resultVer)
+ }
+
+ cnt++
+ if page.MaxKeys > 0 && cnt >= page.MaxKeys {
+ truncated = versions.Next()
+ goto done
+ }
+ }
+ }
+
+done:
+ result.IsTruncated = truncated || iter.Next()
+
+ return result, nil
+}
+
+// nextVersion assumes the backend's lock is acquired
+func (db *Backend) nextVersion() gofakes3.VersionID {
+ v, scr := db.versionGenerator.Next(db.versionScratch)
+ db.versionScratch = scr
+ return v
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/bucket.go b/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/bucket.go
new file mode 100644
index 00000000..11a659be
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/bucket.go
@@ -0,0 +1,274 @@
+package s3mem
+
+import (
+ "bytes"
+ "io"
+ "time"
+
+ "github.com/johannesboyne/gofakes3"
+ "github.com/johannesboyne/gofakes3/internal/s3io"
+ "github.com/ryszard/goskiplist/skiplist"
+)
+
+type versionGenFunc func() gofakes3.VersionID
+
+type versioningStatus int
+
+type bucket struct {
+ name string
+ versioning gofakes3.VersioningStatus
+ versionGen versionGenFunc
+ creationDate gofakes3.ContentTime
+
+ objects *skiplist.SkipList
+}
+
+func newBucket(name string, at time.Time, versionGen versionGenFunc) *bucket {
+ return &bucket{
+ name: name,
+ creationDate: gofakes3.NewContentTime(at),
+ versionGen: versionGen,
+ objects: skiplist.NewStringMap(),
+ }
+}
+
+type bucketObject struct {
+ name string
+ data *bucketData
+ versions *skiplist.SkipList
+}
+
+func (b *bucketObject) Iterator() *bucketObjectIterator {
+ var iter skiplist.Iterator
+ if b.versions != nil {
+ iter = b.versions.Iterator()
+ }
+
+ return &bucketObjectIterator{
+ data: b.data,
+ iter: iter,
+ }
+}
+
+type bucketObjectIterator struct {
+ data *bucketData
+ iter skiplist.Iterator
+ cur *bucketData
+ seenData bool
+ done bool
+}
+
+func (b *bucketObjectIterator) Seek(key gofakes3.VersionID) bool {
+ if b.iter.Seek(key) {
+ return true
+ }
+
+ b.iter = nil
+ if b.data != nil && b.data.versionID == key {
+ return true
+ }
+
+ b.data = nil
+ b.done = true
+
+ return false
+}
+
+func (b *bucketObjectIterator) Next() bool {
+ if b.done {
+ return false
+ }
+
+ if b.iter != nil {
+ iterAlive := b.iter.Next()
+ if iterAlive {
+ b.cur = b.iter.Value().(*bucketData)
+ return true
+ }
+
+ b.iter.Close()
+ b.iter = nil
+ }
+
+ if b.data != nil {
+ b.cur = b.data
+ b.data = nil
+ return true
+ }
+
+ b.done = true
+ return false
+}
+
+func (b *bucketObjectIterator) Close() {
+ if b.iter != nil {
+ b.iter.Close()
+ }
+ b.done = true
+}
+
+func (b *bucketObjectIterator) Value() *bucketData {
+ return b.cur
+}
+
+type bucketData struct {
+ name string
+ lastModified time.Time
+ versionID gofakes3.VersionID
+ deleteMarker bool
+ body []byte
+ hash []byte
+ etag string
+ metadata map[string]string
+}
+
+func (bi *bucketData) toObject(rangeRequest *gofakes3.ObjectRangeRequest, withBody bool) (obj *gofakes3.Object, err error) {
+ sz := int64(len(bi.body))
+ data := bi.body
+
+ var contents io.ReadCloser
+ var rnge *gofakes3.ObjectRange
+
+ if withBody {
+ // In case of a range request the correct part of the slice is extracted:
+ rnge, err = rangeRequest.Range(sz)
+ if err != nil {
+ return nil, err
+ }
+
+ if rnge != nil {
+ data = data[rnge.Start : rnge.Start+rnge.Length]
+ }
+
+ // The data slice should be completely replaced if the bucket item is edited, so
+ // it should be safe to return the data slice directly.
+ contents = s3io.ReaderWithDummyCloser{bytes.NewReader(data)}
+
+ } else {
+ contents = s3io.NoOpReadCloser{}
+ }
+
+ return &gofakes3.Object{
+ Name: bi.name,
+ Hash: bi.hash,
+ Metadata: bi.metadata,
+ Size: sz,
+ Range: rnge,
+ IsDeleteMarker: bi.deleteMarker,
+ VersionID: bi.versionID,
+ Contents: contents,
+ }, nil
+}
+
+func (b *bucket) setVersioning(enabled bool) {
+ if enabled {
+ b.versioning = gofakes3.VersioningEnabled
+ } else if b.versioning == gofakes3.VersioningEnabled {
+ b.versioning = gofakes3.VersioningSuspended
+ }
+}
+
+func (b *bucket) object(objectName string) (obj *bucketObject) {
+ objIface, _ := b.objects.Get(objectName)
+ if objIface == nil {
+ return nil
+ }
+ obj, _ = objIface.(*bucketObject)
+ return obj
+}
+
+func (b *bucket) objectVersion(objectName string, versionID gofakes3.VersionID) (*bucketData, error) {
+ obj := b.object(objectName)
+ if obj == nil {
+ return nil, gofakes3.KeyNotFound(objectName)
+ }
+
+ if obj.data != nil && obj.data.versionID == versionID {
+ return obj.data, nil
+ }
+ if obj.versions == nil {
+ return nil, gofakes3.ErrNoSuchVersion
+ }
+ versionIface, _ := obj.versions.Get(versionID)
+ if versionIface == nil {
+ return nil, gofakes3.ErrNoSuchVersion
+ }
+
+ return versionIface.(*bucketData), nil
+}
+
+func (b *bucket) put(name string, item *bucketData) {
+ // Always generate a version for convenience; we can just mask it on return.
+ item.versionID = b.versionGen()
+
+ object := b.object(name)
+ if object == nil {
+ object = &bucketObject{name: name}
+ b.objects.Set(name, object)
+ }
+
+ if b.versioning == gofakes3.VersioningEnabled {
+ if object.data != nil {
+ if object.versions == nil {
+ object.versions = skiplist.NewCustomMap(func(l, r interface{}) bool {
+ return l.(gofakes3.VersionID) < r.(gofakes3.VersionID)
+ })
+ }
+ object.versions.Set(object.data.versionID, object.data)
+ }
+ }
+
+ object.data = item
+}
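+
+// Illustrative sketch only (not part of the upstream file): the retention
+// behaviour of put when versioning is enabled on the bucket. v1 and v2 are
+// hypothetical *bucketData values.
+//
+//	b.put("key", v1) // object.data == v1; no versions list yet
+//	b.put("key", v2) // v1 is moved into object.versions; object.data == v2
+//
+// With versioning disabled or suspended, the previous data is simply
+// replaced and only the latest item is retained.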
+
+func (b *bucket) rm(name string, at time.Time) (result gofakes3.ObjectDeleteResult, rerr error) {
+ object := b.object(name)
+ if object == nil {
+ // S3 does not report an error when attempting to delete a key that does not exist
+ return result, nil
+ }
+
+ if b.versioning == gofakes3.VersioningEnabled {
+ item := &bucketData{lastModified: at, name: name, deleteMarker: true}
+ b.put(name, item)
+ result.IsDeleteMarker = true
+ result.VersionID = item.versionID
+
+ } else {
+ object.data = nil
+ if object.versions == nil || object.versions.Len() == 0 {
+ b.objects.Delete(name)
+ }
+ }
+
+ return result, nil
+}
+
+func (b *bucket) rmVersion(name string, versionID gofakes3.VersionID, at time.Time) (result gofakes3.ObjectDeleteResult, rerr error) {
+ object := b.object(name)
+ if object == nil {
+ return result, nil
+
+ } else if object.data != nil && object.data.versionID == versionID {
+ result.VersionID = versionID
+ result.IsDeleteMarker = object.data.deleteMarker
+ object.data = nil
+
+ } else if object.versions != nil {
+ versionIface, ok := object.versions.Delete(versionID)
+ if !ok {
+ // S3 does not report an error when attempting to delete a key that does not exist
+ return result, nil
+ }
+
+ version := versionIface.(*bucketData)
+ result.VersionID = version.versionID
+ result.IsDeleteMarker = version.deleteMarker
+ }
+
+ if object.data == nil && (object.versions == nil || object.versions.Len() == 0) {
+ b.objects.Delete(name)
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/versionid.go b/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/versionid.go
new file mode 100644
index 00000000..64a4380f
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/backend/s3mem/versionid.go
@@ -0,0 +1,67 @@
+package s3mem
+
+import (
+ "encoding/base32"
+ "fmt"
+ "math/big"
+ "sync"
+
+ "github.com/johannesboyne/gofakes3"
+)
+
+var add1 = new(big.Int).SetInt64(1)
+
+type versionGenerator struct {
+ state uint64
+ size int
+ next *big.Int
+ mu sync.Mutex
+}
+
+func newVersionGenerator(seed uint64, size int) *versionGenerator {
+ if size <= 0 {
+ size = 64
+ }
+ return &versionGenerator{next: new(big.Int), state: seed, size: size}
+}
+
+func (v *versionGenerator) Next(scratch []byte) (gofakes3.VersionID, []byte) {
+ v.mu.Lock()
+
+ v.next.Add(v.next, add1)
+ idb := []byte(fmt.Sprintf("%030d", v.next))
+
+ neat := v.size/8*8 + 8 // cheap and nasty way to ensure a multiple of 8 definitely greater than size
+
+ scratchLen := len(idb) + neat + 1
+ if len(scratch) < scratchLen {
+ scratch = make([]byte, scratchLen)
+ }
+ copy(scratch, idb)
+
+ b := scratch[len(idb)+1:]
+
+ // This is a simple inline implementation of http://xoshiro.di.unimi.it/splitmix64.c.
+ // It may not ultimately be the right tool for this job but with a large
+ // enough size the collision risk should still be minuscule.
+ for i := 0; i < neat; i += 8 {
+ v.state += 0x9E3779B97F4A7C15
+ z := v.state
+ z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9
+ z = (z ^ (z >> 27)) * 0x94D049BB133111EB
+ b[i], b[i+1], b[i+2], b[i+3], b[i+4], b[i+5], b[i+6], b[i+7] =
+ byte(z), byte(z>>8), byte(z>>16), byte(z>>24), byte(z>>32), byte(z>>40), byte(z>>48), byte(z>>56)
+ }
+
+ v.mu.Unlock()
+
+ // The version IDs that come out of S3 appear to start with '3/' and follow
+ // with a base64-URL encoded blast of god knows what. There didn't appear
+ // to be any explanation of the format beyond that, but let's copy it anyway.
+ //
+ // Base64 is not sortable though, and we need our versions to be lexicographically
+ // sortable for the SkipList key, so we have to encode it as base32hex, which _is_
+ // sortable, and just pretend that it's "Base64". Phew!
+
+ return gofakes3.VersionID(fmt.Sprintf("3/%s", base32.HexEncoding.EncodeToString(scratch))), scratch
+}
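+
+// Illustrative sketch only (not part of the upstream file): base32hex, unlike
+// base64, uses an alphabet that is in ASCII order, so for equal-length inputs
+// the encoded strings sort the same way as the raw bytes. That property is
+// what makes the generated IDs usable as SkipList keys:
+//
+//	a := base32.HexEncoding.EncodeToString([]byte{0, 1})
+//	b := base32.HexEncoding.EncodeToString([]byte{0, 2})
+//	fmt.Println(a < b) // true: encoded order matches byte order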
diff --git a/vendor/github.com/johannesboyne/gofakes3/ca-certificates.crt b/vendor/github.com/johannesboyne/gofakes3/ca-certificates.crt
new file mode 100644
index 00000000..39ba3368
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/ca-certificates.crt
@@ -0,0 +1,3646 @@
+##
+## Bundle of CA Root Certificates
+##
+## Certificate data from Mozilla as of: Wed Sep 20 03:12:05 2017 GMT
+##
+## This is a bundle of X.509 certificates of public Certificate Authorities
+## (CA). These were automatically extracted from Mozilla's root certificates
+## file (certdata.txt). This file can be found in the mozilla source tree:
+## https://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt
+##
+## It contains the certificates in PEM format and therefore
+## can be directly used with curl / libcurl / php_curl, or with
+## an Apache+mod_ssl webserver for SSL client authentication.
+## Just configure this file as the SSLCACertificateFile.
+##
+## Conversion done with mk-ca-bundle.pl version 1.27.
+## SHA256: 2b2dbe5244e0047e088c597998883a913f6c5fffd1cb5c0fe5a368c8466cb2ec
+##
+
+
+GlobalSign Root CA
+==================
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx
+GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds
+b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV
+BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD
+VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa
+DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc
+THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb
+Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP
+c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX
+gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF
+AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj
+Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG
+j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH
+hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC
+X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+GlobalSign Root CA - R2
+=======================
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv
+YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh
+bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT
+aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln
+bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6
+ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp
+s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN
+S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL
+TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C
+ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i
+YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN
+BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp
+9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu
+01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7
+9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
+Verisign Class 3 Public Primary Certification Authority - G3
+============================================================
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQswCQYDVQQGEwJV
+UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv
+cmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRy
+dXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhv
+cml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWduIENsYXNzIDMgUHVibGljIFByaW1hcnkg
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAMu6nFL8eB8aHm8bN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1
+EUGO+i2tKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGukxUc
+cLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBmCC+Vk7+qRy+oRpfw
+EuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj
+055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWuimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA
+ERSWwauSCPc/L8my/uRan2Te2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5f
+j267Cz3qWhMeDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565pF4ErWjfJXir0
+xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGtTxzhT5yvDwyd93gN2PQ1VoDa
+t20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+
+Entrust.net Premium 2048 Secure Server CA
+=========================================
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u
+ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp
+bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV
+BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx
+NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3
+d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl
+MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u
+ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL
+Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr
+hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW
+nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi
+VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ
+KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy
+T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT
+J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e
+nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+Baltimore CyberTrust Root
+=========================
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE
+ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li
+ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC
+SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs
+dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME
+uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB
+UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C
+G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9
+XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr
+l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI
+VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB
+BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh
+cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5
+hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa
+Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H
+RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+AddTrust External Root
+======================
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEUMBIGA1UEChML
+QWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYD
+VQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEw
+NDgzOFowbzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRU
+cnVzdCBFeHRlcm5hbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0Eg
+Um9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvtH7xsD821
++iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9uMq/NzgtHj6RQa1wVsfw
+Tz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzXmk6vBbOmcZSccbNQYArHE504B4YCqOmo
+aSYYkKtMsE8jqzpPhNjfzp/haW+710LXa0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy
+2xSoRcRdKn23tNbE7qzNE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv7
+7+ldU9U0WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYDVR0P
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0Jvf6xCZU7wO94CTL
+VBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRk
+VHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB
+IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZl
+j7DYd7usQWxHYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvCNr4TDea9Y355
+e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEXc4g/VhsxOBi0cQ+azcgOno4u
+G+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5amnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
+Entrust Root Certification Authority
+====================================
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV
+BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw
+b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG
+A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0
+MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu
+MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu
+Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v
+dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz
+A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww
+Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68
+j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN
+rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw
+DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1
+MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH
+hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM
+Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa
+v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS
+W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0
+tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+GeoTrust Global CA
+==================
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw
+MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j
+LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo
+BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet
+8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc
+T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU
+vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk
+DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q
+zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4
+d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2
+mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p
+XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm
+Mw==
+-----END CERTIFICATE-----
+
+GeoTrust Universal CA
+=====================
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN
+R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1
+MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu
+Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t
+JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e
+RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs
+7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d
+8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V
+qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga
+Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB
+Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu
+KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08
+ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0
+XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB
+hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2
+qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL
+oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK
+xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF
+KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2
+DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK
+xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU
+p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI
+P/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+GeoTrust Universal CA 2
+=======================
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN
+R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0
+MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg
+SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0
+DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17
+j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q
+JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a
+QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2
+WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP
+20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn
+ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC
+SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG
+8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2
++/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E
+BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ
+4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+
+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq
+A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg
+Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP
+pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d
+FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp
+gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm
+X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
+Visa eCommerce Root
+===================
+-----BEGIN CERTIFICATE-----
+MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBrMQswCQYDVQQG
+EwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2Ug
+QXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2
+WhcNMjIwNjI0MDAxNjEyWjBrMQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMm
+VmlzYSBJbnRlcm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv
+bW1lcmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h2mCxlCfL
+F9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4ElpF7sDPwsRROEW+1QK8b
+RaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdVZqW1LS7YgFmypw23RuwhY/81q6UCzyr0
+TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI
+/k4+oKsGGelT84ATB+0tvz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzs
+GHxBvfaLdXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG
+MB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUFAAOCAQEAX/FBfXxc
+CLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcRzCSs00Rsca4BIGsDoo8Ytyk6feUW
+YFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pz
+zkWKsKZJ/0x9nXGIxHYdkFsd7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBu
+YQa7FkKMcPcw++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt
+398znM/jra6O1I7mT1GvFpLgXPYHDw==
+-----END CERTIFICATE-----
+
+Certum Root CA
+==============
+-----BEGIN CERTIFICATE-----
+MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQK
+ExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBDQTAeFw0wMjA2MTExMDQ2Mzla
+Fw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBMMRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8u
+by4xEjAQBgNVBAMTCUNlcnR1bSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6x
+wS7TT3zNJc4YPk/EjG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdL
+kKWoePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GIULdtlkIJ
+89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapuOb7kky/ZR6By6/qmW6/K
+Uz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUgAKpoC6EahQGcxEZjgoi2IrHu/qpGWX7P
+NSzVttpd90gzFFS269lvzs2I1qsb2pY7HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
+hkiG9w0BAQUFAAOCAQEAuI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+
+GXYkHAQaTOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTgxSvg
+GrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1qCjqTE5s7FCMTY5w/
+0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5xO/fIR/RpbxXyEV6DHpx8Uq79AtoS
+qFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs6GAqm4VKQPNriiTsBhYscw==
+-----END CERTIFICATE-----
+
+Comodo AAA Services root
+========================
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS
+R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg
+TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw
+MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl
+c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV
+BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG
+C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs
+i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW
+Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH
+Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK
+Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f
+BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl
+cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz
+LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm
+7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z
+8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C
+12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+QuoVadis Root CA
+================
+-----BEGIN CERTIFICATE-----
+MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE
+ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz
+MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp
+cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD
+EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk
+J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL
+F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL
+YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen
+AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w
+PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y
+ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7
+MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj
+YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs
+ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
+Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW
+Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu
+BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw
+FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6
+tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo
+fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul
+LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x
+gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi
+5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi
+5nrQNiOKSnQ2+Q==
+-----END CERTIFICATE-----
+
+QuoVadis Root CA 2
+==================
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT
+EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx
+ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC
+DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6
+XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk
+lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB
+lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy
+lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt
+66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn
+wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh
+D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy
+BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie
+J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud
+DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU
+a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv
+Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3
+UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm
+VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK
++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW
+IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1
+WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X
+f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II
+4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8
+VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+QuoVadis Root CA 3
+==================
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT
+EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx
+OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC
+DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg
+DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij
+KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K
+DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv
+BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp
+p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8
+nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX
+MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM
+Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz
+uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT
+BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj
+YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB
+BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD
+VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4
+ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE
+AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV
+qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s
+hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z
+POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2
+Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp
+8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC
+bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu
+g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p
+vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr
+qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+Security Communication Root CA
+==============================
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP
+U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw
+HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP
+U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw
+ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw
+8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM
+DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX
+5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd
+DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2
+JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw
+DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g
+0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a
+mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ
+s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ
+6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi
+FL39vmwLAw==
+-----END CERTIFICATE-----
+
+Sonera Class 2 Root CA
+======================
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG
+U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw
+NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh
+IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3
+/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT
+dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG
+f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P
+tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH
+nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT
+XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt
+0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI
+cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph
+Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx
+EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH
+llpwrN9M
+-----END CERTIFICATE-----
+
+Camerfirma Chambers of Commerce Root
+====================================
+-----BEGIN CERTIFICATE-----
+MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe
+QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i
+ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx
+NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp
+cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn
+MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC
+AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU
+xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH
+NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW
+DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV
+d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud
+EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v
+cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P
+AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh
+bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD
+VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz
+aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi
+fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD
+L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN
+UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n
+ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1
+erfutGWaIZDgqtCYvDi1czyL+Nw=
+-----END CERTIFICATE-----
+
+Camerfirma Global Chambersign Root
+==================================
+-----BEGIN CERTIFICATE-----
+MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe
+QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i
+ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx
+NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt
+YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg
+MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw
+ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J
+1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O
+by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl
+6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c
+8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/
+BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j
+aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B
+Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj
+aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y
+ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh
+bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA
+PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y
+gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ
+PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4
+IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes
+t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A==
+-----END CERTIFICATE-----
+
+XRamp Global CA Root
+====================
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE
+BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj
+dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx
+HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg
+U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu
+IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx
+foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE
+zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs
+AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry
+xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap
+oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC
+AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc
+/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n
+nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz
+8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+Go Daddy Class 2 CA
+===================
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY
+VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG
+A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g
+RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD
+ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv
+2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32
+qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j
+YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY
+vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O
+BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o
+atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu
+MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG
+A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim
+PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt
+I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI
+Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b
+vZ8=
+-----END CERTIFICATE-----
+
+Starfield Class 2 CA
+====================
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc
+U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg
+Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo
+MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG
+A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG
+SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY
+bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ
+JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm
+epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN
+F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF
+MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f
+hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo
+bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g
+QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs
+afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM
+PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD
+KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3
+QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+StartCom Certification Authority
+================================
+-----BEGIN CERTIFICATE-----
+MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN
+U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu
+ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0
+NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk
+LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg
+U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y
+o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/
+Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d
+eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt
+2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z
+6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ
+osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/
+untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc
+UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT
+37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
+FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0
+Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj
+YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH
+AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw
+Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg
+U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5
+LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh
+cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT
+dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC
+AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh
+3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm
+vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk
+fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3
+fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ
+EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
+yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl
+1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/
+lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro
+g14=
+-----END CERTIFICATE-----
+
+Taiwan GRCA
+===========
+-----BEGIN CERTIFICATE-----
+MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG
+EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X
+DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv
+dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN
+w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5
+BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O
+1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO
+htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov
+J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7
+Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t
+B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB
+O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8
+lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV
+HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2
+09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
+TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj
+Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2
+Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU
+D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz
+DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk
+Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk
+7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ
+CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy
++fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS
+-----END CERTIFICATE-----
+
+DigiCert Assured ID Root CA
+===========================
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw
+IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx
+MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL
+ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew
+ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO
+9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy
+UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW
+/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy
+oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf
+GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF
+66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq
+hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc
+EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn
+SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i
+8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+DigiCert Global Root CA
+=======================
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw
+HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw
+MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3
+dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn
+TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5
+BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H
+4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y
+7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB
+o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm
+8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF
+BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr
+EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt
+tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886
+UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+DigiCert High Assurance EV Root CA
+==================================
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw
+KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw
+MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ
+MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu
+Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t
+Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS
+OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3
+MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ
+NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe
+h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB
+Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY
+JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ
+V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp
+myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK
+mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K
+-----END CERTIFICATE-----
+
+Certplus Class 2 Primary CA
+===========================
+-----BEGIN CERTIFICATE-----
+MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE
+BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN
+OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy
+dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR
+5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ
+Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO
+YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e
+e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME
+CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ
+YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t
+L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD
+P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R
+TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+
+7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW
+//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
+l7+ijrRU
+-----END CERTIFICATE-----
+
+DST Root CA X3
+==============
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK
+ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X
+DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1
+cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT
+rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9
+UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy
+xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d
+utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ
+MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug
+dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE
+GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw
+RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS
+fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+
+DST ACES CA X6
+==============
+-----BEGIN CERTIFICATE-----
+MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG
+EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT
+MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha
+MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE
+CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI
+DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa
+pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow
+GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy
+MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu
+Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy
+dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU
+CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2
+5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t
+Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq
+nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs
+vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3
+oKfN5XozNmr6mis=
+-----END CERTIFICATE-----
+
+SwissSign Gold CA - G2
+======================
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw
+EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN
+MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp
+c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq
+t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C
+jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg
+vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF
+ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR
+AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend
+jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO
+peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR
+7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi
+GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64
+OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm
+5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr
+44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf
+Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m
+Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp
+mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk
+vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf
+KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br
+NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj
+viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+SwissSign Silver CA - G2
+========================
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT
+BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X
+DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3
+aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG
+9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644
+N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm
++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH
+6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu
+MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h
+qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5
+FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs
+ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc
+celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X
+CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB
+tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P
+4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F
+kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L
+3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx
+/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa
+DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP
+e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu
+WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ
+DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub
+DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+GeoTrust Primary Certification Authority
+========================================
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx
+CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ
+cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN
+b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9
+nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge
+RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt
+tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI
+hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K
+Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN
+NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa
+Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG
+1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+thawte Primary Root CA
+======================
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE
+BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2
+aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv
+cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3
+MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg
+SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv
+KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT
+FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs
+oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ
+1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc
+q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K
+aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p
+afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF
+AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE
+uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89
+jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH
+z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA==
+-----END CERTIFICATE-----
+
+VeriSign Class 3 Public Primary Certification Authority - G5
+============================================================
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE
+BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO
+ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk
+IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln
+biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh
+dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt
+YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
+ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz
+j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD
+Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/
+Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r
+fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/
+BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv
+Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG
+SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+
+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE
+KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC
+Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE
+ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
+SecureTrust CA
+==============
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG
+EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy
+dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe
+BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX
+OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t
+DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH
+GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b
+01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH
+ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj
+aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ
+KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu
+SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf
+mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ
+nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+Secure Global CA
+================
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG
+EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH
+bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg
+MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg
+Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx
+YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ
+bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g
+8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV
+HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi
+0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn
+oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA
+MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+
+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn
+CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5
+3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+COMODO Certification Authority
+==============================
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE
+BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG
+A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1
+dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb
+MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD
+T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH
++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww
+xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV
+4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA
+1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI
+rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k
+b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC
+AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP
+OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc
+IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN
++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ==
+-----END CERTIFICATE-----
+
+Network Solutions Certificate Authority
+=======================================
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG
+EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr
+IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx
+MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx
+jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT
+aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT
+crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc
+/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB
+AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv
+bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA
+A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q
+4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/
+GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD
+ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+COMODO ECC Certification Authority
+==================================
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC
+R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE
+ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix
+GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X
+4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni
+wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG
+FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA
+U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+Security Communication EV RootCA1
+=================================
+-----BEGIN CERTIFICATE-----
+MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh
+dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE
+BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl
+Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO
+/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX
+WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z
+ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4
+bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK
+9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm
+iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG
+Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW
+mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW
+T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490
+-----END CERTIFICATE-----
+
+OISTE WISeKey Global Root GA CA
+===============================
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE
+BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG
+A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH
+bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD
+VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw
+IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5
+IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9
+Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg
+Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD
+d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ
+/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R
+LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ
+KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm
+MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4
++vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
+hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY
+okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0=
+-----END CERTIFICATE-----
+
+Certigna
+========
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw
+EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3
+MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI
+Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q
+XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH
+GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p
+ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg
+DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf
+Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ
+tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ
+BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J
+SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA
+hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+
+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu
+PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY
+1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+Deutsche Telekom Root CA 2
+==========================
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT
+RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG
+A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5
+MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G
+A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS
+b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5
+bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI
+KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY
+AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK
+Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV
+jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV
+HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr
+E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy
+zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8
+rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G
+dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
+Cm26OWMohpLzGITY+9HPBVZkVw==
+-----END CERTIFICATE-----
+
+Cybertrust Global Root
+======================
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li
+ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4
+MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD
+ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
++Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW
+0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL
+AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin
+89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT
+8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2
+MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G
+A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO
+lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi
+5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2
+hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T
+X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
+ePKI Root Certification Authority
+=================================
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG
+EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg
+Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx
+MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq
+MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs
+IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi
+lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv
+qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX
+12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O
+WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+
+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao
+lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/
+vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi
+Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi
+MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0
+1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq
+KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV
+xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP
+NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r
+GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE
+xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx
+gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy
+sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD
+BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3
+=============================================================================================================================
+-----BEGIN CERTIFICATE-----
+MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH
+DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q
+aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry
+b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV
+BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg
+S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4
+MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl
+IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF
+n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl
+IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft
+dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl
+cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO
+Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1
+xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR
+6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL
+hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd
+BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4
+N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT
+y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh
+LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M
+dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI=
+-----END CERTIFICATE-----
+
+certSIGN ROOT CA
+================
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD
+VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa
+Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE
+CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I
+JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH
+rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2
+ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD
+0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943
+AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B
+Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB
+AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8
+SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0
+x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt
+vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz
+TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+GeoTrust Primary Certification Authority - G3
+=============================================
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0
+IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy
+eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz
+NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo
+YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT
+LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j
+K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE
+c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C
+IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu
+dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr
+2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9
+cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE
+Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s
+t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+thawte Primary Root CA - G2
+===========================
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC
+VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu
+IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg
+Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV
+MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG
+b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt
+IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS
+LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5
+8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU
+mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN
+G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K
+rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+thawte Primary Root CA - G3
+===========================
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE
+BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2
+aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv
+cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w
+ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD
+VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG
+A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At
+P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC
++BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY
+7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW
+vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ
+KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK
+A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC
+8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm
+er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+GeoTrust Primary Certification Authority - G2
+=============================================
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu
+Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1
+OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl
+b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG
+BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc
+KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+
+EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m
+ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2
+npaqBA+K
+-----END CERTIFICATE-----
+
+VeriSign Universal Root Certification Authority
+===============================================
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE
+BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO
+ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk
+IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u
+IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV
+UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv
+cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj
+1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP
+MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72
+9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I
+AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR
+tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G
+CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O
+a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3
+Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx
+Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx
+P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P
+wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4
+mJO37M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+VeriSign Class 3 Public Primary Certification Authority - G4
+============================================================
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC
+VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3
+b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz
+ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU
+cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo
+b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5
+IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8
+Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz
+rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw
+HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u
+Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD
+A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx
+AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+NetLock Arany (Class Gold) Főtanúsítvány
+========================================
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G
+A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610
+dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB
+cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx
+MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO
+ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv
+biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6
+c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu
+0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw
+/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk
+H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw
+fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1
+neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW
+qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta
+YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna
+NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu
+dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+Staat der Nederlanden Root CA - G2
+==================================
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE
+CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
+Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC
+TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l
+ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ
+5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn
+vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj
+CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil
+e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR
+OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI
+CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65
+48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi
+trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737
+qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB
+AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC
+ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA
+A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz
++51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj
+f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN
+kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk
+CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF
+URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb
+CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h
+oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV
+IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm
+66+KAQ==
+-----END CERTIFICATE-----
+
+Hongkong Post Root CA 1
+=======================
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT
+DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx
+NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n
+IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1
+ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr
+auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh
+qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY
+V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV
+HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i
+h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio
+l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei
+IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps
+T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT
+c4afU9hDDl3WY4JxHYB0yvbiAmvZWg==
+-----END CERTIFICATE-----
+
+SecureSign RootCA11
+===================
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi
+SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS
+b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw
+KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1
+cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL
+TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO
+wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq
+g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP
+O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA
+bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX
+t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh
+OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r
+bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ
+Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01
+y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061
+lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+ACEDICOM Root
+=============
+-----BEGIN CERTIFICATE-----
+MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD
+T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4
+MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG
+A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk
+WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD
+YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew
+MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb
+m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk
+HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT
+xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2
+3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9
+2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq
+TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz
+4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU
+9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv
+bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg
+aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP
+eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk
+zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1
+ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI
+KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq
+nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE
+I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp
+MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o
+tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA==
+-----END CERTIFICATE-----
+
+Microsec e-Szigno Root CA 2009
+==============================
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER
+MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv
+c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE
+BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt
+U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA
+fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG
+0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA
+pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm
+1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC
+AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf
+QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE
+FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o
+lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX
+I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02
+yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi
+LXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+GlobalSign Root CA - R3
+=======================
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv
+YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh
+bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT
+aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln
+bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt
+iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ
+0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3
+rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl
+OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2
+xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
+FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7
+lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8
+EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E
+bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18
+YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r
+kpeDMdmztcpHWD9f
+-----END CERTIFICATE-----
+
+Autoridad de Certificacion Firmaprofesional CIF A62634068
+=========================================================
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA
+BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2
+MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw
+QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB
+NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD
+Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P
+B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY
+7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH
+ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI
+plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX
+MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX
+LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK
+bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU
+vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud
+EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH
+DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA
+bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx
+ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx
+51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk
+R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP
+T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f
+Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl
+osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR
+crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR
+saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD
+KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi
+6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+Izenpe.com
+==========
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG
+EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz
+MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu
+QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ
+03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK
+ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU
++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC
+PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT
+OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK
+F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK
+0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+
+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB
+leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID
+AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+
+SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG
+NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O
+BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l
+Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga
+kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q
+hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs
+g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5
+aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5
+nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC
+ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo
+Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z
+WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+Chambers of Commerce Root - 2008
+================================
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD
+MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv
+bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu
+QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy
+Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl
+ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF
+EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl
+cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA
+XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj
+h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/
+ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk
+NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g
+D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331
+lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ
+0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2
+EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI
+G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ
+BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh
+bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh
+bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC
+CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH
+AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1
+wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH
+3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU
+RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6
+M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1
+YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF
+9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK
+zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG
+nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ
+-----END CERTIFICATE-----
+
+Global Chambersign Root - 2008
+==============================
+-----BEGIN CERTIFICATE-----
+MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD
+MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv
+bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu
+QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx
+NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg
+Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ
+QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
+aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf
+VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf
+XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0
+ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB
+/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA
+TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M
+H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe
+Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF
+HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
+wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB
+AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT
+BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE
+BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm
+aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm
+aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp
+1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0
+dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG
+/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6
+ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s
+dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg
+9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH
+foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du
+qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr
+P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq
+c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
+09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
+-----END CERTIFICATE-----
+
+Go Daddy Root Certificate Authority - G2
+========================================
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu
+MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G
+A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq
+9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD
++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd
+fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl
+NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9
+BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac
+vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r
+5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV
+N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+Starfield Root Certificate Authority - G2
+=========================================
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s
+b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0
+eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw
+DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg
+VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB
+dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv
+W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs
+bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk
+N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf
+ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU
+JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol
+TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx
+4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw
+F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ
+c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+Starfield Services Root Certificate Authority - G2
+==================================================
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT
+B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s
+b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl
+IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT
+dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg
+Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2
+h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa
+hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP
+LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB
+rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG
+SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP
+E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy
+xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza
+YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6
+-----END CERTIFICATE-----
+
+AffirmTrust Commercial
+======================
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS
+BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw
+MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly
+bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb
+DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV
+C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6
+BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww
+MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV
+HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG
+hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi
+qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv
+0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh
+sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+AffirmTrust Networking
+======================
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS
+BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw
+MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly
+bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE
+Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI
+dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24
+/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb
+h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV
+HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu
+UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6
+12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23
+WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9
+/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+AffirmTrust Premium
+===================
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS
+BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy
+OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy
+dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn
+BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV
+5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs
++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd
+GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R
+p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI
+S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04
+6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5
+/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo
++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv
+MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC
+6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S
+L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK
++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV
+BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg
+IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60
+g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb
+zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+AffirmTrust Premium ECC
+=======================
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV
+BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx
+MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U
+cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ
+N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW
+BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK
+BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X
+57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM
+eQ==
+-----END CERTIFICATE-----
+
+Certum Trusted Network CA
+=========================
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK
+ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy
+MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU
+ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC
+l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J
+J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4
+fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0
+cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB
+Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw
+DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj
+jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1
+mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj
+Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+Certinomis - Autorité Racine
+============================
+-----BEGIN CERTIFICATE-----
+MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK
+Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg
+LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG
+A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw
+JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa
+wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly
+Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw
+2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N
+jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q
+c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC
+lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb
+xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g
+530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna
+4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ
+KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x
+WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva
+R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40
+nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B
+CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv
+JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE
+qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b
+WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE
+wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/
+vgt2Fl43N+bYdJeimUV5
+-----END CERTIFICATE-----
+
+TWCA Root Certification Authority
+=================================
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ
+VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG
+EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB
+IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx
+QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC
+oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP
+4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r
+y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG
+9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC
+mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW
+QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY
+T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny
+Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+Security Communication RootCA2
+==============================
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh
+dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC
+SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy
+aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++
++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R
+3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV
+spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K
+EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8
+QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
+CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj
+u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk
+3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q
+tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29
+mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+EC-ACC
+======
+-----BEGIN CERTIFICATE-----
+MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE
+BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w
+ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD
+VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE
+CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT
+BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7
+MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt
+SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl
+Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh
+cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK
+w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT
+ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4
+HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a
+E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw
+0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD
+VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0
+Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l
+dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ
+lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa
+Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe
+l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2
+E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D
+5EI=
+-----END CERTIFICATE-----
+
+Hellenic Academic and Research Institutions RootCA 2011
+=======================================================
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT
+O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y
+aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z
+IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT
+AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z
+IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo
+IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI
+1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa
+71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u
+8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH
+3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/
+MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8
+MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu
+b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt
+XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD
+/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N
+7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
+Actalis Authentication Root CA
+==============================
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM
+BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE
+AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky
+MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz
+IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ
+wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa
+by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6
+zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f
+YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2
+oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l
+EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7
+hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8
+EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5
+jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY
+iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI
+WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0
+JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx
+K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+
+Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC
+4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo
+2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz
+lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem
+OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9
+vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+Trustis FPS Root CA
+===================
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG
+EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290
+IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV
+BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ
+RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk
+H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa
+cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt
+o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA
+AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd
+BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c
+GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC
+yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P
+8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV
+l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl
+iB6XzCGcKQENZetX2fNXlrtIzYE=
+-----END CERTIFICATE-----
+
+StartCom Certification Authority
+================================
+-----BEGIN CERTIFICATE-----
+MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN
+U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu
+ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0
+NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk
+LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg
+U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y
+o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/
+Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d
+eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt
+2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z
+6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ
+osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/
+untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc
+UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT
+37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ
+Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0
+dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu
+c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv
+bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0
+aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t
+L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG
+cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5
+fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm
+N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN
+Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T
+tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX
+e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA
+2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs
+HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE
+JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib
+D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8=
+-----END CERTIFICATE-----
+
+StartCom Certification Authority G2
+===================================
+-----BEGIN CERTIFICATE-----
+MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN
+U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE
+ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O
+o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG
+4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi
+Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul
+Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs
+O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H
+vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L
+nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS
+FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa
+z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ
+KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K
+2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk
+J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+
+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG
+/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc
+nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld
+blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc
+l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm
+7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm
+obp573PYtlNXLfbQ4ddI
+-----END CERTIFICATE-----
+
+Buypass Class 2 Root CA
+=======================
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU
+QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X
+DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1
+eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1
+g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn
+9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b
+/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU
+CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff
+awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI
+zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn
+Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX
+Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs
+M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF
+AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI
+osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S
+aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd
+DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD
+LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0
+oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC
+wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS
+CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN
+rJgWVqA=
+-----END CERTIFICATE-----
+
+Buypass Class 3 Root CA
+=======================
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU
+QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X
+DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1
+eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH
+sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR
+5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh
+7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ
+ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH
+2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV
+/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ
+RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA
+Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq
+j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF
+AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G
+uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG
+Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8
+ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2
+KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz
+6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug
+UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe
+eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi
+Cp/HuZc=
+-----END CERTIFICATE-----
+
+T-TeleSec GlobalRoot Class 3
+============================
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM
+IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU
+cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx
+MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz
+dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD
+ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK
+9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU
+NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF
+iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W
+0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr
+AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb
+fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT
+ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h
+P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw==
+-----END CERTIFICATE-----
+
+EE Certification Centre Root CA
+===============================
+-----BEGIN CERTIFICATE-----
+MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG
+EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy
+dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw
+MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB
+UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy
+ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB
+DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM
+TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2
+rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw
+93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN
+P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ
+MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF
+BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj
+xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM
+lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
+uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU
+3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM
+dcGWxZ0=
+-----END CERTIFICATE-----
+
+TURKTRUST Certificate Services Provider Root 2007
+=================================================
+-----BEGIN CERTIFICATE-----
+MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOcUktUUlVTVCBF
+bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP
+MA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg
+QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4X
+DTA3MTIyNTE4MzcxOVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxl
+a3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMCVFIxDzAN
+BgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp
+bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4gKGMpIEFyYWzEsWsgMjAwNzCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9N
+YvDdE3ePYakqtdTyuTFYKTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQv
+KUmi8wUG+7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveGHtya
+KhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6PIzdezKKqdfcYbwnT
+rqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M733WB2+Y8a+xwXrXgTW4qhe04MsC
+AwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHkYb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/s
+Px+EnWVUXKgWAkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I
+aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5mxRZNTZPz/OO
+Xl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsaXRik7r4EW5nVcV9VZWRi1aKb
+BFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZqxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAK
+poRq0Tl9
+-----END CERTIFICATE-----
+
+D-TRUST Root Class 3 CA 2 2009
+==============================
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK
+DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe
+Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE
+LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD
+ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA
+BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv
+KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z
+p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC
+AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ
+4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y
+eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw
+MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G
+PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw
+OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm
+2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV
+dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph
+X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+D-TRUST Root Class 3 CA 2 EV 2009
+=================================
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK
+DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw
+OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK
+DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw
+OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS
+egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh
+zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T
+7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60
+sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35
+11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv
+cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v
+ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El
+MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp
+b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh
+c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+
+PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX
+ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA
+NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv
+w9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+PSCProcert
+==========
+-----BEGIN CERTIFICATE-----
+MIIJhjCCB26gAwIBAgIBCzANBgkqhkiG9w0BAQsFADCCAR4xPjA8BgNVBAMTNUF1dG9yaWRhZCBk
+ZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9sYW5vMQswCQYDVQQGEwJWRTEQ
+MA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlzdHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lz
+dGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBl
+cmludGVuZGVuY2lhIGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUw
+IwYJKoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyODE2NTEwMFoXDTIw
+MTIyNTIzNTk1OVowgdExJjAkBgkqhkiG9w0BCQEWF2NvbnRhY3RvQHByb2NlcnQubmV0LnZlMQ8w
+DQYDVQQHEwZDaGFjYW8xEDAOBgNVBAgTB01pcmFuZGExKjAoBgNVBAsTIVByb3ZlZWRvciBkZSBD
+ZXJ0aWZpY2Fkb3MgUFJPQ0VSVDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0aWZp
+Y2FjaW9uIEVsZWN0cm9uaWNhMQswCQYDVQQGEwJWRTETMBEGA1UEAxMKUFNDUHJvY2VydDCCAiIw
+DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANW39KOUM6FGqVVhSQ2oh3NekS1wwQYalNo97BVC
+wfWMrmoX8Yqt/ICV6oNEolt6Vc5Pp6XVurgfoCfAUFM+jbnADrgV3NZs+J74BCXfgI8Qhd19L3uA
+3VcAZCP4bsm+lU/hdezgfl6VzbHvvnpC2Mks0+saGiKLt38GieU89RLAu9MLmV+QfI4tL3czkkoh
+RqipCKzx9hEC2ZUWno0vluYC3XXCFCpa1sl9JcLB/KpnheLsvtF8PPqv1W7/U0HU9TI4seJfxPmO
+EO8GqQKJ/+MMbpfg353bIdD0PghpbNjU5Db4g7ayNo+c7zo3Fn2/omnXO1ty0K+qP1xmk6wKImG2
+0qCZyFSTXai20b1dCl53lKItwIKOvMoDKjSuc/HUtQy9vmebVOvh+qBa7Dh+PsHMosdEMXXqP+UH
+0quhJZb25uSgXTcYOWEAM11G1ADEtMo88aKjPvM6/2kwLkDd9p+cJsmWN63nOaK/6mnbVSKVUyqU
+td+tFjiBdWbjxywbk5yqjKPK2Ww8F22c3HxT4CAnQzb5EuE8XL1mv6JpIzi4mWCZDlZTOpx+FIyw
+Bm/xhnaQr/2v/pDGj59/i5IjnOcVdo/Vi5QTcmn7K2FjiO/mpF7moxdqWEfLcU8UC17IAggmosvp
+r2uKGcfLFFb14dq12fy/czja+eevbqQ34gcnAgMBAAGjggMXMIIDEzASBgNVHRMBAf8ECDAGAQH/
+AgEBMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAz
+Ni0wMB0GA1UdDgQWBBRBDxk4qpl/Qguk1yeYVKIXTC1RVDCCAVAGA1UdIwSCAUcwggFDgBStuyId
+xuDSAaj9dlBSk+2YwU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRp
+ZmljYWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAwDgYDVQQH
+EwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYDVQQKEy1TaXN0ZW1hIE5h
+Y2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5k
+ZW5jaWEgZGUgU2VydmljaW9zIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG
+9w0BCQEWFmFjcmFpekBzdXNjZXJ0ZS5nb2IudmWCAQowDgYDVR0PAQH/BAQDAgEGME0GA1UdEQRG
+MESCDnByb2NlcnQubmV0LnZloBUGBWCGXgIBoAwMClBTQy0wMDAwMDKgGwYFYIZeAgKgEgwQUklG
+LUotMzE2MzUzNzMtNzB2BgNVHR8EbzBtMEagRKBChkBodHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52
+ZS9sY3IvQ0VSVElGSUNBRE8tUkFJWi1TSEEzODRDUkxERVIuY3JsMCOgIaAfhh1sZGFwOi8vYWNy
+YWl6LnN1c2NlcnRlLmdvYi52ZTA3BggrBgEFBQcBAQQrMCkwJwYIKwYBBQUHMAGGG2h0dHA6Ly9v
+Y3NwLnN1c2NlcnRlLmdvYi52ZTBBBgNVHSAEOjA4MDYGBmCGXgMBAjAsMCoGCCsGAQUFBwIBFh5o
+dHRwOi8vd3d3LnN1c2NlcnRlLmdvYi52ZS9kcGMwDQYJKoZIhvcNAQELBQADggIBACtZ6yKZu4Sq
+T96QxtGGcSOeSwORR3C7wJJg7ODU523G0+1ng3dS1fLld6c2suNUvtm7CpsR72H0xpkzmfWvADmN
+g7+mvTV+LFwxNG9s2/NkAZiqlCxB3RWGymspThbASfzXg0gTB1GEMVKIu4YXx2sviiCtxQuPcD4q
+uxtxj7mkoP3YldmvWb8lK5jpY5MvYB7Eqvh39YtsL+1+LrVPQA3uvFd359m21D+VJzog1eWuq2w1
+n8GhHVnchIHuTQfiSLaeS5UtQbHh6N5+LwUeaO6/u5BlOsju6rEYNxxik6SgMexxbJHmpHmJWhSn
+FFAFTKQAVzAswbVhltw+HoSvOULP5dAssSS830DD7X9jSr3hTxJkhpXzsOfIt+FTvZLm8wyWuevo
+5pLtp4EJFAv8lXrPj9Y0TzYS3F7RNHXGRoAvlQSMx4bEqCaJqD8Zm4G7UaRKhqsLEQ+xrmNTbSjq
+3TNWOByyrYDT13K9mmyZY+gAu0F2BbdbmRiKw7gSXFbPVgx96OLP7bx0R/vu0xdOIk9W/1DzLuY5
+poLWccret9W6aAjtmcz9opLLabid+Qqkpj5PkygqYWwHJgD/ll9ohri4zspV4KuxPX+Y1zMOWj3Y
+eMLEYC/HYvBhkdI4sPaeVdtAgAUSM84dkpvRabP/v/GSCmE1P93+hvS84Bpxs2Km
+-----END CERTIFICATE-----
+
+CA Disig Root R1
+================
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNVBAYTAlNLMRMw
+EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp
+ZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQyMDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sx
+EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp
+c2lnIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy
+3QRkD2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/oOI7bm+V8
+u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3AfQ+lekLZWnDZv6fXARz2
+m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJeIgpFy4QxTaz+29FHuvlglzmxZcfe+5nk
+CiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8noc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTa
+YVKvJrT1cU/J19IG32PK/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6
+vpmumwKjrckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD3AjL
+LhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE7cderVC6xkGbrPAX
+ZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkCyC2fg69naQanMVXVz0tv/wQFx1is
+XxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLdqvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ
+04IwDQYJKoZIhvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR
+xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaASfX8MPWbTx9B
+LxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXoHqJPYNcHKfyyo6SdbhWSVhlM
+CrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpBemOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5Gfb
+VSUZP/3oNn6z4eGBrxEWi1CXYBmCAMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85
+YmLLW1AL14FABZyb7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKS
+ds+xDzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvkF7mGnjix
+lAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqFa3qdnom2piiZk4hA9z7N
+UaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsTQ6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJ
+a7+h89n07eLw4+1knj0vllJPgFOL
+-----END CERTIFICATE-----
+
+CA Disig Root R2
+================
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw
+EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp
+ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx
+EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp
+c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC
+w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia
+xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7
+A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S
+GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV
+g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa
+5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE
+koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A
+Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i
+Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u
+Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV
+sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je
+dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8
+1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx
+mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01
+utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0
+sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg
+UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV
+7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+ACCVRAIZ1
+=========
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB
+SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1
+MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH
+UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC
+DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM
+jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0
+RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD
+aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ
+0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG
+WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7
+8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR
+5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J
+9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK
+Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw
+Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu
+Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM
+Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA
+QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh
+AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA
+YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj
+AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA
+IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk
+aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0
+dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2
+MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI
+hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E
+R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN
+YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49
+nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ
+TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3
+sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg
+Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd
+3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p
+EfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+TWCA Global Root CA
+===================
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT
+CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD
+QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK
+EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg
+Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C
+nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV
+r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR
+Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV
+tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W
+KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99
+sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p
+yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn
+kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI
+zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC
+AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g
+cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M
+8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg
+/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg
+lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP
+A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m
+i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8
+EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3
+zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0=
+-----END CERTIFICATE-----
+
+TeliaSonera Root CA v1
+======================
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE
+CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4
+MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW
+VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+
+6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA
+3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k
+B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn
+Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH
+oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3
+F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ
+oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7
+gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc
+TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB
+AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW
+DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm
+zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW
+pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV
+G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc
+c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT
+JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2
+qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6
+Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems
+WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+E-Tugra Certification Authority
+===============================
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w
+DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls
+ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw
+NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx
+QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl
+cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD
+DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd
+hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K
+CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g
+ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ
+BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0
+E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz
+rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq
+jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5
+dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB
+/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG
+MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK
+kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO
+XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807
+VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo
+a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc
+dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV
+KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT
+Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0
+8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G
+C7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+T-TeleSec GlobalRoot Class 2
+============================
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM
+IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU
+cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx
+MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz
+dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD
+ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ
+SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F
+vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970
+2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV
+WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy
+YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4
+r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf
+vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR
+3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+Atos TrustedRoot 2011
+=====================
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU
+cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4
+MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG
+A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV
+hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr
+54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+
+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320
+HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR
+z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R
+l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ
+bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB
+CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h
+k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh
+TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9
+61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G
+3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+QuoVadis Root CA 1 G3
+=====================
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG
+A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv
+b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN
+MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg
+RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE
+PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm
+PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6
+Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN
+ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l
+g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV
+7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX
+9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f
+iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg
+t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI
+hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3
+GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct
+Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP
++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh
+3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa
+wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6
+O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0
+FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV
+hMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+QuoVadis Root CA 2 G3
+=====================
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG
+A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv
+b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN
+MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg
+RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh
+ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY
+NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t
+oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o
+MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l
+V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo
+L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ
+sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD
+6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh
+lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI
+hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K
+pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9
+x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz
+dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X
+U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw
+mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD
+zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN
+JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr
+O3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+QuoVadis Root CA 3 G3
+=====================
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG
+A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv
+b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN
+MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg
+RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286
+IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL
+Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe
+6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3
+I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U
+VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7
+5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi
+Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM
+dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt
+rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI
+hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS
+t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ
+TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du
+DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib
+Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD
+hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX
+0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW
+dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2
+PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+DigiCert Assured ID Root G2
+===========================
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw
+IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw
+MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL
+ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw
+ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH
+35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq
+bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw
+VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP
+YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn
+lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO
+w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv
+0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz
+d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW
+hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M
+jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+DigiCert Assured ID Root G3
+===========================
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD
+VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1
+MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb
+RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs
+KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF
+UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy
+YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy
+1vUhZscv6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+DigiCert Global Root G2
+=======================
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw
+HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx
+MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3
+dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ
+kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO
+3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV
+BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM
+UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB
+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu
+5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr
+F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U
+WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH
+QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/
+iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+DigiCert Global Root G3
+=======================
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD
+VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw
+MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k
+aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C
+AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O
+YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp
+Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y
+3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34
+VOKa5Vt8sycX
+-----END CERTIFICATE-----
+
+DigiCert Trusted Root G4
+========================
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw
+HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1
+MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp
+pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o
+k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa
+vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY
+QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6
+MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm
+mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7
+f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH
+dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8
+oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY
+ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr
+yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy
+7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah
+ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN
+5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb
+/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa
+5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK
+G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP
+82Z+
+-----END CERTIFICATE-----
+
+WoSign
+======
+-----BEGIN CERTIFICATE-----
+MIIFdjCCA16gAwIBAgIQXmjWEXGUY1BWAGjzPsnFkTANBgkqhkiG9w0BAQUFADBVMQswCQYDVQQG
+EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxKjAoBgNVBAMTIUNlcnRpZmljYXRpb24g
+QXV0aG9yaXR5IG9mIFdvU2lnbjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMFUxCzAJ
+BgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEqMCgGA1UEAxMhQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkgb2YgV29TaWduMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA
+vcqNrLiRFVaXe2tcesLea9mhsMMQI/qnobLMMfo+2aYpbxY94Gv4uEBf2zmoAHqLoE1UfcIiePyO
+CbiohdfMlZdLdNiefvAA5A6JrkkoRBoQmTIPJYhTpA2zDxIIFgsDcSccf+Hb0v1naMQFXQoOXXDX
+2JegvFNBmpGN9J42Znp+VsGQX+axaCA2pIwkLCxHC1l2ZjC1vt7tj/id07sBMOby8w7gLJKA84X5
+KIq0VC6a7fd2/BVoFutKbOsuEo/Uz/4Mx1wdC34FMr5esAkqQtXJTpCzWQ27en7N1QhatH/YHGkR
++ScPewavVIMYe+HdVHpRaG53/Ma/UkpmRqGyZxq7o093oL5d//xWC0Nyd5DKnvnyOfUNqfTq1+ez
+EC8wQjchzDBwyYaYD8xYTYO7feUapTeNtqwylwA6Y3EkHp43xP901DfA4v6IRmAR3Qg/UDaruHqk
+lWJqbrDKaiFaafPz+x1wOZXzp26mgYmhiMU7ccqjUu6Du/2gd/Tkb+dC221KmYo0SLwX3OSACCK2
+8jHAPwQ+658geda4BmRkAjHXqc1S+4RFaQkAKtxVi8QGRkvASh0JWzko/amrzgD5LkhLJuYwTKVY
+yrREgk/nkR4zw7CT/xH8gdLKH3Ep3XZPkiWvHYG3Dy+MwwbMLyejSuQOmbp8HkUff6oZRZb9/D0C
+AwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOFmzw7R
+8bNLtwYgFP6HEtX2/vs+MA0GCSqGSIb3DQEBBQUAA4ICAQCoy3JAsnbBfnv8rWTjMnvMPLZdRtP1
+LOJwXcgu2AZ9mNELIaCJWSQBnfmvCX0KI4I01fx8cpm5o9dU9OpScA7F9dY74ToJMuYhOZO9sxXq
+T2r09Ys/L3yNWC7F4TmgPsc9SnOeQHrAK2GpZ8nzJLmzbVUsWh2eJXLOC62qx1ViC777Y7NhRCOj
+y+EaDveaBk3e1CNOIZZbOVtXHS9dCF4Jef98l7VNg64N1uajeeAz0JmWAjCnPv/So0M/BVoG6kQC
+2nz4SNAzqfkHx5Xh9T71XXG68pWpdIhhWeO/yloTunK0jF02h+mmxTwTv97QRCbut+wucPrXnbes
+5cVAWubXbHssw1abR80LzvobtCHXt2a49CUwi1wNuepnsvRtrtWhnk/Yn+knArAdBtaP4/tIEp9/
+EaEQPkxROpaw0RPxx9gmrjrKkcRpnd8BKWRRb2jaFOwIQZeQjdCygPLPwj2/kWjFgGcexGATVdVh
+mVd8upUPYUk6ynW8yQqTP2cOEvIo4jEbwFcW3wh8GcF+Dx+FHgo2fFt+J7x6v+Db9NpSvd4MVHAx
+kUOVyLzwPt0JfjBkUO1/AaQzZ01oT74V77D2AhGiGxMlOtzCWfHjXEa7ZywCRuoeSKbmW9m1vFGi
+kpbbqsY3Iqb+zCB0oy2pLmvLwIIRIbWTee5Ehr7XHuQe+w==
+-----END CERTIFICATE-----
+
+WoSign China
+============
+-----BEGIN CERTIFICATE-----
+MIIFWDCCA0CgAwIBAgIQUHBrzdgT/BtOOzNy0hFIjTANBgkqhkiG9w0BAQsFADBGMQswCQYDVQQG
+EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMMEkNBIOayg+mAmuagueiv
+geS5pjAeFw0wOTA4MDgwMTAwMDFaFw0zOTA4MDgwMTAwMDFaMEYxCzAJBgNVBAYTAkNOMRowGAYD
+VQQKExFXb1NpZ24gQ0EgTGltaXRlZDEbMBkGA1UEAwwSQ0Eg5rKD6YCa5qC56K+B5LmmMIICIjAN
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0EkhHiX8h8EqwqzbdoYGTufQdDTc7WU1/FDWiD+k
+8H/rD195L4mx/bxjWDeTmzj4t1up+thxx7S8gJeNbEvxUNUqKaqoGXqW5pWOdO2XCld19AXbbQs5
+uQF/qvbW2mzmBeCkTVL829B0txGMe41P/4eDrv8FAxNXUDf+jJZSEExfv5RxadmWPgxDT74wwJ85
+dE8GRV2j1lY5aAfMh09Qd5Nx2UQIsYo06Yms25tO4dnkUkWMLhQfkWsZHWgpLFbE4h4TV2TwYeO5
+Ed+w4VegG63XX9Gv2ystP9Bojg/qnw+LNVgbExz03jWhCl3W6t8Sb8D7aQdGctyB9gQjF+BNdeFy
+b7Ao65vh4YOhn0pdr8yb+gIgthhid5E7o9Vlrdx8kHccREGkSovrlXLp9glk3Kgtn3R46MGiCWOc
+76DbT52VqyBPt7D3h1ymoOQ3OMdc4zUPLK2jgKLsLl3Az+2LBcLmc272idX10kaO6m1jGx6KyX2m
++Jzr5dVjhU1zZmkR/sgO9MHHZklTfuQZa/HpelmjbX7FF+Ynxu8b22/8DU0GAbQOXDBGVWCvOGU6
+yke6rCzMRh+yRpY/8+0mBe53oWprfi1tWFxK1I5nuPHa1UaKJ/kR8slC/k7e3x9cxKSGhxYzoacX
+GKUN5AXlK8IrC6KVkLn9YDxOiT7nnO4fuwECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFOBNv9ybQV0T6GTwp+kVpOGBwboxMA0GCSqGSIb3DQEBCwUA
+A4ICAQBqinA4WbbaixjIvirTthnVZil6Xc1bL3McJk6jfW+rtylNpumlEYOnOXOvEESS5iVdT2H6
+yAa+Tkvv/vMx/sZ8cApBWNromUuWyXi8mHwCKe0JgOYKOoICKuLJL8hWGSbueBwj/feTZU7n85iY
+r83d2Z5AiDEoOqsuC7CsDCT6eiaY8xJhEPRdF/d+4niXVOKM6Cm6jBAyvd0zaziGfjk9DgNyp115
+j0WKWa5bIW4xRtVZjc8VX90xJc/bYNaBRHIpAlf2ltTW/+op2znFuCyKGo3Oy+dCMYYFaA6eFN0A
+kLppRQjbbpCBhqcqBT/mhDn4t/lXX0ykeVoQDF7Va/81XwVRHmyjdanPUIPTfPRm94KNPQx96N97
+qA4bLJyuQHCH2u2nFoJavjVsIE4iYdm8UXrNemHcSxH5/mc0zy4EZmFcV5cjjPOGG0jfKq+nwf/Y
+jj4Du9gqsPoUJbJRa4ZDhS4HIxaAjUz7tGM7zMN07RujHv41D198HRaG9Q7DlfEvr10lO1Hm13ZB
+ONFLAzkopR6RctR9q5czxNM+4Gm2KHmgCY0c0f9BckgG/Jou5yD5m6Leie2uPAmvylezkolwQOQv
+T8Jwg0DXJCxr5wkf09XHwQj02w47HAcLQxGEIYbpgNR12KvxAmLBsX5VYc8T1yaw15zLKYs4SgsO
+kI26oQ==
+-----END CERTIFICATE-----
+
+COMODO RSA Certification Authority
+==================================
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE
+BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG
+A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC
+R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE
+ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn
+dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ
+FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+
+5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG
+x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX
+2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL
+OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3
+sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C
+GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5
+WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w
+DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt
+rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+
+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg
+tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW
+sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp
+pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA
+zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq
+ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52
+7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I
+LaZRfyHBNVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+USERTrust RSA Certification Authority
+=====================================
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE
+BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK
+ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE
+BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK
+ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz
+0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j
+Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn
+RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O
++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq
+/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE
+Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM
+lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8
+yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+
+eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW
+FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ
+7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ
+Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM
+8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi
+FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi
+yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c
+J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw
+sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx
+Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+USERTrust ECC Certification Authority
+=====================================
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC
+VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC
+VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2
+0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez
+nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV
+HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB
+HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu
+9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+GlobalSign ECC Root CA - R4
+===========================
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb
+R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
+EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb
+R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
+EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl
+OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P
+AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV
+MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF
+JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q=
+-----END CERTIFICATE-----
+
+GlobalSign ECC Root CA - R5
+===========================
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb
+R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
+EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb
+R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD
+EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6
+SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS
+h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd
+BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx
+uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7
+yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+Staat der Nederlanden Root CA - G3
+==================================
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE
+CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
+Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC
+TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l
+ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y
+olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t
+x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy
+EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K
+Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur
+mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5
+1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp
+07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo
+FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE
+41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu
+yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
+U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq
+KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1
+v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA
+8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b
+8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r
+mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq
+1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI
+JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV
+tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk=
+-----END CERTIFICATE-----
+
+Staat der Nederlanden EV Root CA
+================================
+-----BEGIN CERTIFICATE-----
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE
+CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g
+RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M
+MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl
+cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk
+SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW
+O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r
+0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8
+Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV
+XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr
+08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV
+0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd
+74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx
+fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa
+ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu
+c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq
+5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN
+b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN
+f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi
+5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4
+WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK
+DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy
+eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg==
+-----END CERTIFICATE-----
+
+IdenTrust Commercial Root CA 1
+==============================
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG
+EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS
+b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES
+MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB
+IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld
+hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/
+mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi
+1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C
+XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl
+3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy
+NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV
+WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg
+xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix
+uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC
+AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI
+hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg
+ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt
+ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV
+YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX
+feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro
+kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe
+2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz
+Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R
+cGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+IdenTrust Public Sector Root CA 1
+=================================
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG
+EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv
+ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV
+UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS
+b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy
+P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6
+Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI
+rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf
+qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS
+mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn
+ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh
+LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v
+iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL
+4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B
+Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw
+DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A
+mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt
+GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt
+m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx
+NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4
+Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI
+ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC
+ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ
+3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+Entrust Root Certification Authority - G2
+=========================================
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV
+BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy
+bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug
+b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw
+HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT
+DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx
+OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP
+/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz
+HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU
+s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y
+TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx
+AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6
+0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z
+iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi
+nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+
+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO
+e4pIb4tF9g==
+-----END CERTIFICATE-----
+
+Entrust Root Certification Authority - EC1
+==========================================
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx
+FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn
+YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw
+FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs
+LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg
+dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt
+IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy
+AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef
+9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
+FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h
+vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8
+kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+CFCA EV ROOT
+============
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE
+CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB
+IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw
+MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD
+DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV
+BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD
+7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN
+uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW
+ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7
+xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f
+py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K
+gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol
+hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ
+tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf
+BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB
+/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q
+ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua
+4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG
+E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX
+BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn
+aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy
+PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX
+kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C
+ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5
+====================================================
+-----BEGIN CERTIFICATE-----
+MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVFIxDzAN
+BgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxnaSDEsGxldGnFn2ltIHZlIEJp
+bGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1Qg
+RWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAw
+ODA3MDFaFw0yMzA0MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0w
+SwYDVQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnE
+n2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBFbGVrdHJvbmlrIFNlcnRp
+ZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEApCUZ4WWe60ghUEoI5RHwWrom/4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537
+jVJp45wnEFPzpALFp/kRGml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1m
+ep5Fimh34khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z5UNP
+9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0hO8EuPbJbKoCPrZV
+4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QIDAQABo0IwQDAdBgNVHQ4EFgQUVpkH
+HtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
+hvcNAQELBQADggEBAJ5FdnsXSDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPo
+BP5yCccLqh0lVX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq
+URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nfpeYVhDfwwvJl
+lpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CFYv4HAqGEVka+lgqaE9chTLd8
+B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW+qtB4Uu2NQvAmxU=
+-----END CERTIFICATE-----
+
+Certinomis - Root CA
+====================
+-----BEGIN CERTIFICATE-----
+MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjETMBEGA1UEChMK
+Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAbBgNVBAMTFENlcnRpbm9taXMg
+LSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMzMTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIx
+EzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRD
+ZXJ0aW5vbWlzIC0gUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQos
+P5L2fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJflLieY6pOo
+d5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQVWZUKxkd8aRi5pwP5ynap
+z8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDFTKWrteoB4owuZH9kb/2jJZOLyKIOSY00
+8B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09x
+RLWtwHkziOC/7aOgFLScCbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE
+6OXWk6RiwsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJwx3t
+FvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SGm/lg0h9tkQPTYKbV
+PZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4F2iw4lNVYC2vPsKD2NkJK/DAZNuH
+i5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZngWVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGj
+YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I
+6tNxIqSSaHh02TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
+AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/0KGRHCwPT5iV
+WVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWwF6YSjNRieOpWauwK0kDDPAUw
+Pk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZSg081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAX
+lCOotQqSD7J6wWAsOMwaplv/8gzjqh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJ
+y29SWwNyhlCVCNSNh4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9
+Iff/ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8Vbtaw5Bng
+DwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwjY/M50n92Uaf0yKHxDHYi
+I0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nM
+cyrDflOR1m749fPH0FFNjkulW+YZFzvWgQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVr
+hkIGuUE=
+-----END CERTIFICATE-----
+
+OISTE WISeKey Global Root GB CA
+===============================
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBtMQswCQYDVQQG
+EwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
+ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAw
+MzJaFw0zOTEyMDExNTEwMzFaMG0xCzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYD
+VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEds
+b2JhbCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3HEokKtaX
+scriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGxWuR51jIjK+FTzJlFXHtP
+rby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk
+9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNku7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4o
+Qnc/nSMbsrY9gBQHTC5P99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvg
+GUpuuy9rM2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZI
+hvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrghcViXfa43FK8+5/ea4n32cZiZBKpD
+dHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0
+VQreUGdNZtGn//3ZwLWoo4rOZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEui
+HZeeevJuQHHfaPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+Certification Authority of WoSign G2
+====================================
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQG
+EwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNVBAMTJENlcnRpZmljYXRpb24g
+QXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgx
+CzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPXJYY1kBai
+XW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgOgHzKtB0TiGsOqCR3A9Du
+W/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg9
+5k4ot+vElbGs/V6r+kHLXZ1L3PR8du9nfwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BK
+v0mUYQs4kI9dJGwlezt52eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC
+AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJKoZI
+hvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8fHulwqZm46qwtyeY
+P0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G3CE4Q3RM+zD4F3LBMvzIkRfEzFg3
+TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yySrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu
++sif/a+RZQp4OBXllxcU3fngLDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+
+7Q9LGOHSJDy7XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg=
+-----END CERTIFICATE-----
+
+CA WoSign ECC Root
+==================
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQswCQYDVQQGEwJD
+TjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMTEkNBIFdvU2lnbiBFQ0MgUm9v
+dDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQK
+ExFXb1NpZ24gQ0EgTGltaXRlZDEbMBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZI
+zj0CAQYFK4EEACIDYgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiU
+t5v8KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES1ns2o0Iw
+QDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUqv3VWqP2h4syhf3R
+MluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0
+Daupn75OcsqF1NnstTJFGG+rrQIwfcf3aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYu
+a/GRspBl9JrmkO5K
+-----END CERTIFICATE-----
+
+SZAFIR ROOT CA2
+===============
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQELBQAwUTELMAkG
+A1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6ZW5pb3dhIFMuQS4xGDAWBgNV
+BAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkwNzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJ
+BgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYD
+VQQDDA9TWkFGSVIgUk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5Q
+qEvNQLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT3PSQ1hNK
+DJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw3gAeqDRHu5rr/gsUvTaE
+2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr63fE9biCloBK0TXC5ztdyO4mTp4CEHCdJ
+ckm1/zuVnsHMyAHs6A6KCpbns6aH5db5BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwi
+ieDhZNRnvDF5YTy7ykHNXGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0P
+AQH/BAQDAgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsFAAOC
+AQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw8PRBEew/R40/cof5
+O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOGnXkZ7/e7DDWQw4rtTw/1zBLZpD67
+oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCPoky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul
+4+vJhaAlIDf7js4MNIThPIGyd05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6
++/NNIxuZMzSgLvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+Certum Trusted Network CA 2
+===========================
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCBgDELMAkGA1UE
+BhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMuQS4xJzAlBgNVBAsTHkNlcnR1
+bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIGA1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29y
+ayBDQSAyMCIYDzIwMTExMDA2MDgzOTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQ
+TDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENB
+IDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWADGSdhhuWZGc/IjoedQF9
+7/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+o
+CgCXhVqqndwpyeI1B+twTUrWwbNWuKFBOJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40b
+Rr5HMNUuctHFY9rnY3lEfktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2p
+uTRZCr+ESv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1mo130
+GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02isx7QBlrd9pPPV3WZ
+9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOWOZV7bIBaTxNyxtd9KXpEulKkKtVB
+Rgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgezTv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pye
+hizKV/Ma5ciSixqClnrDvFASadgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vM
+BhBgu4M1t15n3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZI
+hvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQF/xlhMcQSZDe28cmk4gmb3DW
+Al45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTfCVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuA
+L55MYIR4PSFk1vtBHxgP58l1cb29XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMo
+clm2q8KMZiYcdywmdjWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tM
+pkT/WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jbAoJnwTnb
+w3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksqP/ujmv5zMnHCnsZy4Ypo
+J/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Kob7a6bINDd82Kkhehnlt4Fj1F4jNy3eFm
+ypnTycUm/Q1oBEauttmbjL4ZvrHG8hnjXALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLX
+is7VmFxWlgPF7ncGNf/P5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7
+zAYspsbiDrW5viSP
+-----END CERTIFICATE-----
+
+Hellenic Academic and Research Institutions RootCA 2015
+=======================================================
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcT
+BkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0
+aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
+YXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAx
+MTIxWjCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMg
+QWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNV
+BAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIw
+MTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDC+Kk/G4n8PDwEXT2QNrCROnk8Zlrv
+bTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+eh
+iGsxr/CL0BgzuNtFajT0AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+
+6PAQZe104S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06CojXd
+FPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV9Cz82XBST3i4vTwr
+i5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrDgfgXy5I2XdGj2HUb4Ysn6npIQf1F
+GQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2
+fu/Z8VFRfS0myGlZYeCsargqNhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9mu
+iNX6hME6wGkoLfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVdctA4GGqd83EkVAswDQYJKoZI
+hvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0IXtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+
+D1hYc2Ryx+hFjtyp8iY/xnmMsVMIM4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrM
+d/K4kPFox/la/vot9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+y
+d+2VZ5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/eaj8GsGsVn
+82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnhX9izjFk0WaSrT2y7Hxjb
+davYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQl033DlZdwJVqwjbDG2jJ9SrcR5q+ss7F
+Jej6A7na+RZukYT1HCjI/CbM1xyQVqdfbzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVt
+J94Cj8rDtSvK6evIIVM4pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGa
+JI7ZjnHKe7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0vm9q
+p/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+Hellenic Academic and Research Institutions ECC RootCA 2015
+===========================================================
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0
+aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u
+cyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
+aCBJbnN0aXR1dGlvbnMgRUNDIFJvb3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEw
+MzcxMlowgaoxCzAJBgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmlj
+IEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUQwQgYD
+VQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIEVDQyBSb290
+Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKgQehLgoRc4vgxEZmGZE4JJS+dQS8KrjVP
+dJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJajq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoK
+Vlp8aQuqgAkkbH7BRqNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O
+BBYEFLQiC4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaeplSTA
+GiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7SofTUwJCA3sS61kFyjn
+dc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+Certplus Root CA G1
+===================
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUAMD4xCzAJBgNV
+BAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMTAe
+Fw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhD
+ZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHN
+r49aiZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt6kuJPKNx
+Qv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP0FG7Yn2ksYyy/yARujVj
+BYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTv
+LRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDEEW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2
+z4QTd28n6v+WZxcIbekN1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc
+4nBvCGrch2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCTmehd
+4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV4EJQeIQEQWGw9CEj
+jy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPOWftwenMGE9nTdDckQQoRb5fc5+R+
+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0G
+A1UdDgQWBBSowcCbkahDFXxdBie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHY
+lwuBsTANBgkqhkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh
+66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7/SMNkPX0XtPG
+YX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BSS7CTKtQ+FjPlnsZlFT5kOwQ/
+2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F
+6ALEUz65noe8zDUa3qHpimOHZR4RKttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilX
+CNQ314cnrUlZp5GrRHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWe
+tUNy6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEVV/xuZDDC
+VRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5g4VCXA9DO2pJNdWY9BW/
++mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl++O/QmueD6i9a5jc2NvLi6Td11n0bt3+
+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo=
+-----END CERTIFICATE-----
+
+Certplus Root CA G2
+===================
+-----BEGIN CERTIFICATE-----
+MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4xCzAJBgNVBAYT
+AkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMjAeFw0x
+NDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0
+cGx1czEcMBoGA1UEAwwTQ2VydHBsdXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IA
+BM0PW1aC3/BFGtat93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uN
+Am8xIk0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMB8GA1Ud
+IwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqGSM49BAMDA2gAMGUCMHD+sAvZ94OX7PNV
+HdTcswYO/jOYnYs5kGuUIe22113WTNchp+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjl
+vPl5adytRSv3tjFzzAalU5ORGpOucGpnutee5WEaXw==
+-----END CERTIFICATE-----
+
+OpenTrust Root CA G1
+====================
+-----BEGIN CERTIFICATE-----
+MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUAMEAxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5UcnVzdCBSb290IENBIEcx
+MB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAwMFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoM
+CU9wZW5UcnVzdDEdMBsGA1UEAwwUT3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7fa
+Yp6bwiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX/uMftk87
+ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR077F9jAHiOH3BX2pfJLKO
+YheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGPuY4zbGneWK2gDqdkVBFpRGZPTBKnjix9
+xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLxp2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO
+9z0M+Yo0FMT7MzUj8czxKselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq
+3ywgsNw2TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+WG+Oi
+n6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPwvFEVVJSmdz7QdFG9
+URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYYEQRVzXR7z2FwefR7LFxckvzluFqr
+TJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUl0YhVyE12jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/Px
+N3DlCPaTKbYwDQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E
+PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kfgLMtMrpkZ2Cv
+uVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbSFXJfLkur1J1juONI5f6ELlgK
+n0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLh
+X4SPgPL0DTatdrOjteFkdjpY3H1PXlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80
+nR14SohWZ25g/4/Ii+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcm
+GS3tTAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L9109S5zvE/
+bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/KyPu1svf0OnWZzsD2097+o
+4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJAwSQiumPv+i2tCqjI40cHLI5kqiPAlxA
+OXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj1oxx
+-----END CERTIFICATE-----
+
+OpenTrust Root CA G2
+====================
+-----BEGIN CERTIFICATE-----
+MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUAMEAxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5UcnVzdCBSb290IENBIEcy
+MB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoM
+CU9wZW5UcnVzdDEdMBsGA1UEAwwUT3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+
+Ntmh/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78eCbY2albz
+4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/61UWY0jUJ9gNDlP7ZvyCV
+eYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fEFY8ElggGQgT4hNYdvJGmQr5J1WqIP7wt
+UdGejeBSzFfdNTVY27SPJIjki9/ca1TSgSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz
+3GIZ38i1MH/1PCZ1Eb3XG7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj
+3CzMpSZyYhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaHvGOz
+9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4t/bQWVyJ98LVtZR0
+0dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/gh7PU3+06yzbXfZqfUAkBXKJOAGT
+y3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUajn6QiL35okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59
+M4PLuG53hq8wDQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz
+Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0nXGEL8pZ0keI
+mUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qTRmTFAHneIWv2V6CG1wZy7HBG
+S4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpTwm+bREx50B1ws9efAvSyB7DH5fitIw6mVskp
+EndI2S9G/Tvw/HRwkqWOOAgfZDC2t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ
+6e18CL13zSdkzJTaTkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97kr
+gCf2o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU3jg9CcCo
+SmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eAiN1nE28daCSLT7d0geX0
+YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14fWKGVyasvc0rQLW6aWQ9VGHgtPFGml4vm
+u7JwqkwR3v98KzfUetF3NI/n+UL3PIEMS1IK
+-----END CERTIFICATE-----
+
+OpenTrust Root CA G3
+====================
+-----BEGIN CERTIFICATE-----
+MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5UcnVzdCBSb290IENBIEczMB4X
+DTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9w
+ZW5UcnVzdDEdMBsGA1UEAwwUT3BlblRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAARK7liuTcpm3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5B
+ta1doYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4GA1UdDwEB
+/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAf
+BgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAKBggqhkjOPQQDAwNpADBmAjEAj6jcnboM
+BBf6Fek9LykBl7+BFjNAk2z8+e2AcG+qj9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta
+3U1fJAuwACEl74+nBCZx4nxp5V2a+EEfOzmTk51V6s2N8fvB
+-----END CERTIFICATE-----
+
+ISRG Root X1
+============
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAwTzELMAkGA1UE
+BhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNoIEdyb3VwMRUwEwYDVQQD
+EwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQG
+EwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMT
+DElTUkcgUm9vdCBYMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54r
+Vygch77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+0TM8ukj1
+3Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6UA5/TR5d8mUgjU+g4rk8K
+b4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sWT8KOEUt+zwvo/7V3LvSye0rgTBIlDHCN
+Aymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyHB5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ
+4Q7e2RCOFvu396j3x+UCB5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf
+1b0SHzUvKBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWnOlFu
+hjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTnjh8BCNAw1FtxNrQH
+usEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbwqHyGO0aoSCqI3Haadr8faqU9GY/r
+OPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CIrU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4G
+A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY
+9umbbjANBgkqhkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ3BebYhtF8GaV
+0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KKNFtY2PwByVS5uCbMiogziUwt
+hDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJw
+TdwJx4nLCgdNbOhdjsnvzqvHu7UrTkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nx
+e5AW0wdeRlN8NwdCjNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZA
+JzVcoyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq4RgqsahD
+YVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPAmRGunUHBcnWEvgJBQl9n
+JEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57demyPxgcYxn/eR44/KJ4EBs+lVDR3veyJ
+m+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+AC RAIZ FNMT-RCM
+================
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsxCzAJBgNVBAYT
+AkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTAeFw0wODEw
+MjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJD
+TTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
+ggIBALpxgHpMhm5/yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcf
+qQgfBBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAzWHFctPVr
+btQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxFtBDXaEAUwED653cXeuYL
+j2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z374jNUUeAlz+taibmSXaXvMiwzn15Cou
+08YfxGyqxRxqAQVKL9LFwag0Jl1mpdICIfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mw
+WsXmo8RZZUc1g16p6DULmbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnT
+tOmlcYF7wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peSMKGJ
+47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2ZSysV4999AeU14EC
+ll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMetUqIJ5G+GR4of6ygnXYMgrwTJbFaa
+i0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FPd9xf3E6Jobd2Sn9R2gzL+HYJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1o
+dHRwOi8vd3d3LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1RXxlDPiyN8+s
+D8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYMLVN0V2Ue1bLdI4E7pWYjJ2cJ
+j+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrT
+Qfv6MooqtyuGC2mDOL7Nii4LcK2NJpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW
++YJF1DngoABd15jmfZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7
+Ixjp6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp1txyM/1d
+8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B9kiABdcPUXmsEKvU7ANm
+5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wokRqEIr9baRRmW1FMdW4R58MD3R++Lj8UG
+rp1MYp3/RgT408m2ECVAdf4WqslKYIYvuu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+Amazon Root CA 1
+================
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsFADA5MQswCQYD
+VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAxMB4XDTE1
+MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv
+bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALJ4gHHKeNXjca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgH
+FzZM9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qwIFAGbHrQ
+gLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6VOujw5H5SNz/0egwLX0t
+dHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L93FcXmn/6pUCyziKrlA4b9v7LWIbxcce
+VOF34GfID5yHI9Y/QCB/IIDEgEw+OyQmjgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3
+DQEBCwUAA4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDIU5PM
+CCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUsN+gDS63pYaACbvXy
+8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vvo/ufQJVtMVT8QtPHRh8jrdkPSHCa
+2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2
+xJNDd2ZhwLnoQdeXeGADbkpyrqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+Amazon Root CA 2
+================
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwFADA5MQswCQYD
+VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAyMB4XDTE1
+MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv
+bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC
+ggIBAK2Wny2cSkxKgXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4
+kHbZW0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg1dKmSYXp
+N+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K8nu+NQWpEjTj82R0Yiw9
+AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvd
+fLC6HM783k81ds8P+HgfajZRRidhW+mez/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAEx
+kv8LV/SasrlX6avvDXbR8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSS
+btqDT6ZjmUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz7Mt0
+Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6+XUyo05f7O0oYtlN
+c/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI0u1ufm8/0i2BWSlmy5A5lREedCf+
+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSw
+DPBMMPQFWAJI/TPlUq9LhONmUjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oA
+A7CXDpO8Wqj2LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kSk5Nrp+gvU5LE
+YFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl7uxMMne0nxrpS10gxdr9HIcW
+xkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygmbtmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQ
+gj9sAq+uEjonljYE1x2igGOpm/HlurR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbW
+aQbLU8uz/mtBzUF+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoV
+Yh63n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE76KlXIx3
+KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H9jVlpNMKVv/1F2Rs76gi
+JUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT4PsJYGw=
+-----END CERTIFICATE-----
+
+Amazon Root CA 3
+================
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5MQswCQYDVQQG
+EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAzMB4XDTE1MDUy
+NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ
+MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZB
+f8ANm+gBG1bG8lKlui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjr
+Zt6jQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSrttvXBp43
+rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkrBqWTrBqYaGFy+uGh0Psc
+eGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteMYyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+Amazon Root CA 4
+================
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5MQswCQYDVQQG
+EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSA0MB4XDTE1MDUy
+NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ
+MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN
+/sGKe0uoe0ZLY7Bi9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri
+83BkM6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WBMAoGCCqGSM49BAMDA2gA
+MGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlwCkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1
+AE47xDqUEpHJWEadIRNyp4iciuRMStuW1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+LuxTrust Global Root 2
+======================
+-----BEGIN CERTIFICATE-----
+MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQELBQAwRjELMAkG
+A1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNVBAMMFkx1eFRydXN0IEdsb2Jh
+bCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUwMzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEW
+MBQGA1UECgwNTHV4VHJ1c3QgUy5BLjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCC
+AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wm
+Kb3FibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTemhfY7RBi2
+xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1EMShduxq3sVs35a0VkBC
+wGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsnXpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm
+1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkm
+FRseTJIpgp7VkoGSQXAZ96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niF
+wpN6cj5mj5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4gDEa/
+a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+8kPREd8vZS9kzl8U
+ubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2jX5t/Lax5Gw5CMZdjpPuKadUiDTSQ
+MC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmHhFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB
+/zBCBgNVHSAEOzA5MDcGByuBKwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5
+Lmx1eHRydXN0Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQELBQADggIBAGoZ
+FO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9BzZAcg4atmpZ1gDlaCDdLnIN
+H2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTOjFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW
+7MM3LGVYvlcAGvI1+ut7MV3CwRI9loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIu
+ZY+kt9J/Z93I055cqqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWA
+VWe+2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/JEAdemrR
+TxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKrezrnK+T+Tb/mjuuqlPpmt
+/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQfLSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc
+7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31I
+iyBMz2TWuJdGsE7RKlY6oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
+-----END CERTIFICATE-----
+
+TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1
+=============================================
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIxGDAWBgNVBAcT
+D0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxpbXNlbCB2ZSBUZWtub2xvamlr
+IEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0wKwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24g
+TWVya2V6aSAtIEthbXUgU00xNjA0BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRp
+ZmlrYXNpIC0gU3VydW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYD
+VQQGEwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXllIEJpbGlt
+c2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklUQUsxLTArBgNVBAsTJEth
+bXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBTTTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11
+IFNNIFNTTCBLb2sgU2VydGlmaWthc2kgLSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAr3UwM6q7a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y8
+6Ij5iySrLqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INrN3wc
+wv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2XYacQuFWQfw4tJzh0
+3+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/iSIzL+aFCr2lqBs23tPcLG07xxO9
+WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4fAJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQU
+ZT/HiobGPN08VFw1+DrtUgxHV8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJ
+KoZIhvcNAQELBQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPfIPP54+M638yc
+lNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4lzwDGrpDxpa5RXI4s6ehlj2R
+e37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0j
+q5Rm+K37DwhuJi1/FwcJsoz7UMCflo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
diff --git a/vendor/github.com/johannesboyne/gofakes3/constants.go b/vendor/github.com/johannesboyne/gofakes3/constants.go
new file mode 100644
index 00000000..de8453c4
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/constants.go
@@ -0,0 +1,44 @@
+package gofakes3
+
+import "time"
+
+const (
+ // From https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html:
+ // "The name for a key is a sequence of Unicode characters whose UTF-8
+ // encoding is at most 1024 bytes long."
+ KeySizeLimit = 1024
+
+ // From https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html:
+ // Within the PUT request header, the user-defined metadata is limited to 2
+ // KB in size. The size of user-defined metadata is measured by taking the
+ // sum of the number of bytes in the UTF-8 encoding of each key and value.
+ //
+ // As the docs do not specify KB or KiB, gofakes3 uses KB (2000 bytes).
+ // The reasoning: if gofakes3 is used for testing and your tests show that
+ // 2KiB works while Amazon actually enforces 2KB, production is a much
+ // worse place to discover the disparity!
+ DefaultMetadataSizeLimit = 2000
+
+ // As with DefaultMetadataSizeLimit, the docs don't specify MB or MiB, so
+ // we accept 5MB for now. The Go client SDK rejects 5MB with the error
+ // "part size must be at least 5242880 bytes", which hints that it has been
+ // interpreted as MiB at least _somewhere_, but we should remain liberal in
+ // what we accept in the face of ambiguity.
+ DefaultUploadPartSize = 5 * 1000 * 1000
+
+ DefaultSkewLimit = 15 * time.Minute
+
+ MaxUploadsLimit = 1000
+ DefaultMaxUploads = 1000
+ MaxUploadPartsLimit = 1000
+ DefaultMaxUploadParts = 1000
+
+ MaxBucketKeys = 1000
+ DefaultMaxBucketKeys = 1000
+
+ MaxBucketVersionKeys = 1000
+ DefaultMaxBucketVersionKeys = 1000
+
+ // From the docs: "Part numbers can be any number from 1 to 10,000, inclusive."
+ MaxUploadPartNumber = 10000
+)
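+
+// Illustrative usage sketch (not part of the vendored file): a handler might
+// validate an incoming part number against the documented bound like so. The
+// partNumber variable is hypothetical; ErrorMessagef and ErrInvalidArgument
+// are defined in this package's error.go:
+//
+//	if partNumber < 1 || partNumber > MaxUploadPartNumber {
+//		return ErrorMessagef(ErrInvalidArgument,
+//			"part number must be an integer between 1 and %d", MaxUploadPartNumber)
+//	}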
diff --git a/vendor/github.com/johannesboyne/gofakes3/cors.go b/vendor/github.com/johannesboyne/gofakes3/cors.go
new file mode 100644
index 00000000..5da63c8c
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/cors.go
@@ -0,0 +1,43 @@
+package gofakes3
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ corsHeaders = []string{
+ "Accept",
+ "Accept-Encoding",
+ "Authorization",
+ "Content-Disposition",
+ "Content-Length",
+ "Content-Type",
+ "X-Amz-Date",
+ "X-Amz-User-Agent",
+ "X-CSRF-Token",
+ "x-amz-acl",
+ "x-amz-meta-filename",
+ "x-amz-meta-from",
+ "x-amz-meta-private",
+ "x-amz-meta-to",
+ }
+ corsHeadersString = strings.Join(corsHeaders, ", ")
+)
+
+type withCORS struct {
+ r http.Handler
+ log Logger
+}
+
+func (s *withCORS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE, HEAD")
+ w.Header().Set("Access-Control-Allow-Headers", corsHeadersString)
+
+ if r.Method == "OPTIONS" {
+ return
+ }
+
+ s.r.ServeHTTP(w, r)
+}
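+
+// Usage sketch (illustrative): GoFakeS3.Server() wraps the package's router
+// in exactly this way. Since withCORS is unexported, code outside the package
+// would go through Server() instead; the router name and listen address below
+// are hypothetical:
+//
+//	var handler http.Handler = &withCORS{r: router, log: log}
+//	_ = http.ListenAndServe(":9000", handler)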
diff --git a/vendor/github.com/johannesboyne/gofakes3/error.go b/vendor/github.com/johannesboyne/gofakes3/error.go
new file mode 100644
index 00000000..9d8eead4
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/error.go
@@ -0,0 +1,344 @@
+package gofakes3
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "time"
+)
+
+// Error codes are documented here:
+// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+//
+// If you add a code to this list, please also add it to ErrorCode.Status().
+//
+const (
+ ErrNone ErrorCode = ""
+
+ // The Content-MD5 you specified did not match what we received.
+ ErrBadDigest ErrorCode = "BadDigest"
+
+ ErrBucketAlreadyExists ErrorCode = "BucketAlreadyExists"
+
+ // Raised when attempting to delete a bucket that still contains items.
+ ErrBucketNotEmpty ErrorCode = "BucketNotEmpty"
+
+ // "Indicates that the versioning configuration specified in the request is invalid"
+ ErrIllegalVersioningConfiguration ErrorCode = "IllegalVersioningConfigurationException"
+
+ // You did not provide the number of bytes specified by the Content-Length
+ // HTTP header:
+ ErrIncompleteBody ErrorCode = "IncompleteBody"
+
+ // POST requires exactly one file upload per request.
+ ErrIncorrectNumberOfFilesInPostRequest ErrorCode = "IncorrectNumberOfFilesInPostRequest"
+
+ // InlineDataTooLarge occurs when using the PutObjectInline method of the
+ // SOAP interface
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/SOAPPutObjectInline.html).
+ // This is not documented on the errors page; the error is included here
+ // only for reference.
+ ErrInlineDataTooLarge ErrorCode = "InlineDataTooLarge"
+
+ ErrInvalidArgument ErrorCode = "InvalidArgument"
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
+ ErrInvalidBucketName ErrorCode = "InvalidBucketName"
+
+ // The Content-MD5 you specified is not valid.
+ ErrInvalidDigest ErrorCode = "InvalidDigest"
+
+ ErrInvalidRange ErrorCode = "InvalidRange"
+ ErrInvalidToken ErrorCode = "InvalidToken"
+ ErrKeyTooLong ErrorCode = "KeyTooLongError" // This is not a typo: Error is part of the string, but redundant in the constant name
+ ErrMalformedPOSTRequest ErrorCode = "MalformedPOSTRequest"
+
+ // One or more of the specified parts could not be found. The part might
+ // not have been uploaded, or the specified entity tag might not have
+ // matched the part's entity tag.
+ ErrInvalidPart ErrorCode = "InvalidPart"
+
+ // The list of parts was not in ascending order. Parts list must be
+ // specified in order by part number.
+ ErrInvalidPartOrder ErrorCode = "InvalidPartOrder"
+
+ ErrInvalidURI ErrorCode = "InvalidURI"
+
+ ErrMetadataTooLarge ErrorCode = "MetadataTooLarge"
+ ErrMethodNotAllowed ErrorCode = "MethodNotAllowed"
+ ErrMalformedXML ErrorCode = "MalformedXML"
+
+ // You must provide the Content-Length HTTP header.
+ ErrMissingContentLength ErrorCode = "MissingContentLength"
+
+ // See BucketNotFound() for a helper function for this error:
+ ErrNoSuchBucket ErrorCode = "NoSuchBucket"
+
+ // See KeyNotFound() for a helper function for this error:
+ ErrNoSuchKey ErrorCode = "NoSuchKey"
+
+ // The specified multipart upload does not exist. The upload ID might be
+ // invalid, or the multipart upload might have been aborted or completed.
+ ErrNoSuchUpload ErrorCode = "NoSuchUpload"
+
+ ErrNoSuchVersion ErrorCode = "NoSuchVersion"
+
+ ErrRequestTimeTooSkewed ErrorCode = "RequestTimeTooSkewed"
+ ErrTooManyBuckets ErrorCode = "TooManyBuckets"
+ ErrNotImplemented ErrorCode = "NotImplemented"
+
+ ErrInternal ErrorCode = "InternalError"
+)
+
+// INTERNAL errors! These are not part of the S3 interface; they are codes
+// we have declared ourselves. They should all map to a 500 status code:
+const (
+ ErrInternalPageNotImplemented InternalErrorCode = "PaginationNotImplemented"
+)
+
+// errorResponse should be implemented by any type that needs to be handled by
+// ensureErrorResponse.
+type errorResponse interface {
+ Error
+ enrich(requestID string)
+}
+
+func ensureErrorResponse(err error, requestID string) Error {
+ switch err := err.(type) {
+ case errorResponse:
+ err.enrich(requestID)
+ return err
+
+ case ErrorCode:
+ return &ErrorResponse{
+ Code: err,
+ RequestID: requestID,
+ Message: string(err),
+ }
+
+ default:
+ return &ErrorResponse{
+ Code: ErrInternal,
+ Message: "Internal Error",
+ RequestID: requestID,
+ }
+ }
+}
+
+type Error interface {
+ error
+ ErrorCode() ErrorCode
+}
+
+// ErrorResponse is the base error type returned by S3 when any error occurs.
+//
+// Some errors contain their own additional fields in the response, for example
+// ErrRequestTimeTooSkewed, which contains the server time and the skew limit.
+// To create one of these responses, embed ErrorResponse in a new struct
+// (but please don't export it):
+//
+// type notQuiteRightResponse struct {
+// ErrorResponse
+// ExtraField int
+// }
+//
+// Next, create a constructor that populates the error. Interfaces won't work
+// for this job as the error itself does double-duty as the XML response
+// object. Fill the struct out however you please, but don't forget to assign
+// Code and Message:
+//
+// func NotQuiteRight(at time.Time, max time.Duration) error {
+// code := ErrNotQuiteRight
+// return &notQuiteRightResponse{
+// ErrorResponse{Code: code, Message: code.Message()},
+// 123456789,
+// }
+// }
+//
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+
+ Code ErrorCode
+ Message string `xml:",omitempty"`
+ RequestID string `xml:"RequestId,omitempty"`
+ HostID string `xml:"HostId,omitempty"`
+}
+
+func (e *ErrorResponse) ErrorCode() ErrorCode { return e.Code }
+
+func (e *ErrorResponse) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+func (r *ErrorResponse) enrich(requestID string) {
+ r.RequestID = requestID
+}
+
+func ErrorMessage(code ErrorCode, message string) error {
+ return &ErrorResponse{Code: code, Message: message}
+}
+
+func ErrorMessagef(code ErrorCode, message string, args ...interface{}) error {
+ return &ErrorResponse{Code: code, Message: fmt.Sprintf(message, args...)}
+}
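+
+// For example (illustrative; the bucket variable is hypothetical):
+//
+//	return ErrorMessagef(ErrInvalidBucketName, "bucket name %q is not valid", bucket)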
+
+type ErrorInvalidArgumentResponse struct {
+ ErrorResponse
+
+ ArgumentName string `xml:"ArgumentName"`
+ ArgumentValue string `xml:"ArgumentValue"`
+}
+
+func ErrorInvalidArgument(name, value, message string) error {
+ return &ErrorInvalidArgumentResponse{
+ ErrorResponse: ErrorResponse{Code: ErrInvalidArgument, Message: message},
+ ArgumentName: name, ArgumentValue: value}
+}
+
+// ErrorCode represents an S3 error code, documented here:
+// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+type ErrorCode string
+
+func (e ErrorCode) ErrorCode() ErrorCode { return e }
+func (e ErrorCode) Error() string { return string(e) }
+
+// InternalErrorCode represents a GoFakeS3 error code. It maps to ErrInternal
+// when constructing a response.
+type InternalErrorCode string
+
+func (e InternalErrorCode) ErrorCode() ErrorCode { return ErrInternal }
+func (e InternalErrorCode) Error() string { return string(ErrInternal) }
+
+// Message tries to return the same string as S3 would return for the error
+// response, when it is known, or an empty string when it is not. If you see,
+// in the wild, status text for a code we don't have listed here, please let
+// us know!
+func (e ErrorCode) Message() string {
+ switch e {
+ case ErrNoSuchBucket:
+ return "The specified bucket does not exist"
+ case ErrRequestTimeTooSkewed:
+ return "The difference between the request time and the current time is too large"
+ case ErrMalformedXML:
+ return "The XML you provided was not well-formed or did not validate against our published schema"
+ default:
+ return ""
+ }
+}
+
+func (e ErrorCode) Status() int {
+ switch e {
+ case ErrBucketAlreadyExists,
+ ErrBucketNotEmpty:
+ return http.StatusConflict
+
+ case ErrBadDigest,
+ ErrIllegalVersioningConfiguration,
+ ErrIncompleteBody,
+ ErrIncorrectNumberOfFilesInPostRequest,
+ ErrInlineDataTooLarge,
+ ErrInvalidArgument,
+ ErrInvalidBucketName,
+ ErrInvalidDigest,
+ ErrInvalidPart,
+ ErrInvalidPartOrder,
+ ErrInvalidToken,
+ ErrInvalidURI,
+ ErrKeyTooLong,
+ ErrMetadataTooLarge,
+ ErrMethodNotAllowed,
+ ErrMalformedPOSTRequest,
+ ErrMalformedXML,
+ ErrTooManyBuckets:
+ return http.StatusBadRequest
+
+ case ErrRequestTimeTooSkewed:
+ return http.StatusForbidden
+
+ case ErrInvalidRange:
+ return http.StatusRequestedRangeNotSatisfiable
+
+ case ErrNoSuchBucket,
+ ErrNoSuchKey,
+ ErrNoSuchUpload,
+ ErrNoSuchVersion:
+ return http.StatusNotFound
+
+ case ErrNotImplemented:
+ return http.StatusNotImplemented
+
+ case ErrMissingContentLength:
+ return http.StatusLengthRequired
+
+ case ErrInternal:
+ return http.StatusInternalServerError
+ }
+
+ return http.StatusInternalServerError
+}
+
+// HasErrorCode asserts that the error has a specific error code:
+//
+// if HasErrorCode(err, ErrNoSuchBucket) {
+// // handle condition
+// }
+//
+// If err is nil and code is ErrNone, HasErrorCode returns true.
+//
+func HasErrorCode(err error, code ErrorCode) bool {
+ if err == nil && code == "" {
+ return true
+ }
+ s3err, ok := err.(interface{ ErrorCode() ErrorCode })
+ if !ok {
+ return false
+ }
+ return s3err.ErrorCode() == code
+}
+
+// IsAlreadyExists asserts that the error is a kind that indicates the resource
+// already exists, similar to os.IsExist.
+func IsAlreadyExists(err error) bool {
+ return HasErrorCode(err, ErrBucketAlreadyExists)
+}
+
+type resourceErrorResponse struct {
+ ErrorResponse
+ Resource string
+}
+
+var _ errorResponse = &resourceErrorResponse{}
+
+func ResourceError(code ErrorCode, resource string) error {
+ return &resourceErrorResponse{
+ ErrorResponse{Code: code, Message: code.Message()},
+ resource,
+ }
+}
+
+func BucketNotFound(bucket string) error { return ResourceError(ErrNoSuchBucket, bucket) }
+func KeyNotFound(key string) error { return ResourceError(ErrNoSuchKey, key) }
+
+type requestTimeTooSkewedResponse struct {
+ ErrorResponse
+ ServerTime time.Time
+ MaxAllowedSkewMilliseconds durationAsMilliseconds
+}
+
+var _ errorResponse = &requestTimeTooSkewedResponse{}
+
+func requestTimeTooSkewed(at time.Time, max time.Duration) error {
+ code := ErrRequestTimeTooSkewed
+ return &requestTimeTooSkewedResponse{
+ ErrorResponse{Code: code, Message: code.Message()},
+ at, durationAsMilliseconds(max),
+ }
+}
+
+// durationAsMilliseconds tricks xml.Marshal into serialising a time.Duration as
+// truncated milliseconds instead of nanoseconds.
+type durationAsMilliseconds time.Duration
+
+func (m durationAsMilliseconds) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ var s = fmt.Sprintf("%d", time.Duration(m)/time.Millisecond)
+ return e.EncodeElement(s, start)
+}
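+
+// For example (illustrative): with the default 15-minute skew limit, the
+// element marshals as <MaxAllowedSkewMilliseconds>900000</MaxAllowedSkewMilliseconds>,
+// since 15 * 60 * 1000 = 900000.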
diff --git a/vendor/github.com/johannesboyne/gofakes3/go.mod b/vendor/github.com/johannesboyne/gofakes3/go.mod
new file mode 100644
index 00000000..bef71a6e
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/go.mod
@@ -0,0 +1,16 @@
+module github.com/johannesboyne/gofakes3
+
+require (
+ github.com/aws/aws-sdk-go v1.17.4
+ github.com/boltdb/bolt v1.3.1
+ github.com/davecgh/go-spew v1.1.0
+ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46
+ github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63
+ github.com/spf13/afero v1.2.1
+ github.com/stretchr/testify v1.3.0 // indirect
+ golang.org/x/net v0.0.0-20190310074541-c10a0554eabf // indirect
+ golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa // indirect
+ golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f
+ gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
+ gopkg.in/yaml.v2 v2.2.2 // indirect
+)
diff --git a/vendor/github.com/johannesboyne/gofakes3/go.sum b/vendor/github.com/johannesboyne/gofakes3/go.sum
new file mode 100644
index 00000000..72785961
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/go.sum
@@ -0,0 +1,35 @@
+github.com/aws/aws-sdk-go v1.17.4 h1:L2KFocQhg48kIzEAV98SnSz3nmIZ3UDFP+vU647KO3c=
+github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
+github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
+github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0=
+github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM=
+github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
+github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190310074541-c10a0554eabf h1:J7RqX9u0J9ZB37CGaFc2VC+QZZT6E6jnDbrboEFVo0U=
+golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa h1:lqti/xP+yD/6zH5TqEwx2MilNIJY5Vbc6Qr8J3qyPIQ=
+golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f h1:SUQ6L9W8e5xt2GFO9s+i18JGITAfem+a0AQuFU8Ls74=
+golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
+gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/johannesboyne/gofakes3/gofakes3.go b/vendor/github.com/johannesboyne/gofakes3/gofakes3.go
new file mode 100644
index 00000000..25c6ec03
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/gofakes3.go
@@ -0,0 +1,967 @@
+package gofakes3
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+// GoFakeS3 implements HTTP handlers for processing S3 requests and returning
+// S3 responses.
+//
+// Logic is delegated to other components, like Backend or uploader.
+type GoFakeS3 struct {
+ storage Backend
+ versioned VersionedBackend
+
+ timeSource TimeSource
+ timeSkew time.Duration
+ metadataSizeLimit int
+ integrityCheck bool
+ failOnUnimplementedPage bool
+ hostBucket bool
+ uploader *uploader
+ requestID uint64
+ log Logger
+}
+
+// New creates a new GoFakeS3 using the supplied Backend. Backends are pluggable.
+// Several Backend implementations ship with GoFakeS3, which can be found in the
+// gofakes3/backends package.
+func New(backend Backend, options ...Option) *GoFakeS3 {
+ s3 := &GoFakeS3{
+ storage: backend,
+ timeSkew: DefaultSkewLimit,
+ metadataSizeLimit: DefaultMetadataSizeLimit,
+ integrityCheck: true,
+ uploader: newUploader(),
+ requestID: 0,
+ }
+
+ // versioned MUST be set before options as one of the options disables it:
+ s3.versioned, _ = backend.(VersionedBackend)
+
+ for _, opt := range options {
+ opt(s3)
+ }
+ if s3.log == nil {
+ s3.log = DiscardLog()
+ }
+ if s3.timeSource == nil {
+ s3.timeSource = DefaultTimeSource()
+ }
+
+ return s3
+}
+
+func (g *GoFakeS3) nextRequestID() uint64 {
+ return atomic.AddUint64(&g.requestID, 1)
+}
+
+// Server creates an http.Handler that serves the fake AWS S3 API.
+func (g *GoFakeS3) Server() http.Handler {
+ var handler http.Handler = &withCORS{r: http.HandlerFunc(g.routeBase), log: g.log}
+
+ if g.timeSkew != 0 {
+ handler = g.timeSkewMiddleware(handler)
+ }
+
+ if g.hostBucket {
+ handler = g.hostBucketMiddleware(handler)
+ }
+
+ return handler
+}
+
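+// timeSkewMiddleware rejects requests whose "x-amz-date" header (e.g.
+// "20190817T155332Z") reports a client clock more than the configured skew
+// limit away from the server's clock.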
+func (g *GoFakeS3) timeSkewMiddleware(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, rq *http.Request) {
+ timeHdr := rq.Header.Get("x-amz-date")
+
+ if timeHdr != "" {
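+			// A failed parse leaves rqTime as the zero time, which the skew
+			// check below will reject: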
+ rqTime, _ := time.Parse("20060102T150405Z", timeHdr)
+ at := g.timeSource.Now()
+ skew := at.Sub(rqTime)
+
+ if skew < -g.timeSkew || skew > g.timeSkew {
+ g.httpError(w, rq, requestTimeTooSkewed(at, g.timeSkew))
+ return
+ }
+ }
+
+ handler.ServeHTTP(w, rq)
+ })
+}
+
+// hostBucketMiddleware forces the server to use VirtualHost-style bucket URLs:
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
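+//
+// For example, a request for 'http://mybucket.localhost/object.txt' is routed
+// as if the path were '/mybucket/object.txt'.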
+func (g *GoFakeS3) hostBucketMiddleware(handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, rq *http.Request) {
+ parts := strings.SplitN(rq.Host, ".", 2)
+ bucket := parts[0]
+
+ p := rq.URL.Path
+ rq.URL.Path = "/" + bucket
+ if p != "/" {
+ rq.URL.Path += p
+ }
+ g.log.Print(LogInfo, p, "=>", rq.URL)
+
+ handler.ServeHTTP(w, rq)
+ })
+}
+
+func (g *GoFakeS3) httpError(w http.ResponseWriter, r *http.Request, err error) {
+ resp := ensureErrorResponse(err, "") // FIXME: request id
+ if resp.ErrorCode() == ErrInternal {
+ g.log.Print(LogErr, err)
+ }
+
+ w.WriteHeader(resp.ErrorCode().Status())
+
+ if r.Method != http.MethodHead {
+ if err := g.xmlEncoder(w).Encode(resp); err != nil {
+ g.log.Print(LogErr, err)
+ return
+ }
+ }
+}
+
+func (g *GoFakeS3) listBuckets(w http.ResponseWriter, r *http.Request) error {
+ buckets, err := g.storage.ListBuckets()
+ if err != nil {
+ return err
+ }
+
+ s := &Storage{
+ Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
+ Buckets: buckets,
+ Owner: &UserInfo{
+ ID: "fe7272ea58be830e56fe1663b10fafef",
+ DisplayName: "GoFakeS3",
+ },
+ }
+
+ return g.xmlEncoder(w).Encode(s)
+}
+
+// S3 has two versions of this API, which are nearly identical. We manage that
+// jank in here, so the Backend doesn't have to, with the following tricks:
+//
+// - Hiding the NextMarker inside the ContinuationToken for V2 calls
+// - Masking the Owner in the response for V2 calls
+//
+// The wrapping response objects are slightly different too, but the list of
+// objects is pretty much the same.
+//
+// - https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
+// - https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
+//
+func (g *GoFakeS3) listBucket(bucketName string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "LIST BUCKET")
+
+ q := r.URL.Query()
+ prefix := prefixFromQuery(q)
+ page, err := listBucketPageFromQuery(q)
+ if err != nil {
+ return err
+ }
+
+ isVersion2 := q.Get("list-type") == "2"
+
+ g.log.Print(LogInfo, "bucketname:", bucketName)
+ g.log.Print(LogInfo, "prefix :", prefix)
+ g.log.Print(LogInfo, "page :", fmt.Sprintf("%+v", page))
+
+ objects, err := g.storage.ListBucket(bucketName, &prefix, page)
+
+ if err != nil {
+ if err == ErrInternalPageNotImplemented && !g.failOnUnimplementedPage {
+			// We have observed (though not yet confirmed) that simple clients
+			// tend to work fine if you simply ignore pagination, so if the
+			// Backend does not implement it, the default is to retry the
+			// request without a page. If the performance impact of that
+			// matters to you, handle pagination in your Backend.
+ objects, err = g.storage.ListBucket(bucketName, &prefix, ListBucketPage{})
+ if err != nil {
+ return err
+ }
+
+ } else if err == ErrInternalPageNotImplemented && g.failOnUnimplementedPage {
+ return ErrNotImplemented
+ } else {
+ return err
+ }
+ }
+
+ base := ListBucketResultBase{
+ Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
+ Name: bucketName,
+ CommonPrefixes: objects.CommonPrefixes,
+ Contents: objects.Contents,
+ IsTruncated: objects.IsTruncated,
+ Delimiter: prefix.Delimiter,
+ Prefix: prefix.Prefix,
+ MaxKeys: page.MaxKeys,
+ }
+
+ if !isVersion2 {
+ var result = &ListBucketResult{
+ ListBucketResultBase: base,
+ Marker: page.Marker,
+ }
+ if base.Delimiter != "" {
+ // From the S3 docs: "This element is returned only if you specify
+ // a delimiter request parameter." Dunno why. This hack has been moved
+ // into GoFakeS3 to spare backend implementers the trouble.
+ result.NextMarker = objects.NextMarker
+ }
+ return g.xmlEncoder(w).Encode(result)
+
+ } else {
+ var result = &ListBucketResultV2{
+ ListBucketResultBase: base,
+ KeyCount: int64(len(objects.CommonPrefixes) + len(objects.Contents)),
+ StartAfter: q.Get("start-after"),
+ ContinuationToken: q.Get("continuation-token"),
+ }
+ if objects.NextMarker != "" {
+			// These continuation tokens are a cheat: they're just the v1
+			// NextMarker in disguise. That may change at any time and should
+			// not be relied upon.
+ result.NextContinuationToken = base64.URLEncoding.EncodeToString([]byte(objects.NextMarker))
+ }
+
+ // On the topic of "fetch-owner", the AWS docs say, in typically vague style:
+ // "If you want the owner information in the response, you can specify
+ // this parameter with the value set to true."
+ //
+ // What does the bare word 'true' mean when we're talking about a query
+ // string parameter, which can only be a string? Does it mean the word
+ // 'true'? Does it mean 'any truthy string'? Does it mean only the key
+ // needs to be present (i.e. '?fetch-owner'), which we are assuming
+ // for now? This is why you need proper technical writers.
+ //
+ // Probably need to hit up the s3assumer at some point, but until then, here's
+ // another FIXME!
+ if _, ok := q["fetch-owner"]; !ok {
+ for _, v := range result.Contents {
+ v.Owner = nil
+ }
+ }
+
+ return g.xmlEncoder(w).Encode(result)
+ }
+}
+
+func (g *GoFakeS3) listBucketVersions(bucketName string, w http.ResponseWriter, r *http.Request) error {
+ if g.versioned == nil {
+ return ErrNotImplemented
+ }
+
+ q := r.URL.Query()
+ prefix := prefixFromQuery(q)
+ page, err := listBucketVersionsPageFromQuery(q)
+ if err != nil {
+ return err
+ }
+
+ // S300004:
+ if page.HasVersionIDMarker {
+ if page.VersionIDMarker == "" {
+ return ErrorInvalidArgument("version-id-marker", "", "A version-id marker cannot be empty.")
+ } else if !page.HasKeyMarker {
+ return ErrorInvalidArgument("version-id-marker", "", "A version-id marker cannot be specified without a key marker.")
+ }
+
+ } else if page.HasKeyMarker && page.KeyMarker == "" {
+ // S300004: S3 ignores everything if you pass an empty key marker so
+ // let's hide that bit of ugliness from Backend.
+ page = ListBucketVersionsPage{}
+ }
+
+ bucket, err := g.versioned.ListBucketVersions(bucketName, &prefix, &page)
+ if err != nil {
+ return err
+ }
+
+ for _, ver := range bucket.Versions {
+ // S300005: S3 returns the _string_ 'null' for the version ID if the
+ // bucket has never had versioning enabled. GoFakeS3 backend
+ // implementers should be able to simply return the empty string;
+ // GoFakeS3 itself should handle this particular bit of jank once and
+ // once only.
+ if ver.GetVersionID() == "" {
+ ver.setVersionID("null")
+ }
+ }
+
+ return g.xmlEncoder(w).Encode(bucket)
+}
+
+// createBucket creates a new S3 bucket in the underlying backend.
+func (g *GoFakeS3) createBucket(bucket string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "CREATE BUCKET:", bucket)
+
+ if err := ValidateBucketName(bucket); err != nil {
+ return err
+ }
+ if err := g.storage.CreateBucket(bucket); err != nil {
+ return err
+ }
+
+ w.Header().Set("Location", "/"+bucket)
+ w.Write([]byte{})
+ return nil
+}
+
+// deleteBucket deletes the bucket in the underlying backend, if and only if
+// it contains no items.
+func (g *GoFakeS3) deleteBucket(bucket string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "DELETE BUCKET:", bucket)
+ if err := g.storage.DeleteBucket(bucket); err != nil {
+ return err
+ }
+ w.WriteHeader(http.StatusNoContent)
+ return nil
+}
+
+// headBucket checks whether a bucket exists.
+func (g *GoFakeS3) headBucket(bucket string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "HEAD BUCKET", bucket)
+ g.log.Print(LogInfo, "bucketname:", bucket)
+
+ if err := g.ensureBucketExists(bucket); err != nil {
+ return err
+ }
+
+ w.Write([]byte{})
+ return nil
+}
+
+// getObject retrieves an object from the bucket.
+func (g *GoFakeS3) getObject(
+ bucket, object string,
+ versionID VersionID,
+ w http.ResponseWriter,
+ r *http.Request,
+) error {
+
+ g.log.Print(LogInfo, "GET OBJECT")
+ g.log.Print(LogInfo, "Bucket:", bucket)
+ g.log.Print(LogInfo, "└── Object:", object)
+
+ rnge, err := parseRangeHeader(r.Header.Get("Range"))
+ if err != nil {
+ return err
+ }
+
+ var obj *Object
+
+ { // get object from backend
+ if versionID == "" {
+ obj, err = g.storage.GetObject(bucket, object, rnge)
+ if err != nil {
+ return err
+ }
+ } else {
+ if g.versioned == nil {
+ return ErrNotImplemented
+ }
+ obj, err = g.versioned.GetObjectVersion(bucket, object, versionID, rnge)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if obj == nil {
+ g.log.Print(LogErr, "unexpected nil object for key", bucket, object)
+ return ErrInternal
+ }
+ defer obj.Contents.Close()
+
+ if err := g.writeGetOrHeadObjectResponse(obj, w, r); err != nil {
+ return err
+ }
+
+ // Writes Content-Length, and Content-Range if applicable:
+ obj.Range.writeHeader(obj.Size, w)
+
+ if _, err := io.Copy(w, obj.Contents); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// writeGetOrHeadObjectResponse contains shared logic for constructing headers for
+// a HEAD and a GET request for a /bucket/object URL.
+func (g *GoFakeS3) writeGetOrHeadObjectResponse(obj *Object, w http.ResponseWriter, r *http.Request) error {
+ // "If the current version of the object is a delete marker, Amazon S3
+ // behaves as if the object was deleted and includes x-amz-delete-marker:
+ // true in the response."
+ if obj.IsDeleteMarker {
+ w.Header().Set("x-amz-version-id", string(obj.VersionID))
+ w.Header().Set("x-amz-delete-marker", "true")
+ return KeyNotFound(obj.Name)
+ }
+
+ for mk, mv := range obj.Metadata {
+ w.Header().Set(mk, mv)
+ }
+ w.Header().Set("Last-Modified", formatHeaderTime(g.timeSource.Now()))
+ w.Header().Set("Accept-Ranges", "bytes")
+ w.Header().Set("ETag", `"`+hex.EncodeToString(obj.Hash)+`"`)
+
+ if obj.VersionID != "" {
+ w.Header().Set("x-amz-version-id", string(obj.VersionID))
+ }
+ return nil
+}
+
+// headObject retrieves only an object's metadata, not its contents.
+func (g *GoFakeS3) headObject(
+ bucket, object string,
+ versionID VersionID,
+ w http.ResponseWriter,
+ r *http.Request,
+) error {
+
+ g.log.Print(LogInfo, "HEAD OBJECT")
+ g.log.Print(LogInfo, "Bucket:", bucket)
+ g.log.Print(LogInfo, "└── Object:", object)
+
+ obj, err := g.storage.HeadObject(bucket, object)
+ if err != nil {
+ return err
+ }
+ if obj == nil {
+ g.log.Print(LogErr, "unexpected nil object for key", bucket, object)
+ return ErrInternal
+ }
+ defer obj.Contents.Close()
+
+ if err := g.writeGetOrHeadObjectResponse(obj, w, r); err != nil {
+ return err
+ }
+
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", obj.Size))
+
+ return nil
+}
+
+// createObjectBrowserUpload allows objects to be created from a multipart upload initiated
+// by a browser form.
+func (g *GoFakeS3) createObjectBrowserUpload(bucket string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "CREATE OBJECT THROUGH BROWSER UPLOAD")
+
+ const _24MB = (1 << 20) * 24 // maximum amount of memory before temp files are used
+ if err := r.ParseMultipartForm(_24MB); nil != err {
+ return ErrMalformedPOSTRequest
+ }
+
+ keyValues := r.MultipartForm.Value["key"]
+ if len(keyValues) != 1 {
+ return ErrIncorrectNumberOfFilesInPostRequest
+ }
+ key := keyValues[0]
+
+ g.log.Print(LogInfo, "(BUC)", bucket)
+ g.log.Print(LogInfo, "(KEY)", key)
+
+ fileValues := r.MultipartForm.File["file"]
+ if len(fileValues) != 1 {
+ return ErrIncorrectNumberOfFilesInPostRequest
+ }
+ fileHeader := fileValues[0]
+
+ infile, err := fileHeader.Open()
+ if err != nil {
+ return err
+ }
+ defer infile.Close()
+
+ meta, err := metadataHeaders(r.MultipartForm.Value, g.timeSource.Now(), g.metadataSizeLimit)
+ if err != nil {
+ return err
+ }
+
+ if len(key) > KeySizeLimit {
+ return ResourceError(ErrKeyTooLong, key)
+ }
+
+ // FIXME: how does Content-MD5 get sent when using the browser? does it?
+ rdr, err := newHashingReader(infile, "")
+ if err != nil {
+ return err
+ }
+
+ result, err := g.storage.PutObject(bucket, key, meta, rdr, fileHeader.Size)
+ if err != nil {
+ return err
+ }
+ if result.VersionID != "" {
+ w.Header().Set("x-amz-version-id", string(result.VersionID))
+ }
+
+ w.Header().Set("ETag", `"`+hex.EncodeToString(rdr.Sum(nil))+`"`)
+ return nil
+}
+
+// createObject creates a new S3 object from the request body.
+func (g *GoFakeS3) createObject(bucket, object string, w http.ResponseWriter, r *http.Request) (err error) {
+ g.log.Print(LogInfo, "CREATE OBJECT:", bucket, object)
+
+ meta, err := metadataHeaders(r.Header, g.timeSource.Now(), g.metadataSizeLimit)
+ if err != nil {
+ return err
+ }
+
+ size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
+ if err != nil || size <= 0 {
+ return ErrMissingContentLength
+ }
+
+ if len(object) > KeySizeLimit {
+ return ResourceError(ErrKeyTooLong, object)
+ }
+
+ var md5Base64 string
+ if g.integrityCheck {
+ md5Base64 = r.Header.Get("Content-MD5")
+
+ if _, ok := r.Header[textproto.CanonicalMIMEHeaderKey("Content-MD5")]; ok && md5Base64 == "" {
+ return ErrInvalidDigest // Satisfies s3tests
+ }
+ }
+
+ // hashingReader is still needed to get the ETag even if integrityCheck
+ // is set to false:
+ rdr, err := newHashingReader(r.Body, md5Base64)
+ defer r.Body.Close()
+ if err != nil {
+ return err
+ }
+
+ result, err := g.storage.PutObject(bucket, object, meta, rdr, size)
+ if err != nil {
+ return err
+ }
+
+ if result.VersionID != "" {
+ g.log.Print(LogInfo, "CREATED VERSION:", bucket, object, result.VersionID)
+ w.Header().Set("x-amz-version-id", string(result.VersionID))
+ }
+ w.Header().Set("ETag", `"`+hex.EncodeToString(rdr.Sum(nil))+`"`)
+
+ return nil
+}
+
+func (g *GoFakeS3) deleteObject(bucket, object string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "DELETE:", bucket, object)
+ result, err := g.storage.DeleteObject(bucket, object)
+ if err != nil {
+ return err
+ }
+
+ if result.IsDeleteMarker {
+ w.Header().Set("x-amz-delete-marker", "true")
+ } else {
+ w.Header().Set("x-amz-delete-marker", "false")
+ }
+
+ if result.VersionID != "" {
+ w.Header().Set("x-amz-version-id", string(result.VersionID))
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+ return nil
+}
+
+func (g *GoFakeS3) deleteObjectVersion(bucket, object string, version VersionID, w http.ResponseWriter, r *http.Request) error {
+ if g.versioned == nil {
+ return ErrNotImplemented
+ }
+
+ g.log.Print(LogInfo, "DELETE VERSION:", bucket, object, version)
+ result, err := g.versioned.DeleteObjectVersion(bucket, object, version)
+ if err != nil {
+ return err
+ }
+ g.log.Print(LogInfo, "DELETED VERSION:", bucket, object, version)
+
+ if result.IsDeleteMarker {
+ w.Header().Set("x-amz-delete-marker", "true")
+ } else {
+ w.Header().Set("x-amz-delete-marker", "false")
+ }
+
+ if result.VersionID != "" {
+ w.Header().Set("x-amz-version-id", string(result.VersionID))
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+ return nil
+}
+
+// deleteMulti deletes multiple S3 objects from the bucket.
+// https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
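+//
+// The request body is roughly of this shape (a sketch; see the linked docs
+// for the full schema):
+//
+//	<Delete>
+//	    <Object><Key>sample1.txt</Key></Object>
+//	    <Object><Key>sample2.txt</Key></Object>
+//	    <Quiet>true</Quiet>
+//	</Delete>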
+func (g *GoFakeS3) deleteMulti(bucket string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "delete multi", bucket)
+
+ var in DeleteRequest
+
+ defer r.Body.Close()
+ dc := xml.NewDecoder(r.Body)
+ if err := dc.Decode(&in); err != nil {
+ return ErrorMessage(ErrMalformedXML, err.Error())
+ }
+
+ keys := make([]string, len(in.Objects))
+ for i, o := range in.Objects {
+ keys[i] = o.Key
+ }
+
+ out, err := g.storage.DeleteMulti(bucket, keys...)
+ if err != nil {
+ return err
+ }
+
+ if in.Quiet {
+ out.Deleted = nil
+ }
+
+ return g.xmlEncoder(w).Encode(out)
+}
+
+func (g *GoFakeS3) initiateMultipartUpload(bucket, object string, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "initiate multipart upload", bucket, object)
+
+ meta, err := metadataHeaders(r.Header, g.timeSource.Now(), g.metadataSizeLimit)
+ if err != nil {
+ return err
+ }
+ if err := g.ensureBucketExists(bucket); err != nil {
+ return err
+ }
+
+ upload := g.uploader.Begin(bucket, object, meta, g.timeSource.Now())
+ out := InitiateMultipartUpload{
+ UploadID: upload.ID,
+ Bucket: bucket,
+ Key: object,
+ }
+ return g.xmlEncoder(w).Encode(out)
+}
+
+// From the docs:
+// A part number uniquely identifies a part and also defines its position
+// within the object being created. If you upload a new part using the same
+// part number that was used with a previous part, the previously uploaded part
+// is overwritten. Each part must be at least 5 MB in size, except the last
+// part. There is no size limit on the last part of your multipart upload.
+//
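+// The client-side call sequence, sketched against the raw HTTP API:
+//
+//	POST /bucket/key?uploads                   -> returns an UploadId
+//	PUT  /bucket/key?partNumber=N&uploadId=ID  -> repeated for each part
+//	POST /bucket/key?uploadId=ID               -> completes the upload
+//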
+func (g *GoFakeS3) putMultipartUploadPart(bucket, object string, uploadID UploadID, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "put multipart upload", bucket, object, uploadID)
+
+ partNumber, err := strconv.ParseInt(r.URL.Query().Get("partNumber"), 10, 0)
+ if err != nil || partNumber <= 0 || partNumber > MaxUploadPartNumber {
+ return ErrInvalidPart
+ }
+
+ size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
+ if err != nil || size <= 0 {
+ return ErrMissingContentLength
+ }
+
+ upload, err := g.uploader.Get(bucket, object, uploadID)
+ if err != nil {
+ // FIXME: What happens with S3 when you abort a multipart upload while
+ // part uploads are still in progress? In this case, we will retain the
+ // reference to the part even though another request goroutine may
+ // delete it; it will be available for GC when this function finishes.
+ return err
+ }
+
+ defer r.Body.Close()
+ var rdr io.Reader = r.Body
+
+ if g.integrityCheck {
+ md5Base64 := r.Header.Get("Content-MD5")
+ if _, ok := r.Header[textproto.CanonicalMIMEHeaderKey("Content-MD5")]; ok && md5Base64 == "" {
+ return ErrInvalidDigest // Satisfies s3tests
+ }
+
+ if md5Base64 != "" {
+ var err error
+ rdr, err = newHashingReader(rdr, md5Base64)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ body, err := ReadAll(rdr, size)
+ if err != nil {
+ return err
+ }
+
+ if int64(len(body)) != r.ContentLength {
+ return ErrIncompleteBody
+ }
+
+ etag, err := upload.AddPart(int(partNumber), g.timeSource.Now(), body)
+ if err != nil {
+ return err
+ }
+
+ w.Header().Add("ETag", etag)
+ return nil
+}
+
+func (g *GoFakeS3) abortMultipartUpload(bucket, object string, uploadID UploadID, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "abort multipart upload", bucket, object, uploadID)
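+	// Complete removes the upload from the uploader and returns it; an abort
+	// simply discards the returned value. (This reading of the uploader's
+	// contract is an assumption; the uploader itself is defined elsewhere.)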
+ if _, err := g.uploader.Complete(bucket, object, uploadID); err != nil {
+ return err
+ }
+ w.WriteHeader(http.StatusNoContent)
+ return nil
+}
+
+func (g *GoFakeS3) completeMultipartUpload(bucket, object string, uploadID UploadID, w http.ResponseWriter, r *http.Request) error {
+ g.log.Print(LogInfo, "complete multipart upload", bucket, object, uploadID)
+
+ var in CompleteMultipartUploadRequest
+ if err := g.xmlDecodeBody(r.Body, &in); err != nil {
+ return err
+ }
+
+ upload, err := g.uploader.Complete(bucket, object, uploadID)
+ if err != nil {
+ return err
+ }
+
+ fileBody, etag, err := upload.Reassemble(&in)
+ if err != nil {
+ return err
+ }
+
+ result, err := g.storage.PutObject(bucket, object, upload.Meta, bytes.NewReader(fileBody), int64(len(fileBody)))
+ if err != nil {
+ return err
+ }
+ if result.VersionID != "" {
+ w.Header().Set("x-amz-version-id", string(result.VersionID))
+ }
+
+ return g.xmlEncoder(w).Encode(&CompleteMultipartUploadResult{
+ ETag: etag,
+ Bucket: bucket,
+ Key: object,
+ })
+}
+
+func (g *GoFakeS3) listMultipartUploads(bucket string, w http.ResponseWriter, r *http.Request) error {
+ query := r.URL.Query()
+ prefix := prefixFromQuery(query)
+ marker := uploadListMarkerFromQuery(query)
+
+ maxUploads, err := parseClampedInt(query.Get("max-uploads"), DefaultMaxUploads, 0, MaxUploadsLimit)
+ if err != nil {
+ return ErrInvalidURI
+ }
+ if maxUploads == 0 {
+ maxUploads = DefaultMaxUploads
+ }
+
+ out, err := g.uploader.List(bucket, marker, prefix, maxUploads)
+ if err != nil {
+ return err
+ }
+
+ return g.xmlEncoder(w).Encode(out)
+}
+
+func (g *GoFakeS3) listMultipartUploadParts(bucket, object string, uploadID UploadID, w http.ResponseWriter, r *http.Request) error {
+ query := r.URL.Query()
+
+ marker, err := parseClampedInt(query.Get("part-number-marker"), 0, 0, math.MaxInt64)
+ if err != nil {
+ return ErrInvalidURI
+ }
+
+ maxParts, err := parseClampedInt(query.Get("max-parts"), DefaultMaxUploadParts, 0, MaxUploadPartsLimit)
+ if err != nil {
+ return ErrInvalidURI
+ }
+
+ out, err := g.uploader.ListParts(bucket, object, uploadID, int(marker), maxParts)
+ if err != nil {
+ return err
+ }
+
+ return g.xmlEncoder(w).Encode(out)
+}
+
+func (g *GoFakeS3) getBucketVersioning(bucket string, w http.ResponseWriter, r *http.Request) error {
+ var config VersioningConfiguration
+
+ if g.versioned != nil {
+ var err error
+ config, err = g.versioned.VersioningConfiguration(bucket)
+ if err != nil {
+ return err
+ }
+ }
+
+ return g.xmlEncoder(w).Encode(config)
+}
+
+func (g *GoFakeS3) putBucketVersioning(bucket string, w http.ResponseWriter, r *http.Request) error {
+ var in VersioningConfiguration
+ if err := g.xmlDecodeBody(r.Body, &in); err != nil {
+ return err
+ }
+
+ if g.versioned == nil {
+ if in.MFADelete == MFADeleteEnabled || in.Status == VersioningEnabled {
+ // We only need to respond that this is not implemented if there's an
+ // attempt to enable it. If we receive a request to disable it, or an
+ // empty request, that matches the current state and has no effect so
+ // we can accept it.
+ return ErrNotImplemented
+ } else {
+ return nil
+ }
+ }
+
+ g.log.Print(LogInfo, "PUT VERSIONING:", in.Status)
+ return g.versioned.SetVersioningConfiguration(bucket, in)
+}
+
+func (g *GoFakeS3) ensureBucketExists(bucket string) error {
+ exists, err := g.storage.BucketExists(bucket)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return ResourceError(ErrNoSuchBucket, bucket)
+ }
+ return nil
+}
+
+func (g *GoFakeS3) xmlEncoder(w http.ResponseWriter) *xml.Encoder {
+	// Content-Type must be set before the first call to Write; anything set
+	// afterwards is silently discarded by net/http:
+	w.Header().Set("Content-Type", "application/xml")
+	w.Write([]byte(xml.Header))
+
+ xe := xml.NewEncoder(w)
+ xe.Indent("", " ")
+ return xe
+}
+
+func (g *GoFakeS3) xmlDecodeBody(rdr io.ReadCloser, into interface{}) error {
+ body, err := ioutil.ReadAll(rdr)
+ defer rdr.Close()
+ if err != nil {
+ return err
+ }
+
+ if err := xml.Unmarshal(body, into); err != nil {
+ return ErrorMessage(ErrMalformedXML, err.Error())
+ }
+
+ return nil
+}
+
+func formatHeaderTime(t time.Time) string {
+ // https://github.com/aws/aws-sdk-go/issues/1937 - FIXED
+ // https://github.com/aws/aws-sdk-go-v2/issues/178 - Still open
+ // .Format("Mon, 2 Jan 2006 15:04:05 MST")
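+	//
+	// The linked issues concern SDKs that fail to parse Last-Modified unless
+	// the day is zero-padded, hence "02" rather than "2" below, with the
+	// zone pinned to the literal "GMT".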
+
+ tc := t.In(time.UTC)
+ return tc.Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
+}
+
+func metadataSize(meta map[string]string) int {
+ total := 0
+ for k, v := range meta {
+ total += len(k) + len(v)
+ }
+ return total
+}
+
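+// metadataHeaders copies the "X-Amz-"-prefixed headers into the metadata map,
+// e.g. "X-Amz-Meta-Colour: red" becomes meta["X-Amz-Meta-Colour"] = "red".
+// Only the first value of each header is retained.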
+func metadataHeaders(headers map[string][]string, at time.Time, sizeLimit int) (map[string]string, error) {
+ meta := make(map[string]string)
+ for hk, hv := range headers {
+ if strings.HasPrefix(hk, "X-Amz-") {
+ meta[hk] = hv[0]
+ }
+ }
+ meta["Last-Modified"] = formatHeaderTime(at)
+
+ if sizeLimit > 0 && metadataSize(meta) > sizeLimit {
+ return meta, ErrMetadataTooLarge
+ }
+
+ return meta, nil
+}
+
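+// listBucketPageFromQuery maps the V1 and V2 pagination query parameters onto
+// a single ListBucketPage. For example ("Zm9v" is "foo" base64-encoded):
+//
+//	V1: ?marker=foo               -> page.Marker == "foo"
+//	V2: ?continuation-token=Zm9v  -> page.Marker == "foo"
+//	V2: ?start-after=foo          -> page.Marker == "foo"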
+func listBucketPageFromQuery(query url.Values) (page ListBucketPage, rerr error) {
+ maxKeys, err := parseClampedInt(query.Get("max-keys"), DefaultMaxBucketKeys, 0, MaxBucketKeys)
+ if err != nil {
+ return page, err
+ }
+
+ page.MaxKeys = maxKeys
+
+ if _, page.HasMarker = query["marker"]; page.HasMarker {
+ // List Objects V1 uses marker only:
+ page.Marker = query.Get("marker")
+
+ } else if _, page.HasMarker = query["continuation-token"]; page.HasMarker {
+ // List Objects V2 uses continuation-token preferentially, or
+ // start-after if continuation-token is missing. continuation-token is
+ // an opaque value that looks like this: 1ueGcxLPRx1Tr/XYExHnhbYLgveDs2J/wm36Hy4vbOwM=.
+		// Since that looks like base64 junk anyway, we cheat: we base64-encode
+		// the next marker and hide it in the continuation-token.
+ tok, err := base64.URLEncoding.DecodeString(query.Get("continuation-token"))
+ if err != nil {
+ // FIXME: log
+ return page, ErrInvalidToken // FIXME: confirm for sure what AWS does here
+ }
+ page.Marker = string(tok)
+
+ } else if _, page.HasMarker = query["start-after"]; page.HasMarker {
+ // List Objects V2 uses start-after if continuation-token is missing:
+ page.Marker = query.Get("start-after")
+ }
+
+ return page, nil
+}
+
+func listBucketVersionsPageFromQuery(query url.Values) (page ListBucketVersionsPage, rerr error) {
+ maxKeys, err := parseClampedInt(query.Get("max-keys"), DefaultMaxBucketVersionKeys, 0, MaxBucketVersionKeys)
+ if err != nil {
+ return page, err
+ }
+
+ page.MaxKeys = maxKeys
+ page.KeyMarker = query.Get("key-marker")
+ page.VersionIDMarker = VersionID(query.Get("version-id-marker"))
+ _, page.HasKeyMarker = query["key-marker"]
+ _, page.HasVersionIDMarker = query["version-id-marker"]
+
+ return page, nil
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/hash.go b/vendor/github.com/johannesboyne/gofakes3/hash.go
new file mode 100644
index 00000000..e0425ae1
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/hash.go
@@ -0,0 +1,78 @@
+package gofakes3
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/base64"
+ "fmt"
+ "hash"
+ "io"
+)
+
+// hashingReader proxies an existing io.Reader, passing each read block to the
+// given hash.Hash.
+//
+// If the expected hash is not empty, once the underlying reader returns EOF,
+// the hash is checked.
+type hashingReader struct {
+ inner io.Reader
+ expected []byte
+ hash hash.Hash
+ sum []byte
+}
+
+func newHashingReader(inner io.Reader, expectedMD5Base64 string) (*hashingReader, error) {
+ var md5Bytes []byte
+ var err error
+
+ if expectedMD5Base64 != "" {
+ md5Bytes, err = base64.StdEncoding.DecodeString(expectedMD5Base64)
+ if err != nil {
+ return nil, ErrInvalidDigest
+ }
+ if len(md5Bytes) != 16 {
+ return nil, ErrInvalidDigest
+ }
+ }
+
+ return &hashingReader{
+ inner: inner,
+ expected: md5Bytes,
+ hash: md5.New(),
+ }, nil
+}
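+
+// A minimal usage sketch (the base64 string is the MD5 of "hello world" and
+// is shown for illustration only):
+//
+//	rdr, _ := newHashingReader(strings.NewReader("hello world"),
+//		"XrY7u+Ae7tCTyyK7j1rNww==")
+//	body, err := ioutil.ReadAll(rdr) // err is ErrBadDigest on MD5 mismatch
+//	etag := hex.EncodeToString(rdr.Sum(nil))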
+
+// Sum returns the hash of the data read from the inner reader so far. If the
+// hash has already been finalised at EOF, the cached sum is returned;
+// otherwise the digest is appended to 'into', per the hash.Hash contract.
+func (h *hashingReader) Sum(into []byte) []byte {
+ if h.sum != nil {
+ return h.sum
+ }
+ return h.hash.Sum(into)
+}
+
+func (h *hashingReader) Read(p []byte) (n int, err error) {
+ n, err = h.inner.Read(p)
+
+ if n != 0 {
+ wn, _ := h.hash.Write(p[:n]) // Hash.Write never returns an error.
+ if wn != n {
+ return n, fmt.Errorf("short write to hasher")
+ }
+ }
+
+ if err != nil {
+ if err == io.EOF {
+ h.sum = h.hash.Sum(nil)
+
+ if h.expected != nil && !bytes.Equal(h.sum, h.expected) {
+				// FIXME: some more context here would be useful; need to
+				// flesh out what S3 responds with in this case.
+ return n, ErrBadDigest
+ }
+ }
+ return n, err
+ }
+
+ return n, nil
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/internal/goskipiter/iter.go b/vendor/github.com/johannesboyne/gofakes3/internal/goskipiter/iter.go
new file mode 100644
index 00000000..7e56c993
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/internal/goskipiter/iter.go
@@ -0,0 +1,66 @@
+package goskipiter
+
+import "github.com/ryszard/goskiplist/skiplist"
+
+// Iterator wraps goskiplist's iterator, which is a bit janky; seeking doesn't
+// play nice with the iteration idiom. If you seek, then iterate using the
+// examples provided in the godoc, your iteration will always skip the first
+// result. It would be less error prone and astonishing if Seek meant that the
+// next call to Next() would give you what you expect.
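+//
+// With this wrapper the usual idiom behaves as expected: after Seek(key), the
+// first call to Next() yields the seeked element (a sketch; 'list' is assumed
+// to be a *skiplist.SkipList):
+//
+//	iter := goskipiter.New(list.Iterator())
+//	iter.Seek(key)
+//	for iter.Next() {
+//		fmt.Println(iter.Key(), iter.Value())
+//	}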
+type Iterator struct {
+ inner skiplist.Iterator
+ didSeek bool
+ seekWasOK bool
+}
+
+func New(inner skiplist.Iterator) *Iterator {
+ return &Iterator{inner: inner}
+}
+
+// Next returns true if the iterator contains subsequent elements
+// and advances its state to the next element if that is possible.
+func (iter *Iterator) Next() (ok bool) {
+ if iter.didSeek {
+ iter.didSeek = false
+ return iter.seekWasOK
+ } else {
+ return iter.inner.Next()
+ }
+}
+
+// Previous returns true if the iterator contains previous elements
+// and rewinds its state to the previous element if that is possible.
+func (iter *Iterator) Previous() (ok bool) {
+ if iter.didSeek {
+ panic("not implemented")
+ }
+ return iter.inner.Previous()
+}
+
+// Key returns the current key.
+func (iter *Iterator) Key() interface{} {
+ return iter.inner.Key()
+}
+
+// Value returns the current value.
+func (iter *Iterator) Value() interface{} {
+ return iter.inner.Value()
+}
+
+// Seek reduces iterative seek costs for searching forward into the Skip List
+// by remarking the range of keys over which it has scanned before. If the
+// requested key occurs prior to that point, the Skip List will start searching
+// from the beginning as a safeguard. It returns true if the key is within the
+// known range of the list.
+func (iter *Iterator) Seek(key interface{}) (ok bool) {
+ iter.didSeek = true
+ ok = iter.inner.Seek(key)
+ iter.seekWasOK = ok
+ return ok
+}
+
+// Close this iterator to reap resources associated with it. While not
+// strictly required, it will provide extra hints for the garbage collector.
+func (iter *Iterator) Close() {
+ iter.inner.Close()
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/internal/s3io/io.go b/vendor/github.com/johannesboyne/gofakes3/internal/s3io/io.go
new file mode 100644
index 00000000..9bba9aaf
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/internal/s3io/io.go
@@ -0,0 +1,13 @@
+package s3io
+
+import "io"
+
+type ReaderWithDummyCloser struct{ io.Reader }
+
+func (d ReaderWithDummyCloser) Close() error { return nil }
+
+type NoOpReadCloser struct{}
+
+func (d NoOpReadCloser) Read(b []byte) (n int, err error) { return 0, io.EOF }
+
+func (d NoOpReadCloser) Close() error { return nil }
diff --git a/vendor/github.com/johannesboyne/gofakes3/log.go b/vendor/github.com/johannesboyne/gofakes3/log.go
new file mode 100644
index 00000000..178726f8
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/log.go
@@ -0,0 +1,111 @@
+package gofakes3
+
+import "log"
+
+type LogLevel string
+
+const (
+ LogErr LogLevel = "ERR"
+ LogWarn LogLevel = "WARN"
+ LogInfo LogLevel = "INFO"
+)
+
+// Logger provides a very minimal target for logging implementations to hit to
+// allow arbitrary logging dependencies to be used with GoFakeS3.
+//
+// Only an adapter for the standard library's log package is provided with
+// GoFakeS3; other libraries will require an adapter. Adapters are trivial to
+// write.
+//
+// For zap:
+//
+//	type ZapLog struct {
+//		log *zap.Logger
+//	}
+//
+//	func (l ZapLog) Print(level LogLevel, v ...interface{}) {
+// switch level {
+// case gofakes3.LogErr:
+// l.log.Error(fmt.Sprint(v...))
+// case gofakes3.LogWarn:
+// l.log.Warn(fmt.Sprint(v...))
+// case gofakes3.LogInfo:
+// l.log.Info(fmt.Sprint(v...))
+// default:
+// panic("unknown level")
+// }
+// }
+//
+// For logrus:
+//
+// type LogrusLog struct {
+// log *logrus.Logger
+// }
+//
+// func (l LogrusLog) Print(level LogLevel, v ...interface{}) {
+// switch level {
+// case gofakes3.LogErr:
+// l.log.Errorln(v...)
+// case gofakes3.LogWarn:
+// l.log.Warnln(v...)
+// case gofakes3.LogInfo:
+// l.log.Infoln(v...)
+// default:
+// panic("unknown level")
+// }
+// }
+//
+type Logger interface {
+ Print(level LogLevel, v ...interface{})
+}
+
+// GlobalLog creates a Logger that uses the global log.Println() function.
+//
+// All levels are reported by default. If you pass levels to this function,
+// it will act as a level whitelist.
+func GlobalLog(levels ...LogLevel) Logger {
+ return newStdLog(log.Println, levels...)
+}
+
+// StdLog creates a Logger that uses the stdlib's log.Logger type.
+//
+// All levels are reported by default. If you pass levels to this function,
+// it will act as a level whitelist.
+func StdLog(log *log.Logger, levels ...LogLevel) Logger {
+ return newStdLog(log.Println, levels...)
+}
+
+// DiscardLog creates a Logger that discards all messages.
+func DiscardLog() Logger {
+ return &discardLog{}
+}
+
+type stdLog struct {
+ log func(v ...interface{})
+ levels map[LogLevel]bool
+}
+
+func newStdLog(log func(v ...interface{}), levels ...LogLevel) Logger {
+ sl := &stdLog{log: log}
+ if len(levels) > 0 {
+ sl.levels = map[LogLevel]bool{}
+ for _, lv := range levels {
+ sl.levels[lv] = true
+ }
+ }
+ return sl
+}
+
+func (s *stdLog) Print(level LogLevel, v ...interface{}) {
+ if s.levels == nil || s.levels[level] {
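+		// Prepend the level to the variadic args without allocating a
+		// second slice: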
+ v = append(v, nil)
+ copy(v[1:], v)
+ v[0] = level
+ s.log(v...)
+ }
+}
+
+type discardLog struct{}
+
+func (d discardLog) Print(level LogLevel, v ...interface{}) {}
diff --git a/vendor/github.com/johannesboyne/gofakes3/logo.sketch b/vendor/github.com/johannesboyne/gofakes3/logo.sketch
new file mode 100644
index 00000000..45947fa7
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/logo.sketch
Binary files differ
diff --git a/vendor/github.com/johannesboyne/gofakes3/makefile.go b/vendor/github.com/johannesboyne/gofakes3/makefile.go
new file mode 100644
index 00000000..decc8fc2
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/makefile.go
@@ -0,0 +1,202 @@
+//+build tools
+
+// Run this script like so:
+//
+// go run makefile.go <cmd> <args>...
+//
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/shabbyrobe/gocovmerge"
+ "golang.org/x/tools/cover"
+)
+
+func main() {
+ if err := run(); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func run() error {
+	var commandNames = []string{
+		"cover",
+		"builddocker",
+		"buildrelease",
+		"release",
+	}
+ if len(os.Args) < 2 {
+ return fmt.Errorf("command missing: expected %s", commandNames)
+ }
+
+ command, args := os.Args[1], os.Args[2:]
+ switch command {
+ case "cover":
+ return runCover(args)
+ case "builddocker":
+ return runBuildDocker()
+ case "buildrelease":
+ return runBuildRelease()
+ case "release":
+ return runRelease(args)
+ default:
+ return fmt.Errorf("unknown command %v: expected %s", command, commandNames)
+ }
+}
+
+func runBuildDocker() error {
+ cmd := command(
+ "go", "build", "-a",
+ "-installsuffix", "cgo",
+ "-o", "./build/main",
+ "./cmd/gofakes3",
+ )
+ cmd.Env = append(cmd.Env, "GO111MODULE=on", "CGO_ENABLED=0", "GOOS=linux")
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+
+ cmd = command("docker", "build", "-t", "johannesboyne/gofakes3", ".")
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func runRelease(args []string) error {
+ if len(args) != 1 {
+ return fmt.Errorf("missing <version> argument")
+ }
+
+ version := args[0]
+ fmt.Printf("[+] releasing %s\n", version)
+ fmt.Println("[+] re-generating")
+
+ fmt.Println("[+] building")
+ if err := runBuildRelease(); err != nil {
+ return err
+ }
+
+	fmt.Println("[+] committing")
+ if err := command("git", "tag", version).Run(); err != nil {
+ return err
+ }
+
+ fmt.Println("[+] complete")
+ return nil
+}
+
+func runBuildRelease() error {
+ var builds = map[string][]string{
+ "linux_amd64": {"GOOS=linux", "GOARCH=amd64"},
+ "linux_arm7": {"GOOS=linux", "GOARCH=arm", "GOARM=7"},
+ "darwin_amd64": {"GOOS=darwin", "GOARCH=amd64"},
+ "windows_amd64.exe": {"GOOS=windows", "GOARCH=amd64"},
+ }
+
+ for suffix, build := range builds {
+ cmd := command(
+ "go", "build",
+ "-o", fmt.Sprintf("./build/gofakes3_%s", suffix),
+ "./cmd/gofakes3",
+ )
+ cmd.Env = append(cmd.Env, "GO111MODULE=on")
+ cmd.Env = append(cmd.Env, build...)
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// runCover collects true code coverage for all packages in gofakes3.
+// It does so by running 'go test' for each child package (enumerated by
+// 'go list ./...') with the '-coverpkg' flag, populated with the same
+// 'go list'.
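+//
+// Usage (the merged profile is written to the named file, or to stdout if the
+// argument is omitted):
+//
+//	go run makefile.go cover coverage.out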
+func runCover(args []string) error {
+ pkgs := goList()
+
+ var files []string
+
+ for _, pkg := range pkgs {
+ covFile, err := ioutil.TempFile("", "")
+ if err != nil {
+ return err
+ }
+ covFile.Close()
+ defer os.Remove(covFile.Name())
+
+ files = append(files, covFile.Name())
+ cmd := exec.Command("go", "test",
+ "-covermode=atomic",
+ fmt.Sprintf("-coverprofile=%s", covFile.Name()),
+ fmt.Sprintf("-coverpkg=%s", strings.Join(pkgs, ",")),
+ pkg,
+ )
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ }
+
+ var merged []*cover.Profile
+ for _, file := range files {
+ profiles, err := cover.ParseProfiles(file)
+ if err != nil {
+ return fmt.Errorf("failed to parse profiles: %v", err)
+ }
+ for _, p := range profiles {
+ merged = gocovmerge.AddProfile(merged, p)
+ }
+ }
+
+ var out io.WriteCloser = os.Stdout
+ if len(args) > 0 {
+ var err error
+ out, err = os.Create(args[0])
+ if err != nil {
+ return err
+ }
+ }
+ defer out.Close()
+
+ return gocovmerge.DumpProfiles(merged, out)
+}
+
+func command(name string, args ...string) *exec.Cmd {
+ cmd := exec.Command(name, args...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ cmd.Env = append([]string{}, os.Environ()...)
+ return cmd
+}
+
+func goList() (pkgs []string) {
+ cmd := exec.Command("go", "list", "./...")
+
+ var stdout bytes.Buffer
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ panic(err)
+ }
+
+ scanner := bufio.NewScanner(&stdout)
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if len(line) == 0 {
+ continue
+ }
+ pkgs = append(pkgs, line)
+ }
+ return pkgs
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/messages.go b/vendor/github.com/johannesboyne/gofakes3/messages.go
new file mode 100644
index 00000000..30df6f9c
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/messages.go
@@ -0,0 +1,527 @@
+package gofakes3
+
+import (
+ "encoding/xml"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+type Storage struct {
+ XMLName xml.Name `xml:"ListAllMyBucketsResult"`
+ Xmlns string `xml:"xmlns,attr"`
+ Owner *UserInfo `xml:"Owner,omitempty"`
+ Buckets Buckets `xml:"Buckets>Bucket"`
+}
+
+type UserInfo struct {
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName"`
+}
+
+type Buckets []BucketInfo
+
+// Names is a deterministic convenience function returning a sorted list of bucket names.
+func (b Buckets) Names() []string {
+ out := make([]string, len(b))
+ for i, v := range b {
+ out[i] = v.Name
+ }
+ sort.Strings(out)
+ return out
+}
+
+// BucketInfo represents a single bucket returned by the ListBuckets response.
+type BucketInfo struct {
+ Name string `xml:"Name"`
+
+ // CreationDate is required; without it, boto returns the error "('String
+ // does not contain a date:', '')"
+ CreationDate ContentTime `xml:"CreationDate"`
+}
+
+// CommonPrefix is used in Bucket.CommonPrefixes to list partial delimited keys
+// that represent pseudo-directories.
+type CommonPrefix struct {
+ Prefix string `xml:"Prefix"`
+}
+
+type CompletedPart struct {
+ PartNumber int `xml:"PartNumber"`
+ ETag string `xml:"ETag"`
+}
+
+type CompleteMultipartUploadRequest struct {
+ Parts []CompletedPart `xml:"Part"`
+}
+
+func (c CompleteMultipartUploadRequest) partsAreSorted() bool {
+ return sort.IntsAreSorted(c.partIDs())
+}
+
+func (c CompleteMultipartUploadRequest) partIDs() []int {
+ inParts := make([]int, 0, len(c.Parts))
+ for _, inputPart := range c.Parts {
+ inParts = append(inParts, inputPart.PartNumber)
+ }
+ sort.Ints(inParts)
+ return inParts
+}
+
+type CompleteMultipartUploadResult struct {
+ Location string `xml:"Location"`
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ ETag string `xml:"ETag"`
+}
+
+type Content struct {
+ Key string `xml:"Key"`
+ LastModified ContentTime `xml:"LastModified"`
+ ETag string `xml:"ETag"`
+ Size int64 `xml:"Size"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ Owner *UserInfo `xml:"Owner,omitempty"`
+}
+
+type ContentTime struct {
+ time.Time
+}
+
+func NewContentTime(t time.Time) ContentTime {
+ return ContentTime{t}
+}
+
+func (c ContentTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ // This is the format expected by the aws xml code, not the default.
+ if !c.IsZero() {
+ var s = c.Format("2006-01-02T15:04:05.999Z")
+ return e.EncodeElement(s, start)
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Objects []ObjectID `xml:"Object"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ //
+ // By default, the operation uses verbose mode in which the response
+ // includes the result of deletion of each key in your request. In quiet
+ // mode the response includes only keys where the delete operation
+ // encountered an error. For a successful deletion, the operation does not
+ // return any information about the delete in the response body.
+ Quiet bool `xml:"Quiet"`
+}
+
+// MultiDeleteResult contains the response from a multi delete operation.
+type MultiDeleteResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ Deleted []ObjectID `xml:"Deleted"`
+ Error []ErrorResult `xml:",omitempty"`
+}
+
+func (d MultiDeleteResult) AsError() error {
+ if len(d.Error) == 0 {
+ return nil
+ }
+ var strs = make([]string, 0, len(d.Error))
+ for _, er := range d.Error {
+ strs = append(strs, er.String())
+ }
+ return fmt.Errorf("gofakes3: multi delete failed:\n%s", strings.Join(strs, "\n"))
+}
+
+type ErrorResult struct {
+ XMLName xml.Name `xml:"Error"`
+ Key string `xml:"Key,omitempty"`
+ Code ErrorCode `xml:"Code,omitempty"`
+ Message string `xml:"Message,omitempty"`
+ Resource string `xml:"Resource,omitempty"`
+ RequestID string `xml:"RequestId,omitempty"`
+}
+
+func ErrorResultFromError(err error) ErrorResult {
+ switch err := err.(type) {
+ case *resourceErrorResponse:
+ return ErrorResult{
+ Resource: err.Resource,
+ RequestID: err.RequestID,
+ Message: err.Message,
+ Code: err.Code,
+ }
+ case *ErrorResponse:
+ return ErrorResult{
+ RequestID: err.RequestID,
+ Message: err.Message,
+ Code: err.Code,
+ }
+ case Error:
+ return ErrorResult{Code: err.ErrorCode()}
+ default:
+ return ErrorResult{Code: ErrInternal}
+ }
+}
+
+func (er ErrorResult) String() string {
+ return fmt.Sprintf("%s: [%s] %s", er.Key, er.Code, er.Message)
+}
+
+type InitiateMultipartUpload struct {
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadID UploadID `xml:"UploadId"`
+}
+
+type ListBucketResultBase struct {
+ XMLName xml.Name `xml:"ListBucketResult"`
+ Xmlns string `xml:"xmlns,attr"`
+
+ // Name of the bucket.
+ Name string `xml:"Name"`
+
+	// Set to true when not all of the results were returned: if the number
+	// of results exceeds that specified by MaxKeys, the listing is truncated
+	// and this flag is set.
+ IsTruncated bool `xml:"IsTruncated,omitempty"`
+
+ // Causes keys that contain the same string between the prefix and the
+ // first occurrence of the delimiter to be rolled up into a single result
+ // element in the CommonPrefixes collection. These rolled-up keys are not
+ // returned elsewhere in the response.
+ //
+ // NOTE: Each rolled-up result in CommonPrefixes counts as only one return
+ // against the MaxKeys value. (BW: been waiting to find some confirmation of
+ // that for a while!)
+ Delimiter string `xml:"Delimiter,omitempty"`
+
+ Prefix string `xml:"Prefix"`
+
+ MaxKeys int64 `xml:"MaxKeys,omitempty"`
+
+ CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"`
+ Contents []*Content `xml:"Contents"`
+}
+
+type ListBucketResult struct {
+ ListBucketResultBase
+
+ // Indicates where in the bucket listing begins. Marker is included in the
+ // response if it was sent with the request.
+ Marker string `xml:"Marker"`
+
+ // When the response is truncated (that is, the IsTruncated element value
+ // in the response is true), you can use the key name in this field as a
+ // marker in the subsequent request to get next set of objects. Amazon S3
+ // lists objects in UTF-8 character encoding in lexicographical order.
+ //
+ // NOTE: This element is returned only if you specify a delimiter request
+ // parameter. If the response does not include the NextMarker and it is
+ // truncated, you can use the value of the last Key in the response as the
+ // marker in the subsequent request to get the next set of object keys.
+ NextMarker string `xml:"NextMarker,omitempty"`
+}
+
+type ListBucketResultV2 struct {
+ ListBucketResultBase
+
+ // If ContinuationToken was sent with the request, it is included in the
+ // response.
+ ContinuationToken string `xml:"ContinuationToken,omitempty"`
+
+ // Returns the number of keys included in the response. The value is always
+ // less than or equal to the MaxKeys value.
+ KeyCount int64 `xml:"KeyCount,omitempty"`
+
+ // If the response is truncated, Amazon S3 returns this parameter with a
+ // continuation token. You can specify the token as the continuation-token
+ // in your next request to retrieve the next set of keys.
+ NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
+
+ // If StartAfter was sent with the request, it is included in the response.
+ StartAfter string `xml:"StartAfter,omitempty"`
+}
+
+type DeleteMarker struct {
+ XMLName xml.Name `xml:"DeleteMarker"`
+ Key string `xml:"Key"`
+ VersionID VersionID `xml:"VersionId"`
+ IsLatest bool `xml:"IsLatest"`
+ LastModified ContentTime `xml:"LastModified,omitempty"`
+ Owner *UserInfo `xml:"Owner,omitempty"`
+}
+
+var _ VersionItem = &DeleteMarker{}
+
+func (d DeleteMarker) GetVersionID() VersionID { return d.VersionID }
+func (d *DeleteMarker) setVersionID(i VersionID) { d.VersionID = i }
+
+type Version struct {
+ XMLName xml.Name `xml:"Version"`
+ Key string `xml:"Key"`
+ VersionID VersionID `xml:"VersionId"`
+ IsLatest bool `xml:"IsLatest"`
+ LastModified ContentTime `xml:"LastModified,omitempty"`
+ Size int64 `xml:"Size"`
+
+ // According to the S3 docs, this is always STANDARD for a Version:
+ StorageClass StorageClass `xml:"StorageClass"`
+
+ ETag string `xml:"ETag"`
+ Owner *UserInfo `xml:"Owner,omitempty"`
+}
+
+var _ VersionItem = &Version{}
+
+func (v Version) GetVersionID() VersionID { return v.VersionID }
+func (v *Version) setVersionID(i VersionID) { v.VersionID = i }
+
+type VersionItem interface {
+ GetVersionID() VersionID
+ setVersionID(v VersionID)
+}
+
+type ListBucketVersionsResult struct {
+ XMLName xml.Name `xml:"ListBucketVersionsResult"`
+ Xmlns string `xml:"xmlns,attr"`
+ Name string `xml:"Name"`
+ Delimiter string `xml:"Delimiter,omitempty"`
+ Prefix string `xml:"Prefix,omitempty"`
+ CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"`
+ IsTruncated bool `xml:"IsTruncated"`
+ MaxKeys int64 `xml:"MaxKeys"`
+
+ // Marks the last Key returned in a truncated response.
+ KeyMarker string `xml:"KeyMarker,omitempty"`
+
+ // When the number of responses exceeds the value of MaxKeys, NextKeyMarker
+ // specifies the first key not returned that satisfies the search criteria.
+ // Use this value for the key-marker request parameter in a subsequent
+ // request.
+ NextKeyMarker string `xml:"NextKeyMarker,omitempty"`
+
+ // Marks the last version of the Key returned in a truncated response.
+ VersionIDMarker VersionID `xml:"VersionIdMarker,omitempty"`
+
+ // When the number of responses exceeds the value of MaxKeys,
+ // NextVersionIdMarker specifies the first object version not returned that
+ // satisfies the search criteria. Use this value for the version-id-marker
+ // request parameter in a subsequent request.
+ NextVersionIDMarker VersionID `xml:"NextVersionIdMarker,omitempty"`
+
+	// AWS responds with a list of either <Version> or <DeleteMarker> objects. The order
+	// needs to be preserved and they need to be direct children of ListBucketVersionsResult:
+ // <ListBucketVersionsResult>
+ // <DeleteMarker ... />
+ // <Version ... />
+ // <DeleteMarker ... />
+ // <Version ... />
+ // </ListBucketVersionsResult>
+ Versions []VersionItem
+
+ // prefixes maintains an index of prefixes that have already been seen.
+ // This is a convenience for backend implementers like s3bolt and s3mem,
+ // which operate on a full, flat list of keys.
+ prefixes map[string]bool
+}
+
+func NewListBucketVersionsResult(
+ bucketName string,
+ prefix *Prefix,
+ page *ListBucketVersionsPage,
+) *ListBucketVersionsResult {
+
+ result := &ListBucketVersionsResult{
+ Xmlns: "http://s3.amazonaws.com/doc/2006-03-01/",
+ Name: bucketName,
+ }
+ if prefix != nil {
+ result.Prefix = prefix.Prefix
+ result.Delimiter = prefix.Delimiter
+ }
+ if page != nil {
+ result.MaxKeys = page.MaxKeys
+ result.KeyMarker = page.KeyMarker
+ result.VersionIDMarker = page.VersionIDMarker
+ }
+ return result
+}
+
+func (b *ListBucketVersionsResult) AddPrefix(prefix string) {
+ if b.prefixes == nil {
+ b.prefixes = map[string]bool{}
+ } else if b.prefixes[prefix] {
+ return
+ }
+ b.prefixes[prefix] = true
+ b.CommonPrefixes = append(b.CommonPrefixes, CommonPrefix{Prefix: prefix})
+}
+
+type ListMultipartUploadsResult struct {
+ Bucket string `xml:"Bucket"`
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin.
+ KeyMarker string `xml:"KeyMarker,omitempty"`
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored.
+ UploadIDMarker UploadID `xml:"UploadIdMarker,omitempty"`
+
+ NextKeyMarker string `xml:"NextKeyMarker,omitempty"`
+ NextUploadIDMarker UploadID `xml:"NextUploadIdMarker,omitempty"`
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+ // in the response body. 1,000 is the maximum number of uploads that can be
+ // returned in a response.
+ MaxUploads int64 `xml:"MaxUploads,omitempty"`
+
+ Delimiter string `xml:"Delimiter,omitempty"`
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix.
+ Prefix string `xml:"Prefix,omitempty"`
+
+ CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"`
+ IsTruncated bool `xml:"IsTruncated,omitempty"`
+
+ Uploads []ListMultipartUploadItem `xml:"Upload"`
+}
+
+type ListMultipartUploadItem struct {
+ Key string `xml:"Key"`
+ UploadID UploadID `xml:"UploadId"`
+ Initiator *UserInfo `xml:"Initiator,omitempty"`
+ Owner *UserInfo `xml:"Owner,omitempty"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ Initiated ContentTime `xml:"Initiated,omitempty"`
+}
+
+type ListMultipartUploadPartsResult struct {
+ XMLName xml.Name `xml:"ListPartsResult"`
+
+ Bucket string `xml:"Bucket"`
+ Key string `xml:"Key"`
+ UploadID UploadID `xml:"UploadId"`
+ StorageClass StorageClass `xml:"StorageClass,omitempty"`
+ Initiator *UserInfo `xml:"Initiator,omitempty"`
+ Owner *UserInfo `xml:"Owner,omitempty"`
+ PartNumberMarker int `xml:"PartNumberMarker"`
+ NextPartNumberMarker int `xml:"NextPartNumberMarker"`
+ MaxParts int64 `xml:"MaxParts"`
+ IsTruncated bool `xml:"IsTruncated,omitempty"`
+
+ Parts []ListMultipartUploadPartItem `xml:"Part"`
+}
+
+type ListMultipartUploadPartItem struct {
+ PartNumber int `xml:"PartNumber"`
+ LastModified ContentTime `xml:"LastModified,omitempty"`
+ ETag string `xml:"ETag,omitempty"`
+ Size int64 `xml:"Size"`
+}
+
+// MFADeleteStatus is used by VersioningConfiguration.
+type MFADeleteStatus string
+
+func (v MFADeleteStatus) Enabled() bool { return v == MFADeleteEnabled }
+
+func (v *MFADeleteStatus) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ var s string
+ if err := d.DecodeElement(&s, &start); err != nil {
+ // FIXME: this doesn't seem to detect or report errors if the element is the wrong type.
+ return err
+ }
+ s = strings.ToLower(strings.TrimSpace(s))
+ if s == "enabled" {
+ *v = MFADeleteEnabled
+ } else if s == "disabled" {
+ *v = MFADeleteDisabled
+ } else {
+ return ErrorMessagef(ErrIllegalVersioningConfiguration, "unexpected value %q for MFADeleteStatus, expected 'Enabled' or 'Disabled'", s)
+ }
+ return nil
+}
+
+const (
+ MFADeleteNone MFADeleteStatus = ""
+ MFADeleteEnabled MFADeleteStatus = "Enabled"
+ MFADeleteDisabled MFADeleteStatus = "Disabled"
+)
+
+type ObjectID struct {
+ Key string `xml:"Key"`
+
+ // Versions not supported in GoFakeS3 yet.
+ VersionID string `xml:"VersionId,omitempty" json:"VersionId,omitempty"`
+}
+
+type StorageClass string
+
+func (s StorageClass) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if s == "" {
+ s = StorageStandard
+ }
+ return e.EncodeElement(string(s), start)
+}
+
+const (
+ StorageStandard StorageClass = "STANDARD"
+)
+
+// UploadID uses a string as the underlying type, but the string should only
+// represent a decimal integer. See uploader.uploadID for details.
+type UploadID string
+
+type VersionID string
+
+type VersioningConfiguration struct {
+ XMLName xml.Name `xml:"VersioningConfiguration"`
+
+ Status VersioningStatus `xml:"Status"`
+
+ // When enabled, the bucket owner must include the x-amz-mfa request header
+ // in requests to change the versioning state of a bucket and to
+ // permanently delete a versioned object.
+ MFADelete MFADeleteStatus `xml:"MfaDelete"`
+}
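+
+// A PUT Bucket versioning request body looks roughly like this (a sketch; see
+// the AWS docs for the full schema):
+//
+//	<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+//	    <Status>Enabled</Status>
+//	    <MfaDelete>Disabled</MfaDelete>
+//	</VersioningConfiguration>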
+
+func (v *VersioningConfiguration) Enabled() bool {
+ return v.Status == VersioningEnabled
+}
+
+func (v *VersioningConfiguration) SetEnabled(enabled bool) {
+ if enabled {
+ v.Status = VersioningEnabled
+ } else {
+ v.Status = VersioningSuspended
+ }
+}
+
+type VersioningStatus string
+
+func (v *VersioningStatus) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ var s string
+ if err := d.DecodeElement(&s, &start); err != nil {
+ // FIXME: this doesn't seem to detect or report errors if the element is the wrong type.
+ return err
+ }
+ s = strings.ToLower(strings.TrimSpace(s))
+ if s == "enabled" {
+ *v = VersioningEnabled
+ } else if s == "suspended" {
+ *v = VersioningSuspended
+ } else {
+ return ErrorMessagef(ErrIllegalVersioningConfiguration, "unexpected value %q for Status, expected 'Enabled' or 'Suspended'", s)
+ }
+ return nil
+}
+
+const (
+ VersioningNone VersioningStatus = ""
+ VersioningEnabled VersioningStatus = "Enabled"
+ VersioningSuspended VersioningStatus = "Suspended"
+)
diff --git a/vendor/github.com/johannesboyne/gofakes3/option.go b/vendor/github.com/johannesboyne/gofakes3/option.go
new file mode 100644
index 00000000..3056abbd
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/option.go
@@ -0,0 +1,82 @@
+package gofakes3
+
+import "time"
+
+type Option func(g *GoFakeS3)
+
+// WithTimeSource allows you to substitute the behaviour of time.Now() and
+// time.Since() within GoFakeS3. This can be used to trigger time skew errors,
+// or to ensure the output of the commands is deterministic.
+//
+// See gofakes3.FixedTimeSource(), gofakes3.LocalTimeSource(tz).
+func WithTimeSource(timeSource TimeSource) Option {
+ return func(g *GoFakeS3) { g.timeSource = timeSource }
+}
+
+// WithTimeSkewLimit allows you to reconfigure the allowed skew between the
+// client's clock and the server's clock. The AWS client SDKs will send the
+// "x-amz-date" header containing the time at the client, which is used to
+// calculate the skew.
+//
+// See DefaultSkewLimit for the starting value; set to '0' to disable.
+//
+func WithTimeSkewLimit(skew time.Duration) Option {
+ return func(g *GoFakeS3) { g.timeSkew = skew }
+}
+
+// WithMetadataSizeLimit allows you to reconfigure the maximum allowed metadata
+// size.
+//
+// See DefaultMetadataSizeLimit for the starting value; set to '0' to disable.
+func WithMetadataSizeLimit(size int) Option {
+ return func(g *GoFakeS3) { g.metadataSizeLimit = size }
+}
+
+// WithIntegrityCheck enables or disables Content-MD5 validation when
+// putting an Object.
+func WithIntegrityCheck(check bool) Option {
+ return func(g *GoFakeS3) { g.integrityCheck = check }
+}
+
+// WithLogger allows you to supply a logger to GoFakeS3 for debugging/tracing.
+// logger may be nil.
+func WithLogger(logger Logger) Option {
+ return func(g *GoFakeS3) { g.log = logger }
+}
+
+// WithGlobalLog configures gofakes3 to use GlobalLog() for logging, which uses
+// the standard library's log.Println() call to log messages.
+func WithGlobalLog() Option {
+ return WithLogger(GlobalLog())
+}
+
+// WithRequestID sets the starting ID used to generate the "x-amz-request-id"
+// header.
+func WithRequestID(id uint64) Option {
+ return func(g *GoFakeS3) { g.requestID = id }
+}
+
+// WithHostBucket enables or disables bucket rewriting in the router.
+// If active, the URL 'http://mybucket.localhost/object' will be routed
+// as if the URL path was '/mybucket/object'.
+//
+// See https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+// for details.
+func WithHostBucket(enabled bool) Option {
+ return func(g *GoFakeS3) { g.hostBucket = enabled }
+}
+
+// WithoutVersioning disables versioning on the passed backend, if it supports it.
+func WithoutVersioning() Option {
+ return func(g *GoFakeS3) { g.versioned = nil }
+}
+
+// WithUnimplementedPageError enables the error that occurs if the Backend
+// does not implement paging.
+//
+// By default, GoFakeS3 will simply retry a request for a page of objects
+// without the page marker if the Backend does not implement pagination.
+// This option makes GoFakeS3 return an error in that condition instead.
+func WithUnimplementedPageError() Option {
+ return func(g *GoFakeS3) { g.failOnUnimplementedPage = true }
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/prefix.go b/vendor/github.com/johannesboyne/gofakes3/prefix.go
new file mode 100644
index 00000000..33082fde
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/prefix.go
@@ -0,0 +1,171 @@
+package gofakes3
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+type Prefix struct {
+ HasPrefix bool
+ Prefix string
+
+ HasDelimiter bool
+ Delimiter string
+}
+
+func prefixFromQuery(query url.Values) Prefix {
+ prefix := Prefix{
+ Prefix: query.Get("prefix"),
+ Delimiter: query.Get("delimiter"),
+ }
+ _, prefix.HasPrefix = query["prefix"]
+ _, prefix.HasDelimiter = query["delimiter"]
+ return prefix
+}
+
+func NewPrefix(prefix, delim *string) (p Prefix) {
+ if prefix != nil {
+ p.HasPrefix, p.Prefix = true, *prefix
+ }
+ if delim != nil {
+ p.HasDelimiter, p.Delimiter = true, *delim
+ }
+ return p
+}
+
+func NewFolderPrefix(prefix string) (p Prefix) {
+ p.HasPrefix, p.Prefix = true, prefix
+ p.HasDelimiter, p.Delimiter = true, "/"
+ return p
+}
+
+// FilePrefix returns the path portion, then the remaining portion of the
+// Prefix if the Delimiter is "/". If the Delimiter is not set, or not "/",
+// ok will be false.
+//
+// For example:
+// /foo/bar/ : path: /foo/bar remaining: ""
+// /foo/bar/b : path: /foo/bar remaining: "b"
+// /foo/bar : path: /foo remaining: "bar"
+//
+func (p Prefix) FilePrefix() (path, remaining string, ok bool) {
+ if !p.HasPrefix || !p.HasDelimiter || p.Delimiter != "/" {
+ return "", "", ok
+ }
+
+ idx := strings.LastIndexByte(p.Prefix, '/')
+ if idx < 0 {
+ return "", p.Prefix, true
+ } else {
+ return p.Prefix[:idx], p.Prefix[idx+1:], true
+ }
+}
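+
+// A quick sketch of the mapping above (illustrative only):
+//
+//	p := NewFolderPrefix("foo/bar/")
+//	path, remaining, ok := p.FilePrefix() // "foo/bar", "", true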
+
+// Match checks whether key starts with prefix. If the prefix does not
+// match, ok is false.
+//
+// It is a best-effort attempt to implement the prefix/delimiter matching found
+// in S3.
+//
+// To check whether the key belongs in Contents or CommonPrefixes, compare the
+// result to key.
+//
+func (p Prefix) Match(key string, match *PrefixMatch) (ok bool) {
+ if !p.HasPrefix && !p.HasDelimiter {
+		// If there is no prefix in the search, the whole key is the match:
+ if match != nil {
+ *match = PrefixMatch{Key: key, MatchedPart: key}
+ }
+ return true
+ }
+
+ if !p.HasDelimiter {
+ // If the request does not contain a delimiter, prefix matching is a
+ // simple string prefix:
+ if strings.HasPrefix(key, p.Prefix) {
+ if match != nil {
+ *match = PrefixMatch{Key: key, MatchedPart: p.Prefix}
+ }
+ return true
+ }
+ return false
+ }
+
+ // Delimited + Prefix matches, for example:
+ // $ aws s3 ls s3://my-bucket/
+ // PRE AWSLogs/
+ // $ aws s3 ls s3://my-bucket/AWSLogs
+ // PRE AWSLogs/
+ // $ aws s3 ls s3://my-bucket/AWSLogs/
+ // PRE 260839334643/
+ // $ aws s3 ls s3://my-bucket/AWSLogs/2608
+ // PRE 260839334643/
+
+ keyParts := strings.Split(strings.TrimLeft(key, p.Delimiter), p.Delimiter)
+ preParts := strings.Split(strings.TrimLeft(p.Prefix, p.Delimiter), p.Delimiter)
+
+ if len(keyParts) < len(preParts) {
+ return false
+ }
+
+ // If the key exactly matches the prefix, but only up to a delimiter,
+ // AWS appends the delimiter to the result:
+ // $ aws s3 ls s3://my-bucket/AWSLogs
+ // PRE AWSLogs/
+ appendDelim := len(keyParts) != len(preParts)
+ matched := 0
+
+ last := len(preParts) - 1
+ for i := 0; i < len(preParts); i++ {
+ if i == last {
+ if !strings.HasPrefix(keyParts[i], preParts[i]) {
+ return false
+ }
+
+ } else {
+ if keyParts[i] != preParts[i] {
+ return false
+ }
+ }
+ matched++
+ }
+
+ if matched == 0 {
+ return false
+ }
+
+ out := strings.Join(keyParts[:matched], p.Delimiter)
+ if appendDelim {
+ out += p.Delimiter
+ }
+
+ if match != nil {
+ *match = PrefixMatch{Key: key, CommonPrefix: out != key, MatchedPart: out}
+ }
+ return true
+}
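+
+// Usage sketch (illustrative): matching a delimited prefix against a key
+// nested below it reports a common prefix:
+//
+//	p := NewFolderPrefix("AWSLogs/")
+//	var m PrefixMatch
+//	if p.Match("AWSLogs/260839334643/file.log", &m) {
+//		// m.CommonPrefix == true
+//		// m.MatchedPart == "AWSLogs/260839334643/"
+//	}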
+
+func (p Prefix) String() string {
+ if p.HasDelimiter {
+ return fmt.Sprintf("prefix:%q, delim:%q", p.Prefix, p.Delimiter)
+ } else {
+ return fmt.Sprintf("prefix:%q", p.Prefix)
+ }
+}
+
+type PrefixMatch struct {
+ // Input key passed to PrefixMatch.
+ Key string
+
+ // CommonPrefix indicates whether this key should be returned in the bucket
+ // contents or the common prefixes part of the "list bucket" response.
+ CommonPrefix bool
+
+ // The longest matched part of the key.
+ MatchedPart string
+}
+
+func (match *PrefixMatch) AsCommonPrefix() CommonPrefix {
+ return CommonPrefix{Prefix: match.MatchedPart}
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/range.go b/vendor/github.com/johannesboyne/gofakes3/range.go
new file mode 100644
index 00000000..370e0e04
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/range.go
@@ -0,0 +1,126 @@
+package gofakes3
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+type ObjectRange struct {
+ Start, Length int64
+}
+
+func (o *ObjectRange) writeHeader(sz int64, w http.ResponseWriter) {
+ if o != nil {
+ w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", o.Start, o.Start+o.Length-1, sz))
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", o.Length))
+ } else {
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", sz))
+ }
+}
+
+type ObjectRangeRequest struct {
+ Start, End int64
+ FromEnd bool
+}
+
+const RangeNoEnd = -1
+
+func (o *ObjectRangeRequest) Range(size int64) (*ObjectRange, error) {
+ if o == nil {
+ return nil, nil
+ }
+
+ var start, length int64
+
+ if !o.FromEnd {
+ start = o.Start
+ end := o.End
+
+ if o.End == RangeNoEnd {
+ // If no end is specified, range extends to end of the file.
+ length = size - start
+ } else {
+ length = end - start + 1
+ }
+
+ } else {
+		// If no start is specified, End specifies the suffix length: the
+		// range covers the last End bytes of the file.
+ end := o.End
+ start = size - end
+ length = size - start
+ }
+
+ if start < 0 || length < 0 || start >= size {
+ return nil, ErrInvalidRange
+ }
+
+ if start+length > size {
+ return &ObjectRange{Start: start, Length: size - start}, nil
+ }
+
+ return &ObjectRange{Start: start, Length: length}, nil
+}
+
+// parseRangeHeader parses a single byte range from the Range header.
+//
+// Amazon S3 doesn't support retrieving multiple ranges of data per GET request:
+// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
+func parseRangeHeader(s string) (*ObjectRangeRequest, error) {
+ if s == "" {
+ return nil, nil
+ }
+
+ const b = "bytes="
+ if !strings.HasPrefix(s, b) {
+ return nil, ErrInvalidRange
+ }
+
+ ranges := strings.Split(s[len(b):], ",")
+ if len(ranges) > 1 {
+ return nil, ErrorMessage(ErrNotImplemented, "multiple ranges not supported")
+ }
+
+ rnge := strings.TrimSpace(ranges[0])
+ if len(rnge) == 0 {
+ return nil, ErrInvalidRange
+ }
+
+ i := strings.Index(rnge, "-")
+ if i < 0 {
+ return nil, ErrInvalidRange
+ }
+
+ var o ObjectRangeRequest
+
+ start, end := strings.TrimSpace(rnge[:i]), strings.TrimSpace(rnge[i+1:])
+ if start == "" {
+ o.FromEnd = true
+
+ i, err := strconv.ParseInt(end, 10, 64)
+ if err != nil {
+ return nil, ErrInvalidRange
+ }
+ o.End = i
+
+ } else {
+ i, err := strconv.ParseInt(start, 10, 64)
+ if err != nil || i < 0 {
+ return nil, ErrInvalidRange
+ }
+ o.Start = i
+ if end != "" {
+ i, err := strconv.ParseInt(end, 10, 64)
+ if err != nil || o.Start > i {
+ return nil, ErrInvalidRange
+ }
+ o.End = i
+ } else {
+ o.End = RangeNoEnd
+ }
+ }
+
+ return &o, nil
+}
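+
+// Illustrative sketch of the values produced (derived from the logic above):
+//
+//	parseRangeHeader("bytes=0-499") // Start: 0, End: 499
+//	parseRangeHeader("bytes=500-")  // Start: 500, End: RangeNoEnd
+//	parseRangeHeader("bytes=-500")  // FromEnd: true, End: 500
+//
+// Applying the suffix form to a 1000-byte object:
+//
+//	rq, _ := parseRangeHeader("bytes=-500")
+//	rng, _ := rq.Range(1000) // Start: 500, Length: 500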
diff --git a/vendor/github.com/johannesboyne/gofakes3/routing.go b/vendor/github.com/johannesboyne/gofakes3/routing.go
new file mode 100644
index 00000000..fe0dc462
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/routing.go
@@ -0,0 +1,196 @@
+package gofakes3
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+// routeBase is a http.HandlerFunc that dispatches top level routes for
+// GoFakeS3.
+//
+// URLs are assumed to break down into two common path segments, in the
+// following format:
+// /<bucket>/<object>
+//
+// The operation for most of the core functionality is built around HTTP
+// verbs, but outside the core functionality, the clean separation starts
+// to degrade, especially around multipart uploads.
+//
+func (g *GoFakeS3) routeBase(w http.ResponseWriter, r *http.Request) {
+ var (
+ path = strings.Trim(r.URL.Path, "/")
+ parts = strings.SplitN(path, "/", 2)
+ bucket = parts[0]
+ query = r.URL.Query()
+ object = ""
+ err error
+ )
+
+ hdr := w.Header()
+
+ id := fmt.Sprintf("%016X", g.nextRequestID())
+ hdr.Set("x-amz-id-2", base64.StdEncoding.EncodeToString([]byte(id+id+id+id))) // x-amz-id-2 is 48 bytes of random stuff
+ hdr.Set("x-amz-request-id", id)
+ hdr.Set("Server", "AmazonS3")
+
+ if len(parts) == 2 {
+ object = parts[1]
+ }
+
+ if uploadID := UploadID(query.Get("uploadId")); uploadID != "" {
+ err = g.routeMultipartUpload(bucket, object, uploadID, w, r)
+
+ } else if _, ok := query["uploads"]; ok {
+ err = g.routeMultipartUploadBase(bucket, object, w, r)
+
+ } else if _, ok := query["versioning"]; ok {
+ err = g.routeVersioning(bucket, w, r)
+
+ } else if _, ok := query["versions"]; ok {
+ err = g.routeVersions(bucket, w, r)
+
+ } else if versionID := versionFromQuery(query["versionId"]); versionID != "" {
+ err = g.routeVersion(bucket, object, VersionID(versionID), w, r)
+
+ } else if bucket != "" && object != "" {
+ err = g.routeObject(bucket, object, w, r)
+
+ } else if bucket != "" {
+ err = g.routeBucket(bucket, w, r)
+
+ } else if r.Method == "GET" {
+ err = g.listBuckets(w, r)
+
+ } else {
+ http.NotFound(w, r)
+ return
+ }
+
+ if err != nil {
+ g.httpError(w, r, err)
+ }
+}
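+
+// As an illustrative summary of the dispatch above (assuming the server is
+// mounted at the root):
+//
+//	GET  /                            -> listBuckets
+//	GET  /mybucket                    -> listBucket
+//	PUT  /mybucket/object             -> createObject
+//	GET  /mybucket/object?versionId=v -> getObject (version "v")
+//	POST /mybucket/object?uploads     -> initiateMultipartUpload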
+
+// routeObject handles URLs that contain both a bucket path segment and an
+// object path segment.
+func (g *GoFakeS3) routeObject(bucket, object string, w http.ResponseWriter, r *http.Request) (err error) {
+ switch r.Method {
+ case "GET":
+ return g.getObject(bucket, object, "", w, r)
+ case "HEAD":
+ return g.headObject(bucket, object, "", w, r)
+ case "PUT":
+ return g.createObject(bucket, object, w, r)
+ case "DELETE":
+ return g.deleteObject(bucket, object, w, r)
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+// routeBucket handles URLs that contain only a bucket path segment, not an
+// object path segment.
+func (g *GoFakeS3) routeBucket(bucket string, w http.ResponseWriter, r *http.Request) (err error) {
+ switch r.Method {
+ case "GET":
+ return g.listBucket(bucket, w, r)
+ case "PUT":
+ return g.createBucket(bucket, w, r)
+ case "DELETE":
+ return g.deleteBucket(bucket, w, r)
+ case "HEAD":
+ return g.headBucket(bucket, w, r)
+ case "POST":
+ if _, ok := r.URL.Query()["delete"]; ok {
+ return g.deleteMulti(bucket, w, r)
+ } else {
+ return g.createObjectBrowserUpload(bucket, w, r)
+ }
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+// routeMultipartUploadBase operates on routes that contain '?uploads' in the
+// query string. These routes may or may not have a value for bucket or object;
+// this is validated and handled in the target handler functions.
+func (g *GoFakeS3) routeMultipartUploadBase(bucket, object string, w http.ResponseWriter, r *http.Request) error {
+ switch r.Method {
+ case "GET":
+ return g.listMultipartUploads(bucket, w, r)
+ case "POST":
+ return g.initiateMultipartUpload(bucket, object, w, r)
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+// routeVersioning operates on routes that contain '?versioning' in the
+// query string. These routes may or may not have a value for bucket; this is
+// validated and handled in the target handler functions.
+func (g *GoFakeS3) routeVersioning(bucket string, w http.ResponseWriter, r *http.Request) error {
+ switch r.Method {
+ case "GET":
+ return g.getBucketVersioning(bucket, w, r)
+ case "PUT":
+ return g.putBucketVersioning(bucket, w, r)
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+// routeVersions operates on routes that contain '?versions' in the query string.
+func (g *GoFakeS3) routeVersions(bucket string, w http.ResponseWriter, r *http.Request) error {
+ switch r.Method {
+ case "GET":
+ return g.listBucketVersions(bucket, w, r)
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+// routeVersion operates on routes that contain '?versionId=<id>' in the
+// query string.
+func (g *GoFakeS3) routeVersion(bucket, object string, versionID VersionID, w http.ResponseWriter, r *http.Request) error {
+ switch r.Method {
+ case "GET":
+ return g.getObject(bucket, object, versionID, w, r)
+ case "HEAD":
+ return g.headObject(bucket, object, versionID, w, r)
+ case "DELETE":
+ return g.deleteObjectVersion(bucket, object, versionID, w, r)
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+// routeMultipartUpload operates on routes that contain '?uploadId=<id>' in the
+// query string.
+func (g *GoFakeS3) routeMultipartUpload(bucket, object string, uploadID UploadID, w http.ResponseWriter, r *http.Request) error {
+ switch r.Method {
+ case "GET":
+ return g.listMultipartUploadParts(bucket, object, uploadID, w, r)
+ case "PUT":
+ return g.putMultipartUploadPart(bucket, object, uploadID, w, r)
+ case "DELETE":
+ return g.abortMultipartUpload(bucket, object, uploadID, w, r)
+ case "POST":
+ return g.completeMultipartUpload(bucket, object, uploadID, w, r)
+ default:
+ return ErrMethodNotAllowed
+ }
+}
+
+func versionFromQuery(qv []string) string {
+ // The versionId subresource may be the string 'null'; this has been
+ // observed coming in via Boto. The S3 documentation for the "DELETE
+ // object" endpoint describes a 'null' version explicitly, but we don't
+ // want backend implementers to have to special-case this string, so
+ // let's hide it in here:
+ if len(qv) > 0 && qv[0] != "" && qv[0] != "null" {
+ return qv[0]
+ }
+ return ""
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/time.go b/vendor/github.com/johannesboyne/gofakes3/time.go
new file mode 100644
index 00000000..9092a7d4
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/time.go
@@ -0,0 +1,58 @@
+package gofakes3
+
+import "time"
+
+type TimeSource interface {
+ Now() time.Time
+ Since(time.Time) time.Duration
+}
+
+type TimeSourceAdvancer interface {
+ TimeSource
+ Advance(by time.Duration)
+}
+
+// FixedTimeSource provides a source of time that always returns the
+// specified time.
+func FixedTimeSource(at time.Time) TimeSourceAdvancer {
+ return &fixedTimeSource{time: at}
+}
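+
+// Usage sketch in a test (illustrative, derived from the methods below):
+//
+//	ts := FixedTimeSource(time.Date(2019, 8, 17, 0, 0, 0, 0, time.UTC))
+//	t0 := ts.Now()
+//	ts.Advance(10 * time.Minute)
+//	_ = ts.Since(t0) // 10m0s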
+
+func DefaultTimeSource() TimeSource {
+ return &locatedTimeSource{
+ // XXX: uses time.FixedZone to 'fake' the GMT timezone that S3 uses
+ // (which is basically just UTC with a different name) to avoid
+ // time.LoadLocation, which requires zoneinfo.zip to be available and
+ // can break spectacularly on Windows (https://github.com/golang/go/issues/21881)
+ // or Docker.
+ timeLocation: time.FixedZone("GMT", 0),
+ }
+}
+
+type locatedTimeSource struct {
+ timeLocation *time.Location
+}
+
+func (l *locatedTimeSource) Now() time.Time {
+ return time.Now().In(l.timeLocation)
+}
+
+func (l *locatedTimeSource) Since(t time.Time) time.Duration {
+ return time.Since(t)
+}
+
+type fixedTimeSource struct {
+ time time.Time
+}
+
+func (l *fixedTimeSource) Now() time.Time {
+ return l.time
+}
+
+func (l *fixedTimeSource) Since(t time.Time) time.Duration {
+ return l.time.Sub(t)
+}
+
+func (l *fixedTimeSource) Advance(by time.Duration) {
+ l.time = l.time.Add(by)
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/uploader.go b/vendor/github.com/johannesboyne/gofakes3/uploader.go
new file mode 100644
index 00000000..f7e0f82e
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/uploader.go
@@ -0,0 +1,517 @@
+package gofakes3
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/johannesboyne/gofakes3/internal/goskipiter"
+ "github.com/ryszard/goskiplist/skiplist"
+)
+
+var add1 = new(big.Int).SetInt64(1)
+
+/*
+bucketUploads maintains a map of buckets to the list of multipart uploads
+for that bucket.
+
+A skiplist that maps object keys to upload ids is also maintained to
+support the ListMultipartUploads operation.
+
+From the docs:
+ In the response, the uploads are sorted by key. If your application has
+ initiated more than one multipart upload using the same object key,
+ then uploads in the response are first sorted by key. Additionally,
+ uploads are sorted in ascending order within each key by the upload
+ initiation time.
+
+It's ambiguous whether "sorted by key" means "sorted by the upload ID"
+or "sorted by the object key". It's also ambiguous whether the docs mean
+the sorting applies only within an individual page of results, or to the
+whole result across all paginations. This is supported somewhat, though
+not unambiguously, by the documentation for "key-marker" and
+"upload-id-marker":
+
+ key-marker: Together with upload-id-marker, this parameter specifies the
+ multipart upload after which listing should begin.
+
+ If upload-id-marker is not specified, only the keys lexicographically
+ greater than the specified key-marker will be included in the list.
+
+ If upload-id-marker is specified, any multipart uploads for a key equal to
+ the key-marker might also be included, provided those multipart uploads
+ have upload IDs lexicographically greater than the specified
+ upload-id-marker.
+
+ upload-id-marker: Together with key-marker, specifies the multipart upload
+ after which listing should begin. If key-marker is not specified, the
+ upload-id-marker parameter is ignored.
+
+This implementation assumes "sorted by key" means "sorted by the object
+key" and that the sorting applies across the full pagination set.
+
+The SkipList provides O(log n) performance, but the slices inside are
+linear-time. This should provide an acceptable trade-off for simplicity;
+on my 2013-era i7 machine, a simple linear search for the last element
+in a 100,000 element array of 80-ish byte strings takes barely 1ms.
+*/
+type bucketUploads struct {
+ // uploads should be protected by the coarse lock in uploader:
+ uploads map[UploadID]*multipartUpload
+
+ // objectIndex provides sorted traversal of the bucket uploads.
+ //
+ // The keys in this skiplist are the object keys, the values are the slice
+ // of *multipartUpload structs associated with that key. The skiplist
+ // satisfies the map ordering constraint, the slice satisfies the upload
+ // initiation time constraint.
+ objectIndex *skiplist.SkipList // effectively map[ObjectKey][]*multipartUpload
+}
+
+func newBucketUploads() *bucketUploads {
+ return &bucketUploads{
+ uploads: map[UploadID]*multipartUpload{},
+ objectIndex: skiplist.NewStringMap(),
+ }
+}
+
+// add assumes uploader.mu is acquired
+func (bu *bucketUploads) add(mpu *multipartUpload) {
+ bu.uploads[mpu.ID] = mpu
+
+ uploads, ok := bu.objectIndex.Get(mpu.Object)
+ if !ok {
+ uploads = []*multipartUpload{mpu}
+ } else {
+ uploads = append(uploads.([]*multipartUpload), mpu)
+ }
+ bu.objectIndex.Set(mpu.Object, uploads)
+}
+
+// remove assumes uploader.mu is acquired
+func (bu *bucketUploads) remove(uploadID UploadID) {
+ upload := bu.uploads[uploadID]
+ delete(bu.uploads, uploadID)
+
+ var uploads []*multipartUpload
+ {
+ upv, ok := bu.objectIndex.Get(upload.Object)
+ if !ok || upv == nil {
+ return
+ }
+ uploads = upv.([]*multipartUpload)
+ }
+
+	var found = -1
+	for i, v := range uploads {
+		if v.ID == uploadID {
+			found = i
+			break
+		}
+	}
+
+ if found >= 0 {
+ uploads = append(uploads[:found], uploads[found+1:]...) // delete the found index
+ }
+
+ if len(uploads) == 0 {
+ bu.objectIndex.Delete(upload.Object)
+ } else {
+ bu.objectIndex.Set(upload.Object, uploads)
+ }
+}
+
+// uploader manages multipart uploads.
+//
+// Multipart upload support has the following rather severe limitations (which
+// will hopefully be addressed in the future):
+//
+// - uploads do not interface with the Backend, so they do not
+// currently persist across reboots
+//
+// - upload parts are held in memory, so if you want to upload something huge
+// in multiple parts (which is pretty much exactly what you'd want multipart
+// uploads for), you'll need to make sure your memory is also sufficiently
+// huge!
+//
+// At this stage, the current thinking would be to add a second optional
+// Backend interface that allows persistent operations on multipart upload
+// data, and if a Backend does not implement it, this limited in-memory
+// behaviour can be the fallback. If that can be made to work, it would provide
+// good convenience for Backend implementers if their use case did not require
+// persistent multipart upload handling, or it could be satisfied by this
+// naive implementation.
+//
+type uploader struct {
+ // uploadIDs use a big.Int to allow unbounded IDs (not that you'd be
+ // expected to ever generate 4.2 billion of these but who are we to judge?)
+ uploadID *big.Int
+
+ buckets map[string]*bucketUploads
+ mu sync.Mutex
+}
+
+func newUploader() *uploader {
+ return &uploader{
+ buckets: make(map[string]*bucketUploads),
+ uploadID: new(big.Int),
+ }
+}
+
+func (u *uploader) Begin(bucket, object string, meta map[string]string, initiated time.Time) *multipartUpload {
+ u.mu.Lock()
+ defer u.mu.Unlock()
+
+ u.uploadID.Add(u.uploadID, add1)
+
+ mpu := &multipartUpload{
+ ID: UploadID(u.uploadID.String()),
+ Bucket: bucket,
+ Object: object,
+ Meta: meta,
+ Initiated: initiated,
+ }
+
+ // FIXME: make sure the uploader responds to DeleteBucket
+ bucketUploads := u.buckets[bucket]
+ if bucketUploads == nil {
+ u.buckets[bucket] = newBucketUploads()
+ bucketUploads = u.buckets[bucket]
+ }
+
+ bucketUploads.add(mpu)
+
+ return mpu
+}
+
+func (u *uploader) ListParts(bucket, object string, uploadID UploadID, marker int, limit int64) (*ListMultipartUploadPartsResult, error) {
+ u.mu.Lock()
+ defer u.mu.Unlock()
+
+ mpu, err := u.getUnlocked(bucket, object, uploadID)
+ if err != nil {
+ return nil, err
+ }
+
+ var result = ListMultipartUploadPartsResult{
+ Bucket: bucket,
+ Key: object,
+ UploadID: uploadID,
+ MaxParts: limit,
+ PartNumberMarker: marker,
+ StorageClass: "STANDARD", // FIXME
+ }
+
+ var cnt int64
+ for partNumber, part := range mpu.parts[marker:] {
+ if part == nil {
+ continue
+ }
+
+ if cnt >= limit {
+ result.IsTruncated = true
+ result.NextPartNumberMarker = partNumber
+ break
+ }
+
+ result.Parts = append(result.Parts, ListMultipartUploadPartItem{
+ ETag: part.ETag,
+ Size: int64(len(part.Body)),
+ PartNumber: partNumber,
+ LastModified: part.LastModified,
+ })
+
+ cnt++
+ }
+
+ return &result, nil
+}
+
+func (u *uploader) List(bucket string, marker *UploadListMarker, prefix Prefix, limit int64) (*ListMultipartUploadsResult, error) {
+ u.mu.Lock()
+ defer u.mu.Unlock()
+
+ bucketUploads, ok := u.buckets[bucket]
+ if !ok {
+ return nil, ErrNoSuchUpload
+ }
+
+ var result = ListMultipartUploadsResult{
+ Bucket: bucket,
+ Delimiter: prefix.Delimiter,
+ Prefix: prefix.Prefix,
+ MaxUploads: limit,
+ }
+
+ // we only need to use the uploadID to start the page if one was actually
+ // supplied, otherwise assume we can start from the start of the iterator:
+ var firstFound = true
+
+ var iter = goskipiter.New(bucketUploads.objectIndex.Iterator())
+ if marker != nil {
+ iter.Seek(marker.Object)
+ firstFound = marker.UploadID == ""
+ result.UploadIDMarker = marker.UploadID
+ result.KeyMarker = marker.Object
+ }
+
+ // Indicates whether the returned list of multipart uploads is truncated.
+ // The list can be truncated if the number of multipart uploads exceeds
+ // the limit allowed or specified by MaxUploads.
+ //
+ // In our case, this could be because there are still objects left in the
+ // iterator, or because there are still uploadIDs left in the slice inside
+ // the iteration.
+ var truncated bool
+
+ var cnt int64
+ var seenPrefixes = map[string]bool{}
+ var match PrefixMatch
+
+ for iter.Next() {
+ object := iter.Key().(string)
+ uploads := iter.Value().([]*multipartUpload)
+
+ retry:
+ matched := prefix.Match(object, &match)
+ if !matched {
+ continue
+ }
+
+ if !firstFound {
+ for idx, mpu := range uploads {
+ if mpu.ID == marker.UploadID {
+ firstFound = true
+ uploads = uploads[idx:]
+ goto retry
+ }
+ }
+
+ } else {
+ if match.CommonPrefix {
+ if !seenPrefixes[match.MatchedPart] {
+ result.CommonPrefixes = append(result.CommonPrefixes, match.AsCommonPrefix())
+ seenPrefixes[match.MatchedPart] = true
+ }
+
+ } else {
+ for idx, upload := range uploads {
+ result.Uploads = append(result.Uploads, ListMultipartUploadItem{
+ StorageClass: "STANDARD", // FIXME
+ Key: object,
+ UploadID: upload.ID,
+ Initiated: ContentTime{Time: upload.Initiated},
+ })
+
+ cnt++
+ if cnt >= limit {
+ if idx != len(uploads)-1 { // if this is not the last iteration, we have truncated
+ truncated = true
+ result.NextUploadIDMarker = uploads[idx+1].ID
+ result.NextKeyMarker = object
+ }
+ goto done
+ }
+ }
+ }
+ }
+ }
+
+done:
+ // If we did not truncate while in the middle of an object's upload ID list,
+ // we need to see if there are more objects in the outer iteration:
+ if !truncated {
+ for iter.Next() {
+ object := iter.Key().(string)
+ if matched := prefix.Match(object, &match); matched && !match.CommonPrefix {
+ truncated = true
+
+ // This is not especially defensive; it assumes the rest of the code works
+ // as it should. Could be something to clean up later:
+ result.NextUploadIDMarker = iter.Value().([]*multipartUpload)[0].ID
+ result.NextKeyMarker = object
+ break
+ }
+ }
+ }
+
+ result.IsTruncated = truncated
+
+ return &result, nil
+}
+
+func (u *uploader) Complete(bucket, object string, id UploadID) (*multipartUpload, error) {
+ u.mu.Lock()
+ defer u.mu.Unlock()
+ up, err := u.getUnlocked(bucket, object, id)
+ if err != nil {
+ return nil, err
+ }
+
+ // if getUnlocked succeeded, so will this:
+ u.buckets[bucket].remove(id)
+
+ return up, nil
+}
+
+func (u *uploader) Get(bucket, object string, id UploadID) (mu *multipartUpload, err error) {
+ u.mu.Lock()
+ defer u.mu.Unlock()
+ return u.getUnlocked(bucket, object, id)
+}
+
+func (u *uploader) getUnlocked(bucket, object string, id UploadID) (mu *multipartUpload, err error) {
+ bucketUps, ok := u.buckets[bucket]
+ if !ok {
+ return nil, ErrNoSuchUpload
+ }
+
+ mu, ok = bucketUps.uploads[id]
+ if !ok {
+ return nil, ErrNoSuchUpload
+ }
+
+ if mu.Bucket != bucket || mu.Object != object {
+ // FIXME: investigate what AWS does here; essentially if you initiate a
+ // multipart upload at '/ObjectName1?uploads', then complete the upload
+ // at '/ObjectName2?uploads', what happens?
+ return nil, ErrNoSuchUpload
+ }
+
+ return mu, nil
+}
+
+// UploadListMarker is used to seek to the start of a page in a ListMultipartUploads operation.
+type UploadListMarker struct {
+ // Represents the key-marker query parameter. Together with 'uploadID',
+ // this parameter specifies the multipart upload after which listing should
+ // begin.
+ //
+ // If 'uploadID' is not specified, only the keys lexicographically greater
+ // than the specified key-marker will be included in the list.
+ //
+ // If 'uploadID' is specified, any multipart uploads for a key equal to
+ // 'object' might also be included, provided those multipart uploads have
+ // upload IDs lexicographically greater than the specified uploadID.
+ Object string
+
+ // Represents the upload-id-marker query parameter to the
+ // ListMultipartUploads operation. Together with 'object', specifies the
+ // multipart upload after which listing should begin. If 'object' is not
+ // specified, the 'uploadID' parameter is ignored.
+ UploadID UploadID
+}
+
+// uploadListMarkerFromQuery collects the upload-id-marker and key-marker query parameters
+// to the ListMultipartUploads operation.
+func uploadListMarkerFromQuery(q url.Values) *UploadListMarker {
+ object := q.Get("key-marker")
+ if object == "" {
+ return nil
+ }
+ return &UploadListMarker{Object: object, UploadID: UploadID(q.Get("upload-id-marker"))}
+}
+
+type multipartUploadPart struct {
+ PartNumber int
+ ETag string
+ Body []byte
+ LastModified ContentTime
+}
+
+type multipartUpload struct {
+ ID UploadID
+ Bucket string
+ Object string
+ Meta map[string]string
+ Initiated time.Time
+
+ // Part numbers are limited in S3 to 10,000, so we can be a little wasteful.
+ // If a new part number is added, the slice is grown to that size. Depending
+ // on how bad the input is, this could mean you have a 10,000 element slice
+ // that is almost all nils. This shouldn't be a problem in practice.
+ //
+ // We need to use a slice here so we can get deterministic ordering in order
+ // to support pagination when listing the upload parts.
+ //
+ // The minimum part ID is 1, which means the first item in this slice will
+ // always be nil.
+ //
+ // Do not attempt to access parts without locking mu.
+ parts []*multipartUploadPart
+
+ mu sync.Mutex
+}
+
+func (mpu *multipartUpload) AddPart(partNumber int, at time.Time, body []byte) (etag string, err error) {
+ if partNumber > MaxUploadPartNumber {
+ return "", ErrInvalidPart
+ }
+
+ mpu.mu.Lock()
+ defer mpu.mu.Unlock()
+
+	// The exact form of the ETag is not specified, so let's just invent any
+	// old thing from guaranteed-unique input:
+	hash := md5.New()
+	hash.Write(body) // body is already a []byte; no conversion needed
+ etag = fmt.Sprintf(`"%s"`, hex.EncodeToString(hash.Sum(nil)))
+
+ part := multipartUploadPart{
+ PartNumber: partNumber,
+ Body: body,
+ ETag: etag,
+ LastModified: NewContentTime(at),
+ }
+ if partNumber >= len(mpu.parts) {
+ mpu.parts = append(mpu.parts, make([]*multipartUploadPart, partNumber-len(mpu.parts)+1)...)
+ }
+ mpu.parts[partNumber] = &part
+ return etag, nil
+}
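+
+// Illustrative flow (a sketch; the wrapping HTTP handlers live elsewhere):
+//
+//	mpu := u.Begin("bucket", "object", nil, time.Now())
+//	etag1, _ := mpu.AddPart(1, time.Now(), part1)
+//	etag2, _ := mpu.AddPart(2, time.Now(), part2)
+//	// the client then completes the upload with {1: etag1, 2: etag2}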
+
+func (mpu *multipartUpload) Reassemble(input *CompleteMultipartUploadRequest) (body []byte, etag string, err error) {
+ mpu.mu.Lock()
+ defer mpu.mu.Unlock()
+
+ mpuPartsLen := len(mpu.parts)
+
+ // FIXME: what does AWS do when mpu.Parts > input.Parts? Presumably you may
+ // end up uploading more parts than you need to assemble, so it should
+ // probably just ignore that?
+ if len(input.Parts) > mpuPartsLen {
+ return nil, "", ErrInvalidPart
+ }
+
+ if !input.partsAreSorted() {
+ return nil, "", ErrInvalidPartOrder
+ }
+
+ var size int64
+
+ for _, inPart := range input.Parts {
+ if inPart.PartNumber >= mpuPartsLen || mpu.parts[inPart.PartNumber] == nil {
+ return nil, "", ErrorMessagef(ErrInvalidPart, "unexpected part number %d in complete request", inPart.PartNumber)
+ }
+
+ upPart := mpu.parts[inPart.PartNumber]
+ if inPart.ETag != upPart.ETag {
+ return nil, "", ErrorMessagef(ErrInvalidPart, "unexpected part etag for number %d in complete request", inPart.PartNumber)
+ }
+
+ size += int64(len(upPart.Body))
+ }
+
+ body = make([]byte, 0, size)
+ for _, part := range input.Parts {
+ body = append(body, mpu.parts[part.PartNumber].Body...)
+ }
+
+ hash := fmt.Sprintf("%x", md5.Sum(body))
+
+ return body, hash, nil
+}
diff --git a/vendor/github.com/johannesboyne/gofakes3/util.go b/vendor/github.com/johannesboyne/gofakes3/util.go
new file mode 100644
index 00000000..cd521795
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/util.go
@@ -0,0 +1,58 @@
+package gofakes3
+
+import (
+ "io"
+ "io/ioutil"
+ "strconv"
+)
+
+func parseClampedInt(in string, defaultValue, min, max int64) (int64, error) {
+ var v int64
+ if in == "" {
+ v = defaultValue
+ } else {
+ var err error
+ v, err = strconv.ParseInt(in, 10, 0)
+ if err != nil {
+ return defaultValue, ErrInvalidArgument
+ }
+ }
+
+ if v < min {
+ v = min
+ } else if v > max {
+ v = max
+ }
+
+ return v, nil
+}
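+
+// For example (sketch, derived from the logic above):
+//
+//	parseClampedInt("5000", 1000, 0, 2500) // 2500, nil (clamped to max)
+//	parseClampedInt("", 1000, 0, 2500)     // 1000, nil (default)
+//	parseClampedInt("bork", 1000, 0, 2500) // 1000, ErrInvalidArgument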
+
+// ReadAll is a fakeS3-centric replacement for ioutil.ReadAll(), for use when
+// the size of the result is known ahead of time. It is considerably faster to
+// preallocate the entire slice than to allow growslice to be triggered
+// repeatedly, especially with larger buffers.
+//
+// It also reports S3-specific errors in certain conditions, like
+// ErrIncompleteBody.
+func ReadAll(r io.Reader, size int64) (b []byte, err error) {
+ var n int
+ b = make([]byte, size)
+ n, err = io.ReadFull(r, b)
+ if err == io.ErrUnexpectedEOF {
+ return nil, ErrIncompleteBody
+ } else if err != nil {
+ return nil, err
+ }
+
+ if n != int(size) {
+ return nil, ErrIncompleteBody
+ }
+
+ if extra, err := ioutil.ReadAll(r); err != nil {
+ return nil, err
+ } else if len(extra) > 0 {
+ return nil, ErrIncompleteBody
+ }
+
+ return b, nil
+}
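+
+// Usage sketch (illustrative), e.g. where the request declares its size:
+//
+//	body, err := ReadAll(r.Body, r.ContentLength)
+//	if err != nil {
+//		return err // may be ErrIncompleteBody
+//	}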
diff --git a/vendor/github.com/johannesboyne/gofakes3/validation.go b/vendor/github.com/johannesboyne/gofakes3/validation.go
new file mode 100644
index 00000000..01aecb9a
--- /dev/null
+++ b/vendor/github.com/johannesboyne/gofakes3/validation.go
@@ -0,0 +1,55 @@
+package gofakes3
+
+import (
+ "net"
+ "regexp"
+ "strings"
+)
+
+// This pattern can be used to match both the entire bucket name (including period-
+// separated labels) and the individual label components, presuming you have already
+// split the string by period.
+var bucketNamePattern = regexp.MustCompile(`^[a-z0-9]([a-z0-9\.-]+)[a-z0-9]$`)
+
+// ValidateBucketName applies the rules from the AWS docs:
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
+//
+// 1. Bucket names must comply with DNS naming conventions.
+// 2. Bucket names must be at least 3 and no more than 63 characters long.
+// 3. Bucket names must not contain uppercase characters or underscores.
+// 4. Bucket names must start with a lowercase letter or number.
+//
+// The DNS RFC confirms that the valid range of characters in an LDH label is 'a-z0-9-':
+// https://tools.ietf.org/html/rfc5890#section-2.3.1
+//
+func ValidateBucketName(name string) error {
+ if len(name) < 3 || len(name) > 63 {
+ return ErrorMessage(ErrInvalidBucketName, "bucket name must be >= 3 characters and <= 63")
+ }
+ if !bucketNamePattern.MatchString(name) {
+ return ErrorMessage(ErrInvalidBucketName, "bucket must start and end with 'a-z, 0-9', and contain only 'a-z, 0-9, -' in between")
+ }
+
+ if net.ParseIP(name) != nil {
+ return ErrorMessage(ErrInvalidBucketName, "bucket names must not be formatted as an IP address")
+ }
+
+ // Bucket names must be a series of one or more labels. Adjacent labels are
+ // separated by a single period (.). Bucket names can contain lowercase
+ // letters, numbers, and hyphens. Each label must start and end with a
+ // lowercase letter or a number.
+ labels := strings.Split(name, ".")
+ for _, label := range labels {
+ if !bucketNamePattern.MatchString(label) {
+ return ErrorMessage(ErrInvalidBucketName, "label must start and end with 'a-z, 0-9', and contain only 'a-z, 0-9, -' in between")
+ }
+ }
+
+ return nil
+}
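+
+// For example (sketch):
+//
+//	ValidateBucketName("my-bucket")   // nil
+//	ValidateBucketName("My_Bucket")   // ErrInvalidBucketName (uppercase/underscore)
+//	ValidateBucketName("192.168.0.1") // ErrInvalidBucketName (IP-formatted)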
+
+var etagPattern = regexp.MustCompile(`^"[a-z0-9]+"$`)
+
+func validETag(v string) bool {
+ return etagPattern.MatchString(v)
+}
diff --git a/vendor/github.com/ryszard/goskiplist/AUTHORS b/vendor/github.com/ryszard/goskiplist/AUTHORS
new file mode 100644
index 00000000..0f1991ba
--- /dev/null
+++ b/vendor/github.com/ryszard/goskiplist/AUTHORS
@@ -0,0 +1,12 @@
+# This is the official list of goskiplist authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Google Inc.
+SoundCloud, Ltd.
diff --git a/vendor/github.com/ryszard/goskiplist/CONTRIBUTORS b/vendor/github.com/ryszard/goskiplist/CONTRIBUTORS
new file mode 100644
index 00000000..46e22435
--- /dev/null
+++ b/vendor/github.com/ryszard/goskiplist/CONTRIBUTORS
@@ -0,0 +1,35 @@
+# This is the official list of people who can contribute (and typically
+# have contributed) code to the gomock repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+#
+# An entry with two email addresses specifies that the
+# first address should be used in the submit logs and
+# that the second address should be recognized as the
+# same person when interacting with Rietveld.
+
+# Please keep the list sorted.
+
+Matt T. Proud (mtp) <matt.proud@gmail.com>
+Ric Szopa (Ryszard) <ryszard.szopa@gmail.com>
diff --git a/vendor/github.com/ryszard/goskiplist/LICENSE b/vendor/github.com/ryszard/goskiplist/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/ryszard/goskiplist/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/ryszard/goskiplist/skiplist/skiplist.go b/vendor/github.com/ryszard/goskiplist/skiplist/skiplist.go
new file mode 100644
index 00000000..6aa932ef
--- /dev/null
+++ b/vendor/github.com/ryszard/goskiplist/skiplist/skiplist.go
@@ -0,0 +1,635 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Author: Ric Szopa (Ryszard) <ryszard.szopa@gmail.com>
+
+// Package skiplist implements skip list based maps and sets.
+//
+// Skip lists are a data structure that can be used in place of
+// balanced trees. Skip lists use probabilistic balancing rather than
+// strictly enforced balancing and as a result the algorithms for
+// insertion and deletion in skip lists are much simpler and
+// significantly faster than equivalent algorithms for balanced trees.
+//
+// Skip lists were first described in Pugh, William (June 1990). "Skip
+// lists: a probabilistic alternative to balanced
+// trees". Communications of the ACM 33 (6): 668–676
+package skiplist
+
+import (
+ "math/rand"
+)
+
+// TODO(ryszard):
+// - A separately seeded source of randomness
+
+// p is the fraction of nodes with level i pointers that also have
+// level i+1 pointers. p equal to 1/4 is a good value from the point
+// of view of speed and space requirements. If variability of running
+// times is a concern, 1/2 is a better value for p.
+const p = 0.25
+
+const DefaultMaxLevel = 32
+
+// A node is a container for key-value pairs that are stored in a skip
+// list.
+type node struct {
+ forward []*node
+ backward *node
+ key, value interface{}
+}
+
+// next returns the next node in the skip list containing n.
+func (n *node) next() *node {
+ if len(n.forward) == 0 {
+ return nil
+ }
+ return n.forward[0]
+}
+
+// previous returns the previous node in the skip list containing n.
+func (n *node) previous() *node {
+ return n.backward
+}
+
+// hasNext returns true if n has a next node.
+func (n *node) hasNext() bool {
+ return n.next() != nil
+}
+
+// hasPrevious returns true if n has a previous node.
+func (n *node) hasPrevious() bool {
+ return n.previous() != nil
+}
+
+// A SkipList is a map-like data structure that maintains an ordered
+// collection of key-value pairs. Insertion, lookup, and deletion are
+// all O(log n) operations. A SkipList can efficiently store up to
+// 2^MaxLevel items.
+//
+// To iterate over a skip list (where s is a
+// *SkipList):
+//
+// for i := s.Iterator(); i.Next(); {
+// // do something with i.Key() and i.Value()
+// }
+type SkipList struct {
+ lessThan func(l, r interface{}) bool
+ header *node
+ footer *node
+ length int
+ // MaxLevel determines how many items the SkipList can store
+ // efficiently (2^MaxLevel).
+ //
+	// It is safe to increase MaxLevel to accommodate more
+	// elements. If you decrease MaxLevel and the skip list
+	// already contains nodes on higher levels, the effective
+ // MaxLevel will be the greater of the new MaxLevel and the
+ // level of the highest node.
+ //
+ // A SkipList with MaxLevel equal to 0 is equivalent to a
+ // standard linked list and will not have any of the nice
+ // properties of skip lists (probably not what you want).
+ MaxLevel int
+}
+
+// Len returns the length of s.
+func (s *SkipList) Len() int {
+ return s.length
+}
+
+// Iterator is an interface that you can use to iterate through the
+// skip list (in its entirety or fragments). For a usage example, see
+// the documentation of SkipList.
+//
+// Key and Value return the key and the value of the current node.
+type Iterator interface {
+ // Next returns true if the iterator contains subsequent elements
+ // and advances its state to the next element if that is possible.
+ Next() (ok bool)
+ // Previous returns true if the iterator contains previous elements
+ // and rewinds its state to the previous element if that is possible.
+ Previous() (ok bool)
+ // Key returns the current key.
+ Key() interface{}
+ // Value returns the current value.
+ Value() interface{}
+	// Seek reduces iterative seek costs for searching forward into the Skip List
+	// by remembering the range of keys over which it has scanned before. If the
+	// requested key occurs prior to that point, the Skip List restarts the
+	// search from the beginning as a safeguard. It returns true if the key is
+	// within the known range of the list.
+ Seek(key interface{}) (ok bool)
+ // Close this iterator to reap resources associated with it. While not
+ // strictly required, it will provide extra hints for the garbage collector.
+ Close()
+}
+
+type iter struct {
+ current *node
+ key interface{}
+ list *SkipList
+ value interface{}
+}
+
+func (i iter) Key() interface{} {
+ return i.key
+}
+
+func (i iter) Value() interface{} {
+ return i.value
+}
+
+func (i *iter) Next() bool {
+ if !i.current.hasNext() {
+ return false
+ }
+
+ i.current = i.current.next()
+ i.key = i.current.key
+ i.value = i.current.value
+
+ return true
+}
+
+func (i *iter) Previous() bool {
+ if !i.current.hasPrevious() {
+ return false
+ }
+
+ i.current = i.current.previous()
+ i.key = i.current.key
+ i.value = i.current.value
+
+ return true
+}
+
+func (i *iter) Seek(key interface{}) (ok bool) {
+ current := i.current
+ list := i.list
+
+	// If the existing iterator is outside of the known key range, we should
+	// set the position back to the beginning of the list.
+ if current == nil {
+ current = list.header
+ }
+
+ // If the target key occurs before the current key, we cannot take advantage
+ // of the heretofore spent traversal cost to find it; resetting back to the
+ // beginning is the safest choice.
+ if current.key != nil && list.lessThan(key, current.key) {
+ current = list.header
+ }
+
+	// We should back up one node so that we can seek to our present value if
+	// that is requested for whatever reason.
+ if current.backward == nil {
+ current = list.header
+ } else {
+ current = current.backward
+ }
+
+ current = list.getPath(current, nil, key)
+
+ if current == nil {
+ return
+ }
+
+ i.current = current
+ i.key = current.key
+ i.value = current.value
+
+ return true
+}
+
+func (i *iter) Close() {
+ i.key = nil
+ i.value = nil
+ i.current = nil
+ i.list = nil
+}
+
+type rangeIterator struct {
+ iter
+ upperLimit interface{}
+ lowerLimit interface{}
+}
+
+func (i *rangeIterator) Next() bool {
+ if !i.current.hasNext() {
+ return false
+ }
+
+ next := i.current.next()
+
+ if !i.list.lessThan(next.key, i.upperLimit) {
+ return false
+ }
+
+ i.current = i.current.next()
+ i.key = i.current.key
+ i.value = i.current.value
+ return true
+}
+
+func (i *rangeIterator) Previous() bool {
+ if !i.current.hasPrevious() {
+ return false
+ }
+
+ previous := i.current.previous()
+
+ if i.list.lessThan(previous.key, i.lowerLimit) {
+ return false
+ }
+
+ i.current = i.current.previous()
+ i.key = i.current.key
+ i.value = i.current.value
+ return true
+}
+
+func (i *rangeIterator) Seek(key interface{}) (ok bool) {
+ if i.list.lessThan(key, i.lowerLimit) {
+ return
+ } else if !i.list.lessThan(key, i.upperLimit) {
+ return
+ }
+
+ return i.iter.Seek(key)
+}
+
+func (i *rangeIterator) Close() {
+ i.iter.Close()
+ i.upperLimit = nil
+ i.lowerLimit = nil
+}
+
+// Iterator returns an Iterator that will go through all the elements of s.
+func (s *SkipList) Iterator() Iterator {
+ return &iter{
+ current: s.header,
+ list: s,
+ }
+}
+
+// Seek returns a bidirectional iterator starting with the first element whose
+// key is greater or equal to key; otherwise, a nil iterator is returned.
+func (s *SkipList) Seek(key interface{}) Iterator {
+ current := s.getPath(s.header, nil, key)
+ if current == nil {
+ return nil
+ }
+
+ return &iter{
+ current: current,
+ key: current.key,
+ list: s,
+ value: current.value,
+ }
+}
+
+// SeekToFirst returns a bidirectional iterator starting from the first element
+// in the list if the list is populated; otherwise, a nil iterator is returned.
+func (s *SkipList) SeekToFirst() Iterator {
+ if s.length == 0 {
+ return nil
+ }
+
+ current := s.header.next()
+
+ return &iter{
+ current: current,
+ key: current.key,
+ list: s,
+ value: current.value,
+ }
+}
+
+// SeekToLast returns a bidirectional iterator starting from the last element
+// in the list if the list is populated; otherwise, a nil iterator is returned.
+func (s *SkipList) SeekToLast() Iterator {
+ current := s.footer
+ if current == nil {
+ return nil
+ }
+
+ return &iter{
+ current: current,
+ key: current.key,
+ list: s,
+ value: current.value,
+ }
+}
+
+// Range returns an iterator that will go through all the elements of the
+// skip list that are greater than or equal to from, but less than to.
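+//
+// A minimal sketch (hypothetical bounds; assumes an int-keyed list):
+//
+//	for it := s.Range(10, 20); it.Next(); {
+//		// visits keys k with 10 <= k < 20
+//	}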
+func (s *SkipList) Range(from, to interface{}) Iterator {
+ start := s.getPath(s.header, nil, from)
+ return &rangeIterator{
+ iter: iter{
+ current: &node{
+ forward: []*node{start},
+ backward: start,
+ },
+ list: s,
+ },
+ upperLimit: to,
+ lowerLimit: from,
+ }
+}
+
+func (s *SkipList) level() int {
+ return len(s.header.forward) - 1
+}
+
+func maxInt(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+func (s *SkipList) effectiveMaxLevel() int {
+ return maxInt(s.level(), s.MaxLevel)
+}
+
+// randomLevel returns a new random level between 0 and effectiveMaxLevel.
+func (s SkipList) randomLevel() (n int) {
+ for n = 0; n < s.effectiveMaxLevel() && rand.Float64() < p; n++ {
+ }
+ return
+}
+
+// Get returns the value associated with key from s (nil if the key is
+// not present in s). The second return value is true when the key is
+// present.
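+//
+// A minimal sketch (hypothetical key; assumes an int-keyed list):
+//
+//	if v, ok := s.Get(7); ok {
+//		fmt.Println(v)
+//	}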
+func (s *SkipList) Get(key interface{}) (value interface{}, ok bool) {
+ candidate := s.getPath(s.header, nil, key)
+
+ if candidate == nil || candidate.key != key {
+ return nil, false
+ }
+
+ return candidate.value, true
+}
+
+// GetGreaterOrEqual finds the first node whose key is greater than or equal
+// to min. It returns its value, its actual key, and whether such a node is
+// present in the skip list.
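+//
+// A minimal sketch (hypothetical key; assumes an int-keyed list):
+//
+//	if k, v, ok := s.GetGreaterOrEqual(7); ok {
+//		fmt.Println(k, v) // first key >= 7 and its value
+//	}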
+func (s *SkipList) GetGreaterOrEqual(min interface{}) (actualKey, value interface{}, ok bool) {
+ candidate := s.getPath(s.header, nil, min)
+
+ if candidate != nil {
+ return candidate.key, candidate.value, true
+ }
+ return nil, nil, false
+}
+
+// getPath populates update with nodes that constitute the path to the
+// node that may contain key. The candidate node will be returned. If
+// update is nil, it will be left alone (the candidate node will still
+// be returned). If update is not nil, but it doesn't have enough
+// slots for all the nodes in the path, getPath will panic.
+func (s *SkipList) getPath(current *node, update []*node, key interface{}) *node {
+ depth := len(current.forward) - 1
+
+ for i := depth; i >= 0; i-- {
+ for current.forward[i] != nil && s.lessThan(current.forward[i].key, key) {
+ current = current.forward[i]
+ }
+ if update != nil {
+ update[i] = current
+ }
+ }
+ return current.next()
+}
+
+// Set sets the value associated with key in s.
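+//
+// A minimal sketch (hypothetical key and value):
+//
+//	s := NewIntMap()
+//	s.Set(7, "seven")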
+func (s *SkipList) Set(key, value interface{}) {
+ if key == nil {
+ panic("goskiplist: nil keys are not supported")
+ }
+	// s.level() starts from 0, so we need to allocate one more slot than the level.
+ update := make([]*node, s.level()+1, s.effectiveMaxLevel()+1)
+ candidate := s.getPath(s.header, update, key)
+
+ if candidate != nil && candidate.key == key {
+ candidate.value = value
+ return
+ }
+
+ newLevel := s.randomLevel()
+
+ if currentLevel := s.level(); newLevel > currentLevel {
+		// There are no pointers for the higher levels in update, so
+		// point them at the header. Also add the corresponding forward
+		// links to the header.
+ for i := currentLevel + 1; i <= newLevel; i++ {
+ update = append(update, s.header)
+ s.header.forward = append(s.header.forward, nil)
+ }
+ }
+
+ newNode := &node{
+ forward: make([]*node, newLevel+1, s.effectiveMaxLevel()+1),
+ key: key,
+ value: value,
+ }
+
+ if previous := update[0]; previous.key != nil {
+ newNode.backward = previous
+ }
+
+ for i := 0; i <= newLevel; i++ {
+ newNode.forward[i] = update[i].forward[i]
+ update[i].forward[i] = newNode
+ }
+
+ s.length++
+
+ if newNode.forward[0] != nil {
+ if newNode.forward[0].backward != newNode {
+ newNode.forward[0].backward = newNode
+ }
+ }
+
+ if s.footer == nil || s.lessThan(s.footer.key, key) {
+ s.footer = newNode
+ }
+}
+
+// Delete removes the node with the given key.
+//
+// It returns the old value and whether the node was present.
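+//
+// A minimal sketch (hypothetical key; assumes an int-keyed list):
+//
+//	if old, ok := s.Delete(7); ok {
+//		fmt.Println("removed", old)
+//	}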
+func (s *SkipList) Delete(key interface{}) (value interface{}, ok bool) {
+ if key == nil {
+ panic("goskiplist: nil keys are not supported")
+ }
+	update := make([]*node, s.level()+1, s.effectiveMaxLevel()+1)
+ candidate := s.getPath(s.header, update, key)
+
+ if candidate == nil || candidate.key != key {
+ return nil, false
+ }
+
+ previous := candidate.backward
+ if s.footer == candidate {
+ s.footer = previous
+ }
+
+ next := candidate.next()
+ if next != nil {
+ next.backward = previous
+ }
+
+ for i := 0; i <= s.level() && update[i].forward[i] == candidate; i++ {
+ update[i].forward[i] = candidate.forward[i]
+ }
+
+ for s.level() > 0 && s.header.forward[s.level()] == nil {
+ s.header.forward = s.header.forward[:s.level()]
+ }
+ s.length--
+
+ return candidate.value, true
+}
+
+// NewCustomMap returns a new SkipList that will use lessThan as the
+// comparison function. lessThan should define a linear order on keys
+// you intend to use with the SkipList.
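+//
+// A minimal sketch of a descending int map (hypothetical comparator):
+//
+//	s := NewCustomMap(func(l, r interface{}) bool {
+//		return l.(int) > r.(int)
+//	})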
+func NewCustomMap(lessThan func(l, r interface{}) bool) *SkipList {
+ return &SkipList{
+ lessThan: lessThan,
+ header: &node{
+ forward: []*node{nil},
+ },
+ MaxLevel: DefaultMaxLevel,
+ }
+}
+
+// Ordered is an interface which can be linearly ordered by the
+// LessThan method, whereby this instance is deemed to be less than
+// other. Additionally, Ordered instances should behave properly when
+// compared using == and !=.
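+//
+// A minimal sketch of an implementation (hypothetical type):
+//
+//	type Int int
+//
+//	func (i Int) LessThan(other Ordered) bool {
+//		return i < other.(Int)
+//	}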
+type Ordered interface {
+ LessThan(other Ordered) bool
+}
+
+// New returns a new SkipList.
+//
+// Its keys must implement the Ordered interface.
+func New() *SkipList {
+ comparator := func(left, right interface{}) bool {
+ return left.(Ordered).LessThan(right.(Ordered))
+ }
+	return NewCustomMap(comparator)
+}
+
+// NewIntMap returns a SkipList that accepts int keys.
+func NewIntMap() *SkipList {
+ return NewCustomMap(func(l, r interface{}) bool {
+ return l.(int) < r.(int)
+ })
+}
+
+// NewStringMap returns a SkipList that accepts string keys.
+func NewStringMap() *SkipList {
+ return NewCustomMap(func(l, r interface{}) bool {
+ return l.(string) < r.(string)
+ })
+}
+
+// Set is an ordered set data structure.
+//
+// Its elements must implement the Ordered interface. It uses a
+// SkipList for storage, and it gives you similar performance
+// guarantees.
+//
+// To iterate over a set (where s is a *Set):
+//
+// for i := s.Iterator(); i.Next(); {
+// // do something with i.Key().
+// // i.Value() will be nil.
+// }
+type Set struct {
+ skiplist SkipList
+}
+
+// NewSet returns a new Set.
+func NewSet() *Set {
+ comparator := func(left, right interface{}) bool {
+ return left.(Ordered).LessThan(right.(Ordered))
+ }
+ return NewCustomSet(comparator)
+}
+
+// NewCustomSet returns a new Set that will use lessThan as the
+// comparison function. lessThan should define a linear order on
+// elements you intend to use with the Set.
+func NewCustomSet(lessThan func(l, r interface{}) bool) *Set {
+ return &Set{skiplist: SkipList{
+ lessThan: lessThan,
+ header: &node{
+ forward: []*node{nil},
+ },
+ MaxLevel: DefaultMaxLevel,
+ }}
+}
+
+// NewIntSet returns a new Set that accepts int elements.
+func NewIntSet() *Set {
+ return NewCustomSet(func(l, r interface{}) bool {
+ return l.(int) < r.(int)
+ })
+}
+
+// NewStringSet returns a new Set that accepts string elements.
+func NewStringSet() *Set {
+ return NewCustomSet(func(l, r interface{}) bool {
+ return l.(string) < r.(string)
+ })
+}
+
+// Add adds key to s.
+func (s *Set) Add(key interface{}) {
+ s.skiplist.Set(key, nil)
+}
+
+// Remove tries to remove key from the set. It returns true if key was
+// present.
+func (s *Set) Remove(key interface{}) (ok bool) {
+ _, ok = s.skiplist.Delete(key)
+ return ok
+}
+
+// Len returns the length of the set.
+func (s *Set) Len() int {
+ return s.skiplist.Len()
+}
+
+// Contains returns true if key is present in s.
+func (s *Set) Contains(key interface{}) bool {
+ _, ok := s.skiplist.Get(key)
+ return ok
+}
+
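+// Iterator returns an Iterator that will go through all the elements of
+// the set.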
+func (s *Set) Iterator() Iterator {
+ return s.skiplist.Iterator()
+}
+
+// Range returns an iterator that will go through all the elements of
+// the set that are greater than or equal to from, but less than to.
+func (s *Set) Range(from, to interface{}) Iterator {
+ return s.skiplist.Range(from, to)
+}
+
+// SetMaxLevel sets MaxLevel in the underlying skip list.
+func (s *Set) SetMaxLevel(newMaxLevel int) {
+ s.skiplist.MaxLevel = newMaxLevel
+}
+
+// GetMaxLevel returns MaxLevel of the underlying skip list.
+func (s *Set) GetMaxLevel() int {
+ return s.skiplist.MaxLevel
+}
diff --git a/vendor/github.com/shabbyrobe/gocovmerge/LICENSE b/vendor/github.com/shabbyrobe/gocovmerge/LICENSE
new file mode 100644
index 00000000..455fb108
--- /dev/null
+++ b/vendor/github.com/shabbyrobe/gocovmerge/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2015, Wade Simmons
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/shabbyrobe/gocovmerge/README.md b/vendor/github.com/shabbyrobe/gocovmerge/README.md
new file mode 100644
index 00000000..7e1f1d72
--- /dev/null
+++ b/vendor/github.com/shabbyrobe/gocovmerge/README.md
@@ -0,0 +1,22 @@
+gocovmerge
+==========
+
+This is a fork of https://github.com/wadey/gocovmerge that converts it into a
+library and introduces idiomatic error handling. The original command's
+functionality is provided by github.com/shabbyrobe/gocovmerge/cmd/gocovmerge.
+
+--
+
+gocovmerge takes the results from multiple `go test -coverprofile` runs and
+merges them into one profile.
+
+usage
+-----
+
+ gocovmerge [coverprofiles...]
+
+gocovmerge takes the source coverprofiles as the arguments (output from
+`go test -coverprofile coverage.out`) and outputs a merged version of the
+files to standard out. You can only merge profiles that were generated from the
+same source code. If there are source lines that overlap or do not merge, the
+process will exit with an error code.
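+
+library usage
+-------------
+
+A minimal sketch of the library form (assumes `files` holds coverprofile
+paths; error handling elided):
+
+    import (
+        "os"
+
+        "github.com/shabbyrobe/gocovmerge"
+        "golang.org/x/tools/cover"
+    )
+
+    var merged []*cover.Profile
+    for _, f := range files {
+        profiles, _ := cover.ParseProfiles(f)
+        for _, p := range profiles {
+            merged = gocovmerge.AddProfile(merged, p)
+        }
+    }
+    _ = gocovmerge.DumpProfiles(merged, os.Stdout)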
diff --git a/vendor/github.com/shabbyrobe/gocovmerge/gocovmerge.go b/vendor/github.com/shabbyrobe/gocovmerge/gocovmerge.go
new file mode 100644
index 00000000..931c2c44
--- /dev/null
+++ b/vendor/github.com/shabbyrobe/gocovmerge/gocovmerge.go
@@ -0,0 +1,103 @@
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile.
+package gocovmerge
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
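+// AddProfile inserts p into profiles, keeping the slice sorted by file
+// name. If a profile for the same file is already present, p is merged
+// into it; note that any error from MergeProfiles is discarded here.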
+func AddProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+ i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+ if i < len(profiles) && profiles[i].FileName == p.FileName {
+ MergeProfiles(profiles[i], p)
+ } else {
+ profiles = append(profiles, nil)
+ copy(profiles[i+1:], profiles[i:])
+ profiles[i] = p
+ }
+ return profiles
+}
+
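+// DumpProfiles writes profiles to out in the `go test -coverprofile`
+// format, taking the mode header from the first profile.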
+func DumpProfiles(profiles []*cover.Profile, out io.Writer) error {
+ if len(profiles) == 0 {
+ return nil
+ }
+ if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+ return err
+ }
+ for _, p := range profiles {
+ for _, b := range p.Blocks {
+ if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
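+// MergeProfiles merges the blocks of merge into into. Both profiles must
+// use the same covermode and must come from the same source code.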
+func MergeProfiles(into *cover.Profile, merge *cover.Profile) error {
+ if into.Mode != merge.Mode {
+ return fmt.Errorf("cannot merge profiles with different modes")
+ }
+ // Since the blocks are sorted, we can keep track of where the last block
+ // was inserted and only look at the blocks after that as targets for merge
+ startIndex := 0
+ for _, b := range merge.Blocks {
+ var err error
+ startIndex, err = mergeProfileBlock(into, b, startIndex)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
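+// mergeProfileBlock merges pb into p.Blocks, searching for its position
+// from startIndex onward, and returns the index to resume the next merge
+// from.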
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+ sortFunc := func(i int) bool {
+ pi := p.Blocks[i+startIndex]
+ return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+ }
+
+	// Skip the binary search if the new block belongs at startIndex, taking
+	// care not to index past the end of p.Blocks.
+	i := 0
+	if startIndex >= len(p.Blocks) || !sortFunc(i) {
+		i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+	}
+
+ i += startIndex
+ if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+ if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+ return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+ }
+ switch p.Mode {
+ case "set":
+ p.Blocks[i].Count |= pb.Count
+ case "count", "atomic":
+ p.Blocks[i].Count += pb.Count
+ default:
+ return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+ }
+
+ } else {
+ if i > 0 {
+ pa := p.Blocks[i-1]
+ if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+ return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ if i < len(p.Blocks)-1 {
+ pa := p.Blocks[i+1]
+ if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+ return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+ }
+ }
+ p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+ copy(p.Blocks[i+1:], p.Blocks[i:])
+ p.Blocks[i] = pb
+ }
+
+ return i + 1, nil
+}