tile38/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go

// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package firehose

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
const opCreateDeliveryStream = "CreateDeliveryStream"
// CreateDeliveryStreamRequest generates a "aws/request.Request" representing the
// client's request for the CreateDeliveryStream operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See CreateDeliveryStream for more information on using the CreateDeliveryStream
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the CreateDeliveryStreamRequest method.
// req, resp := client.CreateDeliveryStreamRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/CreateDeliveryStream
func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) (req *request.Request, output *CreateDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opCreateDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &CreateDeliveryStreamInput{}
	}

	output = &CreateDeliveryStreamOutput{}
	req = c.newRequest(op, input, output)
	return
}
// CreateDeliveryStream API operation for Amazon Kinesis Firehose.
//
// Creates a Kinesis Data Firehose delivery stream.
//
// By default, you can create up to 50 delivery streams per AWS Region.
//
// This is an asynchronous operation that immediately returns. The initial status
// of the delivery stream is CREATING. After the delivery stream is created,
// its status is ACTIVE and it now accepts data. Attempts to send data to a
// delivery stream that is not in the ACTIVE state cause an exception. To check
// the state of a delivery stream, use DescribeDeliveryStream.
//
// A Kinesis Data Firehose delivery stream can be configured to receive records
// directly from providers using PutRecord or PutRecordBatch, or it can be configured
// to use an existing Kinesis stream as its source. To specify a Kinesis data
// stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource,
// and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in
// the KinesisStreamSourceConfiguration parameter.
//
// A delivery stream is configured with a single destination: Amazon S3, Amazon
// ES, Amazon Redshift, or Splunk. You must specify only one of the following
// destination configuration parameters: ExtendedS3DestinationConfiguration,
// S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration,
// or SplunkDestinationConfiguration.
//
// When you specify S3DestinationConfiguration, you can also provide the following
// optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat.
// By default, if no BufferingHints value is provided, Kinesis Data Firehose
// buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied
// first. BufferingHints is a hint, so there are some cases where the service
// cannot adhere to these conditions strictly. For example, record boundaries
// might be such that the size is a little over or under the configured buffering
// size. By default, no encryption is performed. We strongly recommend that
// you enable encryption to ensure secure data storage in Amazon S3.
//
// A few notes about Amazon Redshift as a destination:
//
// * An Amazon Redshift destination requires an S3 bucket as intermediate
// location. Kinesis Data Firehose first delivers data to Amazon S3 and then
// uses COPY syntax to load data into an Amazon Redshift table. This is specified
// in the RedshiftDestinationConfiguration.S3Configuration parameter.
//
// * The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration
// because the Amazon Redshift COPY operation that reads from the S3 bucket
// doesn't support these compression formats.
//
// * We strongly recommend that you use the user name and password you provide
// exclusively with Kinesis Data Firehose, and that the account's permissions
// are restricted to Amazon Redshift INSERT permissions only.
//
// Kinesis Data Firehose assumes the IAM role that is configured as part of
// the destination. The role should allow the Kinesis Data Firehose principal
// to assume the role, and the role should have permissions that allow the service
// to deliver the data. For more information, see Grant Kinesis Data Firehose
// Access to an Amazon S3 Destination (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
// in the Amazon Kinesis Data Firehose Developer Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation CreateDeliveryStream for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// You have already reached the limit for a requested resource.
//
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/CreateDeliveryStream
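//
// The snippet below is an illustrative sketch rather than generated output.
// It shows one way to create a delivery stream with an extended S3 destination;
// the stream name, role ARN, and bucket ARN are hypothetical placeholders, and
// sess is assumed to be an existing *session.Session.
//
//    svc := firehose.New(sess)
//    _, err := svc.CreateDeliveryStream(&firehose.CreateDeliveryStreamInput{
//        DeliveryStreamName: aws.String("example-stream"),
//        DeliveryStreamType: aws.String("DirectPut"),
//        ExtendedS3DestinationConfiguration: &firehose.ExtendedS3DestinationConfiguration{
//            RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-role"),
//            BucketARN: aws.String("arn:aws:s3:::example-bucket"),
//        },
//    })
//    if err != nil {
//        // Inspect the awserr.Error code, e.g. LimitExceededException.
//    }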
func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) {
	req, out := c.CreateDeliveryStreamRequest(input)
	return out, req.Send()
}
// CreateDeliveryStreamWithContext is the same as CreateDeliveryStream with the addition of
// the ability to pass a context and additional request options.
//
// See CreateDeliveryStream for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) CreateDeliveryStreamWithContext(ctx aws.Context, input *CreateDeliveryStreamInput, opts ...request.Option) (*CreateDeliveryStreamOutput, error) {
	req, out := c.CreateDeliveryStreamRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opDeleteDeliveryStream = "DeleteDeliveryStream"
// DeleteDeliveryStreamRequest generates a "aws/request.Request" representing the
// client's request for the DeleteDeliveryStream operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DeleteDeliveryStream for more information on using the DeleteDeliveryStream
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DeleteDeliveryStreamRequest method.
// req, resp := client.DeleteDeliveryStreamRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/DeleteDeliveryStream
func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) (req *request.Request, output *DeleteDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opDeleteDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DeleteDeliveryStreamInput{}
	}

	output = &DeleteDeliveryStreamOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
// DeleteDeliveryStream API operation for Amazon Kinesis Firehose.
//
// Deletes a delivery stream and its data.
//
// You can delete a delivery stream only if it is in ACTIVE or DELETING state,
// and not in the CREATING state. While the deletion request is in process,
// the delivery stream is in the DELETING state.
//
// To check the state of a delivery stream, use DescribeDeliveryStream.
//
// While the delivery stream is in the DELETING state, the service might continue
// to accept the records, but it doesn't make any guarantees with respect to delivering
// the data. Therefore, as a best practice, you should first stop any applications
// that are sending records before deleting a delivery stream.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation DeleteDeliveryStream for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/DeleteDeliveryStream
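//
// A minimal usage sketch (not generated output); the stream name is a
// hypothetical placeholder and svc is an assumed, previously constructed
// *firehose.Firehose client:
//
//    _, err := svc.DeleteDeliveryStream(&firehose.DeleteDeliveryStreamInput{
//        DeliveryStreamName: aws.String("example-stream"),
//    })
//    if err != nil {
//        // e.g. ResourceInUseException or ResourceNotFoundException
//    }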
func (c *Firehose) DeleteDeliveryStream(input *DeleteDeliveryStreamInput) (*DeleteDeliveryStreamOutput, error) {
	req, out := c.DeleteDeliveryStreamRequest(input)
	return out, req.Send()
}
// DeleteDeliveryStreamWithContext is the same as DeleteDeliveryStream with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteDeliveryStream for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) DeleteDeliveryStreamWithContext(ctx aws.Context, input *DeleteDeliveryStreamInput, opts ...request.Option) (*DeleteDeliveryStreamOutput, error) {
	req, out := c.DeleteDeliveryStreamRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opDescribeDeliveryStream = "DescribeDeliveryStream"
// DescribeDeliveryStreamRequest generates a "aws/request.Request" representing the
// client's request for the DescribeDeliveryStream operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DescribeDeliveryStream for more information on using the DescribeDeliveryStream
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the DescribeDeliveryStreamRequest method.
// req, resp := client.DescribeDeliveryStreamRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/DescribeDeliveryStream
func (c *Firehose) DescribeDeliveryStreamRequest(input *DescribeDeliveryStreamInput) (req *request.Request, output *DescribeDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opDescribeDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DescribeDeliveryStreamInput{}
	}

	output = &DescribeDeliveryStreamOutput{}
	req = c.newRequest(op, input, output)
	return
}
// DescribeDeliveryStream API operation for Amazon Kinesis Firehose.
//
// Describes the specified delivery stream and gets the status. For example,
// after your delivery stream is created, call DescribeDeliveryStream to see
// whether the delivery stream is ACTIVE and therefore ready for data to be
// sent to it.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation DescribeDeliveryStream for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/DescribeDeliveryStream
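//
// A hedged sketch (not generated output) of polling until the delivery stream
// becomes ACTIVE; the stream name is a hypothetical placeholder and svc is an
// assumed *firehose.Firehose client:
//
//    for {
//        out, err := svc.DescribeDeliveryStream(&firehose.DescribeDeliveryStreamInput{
//            DeliveryStreamName: aws.String("example-stream"),
//        })
//        if err != nil {
//            break // handle the error
//        }
//        if aws.StringValue(out.DeliveryStreamDescription.DeliveryStreamStatus) == "ACTIVE" {
//            break
//        }
//        time.Sleep(10 * time.Second)
//    }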
func (c *Firehose) DescribeDeliveryStream(input *DescribeDeliveryStreamInput) (*DescribeDeliveryStreamOutput, error) {
	req, out := c.DescribeDeliveryStreamRequest(input)
	return out, req.Send()
}
// DescribeDeliveryStreamWithContext is the same as DescribeDeliveryStream with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeDeliveryStream for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) DescribeDeliveryStreamWithContext(ctx aws.Context, input *DescribeDeliveryStreamInput, opts ...request.Option) (*DescribeDeliveryStreamOutput, error) {
	req, out := c.DescribeDeliveryStreamRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opListDeliveryStreams = "ListDeliveryStreams"
// ListDeliveryStreamsRequest generates a "aws/request.Request" representing the
// client's request for the ListDeliveryStreams operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListDeliveryStreams for more information on using the ListDeliveryStreams
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListDeliveryStreamsRequest method.
// req, resp := client.ListDeliveryStreamsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/ListDeliveryStreams
func (c *Firehose) ListDeliveryStreamsRequest(input *ListDeliveryStreamsInput) (req *request.Request, output *ListDeliveryStreamsOutput) {
	op := &request.Operation{
		Name:       opListDeliveryStreams,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListDeliveryStreamsInput{}
	}

	output = &ListDeliveryStreamsOutput{}
	req = c.newRequest(op, input, output)
	return
}
// ListDeliveryStreams API operation for Amazon Kinesis Firehose.
//
// Lists your delivery streams in alphabetical order of their names.
//
// The number of delivery streams might be too large to return using a single
// call to ListDeliveryStreams. You can limit the number of delivery streams
// returned, using the Limit parameter. To determine whether there are more
// delivery streams to list, check the value of HasMoreDeliveryStreams in the
// output. If there are more delivery streams to list, you can request them
// by calling this operation again and setting the ExclusiveStartDeliveryStreamName
// parameter to the name of the last delivery stream returned in the last call.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation ListDeliveryStreams for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/ListDeliveryStreams
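//
// A pagination sketch (not generated output) that follows HasMoreDeliveryStreams
// and ExclusiveStartDeliveryStreamName as described above; svc is an assumed
// *firehose.Firehose client:
//
//    var names []*string
//    input := &firehose.ListDeliveryStreamsInput{Limit: aws.Int64(10)}
//    for {
//        out, err := svc.ListDeliveryStreams(input)
//        if err != nil {
//            break // handle the error
//        }
//        names = append(names, out.DeliveryStreamNames...)
//        if !aws.BoolValue(out.HasMoreDeliveryStreams) || len(out.DeliveryStreamNames) == 0 {
//            break
//        }
//        input.ExclusiveStartDeliveryStreamName = out.DeliveryStreamNames[len(out.DeliveryStreamNames)-1]
//    }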
func (c *Firehose) ListDeliveryStreams(input *ListDeliveryStreamsInput) (*ListDeliveryStreamsOutput, error) {
	req, out := c.ListDeliveryStreamsRequest(input)
	return out, req.Send()
}
// ListDeliveryStreamsWithContext is the same as ListDeliveryStreams with the addition of
// the ability to pass a context and additional request options.
//
// See ListDeliveryStreams for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) ListDeliveryStreamsWithContext(ctx aws.Context, input *ListDeliveryStreamsInput, opts ...request.Option) (*ListDeliveryStreamsOutput, error) {
	req, out := c.ListDeliveryStreamsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opListTagsForDeliveryStream = "ListTagsForDeliveryStream"
// ListTagsForDeliveryStreamRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsForDeliveryStream operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See ListTagsForDeliveryStream for more information on using the ListTagsForDeliveryStream
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListTagsForDeliveryStreamRequest method.
// req, resp := client.ListTagsForDeliveryStreamRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/ListTagsForDeliveryStream
func (c *Firehose) ListTagsForDeliveryStreamRequest(input *ListTagsForDeliveryStreamInput) (req *request.Request, output *ListTagsForDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opListTagsForDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &ListTagsForDeliveryStreamInput{}
	}

	output = &ListTagsForDeliveryStreamOutput{}
	req = c.newRequest(op, input, output)
	return
}
// ListTagsForDeliveryStream API operation for Amazon Kinesis Firehose.
//
// Lists the tags for the specified delivery stream. This operation has a limit
// of five transactions per second per account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation ListTagsForDeliveryStream for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// You have already reached the limit for a requested resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/ListTagsForDeliveryStream
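//
// A minimal usage sketch (not generated output); the stream name is a
// hypothetical placeholder and svc is an assumed *firehose.Firehose client:
//
//    out, err := svc.ListTagsForDeliveryStream(&firehose.ListTagsForDeliveryStreamInput{
//        DeliveryStreamName: aws.String("example-stream"),
//    })
//    if err == nil {
//        for _, tag := range out.Tags {
//            fmt.Println(aws.StringValue(tag.Key), aws.StringValue(tag.Value))
//        }
//    }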
func (c *Firehose) ListTagsForDeliveryStream(input *ListTagsForDeliveryStreamInput) (*ListTagsForDeliveryStreamOutput, error) {
	req, out := c.ListTagsForDeliveryStreamRequest(input)
	return out, req.Send()
}
// ListTagsForDeliveryStreamWithContext is the same as ListTagsForDeliveryStream with the addition of
// the ability to pass a context and additional request options.
//
// See ListTagsForDeliveryStream for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) ListTagsForDeliveryStreamWithContext(ctx aws.Context, input *ListTagsForDeliveryStreamInput, opts ...request.Option) (*ListTagsForDeliveryStreamOutput, error) {
	req, out := c.ListTagsForDeliveryStreamRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opPutRecord = "PutRecord"
// PutRecordRequest generates a "aws/request.Request" representing the
// client's request for the PutRecord operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See PutRecord for more information on using the PutRecord
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the PutRecordRequest method.
// req, resp := client.PutRecordRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecord
func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) {
	op := &request.Operation{
		Name:       opPutRecord,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutRecordInput{}
	}

	output = &PutRecordOutput{}
	req = c.newRequest(op, input, output)
	return
}
// PutRecord API operation for Amazon Kinesis Firehose.
//
// Writes a single data record into an Amazon Kinesis Data Firehose delivery
// stream. To write multiple data records into a delivery stream, use PutRecordBatch.
// Applications using these operations are referred to as producers.
//
// By default, each delivery stream can take in up to 2,000 transactions per
// second, 5,000 records per second, or 5 MB per second. If you use PutRecord
// and PutRecordBatch, the limits are an aggregate across these two operations
// for each delivery stream. For more information about limits and how to request
// an increase, see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
//
// You must specify the name of the delivery stream and the data record when
// using PutRecord. The data record consists of a data blob that can be up to
// 1,000 KB in size, and any kind of data. For example, it can be a segment
// from a log file, geographic location data, website clickstream data, and
// so on.
//
// Kinesis Data Firehose buffers records before delivering them to the destination.
// To disambiguate the data blobs at the destination, a common solution is to
// use delimiters in the data, such as a newline (\n) or some other character
// unique within the data. This allows the consumer application to parse individual
// data items when reading the data from the destination.
//
// The PutRecord operation returns a RecordId, which is a unique string assigned
// to each record. Producer applications can use this ID for purposes such as
// auditability and investigation.
//
// If the PutRecord operation throws a ServiceUnavailableException, back off
// and retry. If the exception persists, it is possible that the throughput
// limits have been exceeded for the delivery stream.
//
// Data records sent to Kinesis Data Firehose are stored for 24 hours from the
// time they are added to a delivery stream as it tries to send the records
// to the destination. If the destination is unreachable for more than 24 hours,
// the data is no longer available.
//
// Don't concatenate two or more base64 strings to form the data fields of your
// records. Instead, concatenate the raw data, then perform base64 encoding.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation PutRecord for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeServiceUnavailableException "ServiceUnavailableException"
// The service is unavailable. Back off and retry the operation. If you continue
// to see the exception, throughput limits for the delivery stream may have
// been exceeded. For more information about limits and how to request an increase,
// see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecord
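//
// A hedged sketch (not generated output) of writing a single newline-delimited
// record, as described above; the stream name is a hypothetical placeholder and
// svc is an assumed *firehose.Firehose client:
//
//    out, err := svc.PutRecord(&firehose.PutRecordInput{
//        DeliveryStreamName: aws.String("example-stream"),
//        Record:             &firehose.Record{Data: []byte(`{"event":"click"}` + "\n")},
//    })
//    if err != nil {
//        // On ServiceUnavailableException, back off and retry.
//    } else {
//        fmt.Println(aws.StringValue(out.RecordId))
//    }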
func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) {
	req, out := c.PutRecordRequest(input)
	return out, req.Send()
}
// PutRecordWithContext is the same as PutRecord with the addition of
// the ability to pass a context and additional request options.
//
// See PutRecord for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) PutRecordWithContext(ctx aws.Context, input *PutRecordInput, opts ...request.Option) (*PutRecordOutput, error) {
	req, out := c.PutRecordRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opPutRecordBatch = "PutRecordBatch"
// PutRecordBatchRequest generates a "aws/request.Request" representing the
// client's request for the PutRecordBatch operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See PutRecordBatch for more information on using the PutRecordBatch
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the PutRecordBatchRequest method.
// req, resp := client.PutRecordBatchRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecordBatch
func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *request.Request, output *PutRecordBatchOutput) {
	op := &request.Operation{
		Name:       opPutRecordBatch,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &PutRecordBatchInput{}
	}

	output = &PutRecordBatchOutput{}
	req = c.newRequest(op, input, output)
	return
}
// PutRecordBatch API operation for Amazon Kinesis Firehose.
//
// Writes multiple data records into a delivery stream in a single call, which
// can achieve higher throughput per producer than when writing single records.
// To write single data records into a delivery stream, use PutRecord. Applications
// using these operations are referred to as producers.
//
// By default, each delivery stream can take in up to 2,000 transactions per
// second, 5,000 records per second, or 5 MB per second. If you use PutRecord
// and PutRecordBatch, the limits are an aggregate across these two operations
// for each delivery stream. For more information about limits, see Amazon Kinesis
// Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
//
// Each PutRecordBatch request supports up to 500 records. Each record in the
// request can be as large as 1,000 KB (before base64 encoding), up to a limit
// of 4 MB for the entire request. These limits cannot be changed.
//
// You must specify the name of the delivery stream and the data records when
// using PutRecordBatch. Each data record consists of a data blob that can be up to
// 1,000 KB in size, and any kind of data. For example, it could be a segment
// from a log file, geographic location data, website clickstream data, and
// so on.
//
// Kinesis Data Firehose buffers records before delivering them to the destination.
// To disambiguate the data blobs at the destination, a common solution is to
// use delimiters in the data, such as a newline (\n) or some other character
// unique within the data. This allows the consumer application to parse individual
// data items when reading the data from the destination.
//
// The PutRecordBatch response includes a count of failed records, FailedPutCount,
// and an array of responses, RequestResponses. Even if the PutRecordBatch call
// succeeds, the value of FailedPutCount may be greater than 0, indicating that
// there are records for which the operation didn't succeed. Each entry in the
// RequestResponses array provides additional information about the processed
// record. It directly correlates with a record in the request array using the
// same ordering, from the top to the bottom. The response array always includes
// the same number of records as the request array. RequestResponses includes
// both successfully and unsuccessfully processed records. Kinesis Data Firehose
// tries to process all records in each PutRecordBatch request. A single record
// failure does not stop the processing of subsequent records.
//
// A successfully processed record includes a RecordId value, which is unique
// for the record. An unsuccessfully processed record includes ErrorCode and
// ErrorMessage values. ErrorCode reflects the type of error, and is one of
// the following values: ServiceUnavailableException or InternalFailure. ErrorMessage
// provides more detailed information about the error.
//
// If there is an internal server error or a timeout, the write might have completed
// or it might have failed. If FailedPutCount is greater than 0, retry the request,
// resending only those records that might have failed processing. This minimizes
// the possible duplicate records and also reduces the total bytes sent (and
// corresponding charges). We recommend that you handle any duplicates at the
// destination.
//
// If PutRecordBatch throws ServiceUnavailableException, back off and retry.
// If the exception persists, it is possible that the throughput limits have
// been exceeded for the delivery stream.
//
// Data records sent to Kinesis Data Firehose are stored for 24 hours from the
// time they are added to a delivery stream as it attempts to send the records
// to the destination. If the destination is unreachable for more than 24 hours,
// the data is no longer available.
//
// Don't concatenate two or more base64 strings to form the data fields of your
// records. Instead, concatenate the raw data, then perform base64 encoding.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation PutRecordBatch for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeServiceUnavailableException "ServiceUnavailableException"
// The service is unavailable. Back off and retry the operation. If you continue
// to see the exception, throughput limits for the delivery stream may have
// been exceeded. For more information about limits and how to request an increase,
// see Amazon Kinesis Data Firehose Limits (http://docs.aws.amazon.com/firehose/latest/dev/limits.html).
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/PutRecordBatch
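//
// A hedged sketch (not generated output) of a batch write that retries only the
// entries reported as failed, as recommended above; records ([]*firehose.Record)
// and the stream name are assumed to be supplied by the caller:
//
//    out, err := svc.PutRecordBatch(&firehose.PutRecordBatchInput{
//        DeliveryStreamName: aws.String("example-stream"),
//        Records:            records, // at most 500 entries per request
//    })
//    if err == nil && aws.Int64Value(out.FailedPutCount) > 0 {
//        var retry []*firehose.Record
//        for i, entry := range out.RequestResponses {
//            if entry.ErrorCode != nil {
//                retry = append(retry, records[i])
//            }
//        }
//        // Resend retry, ideally with backoff, and handle duplicates downstream.
//    }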
func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOutput, error) {
	req, out := c.PutRecordBatchRequest(input)
	return out, req.Send()
}
// PutRecordBatchWithContext is the same as PutRecordBatch with the addition of
// the ability to pass a context and additional request options.
//
// See PutRecordBatch for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) PutRecordBatchWithContext(ctx aws.Context, input *PutRecordBatchInput, opts ...request.Option) (*PutRecordBatchOutput, error) {
	req, out := c.PutRecordBatchRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opStartDeliveryStreamEncryption = "StartDeliveryStreamEncryption"
// StartDeliveryStreamEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the StartDeliveryStreamEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StartDeliveryStreamEncryption for more information on using the StartDeliveryStreamEncryption
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StartDeliveryStreamEncryptionRequest method.
// req, resp := client.StartDeliveryStreamEncryptionRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/StartDeliveryStreamEncryption
func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStreamEncryptionInput) (req *request.Request, output *StartDeliveryStreamEncryptionOutput) {
	op := &request.Operation{
		Name:       opStartDeliveryStreamEncryption,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StartDeliveryStreamEncryptionInput{}
	}

	output = &StartDeliveryStreamEncryptionOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
// StartDeliveryStreamEncryption API operation for Amazon Kinesis Firehose.
//
// Enables server-side encryption (SSE) for the delivery stream.
//
// This operation is asynchronous. It returns immediately. When you invoke it,
// Kinesis Data Firehose first sets the status of the stream to ENABLING, and
// then to ENABLED. You can continue to read and write data to your stream while
// its status is ENABLING, but the data is not encrypted. It can take up to
// 5 seconds after the encryption status changes to ENABLED before all records
// written to the delivery stream are encrypted. To find out whether a record
// or a batch of records was encrypted, check the response elements PutRecordOutput$Encrypted
// and PutRecordBatchOutput$Encrypted, respectively.
//
// To check the encryption state of a delivery stream, use DescribeDeliveryStream.
//
// You can only enable SSE for a delivery stream that uses DirectPut as its
// source.
//
// The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations
// have a combined limit of 25 calls per delivery stream per 24 hours. For example,
// you reach the limit if you call StartDeliveryStreamEncryption 13 times and
// StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour
// period.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation StartDeliveryStreamEncryption for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// You have already reached the limit for a requested resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/StartDeliveryStreamEncryption
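//
// A minimal usage sketch (not generated output); the stream name is a
// hypothetical placeholder and svc is an assumed *firehose.Firehose client.
// Whether subsequent records were encrypted can be checked via
// PutRecordOutput$Encrypted and PutRecordBatchOutput$Encrypted:
//
//    _, err := svc.StartDeliveryStreamEncryption(&firehose.StartDeliveryStreamEncryptionInput{
//        DeliveryStreamName: aws.String("example-stream"),
//    })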
func (c *Firehose) StartDeliveryStreamEncryption(input *StartDeliveryStreamEncryptionInput) (*StartDeliveryStreamEncryptionOutput, error) {
	req, out := c.StartDeliveryStreamEncryptionRequest(input)
	return out, req.Send()
}
// StartDeliveryStreamEncryptionWithContext is the same as StartDeliveryStreamEncryption with the addition of
// the ability to pass a context and additional request options.
//
// See StartDeliveryStreamEncryption for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) StartDeliveryStreamEncryptionWithContext(ctx aws.Context, input *StartDeliveryStreamEncryptionInput, opts ...request.Option) (*StartDeliveryStreamEncryptionOutput, error) {
	req, out := c.StartDeliveryStreamEncryptionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opStopDeliveryStreamEncryption = "StopDeliveryStreamEncryption"
// StopDeliveryStreamEncryptionRequest generates a "aws/request.Request" representing the
// client's request for the StopDeliveryStreamEncryption operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See StopDeliveryStreamEncryption for more information on using the StopDeliveryStreamEncryption
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the StopDeliveryStreamEncryptionRequest method.
// req, resp := client.StopDeliveryStreamEncryptionRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/StopDeliveryStreamEncryption
func (c *Firehose) StopDeliveryStreamEncryptionRequest(input *StopDeliveryStreamEncryptionInput) (req *request.Request, output *StopDeliveryStreamEncryptionOutput) {
	op := &request.Operation{
		Name:       opStopDeliveryStreamEncryption,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &StopDeliveryStreamEncryptionInput{}
	}

	output = &StopDeliveryStreamEncryptionOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
// StopDeliveryStreamEncryption API operation for Amazon Kinesis Firehose.
//
// Disables server-side encryption (SSE) for the delivery stream.
//
// This operation is asynchronous. It returns immediately. When you invoke it,
// Kinesis Data Firehose first sets the status of the stream to DISABLING, and
// then to DISABLED. You can continue to read and write data to your stream
// while its status is DISABLING. It can take up to 5 seconds after the encryption
// status changes to DISABLED before all records written to the delivery stream
// are no longer subject to encryption. To find out whether a record or a batch
// of records was encrypted, check the response elements PutRecordOutput$Encrypted
// and PutRecordBatchOutput$Encrypted, respectively.
//
// To check the encryption state of a delivery stream, use DescribeDeliveryStream.
//
// The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations
// have a combined limit of 25 calls per delivery stream per 24 hours. For example,
// you reach the limit if you call StartDeliveryStreamEncryption 13 times and
// StopDeliveryStreamEncryption 12 times for the same delivery stream in a 24-hour
// period.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation StopDeliveryStreamEncryption for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// You have already reached the limit for a requested resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/StopDeliveryStreamEncryption
func (c *Firehose) StopDeliveryStreamEncryption(input *StopDeliveryStreamEncryptionInput) (*StopDeliveryStreamEncryptionOutput, error) {
	req, out := c.StopDeliveryStreamEncryptionRequest(input)
	return out, req.Send()
}
// StopDeliveryStreamEncryptionWithContext is the same as StopDeliveryStreamEncryption with the addition of
// the ability to pass a context and additional request options.
//
// See StopDeliveryStreamEncryption for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) StopDeliveryStreamEncryptionWithContext(ctx aws.Context, input *StopDeliveryStreamEncryptionInput, opts ...request.Option) (*StopDeliveryStreamEncryptionOutput, error) {
	req, out := c.StopDeliveryStreamEncryptionRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opTagDeliveryStream = "TagDeliveryStream"
// TagDeliveryStreamRequest generates a "aws/request.Request" representing the
// client's request for the TagDeliveryStream operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See TagDeliveryStream for more information on using the TagDeliveryStream
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the TagDeliveryStreamRequest method.
// req, resp := client.TagDeliveryStreamRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagDeliveryStream
func (c *Firehose) TagDeliveryStreamRequest(input *TagDeliveryStreamInput) (req *request.Request, output *TagDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opTagDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &TagDeliveryStreamInput{}
	}

	output = &TagDeliveryStreamOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
// TagDeliveryStream API operation for Amazon Kinesis Firehose.
//
// Adds or updates tags for the specified delivery stream. A tag is a key-value
// pair that you can define and assign to AWS resources. If you specify a tag
// that already exists, the tag value is replaced with the value that you specify
// in the request. Tags are metadata. For example, you can add friendly names
// and descriptions or other types of information that can help you distinguish
// the delivery stream. For more information about tags, see Using Cost Allocation
// Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
// in the AWS Billing and Cost Management User Guide.
//
// Each delivery stream can have up to 50 tags.
//
// This operation has a limit of five transactions per second per account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation TagDeliveryStream for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// You have already reached the limit for a requested resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/TagDeliveryStream
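//
// A minimal usage sketch (not generated output); the stream name, tag key, and
// tag value are hypothetical placeholders and svc is an assumed
// *firehose.Firehose client:
//
//    _, err := svc.TagDeliveryStream(&firehose.TagDeliveryStreamInput{
//        DeliveryStreamName: aws.String("example-stream"),
//        Tags: []*firehose.Tag{
//            {Key: aws.String("Environment"), Value: aws.String("test")},
//        },
//    })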
func (c *Firehose) TagDeliveryStream(input *TagDeliveryStreamInput) (*TagDeliveryStreamOutput, error) {
	req, out := c.TagDeliveryStreamRequest(input)
	return out, req.Send()
}
// TagDeliveryStreamWithContext is the same as TagDeliveryStream with the addition of
// the ability to pass a context and additional request options.
//
// See TagDeliveryStream for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) TagDeliveryStreamWithContext(ctx aws.Context, input *TagDeliveryStreamInput, opts ...request.Option) (*TagDeliveryStreamOutput, error) {
	req, out := c.TagDeliveryStreamRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opUntagDeliveryStream = "UntagDeliveryStream"
// UntagDeliveryStreamRequest generates a "aws/request.Request" representing the
// client's request for the UntagDeliveryStream operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UntagDeliveryStream for more information on using the UntagDeliveryStream
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UntagDeliveryStreamRequest method.
// req, resp := client.UntagDeliveryStreamRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/UntagDeliveryStream
func (c *Firehose) UntagDeliveryStreamRequest(input *UntagDeliveryStreamInput) (req *request.Request, output *UntagDeliveryStreamOutput) {
	op := &request.Operation{
		Name:       opUntagDeliveryStream,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UntagDeliveryStreamInput{}
	}

	output = &UntagDeliveryStreamOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
// UntagDeliveryStream API operation for Amazon Kinesis Firehose.
//
// Removes tags from the specified delivery stream. Removed tags are deleted,
// and you can't recover them after this operation successfully completes.
//
// If you specify a tag that doesn't exist, the operation ignores it.
//
// This operation has a limit of five transactions per second per account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation UntagDeliveryStream for usage and error information.
//
// Returned Error Codes:
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeLimitExceededException "LimitExceededException"
// You have already reached the limit for a requested resource.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/UntagDeliveryStream
func (c *Firehose) UntagDeliveryStream(input *UntagDeliveryStreamInput) (*UntagDeliveryStreamOutput, error) {
	req, out := c.UntagDeliveryStreamRequest(input)
	return out, req.Send()
}
// UntagDeliveryStreamWithContext is the same as UntagDeliveryStream with the addition of
// the ability to pass a context and additional request options.
//
// See UntagDeliveryStream for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) UntagDeliveryStreamWithContext(ctx aws.Context, input *UntagDeliveryStreamInput, opts ...request.Option) (*UntagDeliveryStreamOutput, error) {
	req, out := c.UntagDeliveryStreamRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
const opUpdateDestination = "UpdateDestination"
// UpdateDestinationRequest generates a "aws/request.Request" representing the
// client's request for the UpdateDestination operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See UpdateDestination for more information on using the UpdateDestination
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the UpdateDestinationRequest method.
// req, resp := client.UpdateDestinationRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/UpdateDestination
func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req *request.Request, output *UpdateDestinationOutput) {
	op := &request.Operation{
		Name:       opUpdateDestination,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &UpdateDestinationInput{}
	}

	output = &UpdateDestinationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}
// UpdateDestination API operation for Amazon Kinesis Firehose.
//
// Updates the specified destination of the specified delivery stream.
//
// Use this operation to change the destination type (for example, to replace
// the Amazon S3 destination with Amazon Redshift) or change the parameters
// associated with a destination (for example, to change the bucket name of
// the Amazon S3 destination). The update might not occur immediately. The target
// delivery stream remains active while the configurations are updated, so data
// writes to the delivery stream can continue during this process. The updated
// configurations are usually effective within a few minutes.
//
// Switching between Amazon ES and other services is not supported. For an Amazon
// ES destination, you can only update to another Amazon ES destination.
//
// If the destination type is the same, Kinesis Data Firehose merges the configuration
// parameters specified with the destination configuration that already exists
// on the delivery stream. If any of the parameters are not specified in the
// call, the existing values are retained. For example, in the Amazon S3 destination,
// if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration
// is maintained on the destination.
//
// If the destination type is not the same, for example, changing the destination
// from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any
// parameters. In this case, all parameters must be specified.
//
// Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions
// and conflicting merges. This is a required field, and the service updates
// the configuration only if the existing configuration has a version ID that
// matches. After the update is applied successfully, the version ID is updated,
// and can be retrieved using DescribeDeliveryStream. Use the new version ID
// to set CurrentDeliveryStreamVersionId in the next call.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Kinesis Firehose's
// API operation UpdateDestination for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidArgumentException "InvalidArgumentException"
// The specified input parameter has a value that is not valid.
//
// * ErrCodeResourceInUseException "ResourceInUseException"
// The resource is already in use and not available for this operation.
//
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
// The specified resource could not be found.
//
// * ErrCodeConcurrentModificationException "ConcurrentModificationException"
// Another modification has already happened. Fetch VersionId again and use
// it to update the destination.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/firehose-2015-08-04/UpdateDestination
func (c *Firehose) UpdateDestination(input *UpdateDestinationInput) (*UpdateDestinationOutput, error) {
req, out := c.UpdateDestinationRequest(input)
return out, req.Send()
}
// UpdateDestinationWithContext is the same as UpdateDestination with the addition of
// the ability to pass a context and additional request options.
//
// See UpdateDestination for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Firehose) UpdateDestinationWithContext(ctx aws.Context, input *UpdateDestinationInput, opts ...request.Option) (*UpdateDestinationOutput, error) {
req, out := c.UpdateDestinationRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
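// exampleUpdateDestinationVersionFlow is an illustrative sketch only; it is
// not part of the generated API. It assumes the caller already holds a
// configured *Firehose client, and it shows how DescribeDeliveryStream
// supplies the version ID and destination ID that UpdateDestination requires.
// The bucket ARN in the update is a made-up placeholder.
func exampleUpdateDestinationVersionFlow(client *Firehose, streamName string) error {
	desc, err := client.DescribeDeliveryStream(&DescribeDeliveryStreamInput{
		DeliveryStreamName: aws.String(streamName),
	})
	if err != nil {
		return err
	}
	d := desc.DeliveryStreamDescription
	if len(d.Destinations) == 0 {
		return fmt.Errorf("delivery stream %q has no destinations", streamName)
	}
	// CurrentDeliveryStreamVersionId must match the service-side version, or
	// the call fails with ConcurrentModificationException.
	_, err = client.UpdateDestination(&UpdateDestinationInput{
		DeliveryStreamName:             aws.String(streamName),
		CurrentDeliveryStreamVersionId: d.VersionId,
		DestinationId:                  d.Destinations[0].DestinationId,
		ExtendedS3DestinationUpdate: &ExtendedS3DestinationUpdate{
			BucketARN: aws.String("arn:aws:s3:::example-destination-bucket"),
		},
	})
	return err
}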
// Describes hints for the buffering to perform before delivering data to the
// destination. These options are treated as hints, and therefore Kinesis Data
// Firehose might choose to use different values when it is optimal.
type BufferingHints struct {
_ struct{} `type:"structure"`
// Buffer incoming data for the specified period of time, in seconds, before
// delivering it to the destination. The default value is 300.
IntervalInSeconds *int64 `min:"60" type:"integer"`
// Buffer incoming data to the specified size, in MBs, before delivering it
// to the destination. The default value is 5.
//
// We recommend setting this parameter to a value greater than the amount of
// data you typically ingest into the delivery stream in 10 seconds. For example,
// if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
SizeInMBs *int64 `min:"1" type:"integer"`
}
// String returns the string representation
func (s BufferingHints) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s BufferingHints) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *BufferingHints) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "BufferingHints"}
if s.IntervalInSeconds != nil && *s.IntervalInSeconds < 60 {
invalidParams.Add(request.NewErrParamMinValue("IntervalInSeconds", 60))
}
if s.SizeInMBs != nil && *s.SizeInMBs < 1 {
invalidParams.Add(request.NewErrParamMinValue("SizeInMBs", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIntervalInSeconds sets the IntervalInSeconds field's value.
func (s *BufferingHints) SetIntervalInSeconds(v int64) *BufferingHints {
s.IntervalInSeconds = &v
return s
}
// SetSizeInMBs sets the SizeInMBs field's value.
func (s *BufferingHints) SetSizeInMBs(v int64) *BufferingHints {
s.SizeInMBs = &v
return s
}
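// exampleBufferingHints is an illustrative sketch only, not generated code.
// Following the sizing guidance above, a stream ingesting roughly 1 MB/sec
// would set SizeInMBs to at least 10; the 900-second interval is an arbitrary
// example value.
func exampleBufferingHints() *BufferingHints {
	// Delivery is triggered by whichever hint is satisfied first.
	return (&BufferingHints{}).
		SetSizeInMBs(10).
		SetIntervalInSeconds(900)
}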
// Describes the Amazon CloudWatch logging options for your delivery stream.
type CloudWatchLoggingOptions struct {
_ struct{} `type:"structure"`
// Enables or disables CloudWatch logging.
Enabled *bool `type:"boolean"`
// The CloudWatch group name for logging. This value is required if CloudWatch
// logging is enabled.
LogGroupName *string `type:"string"`
// The CloudWatch log stream name for logging. This value is required if CloudWatch
// logging is enabled.
LogStreamName *string `type:"string"`
}
// String returns the string representation
func (s CloudWatchLoggingOptions) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CloudWatchLoggingOptions) GoString() string {
return s.String()
}
// SetEnabled sets the Enabled field's value.
func (s *CloudWatchLoggingOptions) SetEnabled(v bool) *CloudWatchLoggingOptions {
s.Enabled = &v
return s
}
// SetLogGroupName sets the LogGroupName field's value.
func (s *CloudWatchLoggingOptions) SetLogGroupName(v string) *CloudWatchLoggingOptions {
s.LogGroupName = &v
return s
}
// SetLogStreamName sets the LogStreamName field's value.
func (s *CloudWatchLoggingOptions) SetLogStreamName(v string) *CloudWatchLoggingOptions {
s.LogStreamName = &v
return s
}
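// exampleCloudWatchLoggingOptions is an illustrative sketch only. The log
// group and log stream names are placeholders; both are required whenever
// Enabled is true, as noted above.
func exampleCloudWatchLoggingOptions() *CloudWatchLoggingOptions {
	return (&CloudWatchLoggingOptions{}).
		SetEnabled(true).
		SetLogGroupName("/aws/kinesisfirehose/example-delivery-stream").
		SetLogStreamName("S3Delivery")
}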
// Describes a COPY command for Amazon Redshift.
type CopyCommand struct {
_ struct{} `type:"structure"`
// Optional parameters to use with the Amazon Redshift COPY command. For more
// information, see the "Optional Parameters" section of Amazon Redshift COPY
// command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some
// possible examples that would apply to Kinesis Data Firehose are as follows:
//
// delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and
// compressed using lzop.
//
// delimiter '|' - fields are delimited with "|" (this is the default delimiter).
//
// delimiter '|' escape - the delimiter should be escaped.
//
// fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6'
// - fields are fixed width in the source, with each width specified after every
// column in the table.
//
// JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the path
// specified is the format of the data.
//
// For more examples, see Amazon Redshift COPY command examples (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html).
CopyOptions *string `type:"string"`
// A comma-separated list of column names.
DataTableColumns *string `type:"string"`
// The name of the target table. The table must already exist in the database.
//
// DataTableName is a required field
DataTableName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s CopyCommand) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CopyCommand) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CopyCommand) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CopyCommand"}
if s.DataTableName == nil {
invalidParams.Add(request.NewErrParamRequired("DataTableName"))
}
if s.DataTableName != nil && len(*s.DataTableName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DataTableName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCopyOptions sets the CopyOptions field's value.
func (s *CopyCommand) SetCopyOptions(v string) *CopyCommand {
s.CopyOptions = &v
return s
}
// SetDataTableColumns sets the DataTableColumns field's value.
func (s *CopyCommand) SetDataTableColumns(v string) *CopyCommand {
s.DataTableColumns = &v
return s
}
// SetDataTableName sets the DataTableName field's value.
func (s *CopyCommand) SetDataTableName(v string) *CopyCommand {
s.DataTableName = &v
return s
}
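// exampleCopyCommand is an illustrative sketch only. It uses the pipe-delimited
// variant from the CopyOptions examples above; the table and column names are
// placeholders.
func exampleCopyCommand() *CopyCommand {
	return (&CopyCommand{}).
		SetDataTableName("firehose_events").
		SetDataTableColumns("event_time,event_type,payload").
		SetCopyOptions("delimiter '|' escape")
}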
type CreateDeliveryStreamInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream. This name must be unique per AWS account
// in the same AWS Region. If the delivery streams are in different accounts
// or different Regions, you can have multiple delivery streams with the same
// name.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// The delivery stream type. This parameter can be one of the following values:
//
// * DirectPut: Provider applications access the delivery stream directly.
//
// * KinesisStreamAsSource: The delivery stream uses a Kinesis data stream
// as a source.
DeliveryStreamType *string `type:"string" enum:"DeliveryStreamType"`
// The destination in Amazon ES. You can specify only one destination.
ElasticsearchDestinationConfiguration *ElasticsearchDestinationConfiguration `type:"structure"`
// The destination in Amazon S3. You can specify only one destination.
ExtendedS3DestinationConfiguration *ExtendedS3DestinationConfiguration `type:"structure"`
// When a Kinesis data stream is used as the source for the delivery stream,
// a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon
// Resource Name (ARN) and the role ARN for the source stream.
KinesisStreamSourceConfiguration *KinesisStreamSourceConfiguration `type:"structure"`
// The destination in Amazon Redshift. You can specify only one destination.
RedshiftDestinationConfiguration *RedshiftDestinationConfiguration `type:"structure"`
// [Deprecated] The destination in Amazon S3. You can specify only one destination.
//
// Deprecated: S3DestinationConfiguration has been deprecated
S3DestinationConfiguration *S3DestinationConfiguration `deprecated:"true" type:"structure"`
// The destination in Splunk. You can specify only one destination.
SplunkDestinationConfiguration *SplunkDestinationConfiguration `type:"structure"`
// A set of tags to assign to the delivery stream. A tag is a key-value pair
// that you can define and assign to AWS resources. Tags are metadata. For example,
// you can add friendly names and descriptions or other types of information
// that can help you distinguish the delivery stream. For more information about
// tags, see Using Cost Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
// in the AWS Billing and Cost Management User Guide.
//
// You can specify up to 50 tags when creating a delivery stream.
Tags []*Tag `min:"1" type:"list"`
}
// String returns the string representation
func (s CreateDeliveryStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDeliveryStreamInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateDeliveryStreamInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateDeliveryStreamInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.Tags != nil && len(s.Tags) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
}
if s.ElasticsearchDestinationConfiguration != nil {
if err := s.ElasticsearchDestinationConfiguration.Validate(); err != nil {
invalidParams.AddNested("ElasticsearchDestinationConfiguration", err.(request.ErrInvalidParams))
}
}
if s.ExtendedS3DestinationConfiguration != nil {
if err := s.ExtendedS3DestinationConfiguration.Validate(); err != nil {
invalidParams.AddNested("ExtendedS3DestinationConfiguration", err.(request.ErrInvalidParams))
}
}
if s.KinesisStreamSourceConfiguration != nil {
if err := s.KinesisStreamSourceConfiguration.Validate(); err != nil {
invalidParams.AddNested("KinesisStreamSourceConfiguration", err.(request.ErrInvalidParams))
}
}
if s.RedshiftDestinationConfiguration != nil {
if err := s.RedshiftDestinationConfiguration.Validate(); err != nil {
invalidParams.AddNested("RedshiftDestinationConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3DestinationConfiguration != nil {
if err := s.S3DestinationConfiguration.Validate(); err != nil {
invalidParams.AddNested("S3DestinationConfiguration", err.(request.ErrInvalidParams))
}
}
if s.SplunkDestinationConfiguration != nil {
if err := s.SplunkDestinationConfiguration.Validate(); err != nil {
invalidParams.AddNested("SplunkDestinationConfiguration", err.(request.ErrInvalidParams))
}
}
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *CreateDeliveryStreamInput) SetDeliveryStreamName(v string) *CreateDeliveryStreamInput {
s.DeliveryStreamName = &v
return s
}
// SetDeliveryStreamType sets the DeliveryStreamType field's value.
func (s *CreateDeliveryStreamInput) SetDeliveryStreamType(v string) *CreateDeliveryStreamInput {
s.DeliveryStreamType = &v
return s
}
// SetElasticsearchDestinationConfiguration sets the ElasticsearchDestinationConfiguration field's value.
func (s *CreateDeliveryStreamInput) SetElasticsearchDestinationConfiguration(v *ElasticsearchDestinationConfiguration) *CreateDeliveryStreamInput {
s.ElasticsearchDestinationConfiguration = v
return s
}
// SetExtendedS3DestinationConfiguration sets the ExtendedS3DestinationConfiguration field's value.
func (s *CreateDeliveryStreamInput) SetExtendedS3DestinationConfiguration(v *ExtendedS3DestinationConfiguration) *CreateDeliveryStreamInput {
s.ExtendedS3DestinationConfiguration = v
return s
}
// SetKinesisStreamSourceConfiguration sets the KinesisStreamSourceConfiguration field's value.
func (s *CreateDeliveryStreamInput) SetKinesisStreamSourceConfiguration(v *KinesisStreamSourceConfiguration) *CreateDeliveryStreamInput {
s.KinesisStreamSourceConfiguration = v
return s
}
// SetRedshiftDestinationConfiguration sets the RedshiftDestinationConfiguration field's value.
func (s *CreateDeliveryStreamInput) SetRedshiftDestinationConfiguration(v *RedshiftDestinationConfiguration) *CreateDeliveryStreamInput {
s.RedshiftDestinationConfiguration = v
return s
}
// SetS3DestinationConfiguration sets the S3DestinationConfiguration field's value.
func (s *CreateDeliveryStreamInput) SetS3DestinationConfiguration(v *S3DestinationConfiguration) *CreateDeliveryStreamInput {
s.S3DestinationConfiguration = v
return s
}
// SetSplunkDestinationConfiguration sets the SplunkDestinationConfiguration field's value.
func (s *CreateDeliveryStreamInput) SetSplunkDestinationConfiguration(v *SplunkDestinationConfiguration) *CreateDeliveryStreamInput {
s.SplunkDestinationConfiguration = v
return s
}
// SetTags sets the Tags field's value.
func (s *CreateDeliveryStreamInput) SetTags(v []*Tag) *CreateDeliveryStreamInput {
s.Tags = v
return s
}
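// exampleCreateDeliveryStreamInput is an illustrative sketch only. All ARNs
// and names are placeholders. It follows the DeliveryStreamType description
// above: a KinesisStreamAsSource stream must supply a
// KinesisStreamSourceConfiguration, and exactly one destination configuration
// is set (here, extended S3).
func exampleCreateDeliveryStreamInput() *CreateDeliveryStreamInput {
	return (&CreateDeliveryStreamInput{}).
		SetDeliveryStreamName("example-delivery-stream").
		SetDeliveryStreamType("KinesisStreamAsSource").
		SetKinesisStreamSourceConfiguration((&KinesisStreamSourceConfiguration{}).
			SetKinesisStreamARN("arn:aws:kinesis:us-east-1:111122223333:stream/example-source").
			SetRoleARN("arn:aws:iam::111122223333:role/example-firehose-role")).
		SetExtendedS3DestinationConfiguration((&ExtendedS3DestinationConfiguration{}).
			SetBucketARN("arn:aws:s3:::example-destination-bucket").
			SetRoleARN("arn:aws:iam::111122223333:role/example-firehose-role"))
}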
type CreateDeliveryStreamOutput struct {
_ struct{} `type:"structure"`
// The ARN of the delivery stream.
DeliveryStreamARN *string `min:"1" type:"string"`
}
// String returns the string representation
func (s CreateDeliveryStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateDeliveryStreamOutput) GoString() string {
return s.String()
}
// SetDeliveryStreamARN sets the DeliveryStreamARN field's value.
func (s *CreateDeliveryStreamOutput) SetDeliveryStreamARN(v string) *CreateDeliveryStreamOutput {
s.DeliveryStreamARN = &v
return s
}
// Specifies that you want Kinesis Data Firehose to convert data from the JSON
// format to the Parquet or ORC format before writing it to Amazon S3. Kinesis
// Data Firehose uses the serializer and deserializer that you specify, in addition
// to the column information from the AWS Glue table, to deserialize your input
// data from JSON and then serialize it to the Parquet or ORC format. For more
// information, see Kinesis Data Firehose Record Format Conversion (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html).
type DataFormatConversionConfiguration struct {
_ struct{} `type:"structure"`
// Defaults to true. Set it to false if you want to disable format conversion
// while preserving the configuration details.
Enabled *bool `type:"boolean"`
// Specifies the deserializer that you want Kinesis Data Firehose to use to
// convert the format of your data from JSON.
InputFormatConfiguration *InputFormatConfiguration `type:"structure"`
// Specifies the serializer that you want Kinesis Data Firehose to use to convert
// the format of your data to the Parquet or ORC format.
OutputFormatConfiguration *OutputFormatConfiguration `type:"structure"`
// Specifies the AWS Glue Data Catalog table that contains the column information.
SchemaConfiguration *SchemaConfiguration `type:"structure"`
}
// String returns the string representation
func (s DataFormatConversionConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DataFormatConversionConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DataFormatConversionConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DataFormatConversionConfiguration"}
if s.OutputFormatConfiguration != nil {
if err := s.OutputFormatConfiguration.Validate(); err != nil {
invalidParams.AddNested("OutputFormatConfiguration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetEnabled sets the Enabled field's value.
func (s *DataFormatConversionConfiguration) SetEnabled(v bool) *DataFormatConversionConfiguration {
s.Enabled = &v
return s
}
// SetInputFormatConfiguration sets the InputFormatConfiguration field's value.
func (s *DataFormatConversionConfiguration) SetInputFormatConfiguration(v *InputFormatConfiguration) *DataFormatConversionConfiguration {
s.InputFormatConfiguration = v
return s
}
// SetOutputFormatConfiguration sets the OutputFormatConfiguration field's value.
func (s *DataFormatConversionConfiguration) SetOutputFormatConfiguration(v *OutputFormatConfiguration) *DataFormatConversionConfiguration {
s.OutputFormatConfiguration = v
return s
}
// SetSchemaConfiguration sets the SchemaConfiguration field's value.
func (s *DataFormatConversionConfiguration) SetSchemaConfiguration(v *SchemaConfiguration) *DataFormatConversionConfiguration {
s.SchemaConfiguration = v
return s
}
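// exampleDataFormatConversion is an illustrative sketch only. It wires together
// the pieces described above: the OpenX JSON deserializer on input, the Parquet
// serializer on output, and an AWS Glue table (placeholder names) for the column
// schema. InputFormatConfiguration, OutputFormatConfiguration, Serializer,
// ParquetSerDe, and SchemaConfiguration are defined elsewhere in this package.
func exampleDataFormatConversion() *DataFormatConversionConfiguration {
	return (&DataFormatConversionConfiguration{}).
		SetEnabled(true).
		SetInputFormatConfiguration((&InputFormatConfiguration{}).
			SetDeserializer((&Deserializer{}).
				SetOpenXJsonSerDe(&OpenXJsonSerDe{}))).
		SetOutputFormatConfiguration((&OutputFormatConfiguration{}).
			SetSerializer((&Serializer{}).
				SetParquetSerDe(&ParquetSerDe{}))).
		SetSchemaConfiguration((&SchemaConfiguration{}).
			SetDatabaseName("example_glue_db").
			SetTableName("example_glue_table").
			SetRoleARN("arn:aws:iam::111122223333:role/example-firehose-role"))
}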
type DeleteDeliveryStreamInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteDeliveryStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteDeliveryStreamInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteDeliveryStreamInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteDeliveryStreamInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *DeleteDeliveryStreamInput) SetDeliveryStreamName(v string) *DeleteDeliveryStreamInput {
s.DeliveryStreamName = &v
return s
}
type DeleteDeliveryStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteDeliveryStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteDeliveryStreamOutput) GoString() string {
return s.String()
}
// Contains information about a delivery stream.
type DeliveryStreamDescription struct {
_ struct{} `type:"structure"`
// The date and time that the delivery stream was created.
CreateTimestamp *time.Time `type:"timestamp"`
// The Amazon Resource Name (ARN) of the delivery stream. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// DeliveryStreamARN is a required field
DeliveryStreamARN *string `min:"1" type:"string" required:"true"`
// Indicates the server-side encryption (SSE) status for the delivery stream.
DeliveryStreamEncryptionConfiguration *DeliveryStreamEncryptionConfiguration `type:"structure"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// The status of the delivery stream.
//
// DeliveryStreamStatus is a required field
DeliveryStreamStatus *string `type:"string" required:"true" enum:"DeliveryStreamStatus"`
// The delivery stream type. This can be one of the following values:
//
// * DirectPut: Provider applications access the delivery stream directly.
//
// * KinesisStreamAsSource: The delivery stream uses a Kinesis data stream
// as a source.
//
// DeliveryStreamType is a required field
DeliveryStreamType *string `type:"string" required:"true" enum:"DeliveryStreamType"`
// The destinations.
//
// Destinations is a required field
Destinations []*DestinationDescription `type:"list" required:"true"`
// Indicates whether there are more destinations available to list.
//
// HasMoreDestinations is a required field
HasMoreDestinations *bool `type:"boolean" required:"true"`
// The date and time that the delivery stream was last updated.
LastUpdateTimestamp *time.Time `type:"timestamp"`
// If the DeliveryStreamType parameter is KinesisStreamAsSource, a SourceDescription
// object describing the source Kinesis data stream.
Source *SourceDescription `type:"structure"`
// Each time the destination is updated for a delivery stream, the version ID
// is changed, and the current version ID is required when updating the destination.
// This is so that the service knows it is applying the changes to the correct
// version of the delivery stream.
//
// VersionId is a required field
VersionId *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s DeliveryStreamDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeliveryStreamDescription) GoString() string {
return s.String()
}
// SetCreateTimestamp sets the CreateTimestamp field's value.
func (s *DeliveryStreamDescription) SetCreateTimestamp(v time.Time) *DeliveryStreamDescription {
s.CreateTimestamp = &v
return s
}
// SetDeliveryStreamARN sets the DeliveryStreamARN field's value.
func (s *DeliveryStreamDescription) SetDeliveryStreamARN(v string) *DeliveryStreamDescription {
s.DeliveryStreamARN = &v
return s
}
// SetDeliveryStreamEncryptionConfiguration sets the DeliveryStreamEncryptionConfiguration field's value.
func (s *DeliveryStreamDescription) SetDeliveryStreamEncryptionConfiguration(v *DeliveryStreamEncryptionConfiguration) *DeliveryStreamDescription {
s.DeliveryStreamEncryptionConfiguration = v
return s
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *DeliveryStreamDescription) SetDeliveryStreamName(v string) *DeliveryStreamDescription {
s.DeliveryStreamName = &v
return s
}
// SetDeliveryStreamStatus sets the DeliveryStreamStatus field's value.
func (s *DeliveryStreamDescription) SetDeliveryStreamStatus(v string) *DeliveryStreamDescription {
s.DeliveryStreamStatus = &v
return s
}
// SetDeliveryStreamType sets the DeliveryStreamType field's value.
func (s *DeliveryStreamDescription) SetDeliveryStreamType(v string) *DeliveryStreamDescription {
s.DeliveryStreamType = &v
return s
}
// SetDestinations sets the Destinations field's value.
func (s *DeliveryStreamDescription) SetDestinations(v []*DestinationDescription) *DeliveryStreamDescription {
s.Destinations = v
return s
}
// SetHasMoreDestinations sets the HasMoreDestinations field's value.
func (s *DeliveryStreamDescription) SetHasMoreDestinations(v bool) *DeliveryStreamDescription {
s.HasMoreDestinations = &v
return s
}
// SetLastUpdateTimestamp sets the LastUpdateTimestamp field's value.
func (s *DeliveryStreamDescription) SetLastUpdateTimestamp(v time.Time) *DeliveryStreamDescription {
s.LastUpdateTimestamp = &v
return s
}
// SetSource sets the Source field's value.
func (s *DeliveryStreamDescription) SetSource(v *SourceDescription) *DeliveryStreamDescription {
s.Source = v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *DeliveryStreamDescription) SetVersionId(v string) *DeliveryStreamDescription {
s.VersionId = &v
return s
}
// Indicates the server-side encryption (SSE) status for the delivery stream.
type DeliveryStreamEncryptionConfiguration struct {
_ struct{} `type:"structure"`
// For a full description of the different values of this status, see StartDeliveryStreamEncryption
// and StopDeliveryStreamEncryption.
Status *string `type:"string" enum:"DeliveryStreamEncryptionStatus"`
}
// String returns the string representation
func (s DeliveryStreamEncryptionConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeliveryStreamEncryptionConfiguration) GoString() string {
return s.String()
}
// SetStatus sets the Status field's value.
func (s *DeliveryStreamEncryptionConfiguration) SetStatus(v string) *DeliveryStreamEncryptionConfiguration {
s.Status = &v
return s
}
type DescribeDeliveryStreamInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// The ID of the destination from which to start returning destination information.
// Kinesis Data Firehose supports one destination per delivery stream.
ExclusiveStartDestinationId *string `min:"1" type:"string"`
// The limit on the number of destinations to return. You can have one destination
// per delivery stream.
Limit *int64 `min:"1" type:"integer"`
}
// String returns the string representation
func (s DescribeDeliveryStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeDeliveryStreamInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeDeliveryStreamInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeDeliveryStreamInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.ExclusiveStartDestinationId != nil && len(*s.ExclusiveStartDestinationId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartDestinationId", 1))
}
if s.Limit != nil && *s.Limit < 1 {
invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *DescribeDeliveryStreamInput) SetDeliveryStreamName(v string) *DescribeDeliveryStreamInput {
s.DeliveryStreamName = &v
return s
}
// SetExclusiveStartDestinationId sets the ExclusiveStartDestinationId field's value.
func (s *DescribeDeliveryStreamInput) SetExclusiveStartDestinationId(v string) *DescribeDeliveryStreamInput {
s.ExclusiveStartDestinationId = &v
return s
}
// SetLimit sets the Limit field's value.
func (s *DescribeDeliveryStreamInput) SetLimit(v int64) *DescribeDeliveryStreamInput {
s.Limit = &v
return s
}
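// exampleDescribeDeliveryStreamInput is an illustrative sketch only; the stream
// name is a placeholder. Because a delivery stream currently has a single
// destination, Limit and ExclusiveStartDestinationId can normally be left unset.
func exampleDescribeDeliveryStreamInput() *DescribeDeliveryStreamInput {
	return (&DescribeDeliveryStreamInput{}).
		SetDeliveryStreamName("example-delivery-stream")
}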
type DescribeDeliveryStreamOutput struct {
_ struct{} `type:"structure"`
// Information about the delivery stream.
//
// DeliveryStreamDescription is a required field
DeliveryStreamDescription *DeliveryStreamDescription `type:"structure" required:"true"`
}
// String returns the string representation
func (s DescribeDeliveryStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeDeliveryStreamOutput) GoString() string {
return s.String()
}
// SetDeliveryStreamDescription sets the DeliveryStreamDescription field's value.
func (s *DescribeDeliveryStreamOutput) SetDeliveryStreamDescription(v *DeliveryStreamDescription) *DescribeDeliveryStreamOutput {
s.DeliveryStreamDescription = v
return s
}
// The deserializer you want Kinesis Data Firehose to use for converting the
// input data from JSON. Kinesis Data Firehose then serializes the data to its
// final format using the Serializer. Kinesis Data Firehose supports two types
// of deserializers: the Apache Hive JSON SerDe (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON)
// and the OpenX JSON SerDe (https://github.com/rcongiu/Hive-JSON-Serde).
type Deserializer struct {
_ struct{} `type:"structure"`
// The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing
// data, which means converting it from the JSON format in preparation for serializing
// it to the Parquet or ORC format. This is one of two deserializers you can
// choose, depending on which one offers the functionality you need. The other
// option is the OpenX SerDe.
HiveJsonSerDe *HiveJsonSerDe `type:"structure"`
// The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which
// means converting it from the JSON format in preparation for serializing it
// to the Parquet or ORC format. This is one of two deserializers you can choose,
// depending on which one offers the functionality you need. The other option
// is the native Hive / HCatalog JsonSerDe.
OpenXJsonSerDe *OpenXJsonSerDe `type:"structure"`
}
// String returns the string representation
func (s Deserializer) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Deserializer) GoString() string {
return s.String()
}
// SetHiveJsonSerDe sets the HiveJsonSerDe field's value.
func (s *Deserializer) SetHiveJsonSerDe(v *HiveJsonSerDe) *Deserializer {
s.HiveJsonSerDe = v
return s
}
// SetOpenXJsonSerDe sets the OpenXJsonSerDe field's value.
func (s *Deserializer) SetOpenXJsonSerDe(v *OpenXJsonSerDe) *Deserializer {
s.OpenXJsonSerDe = v
return s
}
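// exampleHiveDeserializer is an illustrative sketch only. It selects the native
// Hive/HCatalog JsonSerDe; the alternative, shown in exampleDataFormatConversion
// above, is the OpenX SerDe. Exactly one of the two should be set.
func exampleHiveDeserializer() *Deserializer {
	return (&Deserializer{}).SetHiveJsonSerDe(&HiveJsonSerDe{})
}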
// Describes the destination for a delivery stream.
type DestinationDescription struct {
_ struct{} `type:"structure"`
// The ID of the destination.
//
// DestinationId is a required field
DestinationId *string `min:"1" type:"string" required:"true"`
// The destination in Amazon ES.
ElasticsearchDestinationDescription *ElasticsearchDestinationDescription `type:"structure"`
// The destination in Amazon S3.
ExtendedS3DestinationDescription *ExtendedS3DestinationDescription `type:"structure"`
// The destination in Amazon Redshift.
RedshiftDestinationDescription *RedshiftDestinationDescription `type:"structure"`
// [Deprecated] The destination in Amazon S3.
S3DestinationDescription *S3DestinationDescription `type:"structure"`
// The destination in Splunk.
SplunkDestinationDescription *SplunkDestinationDescription `type:"structure"`
}
// String returns the string representation
func (s DestinationDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DestinationDescription) GoString() string {
return s.String()
}
// SetDestinationId sets the DestinationId field's value.
func (s *DestinationDescription) SetDestinationId(v string) *DestinationDescription {
s.DestinationId = &v
return s
}
// SetElasticsearchDestinationDescription sets the ElasticsearchDestinationDescription field's value.
func (s *DestinationDescription) SetElasticsearchDestinationDescription(v *ElasticsearchDestinationDescription) *DestinationDescription {
s.ElasticsearchDestinationDescription = v
return s
}
// SetExtendedS3DestinationDescription sets the ExtendedS3DestinationDescription field's value.
func (s *DestinationDescription) SetExtendedS3DestinationDescription(v *ExtendedS3DestinationDescription) *DestinationDescription {
s.ExtendedS3DestinationDescription = v
return s
}
// SetRedshiftDestinationDescription sets the RedshiftDestinationDescription field's value.
func (s *DestinationDescription) SetRedshiftDestinationDescription(v *RedshiftDestinationDescription) *DestinationDescription {
s.RedshiftDestinationDescription = v
return s
}
// SetS3DestinationDescription sets the S3DestinationDescription field's value.
func (s *DestinationDescription) SetS3DestinationDescription(v *S3DestinationDescription) *DestinationDescription {
s.S3DestinationDescription = v
return s
}
// SetSplunkDestinationDescription sets the SplunkDestinationDescription field's value.
func (s *DestinationDescription) SetSplunkDestinationDescription(v *SplunkDestinationDescription) *DestinationDescription {
s.SplunkDestinationDescription = v
return s
}
// Describes the buffering to perform before delivering data to the Amazon ES
// destination.
type ElasticsearchBufferingHints struct {
_ struct{} `type:"structure"`
// Buffer incoming data for the specified period of time, in seconds, before
// delivering it to the destination. The default value is 300 (5 minutes).
IntervalInSeconds *int64 `min:"60" type:"integer"`
// Buffer incoming data to the specified size, in MBs, before delivering it
// to the destination. The default value is 5.
//
// We recommend setting this parameter to a value greater than the amount of
// data you typically ingest into the delivery stream in 10 seconds. For example,
// if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.
SizeInMBs *int64 `min:"1" type:"integer"`
}
// String returns the string representation
func (s ElasticsearchBufferingHints) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ElasticsearchBufferingHints) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ElasticsearchBufferingHints) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ElasticsearchBufferingHints"}
if s.IntervalInSeconds != nil && *s.IntervalInSeconds < 60 {
invalidParams.Add(request.NewErrParamMinValue("IntervalInSeconds", 60))
}
if s.SizeInMBs != nil && *s.SizeInMBs < 1 {
invalidParams.Add(request.NewErrParamMinValue("SizeInMBs", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetIntervalInSeconds sets the IntervalInSeconds field's value.
func (s *ElasticsearchBufferingHints) SetIntervalInSeconds(v int64) *ElasticsearchBufferingHints {
s.IntervalInSeconds = &v
return s
}
// SetSizeInMBs sets the SizeInMBs field's value.
func (s *ElasticsearchBufferingHints) SetSizeInMBs(v int64) *ElasticsearchBufferingHints {
s.SizeInMBs = &v
return s
}
// Describes the configuration of a destination in Amazon ES.
type ElasticsearchDestinationConfiguration struct {
_ struct{} `type:"structure"`
// The buffering options. If no value is specified, the default values for ElasticsearchBufferingHints
// are used.
BufferingHints *ElasticsearchBufferingHints `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain,
// DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after
// assuming the role specified in RoleARN. For more information, see Amazon
// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// DomainARN is a required field
DomainARN *string `min:"1" type:"string" required:"true"`
// The Elasticsearch index name.
//
// IndexName is a required field
IndexName *string `min:"1" type:"string" required:"true"`
// The Elasticsearch index rotation period. Index rotation appends a timestamp
// to the IndexName to facilitate the expiration of old data. For more information,
// see Index Rotation for the Amazon ES Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation).
// The default value is OneDay.
IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver documents
// to Amazon ES. The default value is 300 (5 minutes).
RetryOptions *ElasticsearchRetryOptions `type:"structure"`
// The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data
// Firehose for calling the Amazon ES Configuration API and for indexing documents.
// For more information, see Grant Kinesis Data Firehose Access to an Amazon
// S3 Destination (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
// and Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
// Defines how documents should be delivered to Amazon S3. When it is set to
// FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could
// not be indexed to the configured Amazon S3 destination, with elasticsearch-failed/
// appended to the key prefix. When set to AllDocuments, Kinesis Data Firehose
// delivers all incoming records to Amazon S3, and also writes failed documents
// with elasticsearch-failed/ appended to the prefix. For more information,
// see Amazon S3 Backup for the Amazon ES Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup).
// Default value is FailedDocumentsOnly.
S3BackupMode *string `type:"string" enum:"ElasticsearchS3BackupMode"`
// The configuration for the backup Amazon S3 location.
//
// S3Configuration is a required field
S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"`
// The Elasticsearch type name. For Elasticsearch 6.x, there can be only one
// type per index. If you try to specify a new type for an existing index that
// already has another type, Kinesis Data Firehose returns an error during run
// time.
//
// TypeName is a required field
TypeName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s ElasticsearchDestinationConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ElasticsearchDestinationConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ElasticsearchDestinationConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDestinationConfiguration"}
if s.DomainARN == nil {
invalidParams.Add(request.NewErrParamRequired("DomainARN"))
}
if s.DomainARN != nil && len(*s.DomainARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DomainARN", 1))
}
if s.IndexName == nil {
invalidParams.Add(request.NewErrParamRequired("IndexName"))
}
if s.IndexName != nil && len(*s.IndexName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("IndexName", 1))
}
if s.RoleARN == nil {
invalidParams.Add(request.NewErrParamRequired("RoleARN"))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.S3Configuration == nil {
invalidParams.Add(request.NewErrParamRequired("S3Configuration"))
}
if s.TypeName == nil {
invalidParams.Add(request.NewErrParamRequired("TypeName"))
}
if s.TypeName != nil && len(*s.TypeName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TypeName", 1))
}
if s.BufferingHints != nil {
if err := s.BufferingHints.Validate(); err != nil {
invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
}
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3Configuration != nil {
if err := s.S3Configuration.Validate(); err != nil {
invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *ElasticsearchDestinationConfiguration) SetBufferingHints(v *ElasticsearchBufferingHints) *ElasticsearchDestinationConfiguration {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *ElasticsearchDestinationConfiguration) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *ElasticsearchDestinationConfiguration {
s.CloudWatchLoggingOptions = v
return s
}
// SetDomainARN sets the DomainARN field's value.
func (s *ElasticsearchDestinationConfiguration) SetDomainARN(v string) *ElasticsearchDestinationConfiguration {
s.DomainARN = &v
return s
}
// SetIndexName sets the IndexName field's value.
func (s *ElasticsearchDestinationConfiguration) SetIndexName(v string) *ElasticsearchDestinationConfiguration {
s.IndexName = &v
return s
}
// SetIndexRotationPeriod sets the IndexRotationPeriod field's value.
func (s *ElasticsearchDestinationConfiguration) SetIndexRotationPeriod(v string) *ElasticsearchDestinationConfiguration {
s.IndexRotationPeriod = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *ElasticsearchDestinationConfiguration) SetProcessingConfiguration(v *ProcessingConfiguration) *ElasticsearchDestinationConfiguration {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *ElasticsearchDestinationConfiguration) SetRetryOptions(v *ElasticsearchRetryOptions) *ElasticsearchDestinationConfiguration {
s.RetryOptions = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ElasticsearchDestinationConfiguration) SetRoleARN(v string) *ElasticsearchDestinationConfiguration {
s.RoleARN = &v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *ElasticsearchDestinationConfiguration) SetS3BackupMode(v string) *ElasticsearchDestinationConfiguration {
s.S3BackupMode = &v
return s
}
// SetS3Configuration sets the S3Configuration field's value.
func (s *ElasticsearchDestinationConfiguration) SetS3Configuration(v *S3DestinationConfiguration) *ElasticsearchDestinationConfiguration {
s.S3Configuration = v
return s
}
// SetTypeName sets the TypeName field's value.
func (s *ElasticsearchDestinationConfiguration) SetTypeName(v string) *ElasticsearchDestinationConfiguration {
s.TypeName = &v
return s
}
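// exampleElasticsearchDestination is an illustrative sketch only, with
// placeholder ARNs and names. It sets the required fields plus the
// FailedDocumentsOnly backup mode discussed above; the nested
// S3DestinationConfiguration (defined elsewhere in this package) needs its own
// bucket and role ARNs.
func exampleElasticsearchDestination() *ElasticsearchDestinationConfiguration {
	return (&ElasticsearchDestinationConfiguration{}).
		SetDomainARN("arn:aws:es:us-east-1:111122223333:domain/example-domain").
		SetIndexName("example-index").
		SetTypeName("_doc").
		SetIndexRotationPeriod("OneDay").
		SetS3BackupMode("FailedDocumentsOnly").
		SetRoleARN("arn:aws:iam::111122223333:role/example-firehose-role").
		SetS3Configuration((&S3DestinationConfiguration{}).
			SetBucketARN("arn:aws:s3:::example-backup-bucket").
			SetRoleARN("arn:aws:iam::111122223333:role/example-firehose-role"))
}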
// The destination description in Amazon ES.
type ElasticsearchDestinationDescription struct {
_ struct{} `type:"structure"`
// The buffering options.
BufferingHints *ElasticsearchBufferingHints `type:"structure"`
// The Amazon CloudWatch logging options.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The ARN of the Amazon ES domain. For more information, see Amazon Resource
// Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
DomainARN *string `min:"1" type:"string"`
// The Elasticsearch index name.
IndexName *string `min:"1" type:"string"`
// The Elasticsearch index rotation period
IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The Amazon ES retry options.
RetryOptions *ElasticsearchRetryOptions `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
RoleARN *string `min:"1" type:"string"`
// The Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"ElasticsearchS3BackupMode"`
// The Amazon S3 destination.
S3DestinationDescription *S3DestinationDescription `type:"structure"`
// The Elasticsearch type name.
TypeName *string `min:"1" type:"string"`
}
// String returns the string representation
func (s ElasticsearchDestinationDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ElasticsearchDestinationDescription) GoString() string {
return s.String()
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *ElasticsearchDestinationDescription) SetBufferingHints(v *ElasticsearchBufferingHints) *ElasticsearchDestinationDescription {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *ElasticsearchDestinationDescription) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *ElasticsearchDestinationDescription {
s.CloudWatchLoggingOptions = v
return s
}
// SetDomainARN sets the DomainARN field's value.
func (s *ElasticsearchDestinationDescription) SetDomainARN(v string) *ElasticsearchDestinationDescription {
s.DomainARN = &v
return s
}
// SetIndexName sets the IndexName field's value.
func (s *ElasticsearchDestinationDescription) SetIndexName(v string) *ElasticsearchDestinationDescription {
s.IndexName = &v
return s
}
// SetIndexRotationPeriod sets the IndexRotationPeriod field's value.
func (s *ElasticsearchDestinationDescription) SetIndexRotationPeriod(v string) *ElasticsearchDestinationDescription {
s.IndexRotationPeriod = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *ElasticsearchDestinationDescription) SetProcessingConfiguration(v *ProcessingConfiguration) *ElasticsearchDestinationDescription {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *ElasticsearchDestinationDescription) SetRetryOptions(v *ElasticsearchRetryOptions) *ElasticsearchDestinationDescription {
s.RetryOptions = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ElasticsearchDestinationDescription) SetRoleARN(v string) *ElasticsearchDestinationDescription {
s.RoleARN = &v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *ElasticsearchDestinationDescription) SetS3BackupMode(v string) *ElasticsearchDestinationDescription {
s.S3BackupMode = &v
return s
}
// SetS3DestinationDescription sets the S3DestinationDescription field's value.
func (s *ElasticsearchDestinationDescription) SetS3DestinationDescription(v *S3DestinationDescription) *ElasticsearchDestinationDescription {
s.S3DestinationDescription = v
return s
}
// SetTypeName sets the TypeName field's value.
func (s *ElasticsearchDestinationDescription) SetTypeName(v string) *ElasticsearchDestinationDescription {
s.TypeName = &v
return s
}
// Describes an update for a destination in Amazon ES.
type ElasticsearchDestinationUpdate struct {
_ struct{} `type:"structure"`
// The buffering options. If no value is specified, the default values for
// ElasticsearchBufferingHints are used.
BufferingHints *ElasticsearchBufferingHints `type:"structure"`
// The CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The ARN of the Amazon ES domain. The IAM role must have permissions for DescribeElasticsearchDomain,
// DescribeElasticsearchDomains, and DescribeElasticsearchDomainConfig after
// assuming the IAM role specified in RoleARN. For more information, see Amazon
// Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
DomainARN *string `min:"1" type:"string"`
// The Elasticsearch index name.
IndexName *string `min:"1" type:"string"`
// The Elasticsearch index rotation period. Index rotation appends a timestamp
// to IndexName to facilitate the expiration of old data. For more information,
// see Index Rotation for the Amazon ES Destination (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-index-rotation).
// Default value is OneDay.
IndexRotationPeriod *string `type:"string" enum:"ElasticsearchIndexRotationPeriod"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver documents
// to Amazon ES. The default value is 300 (5 minutes).
RetryOptions *ElasticsearchRetryOptions `type:"structure"`
// The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data
// Firehose for calling the Amazon ES Configuration API and for indexing documents.
// For more information, see Grant Kinesis Data Firehose Access to an Amazon
// S3 Destination (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3)
// and Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
RoleARN *string `min:"1" type:"string"`
// The Amazon S3 destination.
S3Update *S3DestinationUpdate `type:"structure"`
// The Elasticsearch type name. For Elasticsearch 6.x, there can be only one
// type per index. If you try to specify a new type for an existing index that
// already has another type, Kinesis Data Firehose returns an error during runtime.
TypeName *string `min:"1" type:"string"`
}
// String returns the string representation
func (s ElasticsearchDestinationUpdate) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ElasticsearchDestinationUpdate) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ElasticsearchDestinationUpdate) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ElasticsearchDestinationUpdate"}
if s.DomainARN != nil && len(*s.DomainARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DomainARN", 1))
}
if s.IndexName != nil && len(*s.IndexName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("IndexName", 1))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.TypeName != nil && len(*s.TypeName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TypeName", 1))
}
if s.BufferingHints != nil {
if err := s.BufferingHints.Validate(); err != nil {
invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
}
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3Update != nil {
if err := s.S3Update.Validate(); err != nil {
invalidParams.AddNested("S3Update", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *ElasticsearchDestinationUpdate) SetBufferingHints(v *ElasticsearchBufferingHints) *ElasticsearchDestinationUpdate {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *ElasticsearchDestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *ElasticsearchDestinationUpdate {
s.CloudWatchLoggingOptions = v
return s
}
// SetDomainARN sets the DomainARN field's value.
func (s *ElasticsearchDestinationUpdate) SetDomainARN(v string) *ElasticsearchDestinationUpdate {
s.DomainARN = &v
return s
}
// SetIndexName sets the IndexName field's value.
func (s *ElasticsearchDestinationUpdate) SetIndexName(v string) *ElasticsearchDestinationUpdate {
s.IndexName = &v
return s
}
// SetIndexRotationPeriod sets the IndexRotationPeriod field's value.
func (s *ElasticsearchDestinationUpdate) SetIndexRotationPeriod(v string) *ElasticsearchDestinationUpdate {
s.IndexRotationPeriod = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *ElasticsearchDestinationUpdate) SetProcessingConfiguration(v *ProcessingConfiguration) *ElasticsearchDestinationUpdate {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *ElasticsearchDestinationUpdate) SetRetryOptions(v *ElasticsearchRetryOptions) *ElasticsearchDestinationUpdate {
s.RetryOptions = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ElasticsearchDestinationUpdate) SetRoleARN(v string) *ElasticsearchDestinationUpdate {
s.RoleARN = &v
return s
}
// SetS3Update sets the S3Update field's value.
func (s *ElasticsearchDestinationUpdate) SetS3Update(v *S3DestinationUpdate) *ElasticsearchDestinationUpdate {
s.S3Update = v
return s
}
// SetTypeName sets the TypeName field's value.
func (s *ElasticsearchDestinationUpdate) SetTypeName(v string) *ElasticsearchDestinationUpdate {
s.TypeName = &v
return s
}
// Configures retry behavior in case Kinesis Data Firehose is unable to deliver
// documents to Amazon ES.
type ElasticsearchRetryOptions struct {
_ struct{} `type:"structure"`
// After an initial failure to deliver to Amazon ES, the total amount of time
// during which Kinesis Data Firehose retries delivery (including the first
// attempt). After this time has elapsed, the failed documents are written to
// Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero)
// results in no retries.
DurationInSeconds *int64 `type:"integer"`
}
// String returns the string representation
func (s ElasticsearchRetryOptions) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ElasticsearchRetryOptions) GoString() string {
return s.String()
}
// SetDurationInSeconds sets the DurationInSeconds field's value.
func (s *ElasticsearchRetryOptions) SetDurationInSeconds(v int64) *ElasticsearchRetryOptions {
s.DurationInSeconds = &v
return s
}
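// exampleElasticsearchRetryOptions is an illustrative sketch only. As described
// above, 0 disables retries entirely; the 600-second value here is an arbitrary
// example of extending the default 300-second retry window.
func exampleElasticsearchRetryOptions() *ElasticsearchRetryOptions {
	return (&ElasticsearchRetryOptions{}).SetDurationInSeconds(600)
}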
// Describes the encryption for a destination in Amazon S3.
type EncryptionConfiguration struct {
_ struct{} `type:"structure"`
// The encryption key.
KMSEncryptionConfig *KMSEncryptionConfig `type:"structure"`
// Specifically override existing encryption information to ensure that no encryption
// is used.
NoEncryptionConfig *string `type:"string" enum:"NoEncryptionConfig"`
}
// String returns the string representation
func (s EncryptionConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EncryptionConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *EncryptionConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"}
if s.KMSEncryptionConfig != nil {
if err := s.KMSEncryptionConfig.Validate(); err != nil {
invalidParams.AddNested("KMSEncryptionConfig", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetKMSEncryptionConfig sets the KMSEncryptionConfig field's value.
func (s *EncryptionConfiguration) SetKMSEncryptionConfig(v *KMSEncryptionConfig) *EncryptionConfiguration {
s.KMSEncryptionConfig = v
return s
}
// SetNoEncryptionConfig sets the NoEncryptionConfig field's value.
func (s *EncryptionConfiguration) SetNoEncryptionConfig(v string) *EncryptionConfiguration {
s.NoEncryptionConfig = &v
return s
}
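// exampleEncryptionConfiguration is an illustrative sketch only; the KMS key ARN
// is a placeholder. Set either KMSEncryptionConfig or NoEncryptionConfig, not
// both.
func exampleEncryptionConfiguration() *EncryptionConfiguration {
	return (&EncryptionConfiguration{}).
		SetKMSEncryptionConfig((&KMSEncryptionConfig{}).
			SetAWSKMSKeyARN("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"))
}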
// Describes the configuration of a destination in Amazon S3.
type ExtendedS3DestinationConfiguration struct {
_ struct{} `type:"structure"`
// The ARN of the S3 bucket. For more information, see Amazon Resource Names
// (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// BucketARN is a required field
BucketARN *string `min:"1" type:"string" required:"true"`
// The buffering option.
BufferingHints *BufferingHints `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The compression format. If no value is specified, the default is UNCOMPRESSED.
CompressionFormat *string `type:"string" enum:"CompressionFormat"`
// The serializer, deserializer, and schema for converting data from the JSON
// format to the Parquet or ORC format before writing it to Amazon S3.
DataFormatConversionConfiguration *DataFormatConversionConfiguration `type:"structure"`
// The encryption configuration. If no value is specified, the default is no
// encryption.
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
// A prefix that Kinesis Data Firehose evaluates and adds to failed records
// before writing them to S3. This prefix appears immediately following the
// bucket name.
ErrorOutputPrefix *string `type:"string"`
// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
// Amazon S3 files. You can specify an extra prefix to be added in front of
// the time format prefix. If the prefix ends with a slash, it appears as a
// folder in the S3 bucket. For more information, see Amazon S3 Object Name
// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name)
// in the Amazon Kinesis Data Firehose Developer Guide.
Prefix *string `type:"string"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
// The configuration for backup in Amazon S3.
S3BackupConfiguration *S3DestinationConfiguration `type:"structure"`
// The Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"S3BackupMode"`
}
// String returns the string representation
func (s ExtendedS3DestinationConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ExtendedS3DestinationConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ExtendedS3DestinationConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ExtendedS3DestinationConfiguration"}
if s.BucketARN == nil {
invalidParams.Add(request.NewErrParamRequired("BucketARN"))
}
if s.BucketARN != nil && len(*s.BucketARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BucketARN", 1))
}
if s.RoleARN == nil {
invalidParams.Add(request.NewErrParamRequired("RoleARN"))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.BufferingHints != nil {
if err := s.BufferingHints.Validate(); err != nil {
invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
}
}
if s.DataFormatConversionConfiguration != nil {
if err := s.DataFormatConversionConfiguration.Validate(); err != nil {
invalidParams.AddNested("DataFormatConversionConfiguration", err.(request.ErrInvalidParams))
}
}
if s.EncryptionConfiguration != nil {
if err := s.EncryptionConfiguration.Validate(); err != nil {
invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
}
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3BackupConfiguration != nil {
if err := s.S3BackupConfiguration.Validate(); err != nil {
invalidParams.AddNested("S3BackupConfiguration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBucketARN sets the BucketARN field's value.
func (s *ExtendedS3DestinationConfiguration) SetBucketARN(v string) *ExtendedS3DestinationConfiguration {
s.BucketARN = &v
return s
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *ExtendedS3DestinationConfiguration) SetBufferingHints(v *BufferingHints) *ExtendedS3DestinationConfiguration {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *ExtendedS3DestinationConfiguration) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *ExtendedS3DestinationConfiguration {
s.CloudWatchLoggingOptions = v
return s
}
// SetCompressionFormat sets the CompressionFormat field's value.
func (s *ExtendedS3DestinationConfiguration) SetCompressionFormat(v string) *ExtendedS3DestinationConfiguration {
s.CompressionFormat = &v
return s
}
// SetDataFormatConversionConfiguration sets the DataFormatConversionConfiguration field's value.
func (s *ExtendedS3DestinationConfiguration) SetDataFormatConversionConfiguration(v *DataFormatConversionConfiguration) *ExtendedS3DestinationConfiguration {
s.DataFormatConversionConfiguration = v
return s
}
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *ExtendedS3DestinationConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *ExtendedS3DestinationConfiguration {
s.EncryptionConfiguration = v
return s
}
// SetErrorOutputPrefix sets the ErrorOutputPrefix field's value.
func (s *ExtendedS3DestinationConfiguration) SetErrorOutputPrefix(v string) *ExtendedS3DestinationConfiguration {
s.ErrorOutputPrefix = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *ExtendedS3DestinationConfiguration) SetPrefix(v string) *ExtendedS3DestinationConfiguration {
s.Prefix = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *ExtendedS3DestinationConfiguration) SetProcessingConfiguration(v *ProcessingConfiguration) *ExtendedS3DestinationConfiguration {
s.ProcessingConfiguration = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ExtendedS3DestinationConfiguration) SetRoleARN(v string) *ExtendedS3DestinationConfiguration {
s.RoleARN = &v
return s
}
// SetS3BackupConfiguration sets the S3BackupConfiguration field's value.
func (s *ExtendedS3DestinationConfiguration) SetS3BackupConfiguration(v *S3DestinationConfiguration) *ExtendedS3DestinationConfiguration {
s.S3BackupConfiguration = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *ExtendedS3DestinationConfiguration) SetS3BackupMode(v string) *ExtendedS3DestinationConfiguration {
s.S3BackupMode = &v
return s
}
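// Usage sketch (illustrative): a minimal ExtendedS3DestinationConfiguration for
// CreateDeliveryStream. Only BucketARN and RoleARN are required; the ARNs below
// are placeholders and "GZIP" is one of the documented compression formats.
//
//    extS3 := &firehose.ExtendedS3DestinationConfiguration{}
//    extS3.SetBucketARN("arn:aws:s3:::my-example-bucket").
//        SetRoleARN("arn:aws:iam::111122223333:role/firehose-delivery-role").
//        SetCompressionFormat("GZIP").
//        SetPrefix("events/").
//        SetErrorOutputPrefix("errors/")
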
// Describes a destination in Amazon S3.
type ExtendedS3DestinationDescription struct {
_ struct{} `type:"structure"`
// The ARN of the S3 bucket. For more information, see Amazon Resource Names
// (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// BucketARN is a required field
BucketARN *string `min:"1" type:"string" required:"true"`
// The buffering option.
//
// BufferingHints is a required field
BufferingHints *BufferingHints `type:"structure" required:"true"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The compression format. If no value is specified, the default is UNCOMPRESSED.
//
// CompressionFormat is a required field
CompressionFormat *string `type:"string" required:"true" enum:"CompressionFormat"`
// The serializer, deserializer, and schema for converting data from the JSON
// format to the Parquet or ORC format before writing it to Amazon S3.
DataFormatConversionConfiguration *DataFormatConversionConfiguration `type:"structure"`
// The encryption configuration. If no value is specified, the default is no
// encryption.
//
// EncryptionConfiguration is a required field
EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"`
// A prefix that Kinesis Data Firehose evaluates and adds to failed records
// before writing them to S3. This prefix appears immediately following the
// bucket name.
ErrorOutputPrefix *string `type:"string"`
// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
// Amazon S3 files. You can specify an extra prefix to be added in front of
// the time format prefix. If the prefix ends with a slash, it appears as a
// folder in the S3 bucket. For more information, see Amazon S3 Object Name
// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name)
// in the Amazon Kinesis Data Firehose Developer Guide.
Prefix *string `type:"string"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
// The configuration for backup in Amazon S3.
S3BackupDescription *S3DestinationDescription `type:"structure"`
// The Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"S3BackupMode"`
}
// String returns the string representation
func (s ExtendedS3DestinationDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ExtendedS3DestinationDescription) GoString() string {
return s.String()
}
// SetBucketARN sets the BucketARN field's value.
func (s *ExtendedS3DestinationDescription) SetBucketARN(v string) *ExtendedS3DestinationDescription {
s.BucketARN = &v
return s
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *ExtendedS3DestinationDescription) SetBufferingHints(v *BufferingHints) *ExtendedS3DestinationDescription {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *ExtendedS3DestinationDescription) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *ExtendedS3DestinationDescription {
s.CloudWatchLoggingOptions = v
return s
}
// SetCompressionFormat sets the CompressionFormat field's value.
func (s *ExtendedS3DestinationDescription) SetCompressionFormat(v string) *ExtendedS3DestinationDescription {
s.CompressionFormat = &v
return s
}
// SetDataFormatConversionConfiguration sets the DataFormatConversionConfiguration field's value.
func (s *ExtendedS3DestinationDescription) SetDataFormatConversionConfiguration(v *DataFormatConversionConfiguration) *ExtendedS3DestinationDescription {
s.DataFormatConversionConfiguration = v
return s
}
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *ExtendedS3DestinationDescription) SetEncryptionConfiguration(v *EncryptionConfiguration) *ExtendedS3DestinationDescription {
s.EncryptionConfiguration = v
return s
}
// SetErrorOutputPrefix sets the ErrorOutputPrefix field's value.
func (s *ExtendedS3DestinationDescription) SetErrorOutputPrefix(v string) *ExtendedS3DestinationDescription {
s.ErrorOutputPrefix = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *ExtendedS3DestinationDescription) SetPrefix(v string) *ExtendedS3DestinationDescription {
s.Prefix = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *ExtendedS3DestinationDescription) SetProcessingConfiguration(v *ProcessingConfiguration) *ExtendedS3DestinationDescription {
s.ProcessingConfiguration = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ExtendedS3DestinationDescription) SetRoleARN(v string) *ExtendedS3DestinationDescription {
s.RoleARN = &v
return s
}
// SetS3BackupDescription sets the S3BackupDescription field's value.
func (s *ExtendedS3DestinationDescription) SetS3BackupDescription(v *S3DestinationDescription) *ExtendedS3DestinationDescription {
s.S3BackupDescription = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *ExtendedS3DestinationDescription) SetS3BackupMode(v string) *ExtendedS3DestinationDescription {
s.S3BackupMode = &v
return s
}
// Describes an update for a destination in Amazon S3.
type ExtendedS3DestinationUpdate struct {
_ struct{} `type:"structure"`
// The ARN of the S3 bucket. For more information, see Amazon Resource Names
// (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
BucketARN *string `min:"1" type:"string"`
// The buffering option.
BufferingHints *BufferingHints `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The compression format. If no value is specified, the default is UNCOMPRESSED.
CompressionFormat *string `type:"string" enum:"CompressionFormat"`
// The serializer, deserializer, and schema for converting data from the JSON
// format to the Parquet or ORC format before writing it to Amazon S3.
DataFormatConversionConfiguration *DataFormatConversionConfiguration `type:"structure"`
// The encryption configuration. If no value is specified, the default is no
// encryption.
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
// A prefix that Kinesis Data Firehose evaluates and adds to failed records
// before writing them to S3. This prefix appears immediately following the
// bucket name.
ErrorOutputPrefix *string `type:"string"`
// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
// Amazon S3 files. You can specify an extra prefix to be added in front of
// the time format prefix. If the prefix ends with a slash, it appears as a
// folder in the S3 bucket. For more information, see Amazon S3 Object Name
// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name)
// in the Amazon Kinesis Data Firehose Developer Guide.
Prefix *string `type:"string"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
RoleARN *string `min:"1" type:"string"`
// Enables or disables Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"S3BackupMode"`
// The Amazon S3 destination for backup.
S3BackupUpdate *S3DestinationUpdate `type:"structure"`
}
// String returns the string representation
func (s ExtendedS3DestinationUpdate) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ExtendedS3DestinationUpdate) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ExtendedS3DestinationUpdate) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ExtendedS3DestinationUpdate"}
if s.BucketARN != nil && len(*s.BucketARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BucketARN", 1))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.BufferingHints != nil {
if err := s.BufferingHints.Validate(); err != nil {
invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
}
}
if s.DataFormatConversionConfiguration != nil {
if err := s.DataFormatConversionConfiguration.Validate(); err != nil {
invalidParams.AddNested("DataFormatConversionConfiguration", err.(request.ErrInvalidParams))
}
}
if s.EncryptionConfiguration != nil {
if err := s.EncryptionConfiguration.Validate(); err != nil {
invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
}
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3BackupUpdate != nil {
if err := s.S3BackupUpdate.Validate(); err != nil {
invalidParams.AddNested("S3BackupUpdate", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBucketARN sets the BucketARN field's value.
func (s *ExtendedS3DestinationUpdate) SetBucketARN(v string) *ExtendedS3DestinationUpdate {
s.BucketARN = &v
return s
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *ExtendedS3DestinationUpdate) SetBufferingHints(v *BufferingHints) *ExtendedS3DestinationUpdate {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *ExtendedS3DestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *ExtendedS3DestinationUpdate {
s.CloudWatchLoggingOptions = v
return s
}
// SetCompressionFormat sets the CompressionFormat field's value.
func (s *ExtendedS3DestinationUpdate) SetCompressionFormat(v string) *ExtendedS3DestinationUpdate {
s.CompressionFormat = &v
return s
}
// SetDataFormatConversionConfiguration sets the DataFormatConversionConfiguration field's value.
func (s *ExtendedS3DestinationUpdate) SetDataFormatConversionConfiguration(v *DataFormatConversionConfiguration) *ExtendedS3DestinationUpdate {
s.DataFormatConversionConfiguration = v
return s
}
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *ExtendedS3DestinationUpdate) SetEncryptionConfiguration(v *EncryptionConfiguration) *ExtendedS3DestinationUpdate {
s.EncryptionConfiguration = v
return s
}
// SetErrorOutputPrefix sets the ErrorOutputPrefix field's value.
func (s *ExtendedS3DestinationUpdate) SetErrorOutputPrefix(v string) *ExtendedS3DestinationUpdate {
s.ErrorOutputPrefix = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *ExtendedS3DestinationUpdate) SetPrefix(v string) *ExtendedS3DestinationUpdate {
s.Prefix = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *ExtendedS3DestinationUpdate) SetProcessingConfiguration(v *ProcessingConfiguration) *ExtendedS3DestinationUpdate {
s.ProcessingConfiguration = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *ExtendedS3DestinationUpdate) SetRoleARN(v string) *ExtendedS3DestinationUpdate {
s.RoleARN = &v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *ExtendedS3DestinationUpdate) SetS3BackupMode(v string) *ExtendedS3DestinationUpdate {
s.S3BackupMode = &v
return s
}
// SetS3BackupUpdate sets the S3BackupUpdate field's value.
func (s *ExtendedS3DestinationUpdate) SetS3BackupUpdate(v *S3DestinationUpdate) *ExtendedS3DestinationUpdate {
s.S3BackupUpdate = v
return s
}
// The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing
// data, which means converting it from the JSON format in preparation for serializing
// it to the Parquet or ORC format. This is one of two deserializers you can
// choose, depending on which one offers the functionality you need. The other
// option is the OpenX SerDe.
type HiveJsonSerDe struct {
_ struct{} `type:"structure"`
// Indicates how you want Kinesis Data Firehose to parse the date and timestamps
// that may be present in your input data JSON. To specify these format strings,
// follow the pattern syntax of JodaTime's DateTimeFormat format strings. For
// more information, see Class DateTimeFormat (https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html).
// You can also use the special value millis to parse timestamps in epoch milliseconds.
// If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf
// by default.
TimestampFormats []*string `type:"list"`
}
// String returns the string representation
func (s HiveJsonSerDe) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s HiveJsonSerDe) GoString() string {
return s.String()
}
// SetTimestampFormats sets the TimestampFormats field's value.
func (s *HiveJsonSerDe) SetTimestampFormats(v []*string) *HiveJsonSerDe {
s.TimestampFormats = v
return s
}
// Specifies the deserializer you want to use to convert the format of the input
// data.
type InputFormatConfiguration struct {
_ struct{} `type:"structure"`
// Specifies which deserializer to use. You can choose either the Apache Hive
// JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects
// the request.
Deserializer *Deserializer `type:"structure"`
}
// String returns the string representation
func (s InputFormatConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s InputFormatConfiguration) GoString() string {
return s.String()
}
// SetDeserializer sets the Deserializer field's value.
func (s *InputFormatConfiguration) SetDeserializer(v *Deserializer) *InputFormatConfiguration {
s.Deserializer = v
return s
}
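// Usage sketch (illustrative): selecting the Hive JSON deserializer for record
// format conversion. Deserializer is defined elsewhere in this file and accepts
// exactly one of HiveJsonSerDe or OpenXJsonSerDe; the timestamp pattern below
// is a sample JodaTime format string.
//
//    hive := &firehose.HiveJsonSerDe{}
//    hive.SetTimestampFormats([]*string{aws.String("yyyy-MM-dd'T'HH:mm:ss")})
//
//    inputFormat := &firehose.InputFormatConfiguration{}
//    inputFormat.SetDeserializer((&firehose.Deserializer{}).SetHiveJsonSerDe(hive))
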
// Describes an encryption key for a destination in Amazon S3.
type KMSEncryptionConfig struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the encryption key. Must belong to the
// same AWS Region as the destination Amazon S3 bucket. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// AWSKMSKeyARN is a required field
AWSKMSKeyARN *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s KMSEncryptionConfig) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s KMSEncryptionConfig) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *KMSEncryptionConfig) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "KMSEncryptionConfig"}
if s.AWSKMSKeyARN == nil {
invalidParams.Add(request.NewErrParamRequired("AWSKMSKeyARN"))
}
if s.AWSKMSKeyARN != nil && len(*s.AWSKMSKeyARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AWSKMSKeyARN", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAWSKMSKeyARN sets the AWSKMSKeyARN field's value.
func (s *KMSEncryptionConfig) SetAWSKMSKeyARN(v string) *KMSEncryptionConfig {
s.AWSKMSKeyARN = &v
return s
}
// The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream
// used as the source for a delivery stream.
type KinesisStreamSourceConfiguration struct {
_ struct{} `type:"structure"`
// The ARN of the source Kinesis data stream. For more information, see Amazon
// Kinesis Data Streams ARN Format (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams).
//
// KinesisStreamARN is a required field
KinesisStreamARN *string `min:"1" type:"string" required:"true"`
// The ARN of the role that provides access to the source Kinesis data stream.
// For more information, see AWS Identity and Access Management (IAM) ARN Format
// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s KinesisStreamSourceConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s KinesisStreamSourceConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *KinesisStreamSourceConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "KinesisStreamSourceConfiguration"}
if s.KinesisStreamARN == nil {
invalidParams.Add(request.NewErrParamRequired("KinesisStreamARN"))
}
if s.KinesisStreamARN != nil && len(*s.KinesisStreamARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("KinesisStreamARN", 1))
}
if s.RoleARN == nil {
invalidParams.Add(request.NewErrParamRequired("RoleARN"))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetKinesisStreamARN sets the KinesisStreamARN field's value.
func (s *KinesisStreamSourceConfiguration) SetKinesisStreamARN(v string) *KinesisStreamSourceConfiguration {
s.KinesisStreamARN = &v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *KinesisStreamSourceConfiguration) SetRoleARN(v string) *KinesisStreamSourceConfiguration {
s.RoleARN = &v
return s
}
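// Usage sketch (illustrative): using a Kinesis data stream as the source of a
// delivery stream. Both ARNs are placeholders; pass the value through
// CreateDeliveryStreamInput together with the KinesisStreamAsSource delivery
// stream type.
//
//    src := &firehose.KinesisStreamSourceConfiguration{}
//    src.SetKinesisStreamARN("arn:aws:kinesis:us-east-1:111122223333:stream/example-stream").
//        SetRoleARN("arn:aws:iam::111122223333:role/firehose-source-role")
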
// Details about a Kinesis data stream used as the source for a Kinesis Data
// Firehose delivery stream.
type KinesisStreamSourceDescription struct {
_ struct{} `type:"structure"`
// Kinesis Data Firehose starts retrieving records from the Kinesis data stream
// starting with this timestamp.
DeliveryStartTimestamp *time.Time `type:"timestamp"`
// The Amazon Resource Name (ARN) of the source Kinesis data stream. For more
// information, see Amazon Kinesis Data Streams ARN Format (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams).
KinesisStreamARN *string `min:"1" type:"string"`
// The ARN of the role used by the source Kinesis data stream. For more information,
// see AWS Identity and Access Management (IAM) ARN Format (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam).
RoleARN *string `min:"1" type:"string"`
}
// String returns the string representation
func (s KinesisStreamSourceDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s KinesisStreamSourceDescription) GoString() string {
return s.String()
}
// SetDeliveryStartTimestamp sets the DeliveryStartTimestamp field's value.
func (s *KinesisStreamSourceDescription) SetDeliveryStartTimestamp(v time.Time) *KinesisStreamSourceDescription {
s.DeliveryStartTimestamp = &v
return s
}
// SetKinesisStreamARN sets the KinesisStreamARN field's value.
func (s *KinesisStreamSourceDescription) SetKinesisStreamARN(v string) *KinesisStreamSourceDescription {
s.KinesisStreamARN = &v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *KinesisStreamSourceDescription) SetRoleARN(v string) *KinesisStreamSourceDescription {
s.RoleARN = &v
return s
}
type ListDeliveryStreamsInput struct {
_ struct{} `type:"structure"`
// The delivery stream type. This can be one of the following values:
//
// * DirectPut: Provider applications access the delivery stream directly.
//
// * KinesisStreamAsSource: The delivery stream uses a Kinesis data stream
// as a source.
//
// This parameter is optional. If this parameter is omitted, delivery streams
// of all types are returned.
DeliveryStreamType *string `type:"string" enum:"DeliveryStreamType"`
// The list of delivery streams returned by this call to ListDeliveryStreams
// will start with the delivery stream whose name comes alphabetically immediately
// after the name you specify in ExclusiveStartDeliveryStreamName.
ExclusiveStartDeliveryStreamName *string `min:"1" type:"string"`
// The maximum number of delivery streams to list. The default value is 10.
Limit *int64 `min:"1" type:"integer"`
}
// String returns the string representation
func (s ListDeliveryStreamsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListDeliveryStreamsInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListDeliveryStreamsInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListDeliveryStreamsInput"}
if s.ExclusiveStartDeliveryStreamName != nil && len(*s.ExclusiveStartDeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartDeliveryStreamName", 1))
}
if s.Limit != nil && *s.Limit < 1 {
invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamType sets the DeliveryStreamType field's value.
func (s *ListDeliveryStreamsInput) SetDeliveryStreamType(v string) *ListDeliveryStreamsInput {
s.DeliveryStreamType = &v
return s
}
// SetExclusiveStartDeliveryStreamName sets the ExclusiveStartDeliveryStreamName field's value.
func (s *ListDeliveryStreamsInput) SetExclusiveStartDeliveryStreamName(v string) *ListDeliveryStreamsInput {
s.ExclusiveStartDeliveryStreamName = &v
return s
}
// SetLimit sets the Limit field's value.
func (s *ListDeliveryStreamsInput) SetLimit(v int64) *ListDeliveryStreamsInput {
s.Limit = &v
return s
}
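// Usage sketch (illustrative): paging through all delivery stream names. The
// svc value is assumed to be a configured *Firehose client; pagination
// continues while HasMoreDeliveryStreams is true, restarting after the last
// name returned.
//
//    input := &firehose.ListDeliveryStreamsInput{}
//    input.SetLimit(10)
//    for {
//        out, err := svc.ListDeliveryStreams(input)
//        if err != nil {
//            return err
//        }
//        for _, name := range out.DeliveryStreamNames {
//            fmt.Println(aws.StringValue(name))
//        }
//        if !aws.BoolValue(out.HasMoreDeliveryStreams) || len(out.DeliveryStreamNames) == 0 {
//            break
//        }
//        input.SetExclusiveStartDeliveryStreamName(
//            aws.StringValue(out.DeliveryStreamNames[len(out.DeliveryStreamNames)-1]))
//    }
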
type ListDeliveryStreamsOutput struct {
_ struct{} `type:"structure"`
// The names of the delivery streams.
//
// DeliveryStreamNames is a required field
DeliveryStreamNames []*string `type:"list" required:"true"`
// Indicates whether there are more delivery streams available to list.
//
// HasMoreDeliveryStreams is a required field
HasMoreDeliveryStreams *bool `type:"boolean" required:"true"`
}
// String returns the string representation
func (s ListDeliveryStreamsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListDeliveryStreamsOutput) GoString() string {
return s.String()
}
// SetDeliveryStreamNames sets the DeliveryStreamNames field's value.
func (s *ListDeliveryStreamsOutput) SetDeliveryStreamNames(v []*string) *ListDeliveryStreamsOutput {
s.DeliveryStreamNames = v
return s
}
// SetHasMoreDeliveryStreams sets the HasMoreDeliveryStreams field's value.
func (s *ListDeliveryStreamsOutput) SetHasMoreDeliveryStreams(v bool) *ListDeliveryStreamsOutput {
s.HasMoreDeliveryStreams = &v
return s
}
type ListTagsForDeliveryStreamInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream whose tags you want to list.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// The key to use as the starting point for the list of tags. If you set this
// parameter, ListTagsForDeliveryStream gets all tags that occur after ExclusiveStartTagKey.
ExclusiveStartTagKey *string `min:"1" type:"string"`
// The number of tags to return. If this number is less than the total number
// of tags associated with the delivery stream, HasMoreTags is set to true in
// the response. To list additional tags, set ExclusiveStartTagKey to the last
// key in the response.
Limit *int64 `min:"1" type:"integer"`
}
// String returns the string representation
func (s ListTagsForDeliveryStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForDeliveryStreamInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListTagsForDeliveryStreamInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListTagsForDeliveryStreamInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.ExclusiveStartTagKey != nil && len(*s.ExclusiveStartTagKey) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTagKey", 1))
}
if s.Limit != nil && *s.Limit < 1 {
invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *ListTagsForDeliveryStreamInput) SetDeliveryStreamName(v string) *ListTagsForDeliveryStreamInput {
s.DeliveryStreamName = &v
return s
}
// SetExclusiveStartTagKey sets the ExclusiveStartTagKey field's value.
func (s *ListTagsForDeliveryStreamInput) SetExclusiveStartTagKey(v string) *ListTagsForDeliveryStreamInput {
s.ExclusiveStartTagKey = &v
return s
}
// SetLimit sets the Limit field's value.
func (s *ListTagsForDeliveryStreamInput) SetLimit(v int64) *ListTagsForDeliveryStreamInput {
s.Limit = &v
return s
}
type ListTagsForDeliveryStreamOutput struct {
_ struct{} `type:"structure"`
// If this is true in the response, more tags are available. To list the remaining
// tags, set ExclusiveStartTagKey to the key of the last tag returned and call
// ListTagsForDeliveryStream again.
//
// HasMoreTags is a required field
HasMoreTags *bool `type:"boolean" required:"true"`
// A list of tags associated with DeliveryStreamName, starting with the first
// tag after ExclusiveStartTagKey and up to the specified Limit.
//
// Tags is a required field
Tags []*Tag `type:"list" required:"true"`
}
// String returns the string representation
func (s ListTagsForDeliveryStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForDeliveryStreamOutput) GoString() string {
return s.String()
}
// SetHasMoreTags sets the HasMoreTags field's value.
func (s *ListTagsForDeliveryStreamOutput) SetHasMoreTags(v bool) *ListTagsForDeliveryStreamOutput {
s.HasMoreTags = &v
return s
}
// SetTags sets the Tags field's value.
func (s *ListTagsForDeliveryStreamOutput) SetTags(v []*Tag) *ListTagsForDeliveryStreamOutput {
s.Tags = v
return s
}
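// Usage sketch (illustrative): listing every tag on a delivery stream. The svc
// value is assumed to be a configured *Firehose client and "example-stream" is
// a placeholder name; ExclusiveStartTagKey advances the cursor while
// HasMoreTags reports true.
//
//    input := &firehose.ListTagsForDeliveryStreamInput{}
//    input.SetDeliveryStreamName("example-stream")
//    for {
//        out, err := svc.ListTagsForDeliveryStream(input)
//        if err != nil {
//            return err
//        }
//        for _, tag := range out.Tags {
//            fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
//        }
//        if !aws.BoolValue(out.HasMoreTags) || len(out.Tags) == 0 {
//            break
//        }
//        input.SetExclusiveStartTagKey(aws.StringValue(out.Tags[len(out.Tags)-1].Key))
//    }
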
// The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which
// means converting it from the JSON format in preparation for serializing it
// to the Parquet or ORC format. This is one of two deserializers you can choose,
// depending on which one offers the functionality you need. The other option
// is the native Hive / HCatalog JsonSerDe.
type OpenXJsonSerDe struct {
_ struct{} `type:"structure"`
// When set to true, which is the default, Kinesis Data Firehose converts JSON
// keys to lowercase before deserializing them.
CaseInsensitive *bool `type:"boolean"`
// Maps column names to JSON keys that aren't identical to the column names.
// This is useful when the JSON contains keys that are Hive keywords. For example,
// timestamp is a Hive keyword. If you have a JSON key named timestamp, set
// this parameter to {"ts": "timestamp"} to map this key to a column named ts.
ColumnToJsonKeyMappings map[string]*string `type:"map"`
// When set to true, specifies that the names of the keys include dots and that
// you want Kinesis Data Firehose to replace them with underscores. This is
// useful because Apache Hive does not allow dots in column names. For example,
// if the JSON contains a key whose name is "a.b", you can define the column
// name to be "a_b" when using this option.
//
// The default is false.
ConvertDotsInJsonKeysToUnderscores *bool `type:"boolean"`
}
// String returns the string representation
func (s OpenXJsonSerDe) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s OpenXJsonSerDe) GoString() string {
return s.String()
}
// SetCaseInsensitive sets the CaseInsensitive field's value.
func (s *OpenXJsonSerDe) SetCaseInsensitive(v bool) *OpenXJsonSerDe {
s.CaseInsensitive = &v
return s
}
// SetColumnToJsonKeyMappings sets the ColumnToJsonKeyMappings field's value.
func (s *OpenXJsonSerDe) SetColumnToJsonKeyMappings(v map[string]*string) *OpenXJsonSerDe {
s.ColumnToJsonKeyMappings = v
return s
}
// SetConvertDotsInJsonKeysToUnderscores sets the ConvertDotsInJsonKeysToUnderscores field's value.
func (s *OpenXJsonSerDe) SetConvertDotsInJsonKeysToUnderscores(v bool) *OpenXJsonSerDe {
s.ConvertDotsInJsonKeysToUnderscores = &v
return s
}
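// Usage sketch (illustrative): an OpenXJsonSerDe that maps the JSON key
// "timestamp" (a Hive keyword) to a column named "ts" and rewrites dotted keys,
// mirroring the field documentation above.
//
//    openx := &firehose.OpenXJsonSerDe{}
//    openx.SetColumnToJsonKeyMappings(map[string]*string{"ts": aws.String("timestamp")})
//    openx.SetConvertDotsInJsonKeysToUnderscores(true)
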
// A serializer to use for converting data to the ORC format before storing
// it in Amazon S3. For more information, see Apache ORC (https://orc.apache.org/docs/).
type OrcSerDe struct {
_ struct{} `type:"structure"`
// The Hadoop Distributed File System (HDFS) block size. This is useful if you
// intend to copy the data from Amazon S3 to HDFS before querying. The default
// is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value
// for padding calculations.
BlockSizeBytes *int64 `min:"6.7108864e+07" type:"integer"`
// The column names for which you want Kinesis Data Firehose to create bloom
// filters. The default is null.
BloomFilterColumns []*string `type:"list"`
// The Bloom filter false positive probability (FPP). The lower the FPP, the
// bigger the Bloom filter. The default value is 0.05, the minimum is 0, and
// the maximum is 1.
BloomFilterFalsePositiveProbability *float64 `type:"double"`
// The compression codec to use over data blocks. The default is SNAPPY.
Compression *string `type:"string" enum:"OrcCompression"`
// Represents the fraction of the total number of non-null rows. To turn off
// dictionary encoding, set this fraction to a number that is less than the
// number of distinct keys in a dictionary. To always use dictionary encoding,
// set this threshold to 1.
DictionaryKeyThreshold *float64 `type:"double"`
// Set this to true to indicate that you want stripes to be padded to the HDFS
// block boundaries. This is useful if you intend to copy the data from Amazon
// S3 to HDFS before querying. The default is false.
EnablePadding *bool `type:"boolean"`
// The version of the file to write. The possible values are V0_11 and V0_12.
// The default is V0_12.
FormatVersion *string `type:"string" enum:"OrcFormatVersion"`
// A number between 0 and 1 that defines the tolerance for block padding as
// a decimal fraction of stripe size. The default value is 0.05, which means
// 5 percent of stripe size.
//
// For the default values of 64 MiB ORC stripes and 256 MiB HDFS blocks, the
// default block padding tolerance of 5 percent reserves a maximum of 3.2 MiB
// for padding within the 256 MiB block. In such a case, if the available size
// within the block is more than 3.2 MiB, a new, smaller stripe is inserted
// to fit within that space. This ensures that no stripe crosses block boundaries
// and causes remote reads within a node-local task.
//
// Kinesis Data Firehose ignores this parameter when OrcSerDe$EnablePadding
// is false.
PaddingTolerance *float64 `type:"double"`
// The number of rows between index entries. The default is 10,000 and the minimum
// is 1,000.
RowIndexStride *int64 `min:"1000" type:"integer"`
// The number of bytes in each stripe. The default is 64 MiB and the minimum
// is 8 MiB.
StripeSizeBytes *int64 `min:"8.388608e+06" type:"integer"`
}
// String returns the string representation
func (s OrcSerDe) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s OrcSerDe) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *OrcSerDe) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "OrcSerDe"}
if s.BlockSizeBytes != nil && *s.BlockSizeBytes < 6.7108864e+07 {
invalidParams.Add(request.NewErrParamMinValue("BlockSizeBytes", 6.7108864e+07))
}
if s.RowIndexStride != nil && *s.RowIndexStride < 1000 {
invalidParams.Add(request.NewErrParamMinValue("RowIndexStride", 1000))
}
if s.StripeSizeBytes != nil && *s.StripeSizeBytes < 8.388608e+06 {
invalidParams.Add(request.NewErrParamMinValue("StripeSizeBytes", 8.388608e+06))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBlockSizeBytes sets the BlockSizeBytes field's value.
func (s *OrcSerDe) SetBlockSizeBytes(v int64) *OrcSerDe {
s.BlockSizeBytes = &v
return s
}
// SetBloomFilterColumns sets the BloomFilterColumns field's value.
func (s *OrcSerDe) SetBloomFilterColumns(v []*string) *OrcSerDe {
s.BloomFilterColumns = v
return s
}
// SetBloomFilterFalsePositiveProbability sets the BloomFilterFalsePositiveProbability field's value.
func (s *OrcSerDe) SetBloomFilterFalsePositiveProbability(v float64) *OrcSerDe {
s.BloomFilterFalsePositiveProbability = &v
return s
}
// SetCompression sets the Compression field's value.
func (s *OrcSerDe) SetCompression(v string) *OrcSerDe {
s.Compression = &v
return s
}
// SetDictionaryKeyThreshold sets the DictionaryKeyThreshold field's value.
func (s *OrcSerDe) SetDictionaryKeyThreshold(v float64) *OrcSerDe {
s.DictionaryKeyThreshold = &v
return s
}
// SetEnablePadding sets the EnablePadding field's value.
func (s *OrcSerDe) SetEnablePadding(v bool) *OrcSerDe {
s.EnablePadding = &v
return s
}
// SetFormatVersion sets the FormatVersion field's value.
func (s *OrcSerDe) SetFormatVersion(v string) *OrcSerDe {
s.FormatVersion = &v
return s
}
// SetPaddingTolerance sets the PaddingTolerance field's value.
func (s *OrcSerDe) SetPaddingTolerance(v float64) *OrcSerDe {
s.PaddingTolerance = &v
return s
}
// SetRowIndexStride sets the RowIndexStride field's value.
func (s *OrcSerDe) SetRowIndexStride(v int64) *OrcSerDe {
s.RowIndexStride = &v
return s
}
// SetStripeSizeBytes sets the StripeSizeBytes field's value.
func (s *OrcSerDe) SetStripeSizeBytes(v int64) *OrcSerDe {
s.StripeSizeBytes = &v
return s
}
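// Usage sketch (illustrative): an OrcSerDe that enables padding to HDFS block
// boundaries and builds Bloom filters for one column. "customer_id" is a
// placeholder column name, and the values stay within the documented minimums.
//
//    orc := &firehose.OrcSerDe{}
//    orc.SetEnablePadding(true)
//    orc.SetPaddingTolerance(0.05)
//    orc.SetBloomFilterColumns([]*string{aws.String("customer_id")})
//    orc.SetStripeSizeBytes(64 * 1024 * 1024)
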
// Specifies the serializer that you want Kinesis Data Firehose to use to convert
// the format of your data before it writes it to Amazon S3.
type OutputFormatConfiguration struct {
_ struct{} `type:"structure"`
// Specifies which serializer to use. You can choose either the ORC SerDe or
// the Parquet SerDe. If both are non-null, the server rejects the request.
Serializer *Serializer `type:"structure"`
}
// String returns the string representation
func (s OutputFormatConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s OutputFormatConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *OutputFormatConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "OutputFormatConfiguration"}
if s.Serializer != nil {
if err := s.Serializer.Validate(); err != nil {
invalidParams.AddNested("Serializer", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetSerializer sets the Serializer field's value.
func (s *OutputFormatConfiguration) SetSerializer(v *Serializer) *OutputFormatConfiguration {
s.Serializer = v
return s
}
// A serializer to use for converting data to the Parquet format before storing
// it in Amazon S3. For more information, see Apache Parquet (https://parquet.apache.org/documentation/latest/).
type ParquetSerDe struct {
_ struct{} `type:"structure"`
// The Hadoop Distributed File System (HDFS) block size. This is useful if you
// intend to copy the data from Amazon S3 to HDFS before querying. The default
// is 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value
// for padding calculations.
BlockSizeBytes *int64 `min:"6.7108864e+07" type:"integer"`
// The compression codec to use over data blocks. The possible values are UNCOMPRESSED,
// SNAPPY, and GZIP, with the default being SNAPPY. Use SNAPPY for higher decompression
// speed. Use GZIP if the compression ratio is more important than speed.
Compression *string `type:"string" enum:"ParquetCompression"`
// Indicates whether to enable dictionary compression.
EnableDictionaryCompression *bool `type:"boolean"`
// The maximum amount of padding to apply. This is useful if you intend to copy
// the data from Amazon S3 to HDFS before querying. The default is 0.
MaxPaddingBytes *int64 `type:"integer"`
// The Parquet page size. Column chunks are divided into pages. A page is conceptually
// an indivisible unit (in terms of compression and encoding). The minimum value
// is 64 KiB and the default is 1 MiB.
PageSizeBytes *int64 `min:"65536" type:"integer"`
// Indicates the version of row format to output. The possible values are V1
// and V2. The default is V1.
WriterVersion *string `type:"string" enum:"ParquetWriterVersion"`
}
// String returns the string representation
func (s ParquetSerDe) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ParquetSerDe) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ParquetSerDe) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ParquetSerDe"}
if s.BlockSizeBytes != nil && *s.BlockSizeBytes < 6.7108864e+07 {
invalidParams.Add(request.NewErrParamMinValue("BlockSizeBytes", 6.7108864e+07))
}
if s.PageSizeBytes != nil && *s.PageSizeBytes < 65536 {
invalidParams.Add(request.NewErrParamMinValue("PageSizeBytes", 65536))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBlockSizeBytes sets the BlockSizeBytes field's value.
func (s *ParquetSerDe) SetBlockSizeBytes(v int64) *ParquetSerDe {
s.BlockSizeBytes = &v
return s
}
// SetCompression sets the Compression field's value.
func (s *ParquetSerDe) SetCompression(v string) *ParquetSerDe {
s.Compression = &v
return s
}
// SetEnableDictionaryCompression sets the EnableDictionaryCompression field's value.
func (s *ParquetSerDe) SetEnableDictionaryCompression(v bool) *ParquetSerDe {
s.EnableDictionaryCompression = &v
return s
}
// SetMaxPaddingBytes sets the MaxPaddingBytes field's value.
func (s *ParquetSerDe) SetMaxPaddingBytes(v int64) *ParquetSerDe {
s.MaxPaddingBytes = &v
return s
}
// SetPageSizeBytes sets the PageSizeBytes field's value.
func (s *ParquetSerDe) SetPageSizeBytes(v int64) *ParquetSerDe {
s.PageSizeBytes = &v
return s
}
// SetWriterVersion sets the WriterVersion field's value.
func (s *ParquetSerDe) SetWriterVersion(v string) *ParquetSerDe {
s.WriterVersion = &v
return s
}
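// Usage sketch (illustrative): selecting Parquet output with GZIP compression
// for record format conversion. Serializer is defined elsewhere in this file
// and accepts exactly one of OrcSerDe or ParquetSerDe.
//
//    parquet := &firehose.ParquetSerDe{}
//    parquet.SetCompression("GZIP")
//    parquet.SetEnableDictionaryCompression(true)
//
//    outputFormat := &firehose.OutputFormatConfiguration{}
//    outputFormat.SetSerializer((&firehose.Serializer{}).SetParquetSerDe(parquet))
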
// Describes a data processing configuration.
type ProcessingConfiguration struct {
_ struct{} `type:"structure"`
// Enables or disables data processing.
Enabled *bool `type:"boolean"`
// The data processors.
Processors []*Processor `type:"list"`
}
// String returns the string representation
func (s ProcessingConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ProcessingConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ProcessingConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ProcessingConfiguration"}
if s.Processors != nil {
for i, v := range s.Processors {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Processors", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetEnabled sets the Enabled field's value.
func (s *ProcessingConfiguration) SetEnabled(v bool) *ProcessingConfiguration {
s.Enabled = &v
return s
}
// SetProcessors sets the Processors field's value.
func (s *ProcessingConfiguration) SetProcessors(v []*Processor) *ProcessingConfiguration {
s.Processors = v
return s
}
// Describes a data processor.
type Processor struct {
_ struct{} `type:"structure"`
// The processor parameters.
Parameters []*ProcessorParameter `type:"list"`
// The type of processor.
//
// Type is a required field
Type *string `type:"string" required:"true" enum:"ProcessorType"`
}
// String returns the string representation
func (s Processor) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Processor) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Processor) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Processor"}
if s.Type == nil {
invalidParams.Add(request.NewErrParamRequired("Type"))
}
if s.Parameters != nil {
for i, v := range s.Parameters {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Parameters", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetParameters sets the Parameters field's value.
func (s *Processor) SetParameters(v []*ProcessorParameter) *Processor {
s.Parameters = v
return s
}
// SetType sets the Type field's value.
func (s *Processor) SetType(v string) *Processor {
s.Type = &v
return s
}
// Describes the processor parameter.
type ProcessorParameter struct {
_ struct{} `type:"structure"`
// The name of the parameter.
//
// ParameterName is a required field
ParameterName *string `type:"string" required:"true" enum:"ProcessorParameterName"`
// The parameter value.
//
// ParameterValue is a required field
ParameterValue *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s ProcessorParameter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ProcessorParameter) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ProcessorParameter) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ProcessorParameter"}
if s.ParameterName == nil {
invalidParams.Add(request.NewErrParamRequired("ParameterName"))
}
if s.ParameterValue == nil {
invalidParams.Add(request.NewErrParamRequired("ParameterValue"))
}
if s.ParameterValue != nil && len(*s.ParameterValue) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ParameterValue", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetParameterName sets the ParameterName field's value.
func (s *ProcessorParameter) SetParameterName(v string) *ProcessorParameter {
s.ParameterName = &v
return s
}
// SetParameterValue sets the ParameterValue field's value.
func (s *ProcessorParameter) SetParameterValue(v string) *ProcessorParameter {
s.ParameterValue = &v
return s
}
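// Usage sketch (illustrative): a ProcessingConfiguration that invokes an AWS
// Lambda transformation. "Lambda" is the documented processor type and
// "LambdaArn" the documented parameter name; the function ARN is a placeholder.
//
//    param := &firehose.ProcessorParameter{}
//    param.SetParameterName("LambdaArn")
//    param.SetParameterValue("arn:aws:lambda:us-east-1:111122223333:function:transform:$LATEST")
//
//    proc := &firehose.Processor{}
//    proc.SetType("Lambda").SetParameters([]*firehose.ProcessorParameter{param})
//
//    processing := &firehose.ProcessingConfiguration{}
//    processing.SetEnabled(true).SetProcessors([]*firehose.Processor{proc})
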
type PutRecordBatchInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// One or more records.
//
// Records is a required field
Records []*Record `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s PutRecordBatchInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordBatchInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PutRecordBatchInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PutRecordBatchInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.Records == nil {
invalidParams.Add(request.NewErrParamRequired("Records"))
}
if s.Records != nil && len(s.Records) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Records", 1))
}
if s.Records != nil {
for i, v := range s.Records {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Records", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *PutRecordBatchInput) SetDeliveryStreamName(v string) *PutRecordBatchInput {
s.DeliveryStreamName = &v
return s
}
// SetRecords sets the Records field's value.
func (s *PutRecordBatchInput) SetRecords(v []*Record) *PutRecordBatchInput {
s.Records = v
return s
}
type PutRecordBatchOutput struct {
_ struct{} `type:"structure"`
// Indicates whether server-side encryption (SSE) was enabled during this operation.
Encrypted *bool `type:"boolean"`
// The number of records that might have failed processing. This number might
// be greater than 0 even if the PutRecordBatch call succeeds. Check FailedPutCount
// to determine whether there are records that you need to resend.
//
// FailedPutCount is a required field
FailedPutCount *int64 `type:"integer" required:"true"`
// The results array. For each record, the index of the response element is
// the same as the index used in the request array.
//
// RequestResponses is a required field
RequestResponses []*PutRecordBatchResponseEntry `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s PutRecordBatchOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordBatchOutput) GoString() string {
return s.String()
}
// SetEncrypted sets the Encrypted field's value.
func (s *PutRecordBatchOutput) SetEncrypted(v bool) *PutRecordBatchOutput {
s.Encrypted = &v
return s
}
// SetFailedPutCount sets the FailedPutCount field's value.
func (s *PutRecordBatchOutput) SetFailedPutCount(v int64) *PutRecordBatchOutput {
s.FailedPutCount = &v
return s
}
// SetRequestResponses sets the RequestResponses field's value.
func (s *PutRecordBatchOutput) SetRequestResponses(v []*PutRecordBatchResponseEntry) *PutRecordBatchOutput {
s.RequestResponses = v
return s
}
// Contains the result for an individual record from a PutRecordBatch request.
// If the record is successfully added to your delivery stream, it receives
// a record ID. If the record fails to be added to your delivery stream, the
// result includes an error code and an error message.
type PutRecordBatchResponseEntry struct {
_ struct{} `type:"structure"`
// The error code for an individual record result.
ErrorCode *string `type:"string"`
// The error message for an individual record result.
ErrorMessage *string `type:"string"`
// The ID of the record.
RecordId *string `min:"1" type:"string"`
}
// String returns the string representation
func (s PutRecordBatchResponseEntry) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordBatchResponseEntry) GoString() string {
return s.String()
}
// SetErrorCode sets the ErrorCode field's value.
func (s *PutRecordBatchResponseEntry) SetErrorCode(v string) *PutRecordBatchResponseEntry {
s.ErrorCode = &v
return s
}
// SetErrorMessage sets the ErrorMessage field's value.
func (s *PutRecordBatchResponseEntry) SetErrorMessage(v string) *PutRecordBatchResponseEntry {
s.ErrorMessage = &v
return s
}
// SetRecordId sets the RecordId field's value.
func (s *PutRecordBatchResponseEntry) SetRecordId(v string) *PutRecordBatchResponseEntry {
s.RecordId = &v
return s
}
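// Usage sketch (illustrative): sending a batch and collecting the entries that
// need to be resent. The svc value is assumed to be a configured *Firehose
// client and records a prepared []*firehose.Record; per the documentation
// above, a non-zero FailedPutCount means individual entries carry an ErrorCode
// even though the call itself succeeded.
//
//    out, err := svc.PutRecordBatch((&firehose.PutRecordBatchInput{}).
//        SetDeliveryStreamName("example-stream").
//        SetRecords(records))
//    if err != nil {
//        return err
//    }
//    if aws.Int64Value(out.FailedPutCount) > 0 {
//        var retry []*firehose.Record
//        for i, entry := range out.RequestResponses {
//            if entry.ErrorCode != nil {
//                retry = append(retry, records[i])
//            }
//        }
//        // Resend retry with another PutRecordBatch call, ideally with backoff.
//    }
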
type PutRecordInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// The record.
//
// Record is a required field
Record *Record `type:"structure" required:"true"`
}
// String returns the string representation
func (s PutRecordInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PutRecordInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PutRecordInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.Record == nil {
invalidParams.Add(request.NewErrParamRequired("Record"))
}
if s.Record != nil {
if err := s.Record.Validate(); err != nil {
invalidParams.AddNested("Record", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *PutRecordInput) SetDeliveryStreamName(v string) *PutRecordInput {
s.DeliveryStreamName = &v
return s
}
// SetRecord sets the Record field's value.
func (s *PutRecordInput) SetRecord(v *Record) *PutRecordInput {
s.Record = v
return s
}
type PutRecordOutput struct {
_ struct{} `type:"structure"`
// Indicates whether server-side encryption (SSE) was enabled during this operation.
Encrypted *bool `type:"boolean"`
// The ID of the record.
//
// RecordId is a required field
RecordId *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s PutRecordOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PutRecordOutput) GoString() string {
return s.String()
}
// SetEncrypted sets the Encrypted field's value.
func (s *PutRecordOutput) SetEncrypted(v bool) *PutRecordOutput {
s.Encrypted = &v
return s
}
// SetRecordId sets the RecordId field's value.
func (s *PutRecordOutput) SetRecordId(v string) *PutRecordOutput {
s.RecordId = &v
return s
}
// The unit of data in a delivery stream.
type Record struct {
_ struct{} `type:"structure"`
// The data blob, which is base64-encoded when the blob is serialized. The maximum
// size of the data blob, before base64-encoding, is 1,000 KiB.
//
// Data is automatically base64 encoded/decoded by the SDK.
//
// Data is a required field
Data []byte `type:"blob" required:"true"`
}
// String returns the string representation
func (s Record) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Record) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Record) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Record"}
if s.Data == nil {
invalidParams.Add(request.NewErrParamRequired("Data"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetData sets the Data field's value.
func (s *Record) SetData(v []byte) *Record {
s.Data = v
return s
}
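// Usage sketch (illustrative): writing a single record. The svc value is
// assumed to be a configured *Firehose client and "example-stream" is a
// placeholder name; a trailing newline is commonly appended so that records
// are separated once they arrive at the destination.
//
//    rec := &firehose.Record{}
//    rec.SetData([]byte(`{"event":"click","user":"abc"}` + "\n"))
//
//    out, err := svc.PutRecord((&firehose.PutRecordInput{}).
//        SetDeliveryStreamName("example-stream").
//        SetRecord(rec))
//    if err != nil {
//        return err
//    }
//    fmt.Println("record id:", aws.StringValue(out.RecordId))
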
// Describes the configuration of a destination in Amazon Redshift.
type RedshiftDestinationConfiguration struct {
_ struct{} `type:"structure"`
// The CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The database connection string.
//
// ClusterJDBCURL is a required field
ClusterJDBCURL *string `min:"1" type:"string" required:"true"`
// The COPY command.
//
// CopyCommand is a required field
CopyCommand *CopyCommand `type:"structure" required:"true"`
// The user password.
//
// Password is a required field
Password *string `min:"6" type:"string" required:"true" sensitive:"true"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver documents
// to Amazon Redshift. The default value is 3600 seconds (60 minutes).
RetryOptions *RedshiftRetryOptions `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
// The configuration for backup in Amazon S3.
S3BackupConfiguration *S3DestinationConfiguration `type:"structure"`
// The Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"RedshiftS3BackupMode"`
// The configuration for the intermediate Amazon S3 location from which Amazon
// Redshift obtains data. Restrictions are described in the topic for CreateDeliveryStream.
//
// The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration
// because the Amazon Redshift COPY operation that reads from the S3 bucket
// doesn't support these compression formats.
//
// S3Configuration is a required field
S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"`
// The name of the user.
//
// Username is a required field
Username *string `min:"1" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation
func (s RedshiftDestinationConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RedshiftDestinationConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDestinationConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RedshiftDestinationConfiguration"}
if s.ClusterJDBCURL == nil {
invalidParams.Add(request.NewErrParamRequired("ClusterJDBCURL"))
}
if s.ClusterJDBCURL != nil && len(*s.ClusterJDBCURL) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ClusterJDBCURL", 1))
}
if s.CopyCommand == nil {
invalidParams.Add(request.NewErrParamRequired("CopyCommand"))
}
if s.Password == nil {
invalidParams.Add(request.NewErrParamRequired("Password"))
}
if s.Password != nil && len(*s.Password) < 6 {
invalidParams.Add(request.NewErrParamMinLen("Password", 6))
}
if s.RoleARN == nil {
invalidParams.Add(request.NewErrParamRequired("RoleARN"))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.S3Configuration == nil {
invalidParams.Add(request.NewErrParamRequired("S3Configuration"))
}
if s.Username == nil {
invalidParams.Add(request.NewErrParamRequired("Username"))
}
if s.Username != nil && len(*s.Username) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Username", 1))
}
if s.CopyCommand != nil {
if err := s.CopyCommand.Validate(); err != nil {
invalidParams.AddNested("CopyCommand", err.(request.ErrInvalidParams))
}
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3BackupConfiguration != nil {
if err := s.S3BackupConfiguration.Validate(); err != nil {
invalidParams.AddNested("S3BackupConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3Configuration != nil {
if err := s.S3Configuration.Validate(); err != nil {
invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *RedshiftDestinationConfiguration) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *RedshiftDestinationConfiguration {
s.CloudWatchLoggingOptions = v
return s
}
// SetClusterJDBCURL sets the ClusterJDBCURL field's value.
func (s *RedshiftDestinationConfiguration) SetClusterJDBCURL(v string) *RedshiftDestinationConfiguration {
s.ClusterJDBCURL = &v
return s
}
// SetCopyCommand sets the CopyCommand field's value.
func (s *RedshiftDestinationConfiguration) SetCopyCommand(v *CopyCommand) *RedshiftDestinationConfiguration {
s.CopyCommand = v
return s
}
// SetPassword sets the Password field's value.
func (s *RedshiftDestinationConfiguration) SetPassword(v string) *RedshiftDestinationConfiguration {
s.Password = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *RedshiftDestinationConfiguration) SetProcessingConfiguration(v *ProcessingConfiguration) *RedshiftDestinationConfiguration {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *RedshiftDestinationConfiguration) SetRetryOptions(v *RedshiftRetryOptions) *RedshiftDestinationConfiguration {
s.RetryOptions = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *RedshiftDestinationConfiguration) SetRoleARN(v string) *RedshiftDestinationConfiguration {
s.RoleARN = &v
return s
}
// SetS3BackupConfiguration sets the S3BackupConfiguration field's value.
func (s *RedshiftDestinationConfiguration) SetS3BackupConfiguration(v *S3DestinationConfiguration) *RedshiftDestinationConfiguration {
s.S3BackupConfiguration = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *RedshiftDestinationConfiguration) SetS3BackupMode(v string) *RedshiftDestinationConfiguration {
s.S3BackupMode = &v
return s
}
// SetS3Configuration sets the S3Configuration field's value.
func (s *RedshiftDestinationConfiguration) SetS3Configuration(v *S3DestinationConfiguration) *RedshiftDestinationConfiguration {
s.S3Configuration = v
return s
}
// SetUsername sets the Username field's value.
func (s *RedshiftDestinationConfiguration) SetUsername(v string) *RedshiftDestinationConfiguration {
s.Username = &v
return s
}
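// Example usage (illustrative sketch, not generated code): assembling a
// RedshiftDestinationConfiguration for CreateDeliveryStream. The JDBC URL,
// credentials, and ARNs are placeholders, and "client" is an assumed,
// already-configured Firehose client. The intermediate S3Configuration is
// required because the Redshift COPY operation reads the data from S3.
//
//    redshiftConf := &RedshiftDestinationConfiguration{
//        ClusterJDBCURL: aws.String("jdbc:redshift://example-cluster.abc123.us-east-1.redshift.amazonaws.com:5439/dev"),
//        CopyCommand:    &CopyCommand{DataTableName: aws.String("firehose_events")},
//        Username:       aws.String("firehose_user"),
//        Password:       aws.String("examplePassword1"),
//        RoleARN:        aws.String("arn:aws:iam::123456789012:role/firehose-delivery-role"),
//        S3Configuration: &S3DestinationConfiguration{
//            BucketARN: aws.String("arn:aws:s3:::example-staging-bucket"),
//            RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-delivery-role"),
//            // SNAPPY and ZIP are not accepted here because Redshift COPY can't read them.
//            CompressionFormat: aws.String(CompressionFormatGzip),
//        },
//    }
//    _, err := client.CreateDeliveryStream(&CreateDeliveryStreamInput{
//        DeliveryStreamName:               aws.String("example-redshift-stream"),
//        RedshiftDestinationConfiguration: redshiftConf,
//    })
//    if err != nil {
//        fmt.Println(err)
//    }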
// Describes a destination in Amazon Redshift.
type RedshiftDestinationDescription struct {
_ struct{} `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The database connection string.
//
// ClusterJDBCURL is a required field
ClusterJDBCURL *string `min:"1" type:"string" required:"true"`
// The COPY command.
//
// CopyCommand is a required field
CopyCommand *CopyCommand `type:"structure" required:"true"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver documents
// to Amazon Redshift. The default value is 3600 seconds (60 minutes).
RetryOptions *RedshiftRetryOptions `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
// The configuration for backup in Amazon S3.
S3BackupDescription *S3DestinationDescription `type:"structure"`
// The Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"RedshiftS3BackupMode"`
// The Amazon S3 destination.
//
// S3DestinationDescription is a required field
S3DestinationDescription *S3DestinationDescription `type:"structure" required:"true"`
// The name of the user.
//
// Username is a required field
Username *string `min:"1" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation
func (s RedshiftDestinationDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RedshiftDestinationDescription) GoString() string {
return s.String()
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *RedshiftDestinationDescription) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *RedshiftDestinationDescription {
s.CloudWatchLoggingOptions = v
return s
}
// SetClusterJDBCURL sets the ClusterJDBCURL field's value.
func (s *RedshiftDestinationDescription) SetClusterJDBCURL(v string) *RedshiftDestinationDescription {
s.ClusterJDBCURL = &v
return s
}
// SetCopyCommand sets the CopyCommand field's value.
func (s *RedshiftDestinationDescription) SetCopyCommand(v *CopyCommand) *RedshiftDestinationDescription {
s.CopyCommand = v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *RedshiftDestinationDescription) SetProcessingConfiguration(v *ProcessingConfiguration) *RedshiftDestinationDescription {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *RedshiftDestinationDescription) SetRetryOptions(v *RedshiftRetryOptions) *RedshiftDestinationDescription {
s.RetryOptions = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *RedshiftDestinationDescription) SetRoleARN(v string) *RedshiftDestinationDescription {
s.RoleARN = &v
return s
}
// SetS3BackupDescription sets the S3BackupDescription field's value.
func (s *RedshiftDestinationDescription) SetS3BackupDescription(v *S3DestinationDescription) *RedshiftDestinationDescription {
s.S3BackupDescription = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *RedshiftDestinationDescription) SetS3BackupMode(v string) *RedshiftDestinationDescription {
s.S3BackupMode = &v
return s
}
// SetS3DestinationDescription sets the S3DestinationDescription field's value.
func (s *RedshiftDestinationDescription) SetS3DestinationDescription(v *S3DestinationDescription) *RedshiftDestinationDescription {
s.S3DestinationDescription = v
return s
}
// SetUsername sets the Username field's value.
func (s *RedshiftDestinationDescription) SetUsername(v string) *RedshiftDestinationDescription {
s.Username = &v
return s
}
// Describes an update for a destination in Amazon Redshift.
type RedshiftDestinationUpdate struct {
_ struct{} `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The database connection string.
ClusterJDBCURL *string `min:"1" type:"string"`
// The COPY command.
CopyCommand *CopyCommand `type:"structure"`
// The user password.
Password *string `min:"6" type:"string" sensitive:"true"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver documents
// to Amazon Redshift. The default value is 3600 seconds (60 minutes).
RetryOptions *RedshiftRetryOptions `type:"structure"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
RoleARN *string `min:"1" type:"string"`
// The Amazon S3 backup mode.
S3BackupMode *string `type:"string" enum:"RedshiftS3BackupMode"`
// The Amazon S3 destination for backup.
S3BackupUpdate *S3DestinationUpdate `type:"structure"`
// The Amazon S3 destination.
//
// The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationUpdate.S3Update
// because the Amazon Redshift COPY operation that reads from the S3 bucket
// doesn't support these compression formats.
S3Update *S3DestinationUpdate `type:"structure"`
// The name of the user.
Username *string `min:"1" type:"string" sensitive:"true"`
}
// String returns the string representation
func (s RedshiftDestinationUpdate) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RedshiftDestinationUpdate) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RedshiftDestinationUpdate) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RedshiftDestinationUpdate"}
if s.ClusterJDBCURL != nil && len(*s.ClusterJDBCURL) < 1 {
invalidParams.Add(request.NewErrParamMinLen("ClusterJDBCURL", 1))
}
if s.Password != nil && len(*s.Password) < 6 {
invalidParams.Add(request.NewErrParamMinLen("Password", 6))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.Username != nil && len(*s.Username) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Username", 1))
}
if s.CopyCommand != nil {
if err := s.CopyCommand.Validate(); err != nil {
invalidParams.AddNested("CopyCommand", err.(request.ErrInvalidParams))
}
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3BackupUpdate != nil {
if err := s.S3BackupUpdate.Validate(); err != nil {
invalidParams.AddNested("S3BackupUpdate", err.(request.ErrInvalidParams))
}
}
if s.S3Update != nil {
if err := s.S3Update.Validate(); err != nil {
invalidParams.AddNested("S3Update", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *RedshiftDestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *RedshiftDestinationUpdate {
s.CloudWatchLoggingOptions = v
return s
}
// SetClusterJDBCURL sets the ClusterJDBCURL field's value.
func (s *RedshiftDestinationUpdate) SetClusterJDBCURL(v string) *RedshiftDestinationUpdate {
s.ClusterJDBCURL = &v
return s
}
// SetCopyCommand sets the CopyCommand field's value.
func (s *RedshiftDestinationUpdate) SetCopyCommand(v *CopyCommand) *RedshiftDestinationUpdate {
s.CopyCommand = v
return s
}
// SetPassword sets the Password field's value.
func (s *RedshiftDestinationUpdate) SetPassword(v string) *RedshiftDestinationUpdate {
s.Password = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *RedshiftDestinationUpdate) SetProcessingConfiguration(v *ProcessingConfiguration) *RedshiftDestinationUpdate {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *RedshiftDestinationUpdate) SetRetryOptions(v *RedshiftRetryOptions) *RedshiftDestinationUpdate {
s.RetryOptions = v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *RedshiftDestinationUpdate) SetRoleARN(v string) *RedshiftDestinationUpdate {
s.RoleARN = &v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *RedshiftDestinationUpdate) SetS3BackupMode(v string) *RedshiftDestinationUpdate {
s.S3BackupMode = &v
return s
}
// SetS3BackupUpdate sets the S3BackupUpdate field's value.
func (s *RedshiftDestinationUpdate) SetS3BackupUpdate(v *S3DestinationUpdate) *RedshiftDestinationUpdate {
s.S3BackupUpdate = v
return s
}
// SetS3Update sets the S3Update field's value.
func (s *RedshiftDestinationUpdate) SetS3Update(v *S3DestinationUpdate) *RedshiftDestinationUpdate {
s.S3Update = v
return s
}
// SetUsername sets the Username field's value.
func (s *RedshiftDestinationUpdate) SetUsername(v string) *RedshiftDestinationUpdate {
s.Username = &v
return s
}
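// Example usage (illustrative sketch, not generated code): rotating the
// Redshift password through UpdateDestination. Only the fields being changed
// need to be set on the update struct. The version ID and destination ID are
// placeholders that would normally come from a prior DescribeDeliveryStream
// call, and "client" is an assumed, already-configured Firehose client.
//
//    _, err := client.UpdateDestination(&UpdateDestinationInput{
//        DeliveryStreamName:             aws.String("example-redshift-stream"),
//        CurrentDeliveryStreamVersionId: aws.String("1"),
//        DestinationId:                  aws.String("destinationId-000000000001"),
//        RedshiftDestinationUpdate: &RedshiftDestinationUpdate{
//            Password: aws.String("newExamplePassword1"),
//        },
//    })
//    if err != nil {
//        fmt.Println(err)
//    }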
// Configures retry behavior in case Kinesis Data Firehose is unable to deliver
// documents to Amazon Redshift.
type RedshiftRetryOptions struct {
_ struct{} `type:"structure"`
// The length of time during which Kinesis Data Firehose retries delivery after
// a failure, starting from the initial request and including the first attempt.
// The default value is 3600 seconds (60 minutes). Kinesis Data Firehose does
// not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery
// attempt takes longer than the current value.
DurationInSeconds *int64 `type:"integer"`
}
// String returns the string representation
func (s RedshiftRetryOptions) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RedshiftRetryOptions) GoString() string {
return s.String()
}
// SetDurationInSeconds sets the DurationInSeconds field's value.
func (s *RedshiftRetryOptions) SetDurationInSeconds(v int64) *RedshiftRetryOptions {
s.DurationInSeconds = &v
return s
}
// Describes the configuration of a destination in Amazon S3.
type S3DestinationConfiguration struct {
_ struct{} `type:"structure"`
// The ARN of the S3 bucket. For more information, see Amazon Resource Names
// (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// BucketARN is a required field
BucketARN *string `min:"1" type:"string" required:"true"`
// The buffering option. If no value is specified, BufferingHints object default
// values are used.
BufferingHints *BufferingHints `type:"structure"`
// The CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The compression format. If no value is specified, the default is UNCOMPRESSED.
//
// The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift
// destinations because they are not supported by the Amazon Redshift COPY operation
// that reads from the S3 bucket.
CompressionFormat *string `type:"string" enum:"CompressionFormat"`
// The encryption configuration. If no value is specified, the default is no
// encryption.
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
// A prefix that Kinesis Data Firehose evaluates and adds to failed records
// before writing them to S3. This prefix appears immediately following the
// bucket name.
ErrorOutputPrefix *string `type:"string"`
// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
// Amazon S3 files. You can specify an extra prefix to be added in front of
// the time format prefix. If the prefix ends with a slash, it appears as a
// folder in the S3 bucket. For more information, see Amazon S3 Object Name
// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name)
// in the Amazon Kinesis Data Firehose Developer Guide.
Prefix *string `type:"string"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s S3DestinationConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s S3DestinationConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *S3DestinationConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "S3DestinationConfiguration"}
if s.BucketARN == nil {
invalidParams.Add(request.NewErrParamRequired("BucketARN"))
}
if s.BucketARN != nil && len(*s.BucketARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BucketARN", 1))
}
if s.RoleARN == nil {
invalidParams.Add(request.NewErrParamRequired("RoleARN"))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.BufferingHints != nil {
if err := s.BufferingHints.Validate(); err != nil {
invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
}
}
if s.EncryptionConfiguration != nil {
if err := s.EncryptionConfiguration.Validate(); err != nil {
invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBucketARN sets the BucketARN field's value.
func (s *S3DestinationConfiguration) SetBucketARN(v string) *S3DestinationConfiguration {
s.BucketARN = &v
return s
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *S3DestinationConfiguration) SetBufferingHints(v *BufferingHints) *S3DestinationConfiguration {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *S3DestinationConfiguration) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *S3DestinationConfiguration {
s.CloudWatchLoggingOptions = v
return s
}
// SetCompressionFormat sets the CompressionFormat field's value.
func (s *S3DestinationConfiguration) SetCompressionFormat(v string) *S3DestinationConfiguration {
s.CompressionFormat = &v
return s
}
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *S3DestinationConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *S3DestinationConfiguration {
s.EncryptionConfiguration = v
return s
}
// SetErrorOutputPrefix sets the ErrorOutputPrefix field's value.
func (s *S3DestinationConfiguration) SetErrorOutputPrefix(v string) *S3DestinationConfiguration {
s.ErrorOutputPrefix = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *S3DestinationConfiguration) SetPrefix(v string) *S3DestinationConfiguration {
s.Prefix = &v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *S3DestinationConfiguration) SetRoleARN(v string) *S3DestinationConfiguration {
s.RoleARN = &v
return s
}
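// Example usage (illustrative sketch, not generated code): a minimal
// S3DestinationConfiguration with explicit buffering hints and GZIP
// compression. The bucket and role ARNs are placeholders.
//
//    s3Conf := &S3DestinationConfiguration{
//        BucketARN: aws.String("arn:aws:s3:::example-delivery-bucket"),
//        RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-delivery-role"),
//        Prefix:    aws.String("events/"),
//        BufferingHints: &BufferingHints{
//            SizeInMBs:         aws.Int64(5),
//            IntervalInSeconds: aws.Int64(300),
//        },
//        CompressionFormat: aws.String(CompressionFormatGzip),
//    }
//    if err := s3Conf.Validate(); err != nil {
//        fmt.Println(err) // client-side validation failure
//    }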
// Describes a destination in Amazon S3.
type S3DestinationDescription struct {
_ struct{} `type:"structure"`
// The ARN of the S3 bucket. For more information, see Amazon Resource Names
// (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// BucketARN is a required field
BucketARN *string `min:"1" type:"string" required:"true"`
// The buffering option. If no value is specified, BufferingHints object default
// values are used.
//
// BufferingHints is a required field
BufferingHints *BufferingHints `type:"structure" required:"true"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The compression format. If no value is specified, the default is UNCOMPRESSED.
//
// CompressionFormat is a required field
CompressionFormat *string `type:"string" required:"true" enum:"CompressionFormat"`
// The encryption configuration. If no value is specified, the default is no
// encryption.
//
// EncryptionConfiguration is a required field
EncryptionConfiguration *EncryptionConfiguration `type:"structure" required:"true"`
// A prefix that Kinesis Data Firehose evaluates and adds to failed records
// before writing them to S3. This prefix appears immediately following the
// bucket name.
ErrorOutputPrefix *string `type:"string"`
// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
// Amazon S3 files. You can specify an extra prefix to be added in front of
// the time format prefix. If the prefix ends with a slash, it appears as a
// folder in the S3 bucket. For more information, see Amazon S3 Object Name
// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name)
// in the Amazon Kinesis Data Firehose Developer Guide.
Prefix *string `type:"string"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// RoleARN is a required field
RoleARN *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s S3DestinationDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s S3DestinationDescription) GoString() string {
return s.String()
}
// SetBucketARN sets the BucketARN field's value.
func (s *S3DestinationDescription) SetBucketARN(v string) *S3DestinationDescription {
s.BucketARN = &v
return s
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *S3DestinationDescription) SetBufferingHints(v *BufferingHints) *S3DestinationDescription {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *S3DestinationDescription) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *S3DestinationDescription {
s.CloudWatchLoggingOptions = v
return s
}
// SetCompressionFormat sets the CompressionFormat field's value.
func (s *S3DestinationDescription) SetCompressionFormat(v string) *S3DestinationDescription {
s.CompressionFormat = &v
return s
}
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *S3DestinationDescription) SetEncryptionConfiguration(v *EncryptionConfiguration) *S3DestinationDescription {
s.EncryptionConfiguration = v
return s
}
// SetErrorOutputPrefix sets the ErrorOutputPrefix field's value.
func (s *S3DestinationDescription) SetErrorOutputPrefix(v string) *S3DestinationDescription {
s.ErrorOutputPrefix = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *S3DestinationDescription) SetPrefix(v string) *S3DestinationDescription {
s.Prefix = &v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *S3DestinationDescription) SetRoleARN(v string) *S3DestinationDescription {
s.RoleARN = &v
return s
}
// Describes an update for a destination in Amazon S3.
type S3DestinationUpdate struct {
_ struct{} `type:"structure"`
// The ARN of the S3 bucket. For more information, see Amazon Resource Names
// (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
BucketARN *string `min:"1" type:"string"`
// The buffering option. If no value is specified, BufferingHints object default
// values are used.
BufferingHints *BufferingHints `type:"structure"`
// The CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The compression format. If no value is specified, the default is UNCOMPRESSED.
//
// The compression formats SNAPPY or ZIP cannot be specified for Amazon Redshift
// destinations because they are not supported by the Amazon Redshift COPY operation
// that reads from the S3 bucket.
CompressionFormat *string `type:"string" enum:"CompressionFormat"`
// The encryption configuration. If no value is specified, the default is no
// encryption.
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
// A prefix that Kinesis Data Firehose evaluates and adds to failed records
// before writing them to S3. This prefix appears immediately following the
// bucket name.
ErrorOutputPrefix *string `type:"string"`
// The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered
// Amazon S3 files. You can specify an extra prefix to be added in front of
// the time format prefix. If the prefix ends with a slash, it appears as a
// folder in the S3 bucket. For more information, see Amazon S3 Object Name
// Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#s3-object-name)
// in the Amazon Kinesis Data Firehose Developer Guide.
Prefix *string `type:"string"`
// The Amazon Resource Name (ARN) of the AWS credentials. For more information,
// see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
RoleARN *string `min:"1" type:"string"`
}
// String returns the string representation
func (s S3DestinationUpdate) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s S3DestinationUpdate) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *S3DestinationUpdate) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "S3DestinationUpdate"}
if s.BucketARN != nil && len(*s.BucketARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("BucketARN", 1))
}
if s.RoleARN != nil && len(*s.RoleARN) < 1 {
invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1))
}
if s.BufferingHints != nil {
if err := s.BufferingHints.Validate(); err != nil {
invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams))
}
}
if s.EncryptionConfiguration != nil {
if err := s.EncryptionConfiguration.Validate(); err != nil {
invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetBucketARN sets the BucketARN field's value.
func (s *S3DestinationUpdate) SetBucketARN(v string) *S3DestinationUpdate {
s.BucketARN = &v
return s
}
// SetBufferingHints sets the BufferingHints field's value.
func (s *S3DestinationUpdate) SetBufferingHints(v *BufferingHints) *S3DestinationUpdate {
s.BufferingHints = v
return s
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *S3DestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *S3DestinationUpdate {
s.CloudWatchLoggingOptions = v
return s
}
// SetCompressionFormat sets the CompressionFormat field's value.
func (s *S3DestinationUpdate) SetCompressionFormat(v string) *S3DestinationUpdate {
s.CompressionFormat = &v
return s
}
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *S3DestinationUpdate) SetEncryptionConfiguration(v *EncryptionConfiguration) *S3DestinationUpdate {
s.EncryptionConfiguration = v
return s
}
// SetErrorOutputPrefix sets the ErrorOutputPrefix field's value.
func (s *S3DestinationUpdate) SetErrorOutputPrefix(v string) *S3DestinationUpdate {
s.ErrorOutputPrefix = &v
return s
}
// SetPrefix sets the Prefix field's value.
func (s *S3DestinationUpdate) SetPrefix(v string) *S3DestinationUpdate {
s.Prefix = &v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *S3DestinationUpdate) SetRoleARN(v string) *S3DestinationUpdate {
s.RoleARN = &v
return s
}
// Specifies the schema that you want Kinesis Data Firehose to use to configure
// your data before it writes it to Amazon S3.
type SchemaConfiguration struct {
_ struct{} `type:"structure"`
// The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account
// ID is used by default.
CatalogId *string `type:"string"`
// Specifies the name of the AWS Glue database that contains the schema for
// the output data.
DatabaseName *string `type:"string"`
// If you don't specify an AWS Region, the default is the current Region.
Region *string `type:"string"`
// The role that Kinesis Data Firehose can use to access AWS Glue. This role
// must be in the same account you use for Kinesis Data Firehose. Cross-account
// roles aren't allowed.
RoleARN *string `type:"string"`
// Specifies the AWS Glue table that contains the column information that constitutes
// your data schema.
TableName *string `type:"string"`
// Specifies the table version for the output data schema. If you don't specify
// this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the
// most recent version. This means that any updates to the table are automatically
// picked up.
VersionId *string `type:"string"`
}
// String returns the string representation
func (s SchemaConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SchemaConfiguration) GoString() string {
return s.String()
}
// SetCatalogId sets the CatalogId field's value.
func (s *SchemaConfiguration) SetCatalogId(v string) *SchemaConfiguration {
s.CatalogId = &v
return s
}
// SetDatabaseName sets the DatabaseName field's value.
func (s *SchemaConfiguration) SetDatabaseName(v string) *SchemaConfiguration {
s.DatabaseName = &v
return s
}
// SetRegion sets the Region field's value.
func (s *SchemaConfiguration) SetRegion(v string) *SchemaConfiguration {
s.Region = &v
return s
}
// SetRoleARN sets the RoleARN field's value.
func (s *SchemaConfiguration) SetRoleARN(v string) *SchemaConfiguration {
s.RoleARN = &v
return s
}
// SetTableName sets the TableName field's value.
func (s *SchemaConfiguration) SetTableName(v string) *SchemaConfiguration {
s.TableName = &v
return s
}
// SetVersionId sets the VersionId field's value.
func (s *SchemaConfiguration) SetVersionId(v string) *SchemaConfiguration {
s.VersionId = &v
return s
}
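// Example usage (illustrative sketch, not generated code): a SchemaConfiguration
// that points Kinesis Data Firehose at an AWS Glue table for record format
// conversion. The database, table, role, and Region values are placeholders;
// VersionId is left at LATEST so later table updates are picked up automatically.
//
//    schemaConf := &SchemaConfiguration{
//        DatabaseName: aws.String("example_glue_db"),
//        TableName:    aws.String("example_events"),
//        RoleARN:      aws.String("arn:aws:iam::123456789012:role/firehose-glue-role"),
//        Region:       aws.String("us-east-1"),
//        VersionId:    aws.String("LATEST"),
//    }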
// The serializer that you want Kinesis Data Firehose to use to convert data
// to the target format before writing it to Amazon S3. Kinesis Data Firehose
// supports two types of serializers: the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html)
// and the Parquet SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.html).
type Serializer struct {
_ struct{} `type:"structure"`
// A serializer to use for converting data to the ORC format before storing
// it in Amazon S3. For more information, see Apache ORC (https://orc.apache.org/docs/).
OrcSerDe *OrcSerDe `type:"structure"`
// A serializer to use for converting data to the Parquet format before storing
// it in Amazon S3. For more information, see Apache Parquet (https://parquet.apache.org/documentation/latest/).
ParquetSerDe *ParquetSerDe `type:"structure"`
}
// String returns the string representation
func (s Serializer) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Serializer) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Serializer) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Serializer"}
if s.OrcSerDe != nil {
if err := s.OrcSerDe.Validate(); err != nil {
invalidParams.AddNested("OrcSerDe", err.(request.ErrInvalidParams))
}
}
if s.ParquetSerDe != nil {
if err := s.ParquetSerDe.Validate(); err != nil {
invalidParams.AddNested("ParquetSerDe", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetOrcSerDe sets the OrcSerDe field's value.
func (s *Serializer) SetOrcSerDe(v *OrcSerDe) *Serializer {
s.OrcSerDe = v
return s
}
// SetParquetSerDe sets the ParquetSerDe field's value.
func (s *Serializer) SetParquetSerDe(v *ParquetSerDe) *Serializer {
s.ParquetSerDe = v
return s
}
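// Example usage (illustrative sketch, not generated code): selecting the
// Parquet SerDe with its default settings. A Serializer is typically carried
// inside the OutputFormatConfiguration of an extended S3 destination's record
// format conversion settings.
//
//    outputFormat := &OutputFormatConfiguration{
//        Serializer: &Serializer{
//            ParquetSerDe: &ParquetSerDe{},
//        },
//    }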
// Details about a Kinesis data stream used as the source for a Kinesis Data
// Firehose delivery stream.
type SourceDescription struct {
_ struct{} `type:"structure"`
// The KinesisStreamSourceDescription value for the source Kinesis data stream.
KinesisStreamSourceDescription *KinesisStreamSourceDescription `type:"structure"`
}
// String returns the string representation
func (s SourceDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SourceDescription) GoString() string {
return s.String()
}
// SetKinesisStreamSourceDescription sets the KinesisStreamSourceDescription field's value.
func (s *SourceDescription) SetKinesisStreamSourceDescription(v *KinesisStreamSourceDescription) *SourceDescription {
s.KinesisStreamSourceDescription = v
return s
}
// Describes the configuration of a destination in Splunk.
type SplunkDestinationConfiguration struct {
_ struct{} `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The amount of time that Kinesis Data Firehose waits to receive an acknowledgment
// from Splunk after it sends data. At the end of the timeout period, Kinesis
// Data Firehose either tries to send the data again or considers it an error,
// based on your retry settings.
HECAcknowledgmentTimeoutInSeconds *int64 `min:"180" type:"integer"`
// The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends
// your data.
//
// HECEndpoint is a required field
HECEndpoint *string `type:"string" required:"true"`
// This type can be either "Raw" or "Event."
//
// HECEndpointType is a required field
HECEndpointType *string `type:"string" required:"true" enum:"HECEndpointType"`
// This is a GUID that you obtain from your Splunk cluster when you create a
// new HEC endpoint.
//
// HECToken is a required field
HECToken *string `type:"string" required:"true"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver data
// to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk.
RetryOptions *SplunkRetryOptions `type:"structure"`
// Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly,
// Kinesis Data Firehose writes any data that could not be indexed to the configured
// Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers
// all incoming records to Amazon S3, and also writes failed documents to Amazon
// S3. Default value is FailedDocumentsOnly.
S3BackupMode *string `type:"string" enum:"SplunkS3BackupMode"`
// The configuration for the backup Amazon S3 location.
//
// S3Configuration is a required field
S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"`
}
// String returns the string representation
func (s SplunkDestinationConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SplunkDestinationConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *SplunkDestinationConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "SplunkDestinationConfiguration"}
if s.HECAcknowledgmentTimeoutInSeconds != nil && *s.HECAcknowledgmentTimeoutInSeconds < 180 {
invalidParams.Add(request.NewErrParamMinValue("HECAcknowledgmentTimeoutInSeconds", 180))
}
if s.HECEndpoint == nil {
invalidParams.Add(request.NewErrParamRequired("HECEndpoint"))
}
if s.HECEndpointType == nil {
invalidParams.Add(request.NewErrParamRequired("HECEndpointType"))
}
if s.HECToken == nil {
invalidParams.Add(request.NewErrParamRequired("HECToken"))
}
if s.S3Configuration == nil {
invalidParams.Add(request.NewErrParamRequired("S3Configuration"))
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3Configuration != nil {
if err := s.S3Configuration.Validate(); err != nil {
invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *SplunkDestinationConfiguration) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *SplunkDestinationConfiguration {
s.CloudWatchLoggingOptions = v
return s
}
// SetHECAcknowledgmentTimeoutInSeconds sets the HECAcknowledgmentTimeoutInSeconds field's value.
func (s *SplunkDestinationConfiguration) SetHECAcknowledgmentTimeoutInSeconds(v int64) *SplunkDestinationConfiguration {
s.HECAcknowledgmentTimeoutInSeconds = &v
return s
}
// SetHECEndpoint sets the HECEndpoint field's value.
func (s *SplunkDestinationConfiguration) SetHECEndpoint(v string) *SplunkDestinationConfiguration {
s.HECEndpoint = &v
return s
}
// SetHECEndpointType sets the HECEndpointType field's value.
func (s *SplunkDestinationConfiguration) SetHECEndpointType(v string) *SplunkDestinationConfiguration {
s.HECEndpointType = &v
return s
}
// SetHECToken sets the HECToken field's value.
func (s *SplunkDestinationConfiguration) SetHECToken(v string) *SplunkDestinationConfiguration {
s.HECToken = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *SplunkDestinationConfiguration) SetProcessingConfiguration(v *ProcessingConfiguration) *SplunkDestinationConfiguration {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *SplunkDestinationConfiguration) SetRetryOptions(v *SplunkRetryOptions) *SplunkDestinationConfiguration {
s.RetryOptions = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *SplunkDestinationConfiguration) SetS3BackupMode(v string) *SplunkDestinationConfiguration {
s.S3BackupMode = &v
return s
}
// SetS3Configuration sets the S3Configuration field's value.
func (s *SplunkDestinationConfiguration) SetS3Configuration(v *S3DestinationConfiguration) *SplunkDestinationConfiguration {
s.S3Configuration = v
return s
}
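// Example usage (illustrative sketch, not generated code): a
// SplunkDestinationConfiguration with a raw HEC endpoint and the required S3
// backup location. The endpoint URL, token, and ARNs are placeholders.
//
//    splunkConf := &SplunkDestinationConfiguration{
//        HECEndpoint:                       aws.String("https://splunk-hec.example.com:8088"),
//        HECEndpointType:                   aws.String(HECEndpointTypeRaw),
//        HECToken:                          aws.String("00000000-0000-0000-0000-000000000000"),
//        HECAcknowledgmentTimeoutInSeconds: aws.Int64(300),
//        S3Configuration: &S3DestinationConfiguration{
//            BucketARN: aws.String("arn:aws:s3:::example-splunk-backup-bucket"),
//            RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-delivery-role"),
//        },
//    }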
// Describes a destination in Splunk.
type SplunkDestinationDescription struct {
_ struct{} `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The amount of time that Kinesis Data Firehose waits to receive an acknowledgment
// from Splunk after it sends data. At the end of the timeout period, Kinesis
// Data Firehose either tries to send the data again or considers it an error,
// based on your retry settings.
HECAcknowledgmentTimeoutInSeconds *int64 `min:"180" type:"integer"`
// The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends
// your data.
HECEndpoint *string `type:"string"`
// This type can be either "Raw" or "Event."
HECEndpointType *string `type:"string" enum:"HECEndpointType"`
// A GUID you obtain from your Splunk cluster when you create a new HEC endpoint.
HECToken *string `type:"string"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver data
// to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
RetryOptions *SplunkRetryOptions `type:"structure"`
// Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly,
// Kinesis Data Firehose writes any data that could not be indexed to the configured
// Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers
// all incoming records to Amazon S3, and also writes failed documents to Amazon
// S3. Default value is FailedDocumentsOnly.
S3BackupMode *string `type:"string" enum:"SplunkS3BackupMode"`
// The Amazon S3 destination.
S3DestinationDescription *S3DestinationDescription `type:"structure"`
}
// String returns the string representation
func (s SplunkDestinationDescription) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SplunkDestinationDescription) GoString() string {
return s.String()
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *SplunkDestinationDescription) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *SplunkDestinationDescription {
s.CloudWatchLoggingOptions = v
return s
}
// SetHECAcknowledgmentTimeoutInSeconds sets the HECAcknowledgmentTimeoutInSeconds field's value.
func (s *SplunkDestinationDescription) SetHECAcknowledgmentTimeoutInSeconds(v int64) *SplunkDestinationDescription {
s.HECAcknowledgmentTimeoutInSeconds = &v
return s
}
// SetHECEndpoint sets the HECEndpoint field's value.
func (s *SplunkDestinationDescription) SetHECEndpoint(v string) *SplunkDestinationDescription {
s.HECEndpoint = &v
return s
}
// SetHECEndpointType sets the HECEndpointType field's value.
func (s *SplunkDestinationDescription) SetHECEndpointType(v string) *SplunkDestinationDescription {
s.HECEndpointType = &v
return s
}
// SetHECToken sets the HECToken field's value.
func (s *SplunkDestinationDescription) SetHECToken(v string) *SplunkDestinationDescription {
s.HECToken = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *SplunkDestinationDescription) SetProcessingConfiguration(v *ProcessingConfiguration) *SplunkDestinationDescription {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *SplunkDestinationDescription) SetRetryOptions(v *SplunkRetryOptions) *SplunkDestinationDescription {
s.RetryOptions = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *SplunkDestinationDescription) SetS3BackupMode(v string) *SplunkDestinationDescription {
s.S3BackupMode = &v
return s
}
// SetS3DestinationDescription sets the S3DestinationDescription field's value.
func (s *SplunkDestinationDescription) SetS3DestinationDescription(v *S3DestinationDescription) *SplunkDestinationDescription {
s.S3DestinationDescription = v
return s
}
// Describes an update for a destination in Splunk.
type SplunkDestinationUpdate struct {
_ struct{} `type:"structure"`
// The Amazon CloudWatch logging options for your delivery stream.
CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"`
// The amount of time that Kinesis Data Firehose waits to receive an acknowledgment
// from Splunk after it sends data. At the end of the timeout period, Kinesis
// Data Firehose either tries to send the data again or considers it an error,
// based on your retry settings.
HECAcknowledgmentTimeoutInSeconds *int64 `min:"180" type:"integer"`
// The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends
// your data.
HECEndpoint *string `type:"string"`
// This type can be either "Raw" or "Event."
HECEndpointType *string `type:"string" enum:"HECEndpointType"`
// A GUID that you obtain from your Splunk cluster when you create a new HEC
// endpoint.
HECToken *string `type:"string"`
// The data processing configuration.
ProcessingConfiguration *ProcessingConfiguration `type:"structure"`
// The retry behavior in case Kinesis Data Firehose is unable to deliver data
// to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk.
RetryOptions *SplunkRetryOptions `type:"structure"`
// Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly,
// Kinesis Data Firehose writes any data that could not be indexed to the configured
// Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers
// all incoming records to Amazon S3, and also writes failed documents to Amazon
// S3. Default value is FailedDocumentsOnly.
S3BackupMode *string `type:"string" enum:"SplunkS3BackupMode"`
// Your update to the configuration of the backup Amazon S3 location.
S3Update *S3DestinationUpdate `type:"structure"`
}
// String returns the string representation
func (s SplunkDestinationUpdate) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SplunkDestinationUpdate) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *SplunkDestinationUpdate) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "SplunkDestinationUpdate"}
if s.HECAcknowledgmentTimeoutInSeconds != nil && *s.HECAcknowledgmentTimeoutInSeconds < 180 {
invalidParams.Add(request.NewErrParamMinValue("HECAcknowledgmentTimeoutInSeconds", 180))
}
if s.ProcessingConfiguration != nil {
if err := s.ProcessingConfiguration.Validate(); err != nil {
invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams))
}
}
if s.S3Update != nil {
if err := s.S3Update.Validate(); err != nil {
invalidParams.AddNested("S3Update", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value.
func (s *SplunkDestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *SplunkDestinationUpdate {
s.CloudWatchLoggingOptions = v
return s
}
// SetHECAcknowledgmentTimeoutInSeconds sets the HECAcknowledgmentTimeoutInSeconds field's value.
func (s *SplunkDestinationUpdate) SetHECAcknowledgmentTimeoutInSeconds(v int64) *SplunkDestinationUpdate {
s.HECAcknowledgmentTimeoutInSeconds = &v
return s
}
// SetHECEndpoint sets the HECEndpoint field's value.
func (s *SplunkDestinationUpdate) SetHECEndpoint(v string) *SplunkDestinationUpdate {
s.HECEndpoint = &v
return s
}
// SetHECEndpointType sets the HECEndpointType field's value.
func (s *SplunkDestinationUpdate) SetHECEndpointType(v string) *SplunkDestinationUpdate {
s.HECEndpointType = &v
return s
}
// SetHECToken sets the HECToken field's value.
func (s *SplunkDestinationUpdate) SetHECToken(v string) *SplunkDestinationUpdate {
s.HECToken = &v
return s
}
// SetProcessingConfiguration sets the ProcessingConfiguration field's value.
func (s *SplunkDestinationUpdate) SetProcessingConfiguration(v *ProcessingConfiguration) *SplunkDestinationUpdate {
s.ProcessingConfiguration = v
return s
}
// SetRetryOptions sets the RetryOptions field's value.
func (s *SplunkDestinationUpdate) SetRetryOptions(v *SplunkRetryOptions) *SplunkDestinationUpdate {
s.RetryOptions = v
return s
}
// SetS3BackupMode sets the S3BackupMode field's value.
func (s *SplunkDestinationUpdate) SetS3BackupMode(v string) *SplunkDestinationUpdate {
s.S3BackupMode = &v
return s
}
// SetS3Update sets the S3Update field's value.
func (s *SplunkDestinationUpdate) SetS3Update(v *S3DestinationUpdate) *SplunkDestinationUpdate {
s.S3Update = v
return s
}
// Configures retry behavior in case Kinesis Data Firehose is unable to deliver
// documents to Splunk, or if it doesn't receive an acknowledgment from Splunk.
type SplunkRetryOptions struct {
_ struct{} `type:"structure"`
// The total amount of time that Kinesis Data Firehose spends on retries. This
// duration starts after the initial attempt to send data to Splunk fails. It
// doesn't include the periods during which Kinesis Data Firehose waits for
// acknowledgment from Splunk after each attempt.
DurationInSeconds *int64 `type:"integer"`
}
// String returns the string representation
func (s SplunkRetryOptions) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SplunkRetryOptions) GoString() string {
return s.String()
}
// SetDurationInSeconds sets the DurationInSeconds field's value.
func (s *SplunkRetryOptions) SetDurationInSeconds(v int64) *SplunkRetryOptions {
s.DurationInSeconds = &v
return s
}
type StartDeliveryStreamEncryptionInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream for which you want to enable server-side
// encryption (SSE).
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s StartDeliveryStreamEncryptionInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartDeliveryStreamEncryptionInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StartDeliveryStreamEncryptionInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StartDeliveryStreamEncryptionInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *StartDeliveryStreamEncryptionInput) SetDeliveryStreamName(v string) *StartDeliveryStreamEncryptionInput {
s.DeliveryStreamName = &v
return s
}
type StartDeliveryStreamEncryptionOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s StartDeliveryStreamEncryptionOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StartDeliveryStreamEncryptionOutput) GoString() string {
return s.String()
}
type StopDeliveryStreamEncryptionInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream for which you want to disable server-side
// encryption (SSE).
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
func (s StopDeliveryStreamEncryptionInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StopDeliveryStreamEncryptionInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *StopDeliveryStreamEncryptionInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "StopDeliveryStreamEncryptionInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *StopDeliveryStreamEncryptionInput) SetDeliveryStreamName(v string) *StopDeliveryStreamEncryptionInput {
s.DeliveryStreamName = &v
return s
}
type StopDeliveryStreamEncryptionOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s StopDeliveryStreamEncryptionOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s StopDeliveryStreamEncryptionOutput) GoString() string {
return s.String()
}
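// Example usage (illustrative sketch, not generated code): enabling and later
// disabling server-side encryption on an existing delivery stream. "client" is
// an assumed, already-configured Firehose client; the resulting encryption
// status can be checked with DescribeDeliveryStream.
//
//    _, err := client.StartDeliveryStreamEncryption(&StartDeliveryStreamEncryptionInput{
//        DeliveryStreamName: aws.String("example-stream"),
//    })
//    if err != nil {
//        fmt.Println(err)
//    }
//
//    _, err = client.StopDeliveryStreamEncryption(&StopDeliveryStreamEncryptionInput{
//        DeliveryStreamName: aws.String("example-stream"),
//    })
//    if err != nil {
//        fmt.Println(err)
//    }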
// Metadata that you can assign to a delivery stream, consisting of a key-value
// pair.
type Tag struct {
_ struct{} `type:"structure"`
// A unique identifier for the tag. Maximum length: 128 characters. Valid characters:
// Unicode letters, digits, white space, _ . / = + - % @
//
// Key is a required field
Key *string `min:"1" type:"string" required:"true"`
// An optional string, which you can use to describe or define the tag. Maximum
// length: 256 characters. Valid characters: Unicode letters, digits, white
// space, _ . / = + - % @
Value *string `type:"string"`
}
// String returns the string representation
func (s Tag) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Tag) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *Tag) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "Tag"}
if s.Key == nil {
invalidParams.Add(request.NewErrParamRequired("Key"))
}
if s.Key != nil && len(*s.Key) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Key", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetKey sets the Key field's value.
func (s *Tag) SetKey(v string) *Tag {
s.Key = &v
return s
}
// SetValue sets the Value field's value.
func (s *Tag) SetValue(v string) *Tag {
s.Value = &v
return s
}
type TagDeliveryStreamInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream to which you want to add the tags.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// A set of key-value pairs to use to create the tags.
//
// Tags is a required field
Tags []*Tag `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s TagDeliveryStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagDeliveryStreamInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *TagDeliveryStreamInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "TagDeliveryStreamInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.Tags == nil {
invalidParams.Add(request.NewErrParamRequired("Tags"))
}
if s.Tags != nil && len(s.Tags) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
}
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
continue
}
if err := v.Validate(); err != nil {
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
}
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *TagDeliveryStreamInput) SetDeliveryStreamName(v string) *TagDeliveryStreamInput {
s.DeliveryStreamName = &v
return s
}
// SetTags sets the Tags field's value.
func (s *TagDeliveryStreamInput) SetTags(v []*Tag) *TagDeliveryStreamInput {
s.Tags = v
return s
}
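
// A minimal sketch of wiring TagDeliveryStreamInput into a TagDeliveryStream
// call. Here "svc" is assumed to be an existing *Firehose client, the stream
// name and tag values are placeholders, and the TagDeliveryStream operation
// itself is defined elsewhere in this file.
//
//    input := (&TagDeliveryStreamInput{}).
//        SetDeliveryStreamName("example-stream").
//        SetTags([]*Tag{
//            (&Tag{}).SetKey("team").SetValue("analytics"),
//        })
//
//    if err := input.Validate(); err == nil {
//        _, err = svc.TagDeliveryStream(input)
//    }
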
type TagDeliveryStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s TagDeliveryStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagDeliveryStreamOutput) GoString() string {
return s.String()
}
type UntagDeliveryStreamInput struct {
_ struct{} `type:"structure"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// A list of tag keys. Each corresponding tag is removed from the delivery stream.
//
// TagKeys is a required field
TagKeys []*string `min:"1" type:"list" required:"true"`
}
// String returns the string representation
func (s UntagDeliveryStreamInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UntagDeliveryStreamInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UntagDeliveryStreamInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UntagDeliveryStreamInput"}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.TagKeys == nil {
invalidParams.Add(request.NewErrParamRequired("TagKeys"))
}
if s.TagKeys != nil && len(s.TagKeys) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *UntagDeliveryStreamInput) SetDeliveryStreamName(v string) *UntagDeliveryStreamInput {
s.DeliveryStreamName = &v
return s
}
// SetTagKeys sets the TagKeys field's value.
func (s *UntagDeliveryStreamInput) SetTagKeys(v []*string) *UntagDeliveryStreamInput {
s.TagKeys = v
return s
}
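
// A minimal sketch of removing tags by key with UntagDeliveryStreamInput.
// Again "svc" is assumed to be an existing *Firehose client and the key
// names are placeholders; aws.String comes from the aws package imported at
// the top of this file.
//
//    input := (&UntagDeliveryStreamInput{}).
//        SetDeliveryStreamName("example-stream").
//        SetTagKeys([]*string{aws.String("team"), aws.String("environment")})
//
//    if err := input.Validate(); err == nil {
//        _, err = svc.UntagDeliveryStream(input)
//    }
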
type UntagDeliveryStreamOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UntagDeliveryStreamOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UntagDeliveryStreamOutput) GoString() string {
return s.String()
}
type UpdateDestinationInput struct {
_ struct{} `type:"structure"`
	// Obtain this value from the VersionId result of DeliveryStreamDescription.
	// This value is required, and it helps the service perform conditional operations.
	// For example, if there is an interleaving update and this value is null, the
	// update to the destination fails. After the update succeeds, the VersionId
	// value is updated. The service then merges the old configuration with the
	// new configuration.
//
// CurrentDeliveryStreamVersionId is a required field
CurrentDeliveryStreamVersionId *string `min:"1" type:"string" required:"true"`
// The name of the delivery stream.
//
// DeliveryStreamName is a required field
DeliveryStreamName *string `min:"1" type:"string" required:"true"`
// The ID of the destination.
//
// DestinationId is a required field
DestinationId *string `min:"1" type:"string" required:"true"`
// Describes an update for a destination in Amazon ES.
ElasticsearchDestinationUpdate *ElasticsearchDestinationUpdate `type:"structure"`
// Describes an update for a destination in Amazon S3.
ExtendedS3DestinationUpdate *ExtendedS3DestinationUpdate `type:"structure"`
// Describes an update for a destination in Amazon Redshift.
RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"`
// [Deprecated] Describes an update for a destination in Amazon S3.
//
// Deprecated: S3DestinationUpdate has been deprecated
S3DestinationUpdate *S3DestinationUpdate `deprecated:"true" type:"structure"`
// Describes an update for a destination in Splunk.
SplunkDestinationUpdate *SplunkDestinationUpdate `type:"structure"`
}
// String returns the string representation
func (s UpdateDestinationInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateDestinationInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *UpdateDestinationInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "UpdateDestinationInput"}
if s.CurrentDeliveryStreamVersionId == nil {
invalidParams.Add(request.NewErrParamRequired("CurrentDeliveryStreamVersionId"))
}
if s.CurrentDeliveryStreamVersionId != nil && len(*s.CurrentDeliveryStreamVersionId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("CurrentDeliveryStreamVersionId", 1))
}
if s.DeliveryStreamName == nil {
invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName"))
}
if s.DeliveryStreamName != nil && len(*s.DeliveryStreamName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DeliveryStreamName", 1))
}
if s.DestinationId == nil {
invalidParams.Add(request.NewErrParamRequired("DestinationId"))
}
if s.DestinationId != nil && len(*s.DestinationId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("DestinationId", 1))
}
if s.ElasticsearchDestinationUpdate != nil {
if err := s.ElasticsearchDestinationUpdate.Validate(); err != nil {
invalidParams.AddNested("ElasticsearchDestinationUpdate", err.(request.ErrInvalidParams))
}
}
if s.ExtendedS3DestinationUpdate != nil {
if err := s.ExtendedS3DestinationUpdate.Validate(); err != nil {
invalidParams.AddNested("ExtendedS3DestinationUpdate", err.(request.ErrInvalidParams))
}
}
if s.RedshiftDestinationUpdate != nil {
if err := s.RedshiftDestinationUpdate.Validate(); err != nil {
invalidParams.AddNested("RedshiftDestinationUpdate", err.(request.ErrInvalidParams))
}
}
if s.S3DestinationUpdate != nil {
if err := s.S3DestinationUpdate.Validate(); err != nil {
invalidParams.AddNested("S3DestinationUpdate", err.(request.ErrInvalidParams))
}
}
if s.SplunkDestinationUpdate != nil {
if err := s.SplunkDestinationUpdate.Validate(); err != nil {
invalidParams.AddNested("SplunkDestinationUpdate", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCurrentDeliveryStreamVersionId sets the CurrentDeliveryStreamVersionId field's value.
func (s *UpdateDestinationInput) SetCurrentDeliveryStreamVersionId(v string) *UpdateDestinationInput {
s.CurrentDeliveryStreamVersionId = &v
return s
}
// SetDeliveryStreamName sets the DeliveryStreamName field's value.
func (s *UpdateDestinationInput) SetDeliveryStreamName(v string) *UpdateDestinationInput {
s.DeliveryStreamName = &v
return s
}
// SetDestinationId sets the DestinationId field's value.
func (s *UpdateDestinationInput) SetDestinationId(v string) *UpdateDestinationInput {
s.DestinationId = &v
return s
}
// SetElasticsearchDestinationUpdate sets the ElasticsearchDestinationUpdate field's value.
func (s *UpdateDestinationInput) SetElasticsearchDestinationUpdate(v *ElasticsearchDestinationUpdate) *UpdateDestinationInput {
s.ElasticsearchDestinationUpdate = v
return s
}
// SetExtendedS3DestinationUpdate sets the ExtendedS3DestinationUpdate field's value.
func (s *UpdateDestinationInput) SetExtendedS3DestinationUpdate(v *ExtendedS3DestinationUpdate) *UpdateDestinationInput {
s.ExtendedS3DestinationUpdate = v
return s
}
// SetRedshiftDestinationUpdate sets the RedshiftDestinationUpdate field's value.
func (s *UpdateDestinationInput) SetRedshiftDestinationUpdate(v *RedshiftDestinationUpdate) *UpdateDestinationInput {
s.RedshiftDestinationUpdate = v
return s
}
// SetS3DestinationUpdate sets the S3DestinationUpdate field's value.
func (s *UpdateDestinationInput) SetS3DestinationUpdate(v *S3DestinationUpdate) *UpdateDestinationInput {
s.S3DestinationUpdate = v
return s
}
// SetSplunkDestinationUpdate sets the SplunkDestinationUpdate field's value.
func (s *UpdateDestinationInput) SetSplunkDestinationUpdate(v *SplunkDestinationUpdate) *UpdateDestinationInput {
s.SplunkDestinationUpdate = v
return s
}
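
// A minimal sketch of an UpdateDestination call that changes only the S3
// prefix through ExtendedS3DestinationUpdate. The version and destination
// IDs would normally come from a prior DescribeDeliveryStream call; "svc" is
// assumed to be an existing *Firehose client, the literal values are
// placeholders, and SetPrefix is the ExtendedS3DestinationUpdate setter
// generated elsewhere in this file.
//
//    update := (&UpdateDestinationInput{}).
//        SetDeliveryStreamName("example-stream").
//        SetCurrentDeliveryStreamVersionId("1").
//        SetDestinationId("destinationId-000000000001").
//        SetExtendedS3DestinationUpdate(
//            (&ExtendedS3DestinationUpdate{}).SetPrefix("logs/2019/"))
//
//    if err := update.Validate(); err == nil {
//        _, err = svc.UpdateDestination(update)
//    }
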
type UpdateDestinationOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s UpdateDestinationOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s UpdateDestinationOutput) GoString() string {
return s.String()
}
const (
// CompressionFormatUncompressed is a CompressionFormat enum value
CompressionFormatUncompressed = "UNCOMPRESSED"
// CompressionFormatGzip is a CompressionFormat enum value
CompressionFormatGzip = "GZIP"
// CompressionFormatZip is a CompressionFormat enum value
CompressionFormatZip = "ZIP"
// CompressionFormatSnappy is a CompressionFormat enum value
CompressionFormatSnappy = "Snappy"
)
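
// The CompressionFormat values above are plain strings, so they can be passed
// directly to the S3 setters generated elsewhere in this file. A minimal
// sketch, assuming S3DestinationUpdate's SetCompressionFormat setter:
//
//    s3Update := (&S3DestinationUpdate{}).
//        SetCompressionFormat(CompressionFormatGzip)
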
const (
// DeliveryStreamEncryptionStatusEnabled is a DeliveryStreamEncryptionStatus enum value
DeliveryStreamEncryptionStatusEnabled = "ENABLED"
// DeliveryStreamEncryptionStatusEnabling is a DeliveryStreamEncryptionStatus enum value
DeliveryStreamEncryptionStatusEnabling = "ENABLING"
// DeliveryStreamEncryptionStatusDisabled is a DeliveryStreamEncryptionStatus enum value
DeliveryStreamEncryptionStatusDisabled = "DISABLED"
// DeliveryStreamEncryptionStatusDisabling is a DeliveryStreamEncryptionStatus enum value
DeliveryStreamEncryptionStatusDisabling = "DISABLING"
)
const (
// DeliveryStreamStatusCreating is a DeliveryStreamStatus enum value
DeliveryStreamStatusCreating = "CREATING"
// DeliveryStreamStatusDeleting is a DeliveryStreamStatus enum value
DeliveryStreamStatusDeleting = "DELETING"
// DeliveryStreamStatusActive is a DeliveryStreamStatus enum value
DeliveryStreamStatusActive = "ACTIVE"
)
const (
// DeliveryStreamTypeDirectPut is a DeliveryStreamType enum value
DeliveryStreamTypeDirectPut = "DirectPut"
// DeliveryStreamTypeKinesisStreamAsSource is a DeliveryStreamType enum value
DeliveryStreamTypeKinesisStreamAsSource = "KinesisStreamAsSource"
)
const (
	// ElasticsearchIndexRotationPeriodNoRotation is an ElasticsearchIndexRotationPeriod enum value
	ElasticsearchIndexRotationPeriodNoRotation = "NoRotation"
	// ElasticsearchIndexRotationPeriodOneHour is an ElasticsearchIndexRotationPeriod enum value
	ElasticsearchIndexRotationPeriodOneHour = "OneHour"
	// ElasticsearchIndexRotationPeriodOneDay is an ElasticsearchIndexRotationPeriod enum value
	ElasticsearchIndexRotationPeriodOneDay = "OneDay"
	// ElasticsearchIndexRotationPeriodOneWeek is an ElasticsearchIndexRotationPeriod enum value
	ElasticsearchIndexRotationPeriodOneWeek = "OneWeek"
	// ElasticsearchIndexRotationPeriodOneMonth is an ElasticsearchIndexRotationPeriod enum value
ElasticsearchIndexRotationPeriodOneMonth = "OneMonth"
)
const (
	// ElasticsearchS3BackupModeFailedDocumentsOnly is an ElasticsearchS3BackupMode enum value
	ElasticsearchS3BackupModeFailedDocumentsOnly = "FailedDocumentsOnly"
	// ElasticsearchS3BackupModeAllDocuments is an ElasticsearchS3BackupMode enum value
ElasticsearchS3BackupModeAllDocuments = "AllDocuments"
)
const (
// HECEndpointTypeRaw is a HECEndpointType enum value
HECEndpointTypeRaw = "Raw"
// HECEndpointTypeEvent is a HECEndpointType enum value
HECEndpointTypeEvent = "Event"
)
const (
// NoEncryptionConfigNoEncryption is a NoEncryptionConfig enum value
NoEncryptionConfigNoEncryption = "NoEncryption"
)
const (
	// OrcCompressionNone is an OrcCompression enum value
	OrcCompressionNone = "NONE"
	// OrcCompressionZlib is an OrcCompression enum value
	OrcCompressionZlib = "ZLIB"
	// OrcCompressionSnappy is an OrcCompression enum value
OrcCompressionSnappy = "SNAPPY"
)
const (
	// OrcFormatVersionV011 is an OrcFormatVersion enum value
	OrcFormatVersionV011 = "V0_11"
	// OrcFormatVersionV012 is an OrcFormatVersion enum value
OrcFormatVersionV012 = "V0_12"
)
const (
// ParquetCompressionUncompressed is a ParquetCompression enum value
ParquetCompressionUncompressed = "UNCOMPRESSED"
// ParquetCompressionGzip is a ParquetCompression enum value
ParquetCompressionGzip = "GZIP"
// ParquetCompressionSnappy is a ParquetCompression enum value
ParquetCompressionSnappy = "SNAPPY"
)
const (
// ParquetWriterVersionV1 is a ParquetWriterVersion enum value
ParquetWriterVersionV1 = "V1"
// ParquetWriterVersionV2 is a ParquetWriterVersion enum value
ParquetWriterVersionV2 = "V2"
)
const (
// ProcessorParameterNameLambdaArn is a ProcessorParameterName enum value
ProcessorParameterNameLambdaArn = "LambdaArn"
// ProcessorParameterNameNumberOfRetries is a ProcessorParameterName enum value
ProcessorParameterNameNumberOfRetries = "NumberOfRetries"
// ProcessorParameterNameRoleArn is a ProcessorParameterName enum value
ProcessorParameterNameRoleArn = "RoleArn"
// ProcessorParameterNameBufferSizeInMbs is a ProcessorParameterName enum value
ProcessorParameterNameBufferSizeInMbs = "BufferSizeInMBs"
// ProcessorParameterNameBufferIntervalInSeconds is a ProcessorParameterName enum value
ProcessorParameterNameBufferIntervalInSeconds = "BufferIntervalInSeconds"
)
const (
// ProcessorTypeLambda is a ProcessorType enum value
ProcessorTypeLambda = "Lambda"
)
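
// A minimal sketch of how these parameter names are typically combined with a
// Lambda processor. ProcessorParameter, Processor, and their setters are
// generated elsewhere in this file; the Lambda ARN below is a placeholder.
//
//    param := (&ProcessorParameter{}).
//        SetParameterName(ProcessorParameterNameLambdaArn).
//        SetParameterValue("arn:aws:lambda:us-east-1:123456789012:function:my-transform")
//
//    proc := (&Processor{}).
//        SetType(ProcessorTypeLambda).
//        SetParameters([]*ProcessorParameter{param})
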
const (
// RedshiftS3BackupModeDisabled is a RedshiftS3BackupMode enum value
RedshiftS3BackupModeDisabled = "Disabled"
// RedshiftS3BackupModeEnabled is a RedshiftS3BackupMode enum value
RedshiftS3BackupModeEnabled = "Enabled"
)
const (
	// S3BackupModeDisabled is an S3BackupMode enum value
	S3BackupModeDisabled = "Disabled"
	// S3BackupModeEnabled is an S3BackupMode enum value
S3BackupModeEnabled = "Enabled"
)
const (
// SplunkS3BackupModeFailedEventsOnly is a SplunkS3BackupMode enum value
SplunkS3BackupModeFailedEventsOnly = "FailedEventsOnly"
// SplunkS3BackupModeAllEvents is a SplunkS3BackupMode enum value
SplunkS3BackupModeAllEvents = "AllEvents"
)
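
// A minimal sketch of applying one of the SplunkS3BackupMode values through
// the SplunkDestinationUpdate setter generated elsewhere in this file:
//
//    splunkUpdate := (&SplunkDestinationUpdate{}).
//        SetS3BackupMode(SplunkS3BackupModeFailedEventsOnly)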