line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
1
|
|
|
|
|
|
|
package Paws::Firehose; |
2
|
1
|
|
|
1
|
|
7222
|
use Moose; |
|
1
|
|
|
|
|
5
|
|
|
1
|
|
|
|
|
14
|
|
3
|
|
|
|
|
|
|
sub service { 'firehose' } |
4
|
|
|
|
|
|
|
sub version { '2015-08-04' } |
5
|
|
|
|
|
|
|
sub target_prefix { 'Firehose_20150804' } |
6
|
|
|
|
|
|
|
sub json_version { "1.1" } |
7
|
|
|
|
|
|
|
has max_attempts => (is => 'ro', isa => 'Int', default => 5); |
8
|
|
|
|
|
|
|
has retry => (is => 'ro', isa => 'HashRef', default => sub { |
9
|
|
|
|
|
|
|
{ base => 'rand', type => 'exponential', growth_factor => 2 } |
10
|
|
|
|
|
|
|
}); |
11
|
|
|
|
|
|
|
has retriables => (is => 'ro', isa => 'ArrayRef', default => sub { [ |
12
|
|
|
|
|
|
|
] }); |
13
|
|
|
|
|
|
|
|
14
|
|
|
|
|
|
|
with 'Paws::API::Caller', 'Paws::API::EndpointResolver', 'Paws::Net::V4Signature', 'Paws::Net::JsonCaller', 'Paws::Net::JsonResponse'; |
15
|
|
|
|
|
|
|
|
16
|
|
|
|
|
|
|
|
17
|
|
|
|
|
|
|
sub CreateDeliveryStream { |
18
|
|
|
|
|
|
|
my $self = shift; |
19
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::CreateDeliveryStream', @_); |
20
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
21
|
|
|
|
|
|
|
} |
22
|
|
|
|
|
|
|
sub DeleteDeliveryStream { |
23
|
|
|
|
|
|
|
my $self = shift; |
24
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::DeleteDeliveryStream', @_); |
25
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
26
|
|
|
|
|
|
|
} |
27
|
|
|
|
|
|
|
sub DescribeDeliveryStream { |
28
|
|
|
|
|
|
|
my $self = shift; |
29
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::DescribeDeliveryStream', @_); |
30
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
31
|
|
|
|
|
|
|
} |
32
|
|
|
|
|
|
|
sub GetKinesisStream { |
33
|
|
|
|
|
|
|
my $self = shift; |
34
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::GetKinesisStream', @_); |
35
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
36
|
|
|
|
|
|
|
} |
37
|
|
|
|
|
|
|
sub ListDeliveryStreams { |
38
|
|
|
|
|
|
|
my $self = shift; |
39
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::ListDeliveryStreams', @_); |
40
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
41
|
|
|
|
|
|
|
} |
42
|
|
|
|
|
|
|
sub PutRecord { |
43
|
|
|
|
|
|
|
my $self = shift; |
44
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::PutRecord', @_); |
45
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
46
|
|
|
|
|
|
|
} |
47
|
|
|
|
|
|
|
sub PutRecordBatch { |
48
|
|
|
|
|
|
|
my $self = shift; |
49
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::PutRecordBatch', @_); |
50
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
51
|
|
|
|
|
|
|
} |
52
|
|
|
|
|
|
|
sub UpdateDestination { |
53
|
|
|
|
|
|
|
my $self = shift; |
54
|
|
|
|
|
|
|
my $call_object = $self->new_with_coercions('Paws::Firehose::UpdateDestination', @_); |
55
|
|
|
|
|
|
|
return $self->caller->do_call($self, $call_object); |
56
|
|
|
|
|
|
|
} |
57
|
|
|
|
|
|
|
|
58
|
|
|
|
|
|
|
|
59
|
|
|
|
|
|
|
|
60
|
|
|
|
|
|
|
sub operations { qw/CreateDeliveryStream DeleteDeliveryStream DescribeDeliveryStream GetKinesisStream ListDeliveryStreams PutRecord PutRecordBatch UpdateDestination / } |
61
|
|
|
|
|
|
|
|
62
|
|
|
|
|
|
|
1; |
63
|
|
|
|
|
|
|
|
64
|
|
|
|
|
|
|
### main pod documentation begin ### |
65
|
|
|
|
|
|
|
|
66
|
|
|
|
|
|
|
=head1 NAME |
67
|
|
|
|
|
|
|
|
68
|
|
|
|
|
|
|
Paws::Firehose - Perl Interface to AWS Amazon Kinesis Firehose |
69
|
|
|
|
|
|
|
|
70
|
|
|
|
|
|
|
=head1 SYNOPSIS |
71
|
|
|
|
|
|
|
|
72
|
|
|
|
|
|
|
use Paws; |
73
|
|
|
|
|
|
|
|
74
|
|
|
|
|
|
|
my $obj = Paws->service('Firehose'); |
75
|
|
|
|
|
|
|
my $res = $obj->Method( |
76
|
|
|
|
|
|
|
Arg1 => $val1, |
77
|
|
|
|
|
|
|
Arg2 => [ 'V1', 'V2' ], |
78
|
|
|
|
|
|
|
# if Arg3 is an object, the HashRef will be used as arguments to the constructor |
79
|
|
|
|
|
|
|
# of the arguments type |
80
|
|
|
|
|
|
|
Arg3 => { Att1 => 'Val1' }, |
81
|
|
|
|
|
|
|
# if Arg4 is an array of objects, the HashRefs will be passed as arguments to |
82
|
|
|
|
|
|
|
# the constructor of the arguments type |
83
|
|
|
|
|
|
|
Arg4 => [ { Att1 => 'Val1' }, { Att1 => 'Val2' } ], |
84
|
|
|
|
|
|
|
); |
85
|
|
|
|
|
|
|
|
86
|
|
|
|
|
|
|
=head1 DESCRIPTION |
87
|
|
|
|
|
|
|
|
88
|
|
|
|
|
|
|
Amazon Kinesis Firehose API Reference |
89
|
|
|
|
|
|
|
|
90
|
|
|
|
|
|
|
Amazon Kinesis Firehose is a fully managed service that delivers |
91
|
|
|
|
|
|
|
real-time streaming data to destinations such as Amazon Simple Storage |
92
|
|
|
|
|
|
|
Service (Amazon S3), Amazon Elasticsearch Service (Amazon ES), and |
93
|
|
|
|
|
|
|
Amazon Redshift. |
94
|
|
|
|
|
|
|
|
95
|
|
|
|
|
|
|
=head1 METHODS |
96
|
|
|
|
|
|
|
|
97
|
|
|
|
|
|
|
=head2 CreateDeliveryStream(DeliveryStreamName => Str, [DeliveryStreamType => Str, ElasticsearchDestinationConfiguration => L<Paws::Firehose::ElasticsearchDestinationConfiguration>, ExtendedS3DestinationConfiguration => L<Paws::Firehose::ExtendedS3DestinationConfiguration>, KinesisStreamSourceConfiguration => L<Paws::Firehose::KinesisStreamSourceConfiguration>, RedshiftDestinationConfiguration => L<Paws::Firehose::RedshiftDestinationConfiguration>, S3DestinationConfiguration => L<Paws::Firehose::S3DestinationConfiguration>]) |
98
|
|
|
|
|
|
|
|
99
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::CreateDeliveryStream> |
100
|
|
|
|
|
|
|
|
101
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::CreateDeliveryStreamOutput> instance |
102
|
|
|
|
|
|
|
|
103
|
|
|
|
|
|
|
Creates a delivery stream. |
104
|
|
|
|
|
|
|
|
105
|
|
|
|
|
|
|
By default, you can create up to 20 delivery streams per region. |
106
|
|
|
|
|
|
|
|
107
|
|
|
|
|
|
|
This is an asynchronous operation that immediately returns. The initial |
108
|
|
|
|
|
|
|
status of the delivery stream is C<CREATING>. After the delivery stream |
109
|
|
|
|
|
|
|
is created, its status is C<ACTIVE> and it now accepts data. Attempts |
110
|
|
|
|
|
|
|
to send data to a delivery stream that is not in the C<ACTIVE> state |
111
|
|
|
|
|
|
|
cause an exception. To check the state of a delivery stream, use |
112
|
|
|
|
|
|
|
DescribeDeliveryStream. |
113
|
|
|
|
|
|
|
|
114
|
|
|
|
|
|
|
A Kinesis Firehose delivery stream can be configured to receive records |
115
|
|
|
|
|
|
|
directly from providers using PutRecord or PutRecordBatch, or it can be |
116
|
|
|
|
|
|
|
configured to use an existing Kinesis stream as its source. To specify |
117
|
|
|
|
|
|
|
a Kinesis stream as input, set the C<DeliveryStreamType> parameter to |
118
|
|
|
|
|
|
|
C<KinesisStreamAsSource>, and provide the Kinesis stream ARN and role |
119
|
|
|
|
|
|
|
ARN in the C<KinesisStreamSourceConfiguration> parameter. |
120
|
|
|
|
|
|
|
|
121
|
|
|
|
|
|
|
A delivery stream is configured with a single destination: Amazon S3, |
122
|
|
|
|
|
|
|
Amazon ES, or Amazon Redshift. You must specify only one of the |
123
|
|
|
|
|
|
|
following destination configuration parameters: |
124
|
|
|
|
|
|
|
B<ExtendedS3DestinationConfiguration>, B<S3DestinationConfiguration>, |
125
|
|
|
|
|
|
|
B<ElasticsearchDestinationConfiguration>, or |
126
|
|
|
|
|
|
|
B<RedshiftDestinationConfiguration>. |
127
|
|
|
|
|
|
|
|
128
|
|
|
|
|
|
|
When you specify B<S3DestinationConfiguration>, you can also provide |
129
|
|
|
|
|
|
|
the following optional values: B<BufferingHints>, |
130
|
|
|
|
|
|
|
B<EncryptionConfiguration>, and B<CompressionFormat>. By default, if no |
131
|
|
|
|
|
|
|
B<BufferingHints> value is provided, Kinesis Firehose buffers data up |
132
|
|
|
|
|
|
|
to 5 MB or for 5 minutes, whichever condition is satisfied first. Note |
133
|
|
|
|
|
|
|
that B<BufferingHints> is a hint, so there are some cases where the |
134
|
|
|
|
|
|
|
service cannot adhere to these conditions strictly; for example, record |
135
|
|
|
|
|
|
|
boundaries are such that the size is a little over or under the |
136
|
|
|
|
|
|
|
configured buffering size. By default, no encryption is performed. We |
137
|
|
|
|
|
|
|
strongly recommend that you enable encryption to ensure secure data |
138
|
|
|
|
|
|
|
storage in Amazon S3. |
139
|
|
|
|
|
|
|
|
140
|
|
|
|
|
|
|
A few notes about Amazon Redshift as a destination: |
141
|
|
|
|
|
|
|
|
142
|
|
|
|
|
|
|
=over |
143
|
|
|
|
|
|
|
|
144
|
|
|
|
|
|
|
=item * |
145
|
|
|
|
|
|
|
|
146
|
|
|
|
|
|
|
An Amazon Redshift destination requires an S3 bucket as intermediate |
147
|
|
|
|
|
|
|
location, as Kinesis Firehose first delivers data to S3 and then uses |
148
|
|
|
|
|
|
|
C<COPY> syntax to load data into an Amazon Redshift table. This is |
149
|
|
|
|
|
|
|
specified in the B<RedshiftDestinationConfiguration.S3Configuration> |
150
|
|
|
|
|
|
|
parameter. |
151
|
|
|
|
|
|
|
|
152
|
|
|
|
|
|
|
=item * |
153
|
|
|
|
|
|
|
|
154
|
|
|
|
|
|
|
The compression formats C<SNAPPY> or C<ZIP> cannot be specified in |
155
|
|
|
|
|
|
|
B<RedshiftDestinationConfiguration.S3Configuration> because the Amazon |
156
|
|
|
|
|
|
|
Redshift C<COPY> operation that reads from the S3 bucket doesn't |
157
|
|
|
|
|
|
|
support these compression formats. |
158
|
|
|
|
|
|
|
|
159
|
|
|
|
|
|
|
=item * |
160
|
|
|
|
|
|
|
|
161
|
|
|
|
|
|
|
We strongly recommend that you use the user name and password you |
162
|
|
|
|
|
|
|
provide exclusively with Kinesis Firehose, and that the permissions for |
163
|
|
|
|
|
|
|
the account are restricted for Amazon Redshift C<INSERT> permissions. |
164
|
|
|
|
|
|
|
|
165
|
|
|
|
|
|
|
=back |
166
|
|
|
|
|
|
|
|
167
|
|
|
|
|
|
|
Kinesis Firehose assumes the IAM role that is configured as part of the |
168
|
|
|
|
|
|
|
destination. The role should allow the Kinesis Firehose principal to |
169
|
|
|
|
|
|
|
assume the role, and the role should have permissions that allow the |
170
|
|
|
|
|
|
|
service to deliver the data. For more information, see Amazon S3 Bucket |
171
|
|
|
|
|
|
|
Access in the I<Amazon Kinesis Firehose Developer Guide>. |
172
|
|
|
|
|
|
|
|
173
|
|
|
|
|
|
|
|
174
|
|
|
|
|
|
|
=head2 DeleteDeliveryStream(DeliveryStreamName => Str) |
175
|
|
|
|
|
|
|
|
176
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::DeleteDeliveryStream> |
177
|
|
|
|
|
|
|
|
178
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::DeleteDeliveryStreamOutput> instance |
179
|
|
|
|
|
|
|
|
180
|
|
|
|
|
|
|
Deletes a delivery stream and its data. |
181
|
|
|
|
|
|
|
|
182
|
|
|
|
|
|
|
You can delete a delivery stream only if it is in C<ACTIVE> or |
183
|
|
|
|
|
|
|
C<DELETING> state, and not in the C<CREATING> state. While the deletion |
184
|
|
|
|
|
|
|
request is in process, the delivery stream is in the C<DELETING> state. |
185
|
|
|
|
|
|
|
|
186
|
|
|
|
|
|
|
To check the state of a delivery stream, use DescribeDeliveryStream. |
187
|
|
|
|
|
|
|
|
188
|
|
|
|
|
|
|
While the delivery stream is in the C<DELETING> state, the service may |
189
|
|
|
|
|
|
|
continue to accept the records, but the service doesn't make any |
190
|
|
|
|
|
|
|
guarantees with respect to delivering the data. Therefore, as a best |
191
|
|
|
|
|
|
|
practice, you should first stop any applications that are sending |
192
|
|
|
|
|
|
|
records before deleting a delivery stream. |
193
|
|
|
|
|
|
|
|
194
|
|
|
|
|
|
|
|
195
|
|
|
|
|
|
|
=head2 DescribeDeliveryStream(DeliveryStreamName => Str, [ExclusiveStartDestinationId => Str, Limit => Int]) |
196
|
|
|
|
|
|
|
|
197
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::DescribeDeliveryStream> |
198
|
|
|
|
|
|
|
|
199
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::DescribeDeliveryStreamOutput> instance |
200
|
|
|
|
|
|
|
|
201
|
|
|
|
|
|
|
Describes the specified delivery stream and gets the status. For |
202
|
|
|
|
|
|
|
example, after your delivery stream is created, call |
203
|
|
|
|
|
|
|
DescribeDeliveryStream to see if the delivery stream is C<ACTIVE> and |
204
|
|
|
|
|
|
|
therefore ready for data to be sent to it. |
205
|
|
|
|
|
|
|
|
206
|
|
|
|
|
|
|
|
207
|
|
|
|
|
|
|
=head2 GetKinesisStream(DeliveryStreamARN => Str) |
208
|
|
|
|
|
|
|
|
209
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::GetKinesisStream> |
210
|
|
|
|
|
|
|
|
211
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::GetKinesisStreamOutput> instance |
212
|
|
|
|
|
|
|
|
213
|
|
|
|
|
|
|
|
214
|
|
|
|
|
|
|
|
215
|
|
|
|
|
|
|
|
216
|
|
|
|
|
|
|
=head2 ListDeliveryStreams([DeliveryStreamType => Str, ExclusiveStartDeliveryStreamName => Str, Limit => Int]) |
217
|
|
|
|
|
|
|
|
218
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::ListDeliveryStreams> |
219
|
|
|
|
|
|
|
|
220
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::ListDeliveryStreamsOutput> instance |
221
|
|
|
|
|
|
|
|
222
|
|
|
|
|
|
|
Lists your delivery streams. |
223
|
|
|
|
|
|
|
|
224
|
|
|
|
|
|
|
The number of delivery streams might be too large to return using a |
225
|
|
|
|
|
|
|
single call to ListDeliveryStreams. You can limit the number of |
226
|
|
|
|
|
|
|
delivery streams returned, using the B<Limit> parameter. To determine |
227
|
|
|
|
|
|
|
whether there are more delivery streams to list, check the value of |
228
|
|
|
|
|
|
|
B<HasMoreDeliveryStreams> in the output. If there are more delivery |
229
|
|
|
|
|
|
|
streams to list, you can request them by specifying the name of the |
230
|
|
|
|
|
|
|
last delivery stream returned in the call in the |
231
|
|
|
|
|
|
|
B<ExclusiveStartDeliveryStreamName> parameter of a subsequent call. |
232
|
|
|
|
|
|
|
|
233
|
|
|
|
|
|
|
|
234
|
|
|
|
|
|
|
=head2 PutRecord(DeliveryStreamName => Str, Record => L<Paws::Firehose::Record>) |
235
|
|
|
|
|
|
|
|
236
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::PutRecord> |
237
|
|
|
|
|
|
|
|
238
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::PutRecordOutput> instance |
239
|
|
|
|
|
|
|
|
240
|
|
|
|
|
|
|
Writes a single data record into an Amazon Kinesis Firehose delivery |
241
|
|
|
|
|
|
|
stream. To write multiple data records into a delivery stream, use |
242
|
|
|
|
|
|
|
PutRecordBatch. Applications using these operations are referred to as |
243
|
|
|
|
|
|
|
producers. |
244
|
|
|
|
|
|
|
|
245
|
|
|
|
|
|
|
By default, each delivery stream can take in up to 2,000 transactions |
246
|
|
|
|
|
|
|
per second, 5,000 records per second, or 5 MB per second. Note that if |
247
|
|
|
|
|
|
|
you use PutRecord and PutRecordBatch, the limits are an aggregate |
248
|
|
|
|
|
|
|
across these two operations for each delivery stream. For more |
249
|
|
|
|
|
|
|
information about limits and how to request an increase, see Amazon |
250
|
|
|
|
|
|
|
Kinesis Firehose Limits. |
251
|
|
|
|
|
|
|
|
252
|
|
|
|
|
|
|
You must specify the name of the delivery stream and the data record |
253
|
|
|
|
|
|
|
when using PutRecord. The data record consists of a data blob that can |
254
|
|
|
|
|
|
|
be up to 1,000 KB in size, and any kind of data, for example, a segment |
255
|
|
|
|
|
|
|
from a log file, geographic location data, website clickstream data, |
256
|
|
|
|
|
|
|
and so on. |
257
|
|
|
|
|
|
|
|
258
|
|
|
|
|
|
|
Kinesis Firehose buffers records before delivering them to the |
259
|
|
|
|
|
|
|
destination. To disambiguate the data blobs at the destination, a |
260
|
|
|
|
|
|
|
common solution is to use delimiters in the data, such as a newline |
261
|
|
|
|
|
|
|
(C<\n>) or some other character unique within the data. This allows the |
262
|
|
|
|
|
|
|
consumer application to parse individual data items when reading the |
263
|
|
|
|
|
|
|
data from the destination. |
264
|
|
|
|
|
|
|
|
265
|
|
|
|
|
|
|
The PutRecord operation returns a B<RecordId>, which is a unique string |
266
|
|
|
|
|
|
|
assigned to each record. Producer applications can use this ID for |
267
|
|
|
|
|
|
|
purposes such as auditability and investigation. |
268
|
|
|
|
|
|
|
|
269
|
|
|
|
|
|
|
If the PutRecord operation throws a B<ServiceUnavailableException>, |
270
|
|
|
|
|
|
|
back off and retry. If the exception persists, it is possible that the |
271
|
|
|
|
|
|
|
throughput limits have been exceeded for the delivery stream. |
272
|
|
|
|
|
|
|
|
273
|
|
|
|
|
|
|
Data records sent to Kinesis Firehose are stored for 24 hours from the |
274
|
|
|
|
|
|
|
time they are added to a delivery stream as it attempts to send the |
275
|
|
|
|
|
|
|
records to the destination. If the destination is unreachable for more |
276
|
|
|
|
|
|
|
than 24 hours, the data is no longer available. |
277
|
|
|
|
|
|
|
|
278
|
|
|
|
|
|
|
|
279
|
|
|
|
|
|
|
=head2 PutRecordBatch(DeliveryStreamName => Str, Records => ArrayRef[L<Paws::Firehose::Record>]) |
280
|
|
|
|
|
|
|
|
281
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::PutRecordBatch> |
282
|
|
|
|
|
|
|
|
283
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::PutRecordBatchOutput> instance |
284
|
|
|
|
|
|
|
|
285
|
|
|
|
|
|
|
Writes multiple data records into a delivery stream in a single call, |
286
|
|
|
|
|
|
|
which can achieve higher throughput per producer than when writing |
287
|
|
|
|
|
|
|
single records. To write single data records into a delivery stream, |
288
|
|
|
|
|
|
|
use PutRecord. Applications using these operations are referred to as |
289
|
|
|
|
|
|
|
producers. |
290
|
|
|
|
|
|
|
|
291
|
|
|
|
|
|
|
By default, each delivery stream can take in up to 2,000 transactions |
292
|
|
|
|
|
|
|
per second, 5,000 records per second, or 5 MB per second. If you use |
293
|
|
|
|
|
|
|
PutRecord and PutRecordBatch, the limits are an aggregate across these |
294
|
|
|
|
|
|
|
two operations for each delivery stream. For more information about |
295
|
|
|
|
|
|
|
limits, see Amazon Kinesis Firehose Limits. |
296
|
|
|
|
|
|
|
|
297
|
|
|
|
|
|
|
Each PutRecordBatch request supports up to 500 records. Each record in |
298
|
|
|
|
|
|
|
the request can be as large as 1,000 KB (before base64 encoding), up to |
299
|
|
|
|
|
|
|
a limit of 4 MB for the entire request. These limits cannot be changed. |
300
|
|
|
|
|
|
|
|
301
|
|
|
|
|
|
|
You must specify the name of the delivery stream and the data record |
302
|
|
|
|
|
|
|
when using PutRecord. The data record consists of a data blob that can |
303
|
|
|
|
|
|
|
be up to 1,000 KB in size, and any kind of data. For example, it could |
304
|
|
|
|
|
|
|
be a segment from a log file, geographic location data, web site |
305
|
|
|
|
|
|
|
clickstream data, and so on. |
306
|
|
|
|
|
|
|
|
307
|
|
|
|
|
|
|
Kinesis Firehose buffers records before delivering them to the |
308
|
|
|
|
|
|
|
destination. To disambiguate the data blobs at the destination, a |
309
|
|
|
|
|
|
|
common solution is to use delimiters in the data, such as a newline |
310
|
|
|
|
|
|
|
(C<\n>) or some other character unique within the data. This allows the |
311
|
|
|
|
|
|
|
consumer application to parse individual data items when reading the |
312
|
|
|
|
|
|
|
data from the destination. |
313
|
|
|
|
|
|
|
|
314
|
|
|
|
|
|
|
The PutRecordBatch response includes a count of failed records, |
315
|
|
|
|
|
|
|
B<FailedPutCount>, and an array of responses, B<RequestResponses>. Each |
316
|
|
|
|
|
|
|
entry in the B<RequestResponses> array provides additional information |
317
|
|
|
|
|
|
|
about the processed record. It directly correlates with a record in the |
318
|
|
|
|
|
|
|
request array using the same ordering, from the top to the bottom. The |
319
|
|
|
|
|
|
|
response array always includes the same number of records as the |
320
|
|
|
|
|
|
|
request array. B<RequestResponses> includes both successfully and |
321
|
|
|
|
|
|
|
unsuccessfully processed records. Kinesis Firehose attempts to process |
322
|
|
|
|
|
|
|
all records in each PutRecordBatch request. A single record failure |
323
|
|
|
|
|
|
|
does not stop the processing of subsequent records. |
324
|
|
|
|
|
|
|
|
325
|
|
|
|
|
|
|
A successfully processed record includes a B<RecordId> value, which is |
326
|
|
|
|
|
|
|
unique for the record. An unsuccessfully processed record includes |
327
|
|
|
|
|
|
|
B<ErrorCode> and B<ErrorMessage> values. B<ErrorCode> reflects the type |
328
|
|
|
|
|
|
|
of error, and is one of the following values: C<ServiceUnavailable> or |
329
|
|
|
|
|
|
|
C<InternalFailure>. B<ErrorMessage> provides more detailed information |
330
|
|
|
|
|
|
|
about the error. |
331
|
|
|
|
|
|
|
|
332
|
|
|
|
|
|
|
If there is an internal server error or a timeout, the write might have |
333
|
|
|
|
|
|
|
completed or it might have failed. If B<FailedPutCount> is greater than |
334
|
|
|
|
|
|
|
0, retry the request, resending only those records that might have |
335
|
|
|
|
|
|
|
failed processing. This minimizes the possible duplicate records and |
336
|
|
|
|
|
|
|
also reduces the total bytes sent (and corresponding charges). We |
337
|
|
|
|
|
|
|
recommend that you handle any duplicates at the destination. |
338
|
|
|
|
|
|
|
|
339
|
|
|
|
|
|
|
If PutRecordBatch throws B<ServiceUnavailableException>, back off and |
340
|
|
|
|
|
|
|
retry. If the exception persists, it is possible that the throughput |
341
|
|
|
|
|
|
|
limits have been exceeded for the delivery stream. |
342
|
|
|
|
|
|
|
|
343
|
|
|
|
|
|
|
Data records sent to Kinesis Firehose are stored for 24 hours from the |
344
|
|
|
|
|
|
|
time they are added to a delivery stream as it attempts to send the |
345
|
|
|
|
|
|
|
records to the destination. If the destination is unreachable for more |
346
|
|
|
|
|
|
|
than 24 hours, the data is no longer available. |
347
|
|
|
|
|
|
|
|
348
|
|
|
|
|
|
|
|
349
|
|
|
|
|
|
|
=head2 UpdateDestination(CurrentDeliveryStreamVersionId => Str, DeliveryStreamName => Str, DestinationId => Str, [ElasticsearchDestinationUpdate => L<Paws::Firehose::ElasticsearchDestinationUpdate>, ExtendedS3DestinationUpdate => L<Paws::Firehose::ExtendedS3DestinationUpdate>, RedshiftDestinationUpdate => L<Paws::Firehose::RedshiftDestinationUpdate>, S3DestinationUpdate => L<Paws::Firehose::S3DestinationUpdate>]) |
350
|
|
|
|
|
|
|
|
351
|
|
|
|
|
|
|
Each argument is described in detail in: L<Paws::Firehose::UpdateDestination> |
352
|
|
|
|
|
|
|
|
353
|
|
|
|
|
|
|
Returns: a L<Paws::Firehose::UpdateDestinationOutput> instance |
354
|
|
|
|
|
|
|
|
355
|
|
|
|
|
|
|
Updates the specified destination of the specified delivery stream. |
356
|
|
|
|
|
|
|
|
357
|
|
|
|
|
|
|
You can use this operation to change the destination type (for example, |
358
|
|
|
|
|
|
|
to replace the Amazon S3 destination with Amazon Redshift) or change |
359
|
|
|
|
|
|
|
the parameters associated with a destination (for example, to change |
360
|
|
|
|
|
|
|
the bucket name of the Amazon S3 destination). The update might not |
361
|
|
|
|
|
|
|
occur immediately. The target delivery stream remains active while the |
362
|
|
|
|
|
|
|
configurations are updated, so data writes to the delivery stream can |
363
|
|
|
|
|
|
|
continue during this process. The updated configurations are usually |
364
|
|
|
|
|
|
|
effective within a few minutes. |
365
|
|
|
|
|
|
|
|
366
|
|
|
|
|
|
|
Note that switching between Amazon ES and other services is not |
367
|
|
|
|
|
|
|
supported. For an Amazon ES destination, you can only update to another |
368
|
|
|
|
|
|
|
Amazon ES destination. |
369
|
|
|
|
|
|
|
|
370
|
|
|
|
|
|
|
If the destination type is the same, Kinesis Firehose merges the |
371
|
|
|
|
|
|
|
configuration parameters specified with the destination configuration |
372
|
|
|
|
|
|
|
that already exists on the delivery stream. If any of the parameters |
373
|
|
|
|
|
|
|
are not specified in the call, the existing values are retained. For |
374
|
|
|
|
|
|
|
example, in the Amazon S3 destination, if EncryptionConfiguration is |
375
|
|
|
|
|
|
|
not specified, then the existing EncryptionConfiguration is maintained |
376
|
|
|
|
|
|
|
on the destination. |
377
|
|
|
|
|
|
|
|
378
|
|
|
|
|
|
|
If the destination type is not the same, for example, changing the |
379
|
|
|
|
|
|
|
destination from Amazon S3 to Amazon Redshift, Kinesis Firehose does |
380
|
|
|
|
|
|
|
not merge any parameters. In this case, all parameters must be |
381
|
|
|
|
|
|
|
specified. |
382
|
|
|
|
|
|
|
|
383
|
|
|
|
|
|
|
Kinesis Firehose uses B<CurrentDeliveryStreamVersionId> to avoid race |
384
|
|
|
|
|
|
|
conditions and conflicting merges. This is a required field, and the |
385
|
|
|
|
|
|
|
service updates the configuration only if the existing configuration |
386
|
|
|
|
|
|
|
has a version ID that matches. After the update is applied |
387
|
|
|
|
|
|
|
successfully, the version ID is updated, and can be retrieved using |
388
|
|
|
|
|
|
|
DescribeDeliveryStream. Use the new version ID to set |
389
|
|
|
|
|
|
|
B<CurrentDeliveryStreamVersionId> in the next call. |
390
|
|
|
|
|
|
|
|
391
|
|
|
|
|
|
|
|
392
|
|
|
|
|
|
|
|
393
|
|
|
|
|
|
|
|
394
|
|
|
|
|
|
|
=head1 PAGINATORS |
395
|
|
|
|
|
|
|
|
396
|
|
|
|
|
|
|
Paginator methods are helpers that repetitively call methods that return partial results |
397
|
|
|
|
|
|
|
|
398
|
|
|
|
|
|
|
|
399
|
|
|
|
|
|
|
|
400
|
|
|
|
|
|
|
|
401
|
|
|
|
|
|
|
=head1 SEE ALSO |
402
|
|
|
|
|
|
|
|
403
|
|
|
|
|
|
|
This service class forms part of L<Paws> |
404
|
|
|
|
|
|
|
|
405
|
|
|
|
|
|
|
=head1 BUGS and CONTRIBUTIONS |
406
|
|
|
|
|
|
|
|
407
|
|
|
|
|
|
|
The source code is located here: https://github.com/pplu/aws-sdk-perl |
408
|
|
|
|
|
|
|
|
409
|
|
|
|
|
|
|
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues |
410
|
|
|
|
|
|
|
|
411
|
|
|
|
|
|
|
=cut |
412
|
|
|
|
|
|
|
|