package Net::Amazon::Glacier;

use 5.10.0;
use strict;
use warnings;

use Net::Amazon::Signature::V4;
use Net::Amazon::TreeHash;

use HTTP::Request;
use LWP::UserAgent;
use JSON 2.61;
use POSIX;
use Digest::SHA;
use File::Slurp 9999.19;
use Carp;
=head1 NAME

Net::Amazon::Glacier - An implementation of the full Amazon Glacier RESTful 2012-06-01 API.

=head1 VERSION

Version 0.15

=cut

our $VERSION = '0.15';

=head1 SYNOPSIS

Amazon Glacier is Amazon's long-term storage service and can be used to store
cold archives with a novel pricing scheme.
This module implements the full Amazon Glacier RESTful API, version 2012-06-01
(current at writing). It can be used to manage Glacier vaults, upload archives
as a single part or in multiple parts of up to 40,000 GB per archive, and
download them in ranges or as single parts.

Perhaps a little code snippet:

    use Net::Amazon::Glacier;

    my $glacier = Net::Amazon::Glacier->new(
        'eu-west-1',
        'AKIMYACCOUNTID',
        'MYSECRET',
    );

    my $vault = 'a_vault';

    my $vaults = $glacier->list_vaults();

    if ( $glacier->create_vault( $vault ) ) {

        if ( my $archive_id = $glacier->upload_archive( './archive.7z' ) ) {

            my $job_id = $glacier->initiate_archive_retrieval( $vault, $archive_id );

            # Jobs generally take about 4 hours to complete
            my $job_description = $glacier->describe_job( $vault, $job_id );

            # For a better way to wait for completion, see
            # http://docs.aws.amazon.com/amazonglacier/latest/dev/api-initiate-job-post.html
            while ( $job_description->{'StatusCode'} ne 'Succeeded' ) {
                sleep 15 * 60; # poll every 15 minutes
                $job_description = $glacier->describe_job( $vault, $job_id );
            }

            my $archive_bytes = $glacier->get_job_output( $vault, $job_id );

            # Jobs live as completed jobs for "a period", according to
            # http://docs.aws.amazon.com/amazonglacier/latest/dev/api-jobs-get.html
            my $jobs = $glacier->list_jobs( $vault );

            # As of 2013-02-09 jobs are blindly created even if a job for the same archive_id and Range exists.
            # Keep $archive_ids, reuse the expensive job resource, and remember 4 hours.
            foreach my $job ( @$jobs ) {
                next unless $job->{ArchiveId} eq $archive_id;
                my $archive_bytes = $glacier->get_job_output( $vault, $job_id );
            }

        }

    }

The functions are intended to closely reflect Amazon's Glacier API. Please see
Amazon's API reference for documentation of the functions:
L.
=head1 CONSTRUCTOR

=head2 new( $region, $access_key_id, $secret )

=cut

sub new {
    my ( $class, $region, $access_key_id, $secret ) = @_;

    croak "no region specified" unless $region;
    croak "no access key specified" unless $access_key_id;
    croak "no secret specified" unless $secret;

    my $self = {
        region => $region,
        # be well behaved and tell who we are
        ua  => LWP::UserAgent->new( agent => __PACKAGE__ . '/' . $VERSION ),
        sig => Net::Amazon::Signature::V4->new( $access_key_id, $secret, $region, 'glacier' ),
    };
    return bless $self, $class;
}
=head1 VAULT OPERATORS

=head2 create_vault( $vault_name )

Creates a vault with the specified name. Returns true on success, croaks on failure.
L

=cut

sub create_vault {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;

    my $res = $self->_send_receive( PUT => "/-/vaults/$vault_name" );

    # updated error severity
    croak 'create_vault failed with error ' . $res->status_line
        unless $res->is_success;

    return 1;
}

=head2 delete_vault( $vault_name )

Deletes the specified vault. Returns true on success, croaks on failure.

L

=cut

sub delete_vault {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;

    my $res = $self->_send_receive( DELETE => "/-/vaults/$vault_name" );
    # updated error severity
    croak 'delete_vault failed with error ' . $res->status_line
        unless $res->is_success;

    return 1;
}

=head2 describe_vault( $vault_name )

Fetches information about the specified vault.

Returns a hash reference with
the keys described by L.

Croaks on failure.

L

=cut

sub describe_vault {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;

    my $res = $self->_send_receive( GET => "/-/vaults/$vault_name" );
    # updated error severity
    croak 'describe_vault failed with error ' . $res->status_line unless $res->is_success;

    return $self->_decode_and_handle_response( $res );
}
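
=pod

For example, a minimal sketch of reading the returned vault metadata (the vault
name is illustrative, and the keys shown are the ones Amazon documents for
Describe Vault, such as C<VaultName>, C<NumberOfArchives> and C<SizeInBytes>):

    my $info = $glacier->describe_vault( 'a_vault' );
    printf "%s: %d archives, %d bytes\n",
        $info->{VaultName}, $info->{NumberOfArchives}, $info->{SizeInBytes};

=cut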

=head2 list_vaults

Lists the vaults. Returns an array reference with all vaults.
L.

A call to list_vaults can result in many calls to the Amazon API at a rate
of 1 per 1,000 vaults in existence.
Calls to List Vaults in the API are L.

Croaks on failure.

=cut

sub list_vaults {
    my ( $self ) = @_;
    my @vaults;

    my $marker;
    do {
        # 1000 is the default limit, send a marker if needed
        my $res = $self->_send_receive( GET => "/-/vaults?limit=1000" . ( $marker ? '&marker=' . $marker : '' ) );
        # updated error severity
        croak 'list_vaults failed with error ' . $res->status_line unless $res->is_success;
        my $decoded = $self->_decode_and_handle_response( $res );

        push @vaults, @{$decoded->{VaultList}};
        $marker = $decoded->{Marker};
    } while ( $marker );

    return ( \@vaults );
}
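
=pod

A minimal usage sketch; note that the method returns a single array reference,
so dereference it before iterating:

    my $vaults = $glacier->list_vaults();
    for my $vault ( @$vaults ) {
        print $vault->{VaultName}, "\n";
    }

=cut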

=head2 set_vault_notifications( $vault_name, $sns_topic, $events )

Sets vault notifications for a given vault.

An SNS Topic to send notifications to must be provided. The SNS Topic must
grant the vault permission to publish notifications to it.

An array ref to a list of events must be provided. Valid events are
ArchiveRetrievalCompleted and InventoryRetrievalCompleted.

Returns true on success, croaks on failure.

L.

=cut

sub set_vault_notifications {
    my ( $self, $vault_name, $sns_topic, $events ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no sns topic given" unless $sns_topic;
    croak "events should be an array ref" unless ref $events eq 'ARRAY';

    my $content_raw;

    $content_raw->{SNSTopic} = $sns_topic
        if defined($sns_topic);

    $content_raw->{Events} = $events
        if defined($events);

    my $res = $self->_send_receive(
        PUT => "/-/vaults/$vault_name/notification-configuration",
        [
        ],
        encode_json($content_raw),
    );
    # updated error severity
    croak 'set_vault_notifications failed with error ' . $res->status_line
        unless $res->is_success;

    return 1;
}

=head2 get_vault_notifications( $vault_name )

Gets vault notifications status for a given vault.

Returns a hash with an 'SNSTopic' and an array of 'Events' on success, croaks
on failure.

L.

=cut

sub get_vault_notifications {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;

    my $res = $self->_send_receive(
        # this is a read-only call, so use GET (the original PUT here looked like a copy-paste slip)
        GET => "/-/vaults/$vault_name/notification-configuration",
    );
    # updated error severity
    croak 'get_vault_notifications failed with error ' . $res->status_line
        unless $res->is_success;

    return $self->_decode_and_handle_response( $res );
}

=head2 delete_vault_notifications( $vault_name )

Deletes vault notifications for a given vault.

Returns true on success, croaks on failure.

L.

=cut

sub delete_vault_notifications {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;

    my $res = $self->_send_receive(
        DELETE => "/-/vaults/$vault_name/notification-configuration",
    );
    # updated error severity
    croak 'delete_vault_notifications failed with error ' . $res->status_line
        unless $res->is_success;

    return 1;
}
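
=pod

A minimal sketch of wiring up notifications (the topic ARN and vault name are
placeholders):

    $glacier->set_vault_notifications(
        'a_vault',
        'arn:aws:sns:eu-west-1:123456789012:glacier-jobs',
        [ 'ArchiveRetrievalCompleted', 'InventoryRetrievalCompleted' ],
    );

    my $config = $glacier->get_vault_notifications( 'a_vault' );
    print join( ', ', @{ $config->{Events} } ), "\n";

=cut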

=head1 ARCHIVE OPERATIONS

=head2 upload_archive( $vault_name, $archive_path, [ $description ] )

Uploads an archive to the specified vault. $archive_path is the local path to
any file smaller than 4GB. For larger files, see MULTIPART UPLOAD OPERATIONS.

An archive description of up to 1024 printable ASCII characters can be supplied.

Returns the Amazon-generated archive ID on success, or false on failure.

L

=cut

sub upload_archive {
    my ( $self, $vault_name, $archive_path, $description ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no archive path given" unless $archive_path;
    croak 'archive path is not a file' unless -f $archive_path;

    $description //= '';
    my $content = File::Slurp::read_file( $archive_path, err_mode => 'croak', binmode => ':raw', scalar_ref => 1 );

    return $self->_do_upload( $vault_name, $content, $description );
}

=head2 upload_archive_from_ref( $vault_name, $ref, [ $description ] )

DEPRECATED at birth. Will be dropped in the next version. A more robust
upload_archive will support file paths, refs, code refs, filehandles and more.

In the meantime...

Like upload_archive, but takes a reference to your data instead of the path to
a file. For data greater than 4GB, see multi-part upload. An archive
description of up to 1024 printable ASCII characters can be supplied. Returns
the Amazon-generated archive ID on success, or false on failure.

=cut

sub upload_archive_from_ref {
    my ( $self, $vault_name, $ref, $description ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "data must be a reference" unless ref $ref;

    return $self->_do_upload( $vault_name, $ref, $description );
}

sub _do_upload {
    my ( $self, $vault_name, $content_ref, $description ) = @_;

    _enforce_description_limits( \$description );

    my $th = Net::Amazon::TreeHash->new();
    $th->eat_data( $content_ref );
    $th->calc_tree;

    my $res = $self->_send_receive(
        POST => "/-/vaults/$vault_name/archives",
        [
            'x-amz-archive-description' => $description,
            'x-amz-sha256-tree-hash'    => $th->get_final_hash(),
            'x-amz-content-sha256'      => Digest::SHA::sha256_hex( $$content_ref ),
        ],
        $$content_ref
    );
    croak 'upload_archive failed with error ' . $res->status_line unless $res->is_success;

    my $rec_archive_id;
    unless ( $res->header('location') =~ m{^/[^/]+/vaults/[^/]+/archives/(.*)$} ) {
        # update severity of error. This method must return an archive id
        croak 'request succeeded, but reported archive location does not match regex: ' . $res->header('location');
    } else {
        $rec_archive_id = $1;
    }

    return $rec_archive_id;
}

=head2 delete_archive( $vault_name, $archive_id )

Issues a request to delete a file from Glacier. $archive_id is the ID you
received either when you uploaded the file originally or from an inventory.
L

=cut

sub delete_archive {
    my ( $self, $vault_name, $archive_id ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no archive ID given" unless $archive_id;

    my $res = $self->_send_receive( DELETE => "/-/vaults/$vault_name/archives/$archive_id" );
    # updated error severity
    croak 'delete_archive failed with error ' . $res->status_line unless $res->is_success;

    return $res->is_success;
}
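
=pod

A minimal sketch of a round trip with in-memory data (the vault name and
payload are illustrative):

    my $data       = "some bytes worth keeping";
    my $archive_id = $glacier->upload_archive_from_ref( 'a_vault', \$data, 'scratch data' );

    # ... later, when the archive is no longer needed
    $glacier->delete_archive( 'a_vault', $archive_id );

=cut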

=head1 MULTIPART UPLOAD OPERATIONS

Amazon requires this method for files larger than 4GB, and recommends it for
files larger than 100MB.

L

=head2 SYNOPSIS

    use Net::Amazon::Glacier;

    my $glacier = Net::Amazon::Glacier->new(
        'eu-west-1',
        'AKIMYACCOUNTID',
        'MYSECRET',
    );

    my $part_size = $glacier->calculate_multipart_upload_partsize( -s $filename );

    my $upload_id = $glacier->multipart_upload_init( $vault, $part_size, $description );

    open ( A_FILE, '<', 'a_file.bin' );

    my $part;
    my $part_index = 0;
    my $read_bytes;
    my $parts_hash = []; # to store partial tree hash for complete method

    # Upload parts of A_FILE
    do {
        $read_bytes = read ( A_FILE, $part, $part_size );
        $parts_hash->[$part_index] = $glacier->multipart_upload_upload_part( $vault, $upload_id, $part_size, $part_index, \$part );
    } while ( ( $read_bytes == $part_size ) && $parts_hash->[$part_index++] =~ /^[0-9a-f]{64}$/ );
    close ( A_FILE );

    my $archive_size = $part_size * ( $part_index ) + $read_bytes;

    # Capture archive id or error code
    my $archive_id = $glacier->multipart_upload_complete( $vault, $upload_id, $parts_hash, $archive_size );

    # Check if we have a valid $archive_id
    unless ( $archive_id =~ /^[a-zA-Z0-9_\-]{10,}$/ ) {
        # abort partial failed upload
        # could also store upload_id and continue later
        $glacier->multipart_upload_abort( $vault, $upload_id );
    }

    # Other useful methods
    # Get an array ref with incomplete multipart uploads
    my $upload_list = $glacier->multipart_upload_list_uploads( $vault );

    # Get an array ref with uploaded parts for a multipart upload
    my $upload_parts = $glacier->multipart_upload_list_parts( $vault, $upload_id );

=head2 calculate_multipart_upload_partsize ( $archive_size )

Calculates the part size that would allow uploading files of $archive_size.

$archive_size is the maximum expected archive size.

Returns the smallest possible part size to upload an archive of
size $archive_size, and croaks when the file cannot be uploaded in parts
(i.e. when it is larger than roughly 39TB).

=cut

sub calculate_multipart_upload_partsize {
    my ( $self, $archive_size ) = @_;

    # the size of a part (in bytes) if the archive were split into the maximum
    # possible number of parts (10,000)
    my $part_size = ( $archive_size - 1 ) / 10000;

    # the smallest power of 2 that fits this many bytes
    my $part_size_MiB_rounded = 2**( int( log($part_size) / log(2) ) + 1 );

    # range check response for minimum and maximum API limits
    if ( $part_size_MiB_rounded < 1024 * 1024 ) {
        # part size must be at least 1MiB
        return 1024 * 1024;
    } elsif ( $part_size_MiB_rounded > 4 * 1024 * 1024 * 1024 ) {
        # updated error severity
        croak 'part size must not exceed 4GiB, this file size is not uploadable';
    } else {
        return $part_size_MiB_rounded;
    }
}
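
=pod

As a worked example (the 50GB figure is arbitrary): an archive of 50GB split
into the maximum of 10,000 parts needs parts of about 5.4MB, and the next power
of two above that is 8MiB, so the call returns 8 * 1024 * 1024:

    my $fifty_gb  = 50 * 1024 * 1024 * 1024;
    my $part_size = $glacier->calculate_multipart_upload_partsize( $fifty_gb );
    # $part_size is now 8388608 (8MiB)

=cut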

=head2 multipart_upload_init( $vault_name, $part_size, [ $description ] )

Initiates a multipart upload.
$part_size should be carefully calculated to avoid dead ends as documented in
the API. Use calculate_multipart_upload_partsize.

Returns a multipart upload id that should be used while adding parts to the
online archive that is being constructed.

Multipart upload ids are valid until multipart_upload_abort is called or 24
hours after last archive related activity is registered. After that period id
validity should not be expected.

L.

=cut

sub multipart_upload_init {
    my ( $self, $vault_name, $part_size, $description ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no part size given" unless $part_size;
    croak "parameter number mismatch" unless @_ == 3 || @_ == 4;

    _enforce_description_limits( \$description );

    my $multipart_upload_id;

    my $res = $self->_send_receive(
        POST => "/-/vaults/$vault_name/multipart-uploads",
        [
            'x-amz-archive-description' => $description,
            'x-amz-part-size'           => $part_size,
        ],
    );
    # updated error severity
    croak 'multipart_upload_init failed with error ' . $res->status_line unless $res->is_success;

    $multipart_upload_id = $res->header('x-amz-multipart-upload-id');

    # double check the webservice speaks the same language
    # updated error severity
    croak 'request succeeded, but no multipart upload id was returned' unless ( $multipart_upload_id );

    return $multipart_upload_id;
}

=head2 multipart_upload_upload_part( $vault_name, $multipart_upload_id, $part_size, $part_index, $part )

Uploads a certain range of a multipart upload.

$part_size must be the same supplied to multipart_upload_init for a given
multipart upload.

$part_index should be the index of a file of N $part_size chunks whose data is
passed in $part.

$part must be a reference to a string or a filehandle, and must be exactly
the $part_size supplied to multipart_upload_init unless it is the last part,
which can be any non-zero size.

Absolute maximum online archive size is 4GB * 10,000, or slightly over 39TB.
L

Returns the uploaded part's tree-hash (which should be stored in an array ref
to be passed to multipart_upload_complete).

L.

=cut

sub multipart_upload_upload_part {
    my ( $self, $vault_name, $multipart_upload_id, $part_size, $part_index, $part ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no multipart upload id given" unless $multipart_upload_id;
    croak "parameter number mismatch" unless @_ == 6;

    # identify $part as filehandle or string and get content
    my $content = '';

    if ( ref $part eq 'SCALAR' ) {
        # keep scalar reference
        $content = $part;
        croak "no data supplied" unless length $$content;
    } else {
        # try to read any other content as supported by File::Slurp
        eval {
            $content = File::Slurp::read_file( $part, binmode => ':raw', err_mode => 'carp', scalar_ref => 1 );
        };
        croak "\$part interpreted as file (GLOB, IO::Handle/File) but an error occurred while reading: $@" if ( $@ );

        croak "no data read from file" unless length $$content;
    }

    my $upload_part_size = length $$content;

    # compute part hash
    my $th = Net::Amazon::TreeHash->new();

    $th->eat_data( $content );

    $th->calc_tree();

    # range end must be ( $part_size * $part_index ) + $upload_part_size - 1,
    # not ( $part_size * ( $part_index + 1 ) - 1 ), or the last part will fail.
    my $res = $self->_send_receive(
        PUT => "/-/vaults/$vault_name/multipart-uploads/$multipart_upload_id",
        [
            'Content-Range'          => 'bytes ' . ( $part_size * $part_index ) . '-' . ( ( $part_size * $part_index ) + $upload_part_size - 1 ) . '/*',
            'Content-Length'         => $upload_part_size,
            'Content-Type'           => 'application/octet-stream',
            'x-amz-sha256-tree-hash' => $th->get_final_hash(),
            'x-amz-content-sha256'   => Digest::SHA::sha256_hex( $$content ),
            # documentation seems to suggest x-amz-content-sha256 may not be needed but it is!
        ],
        $$content
    );
    # updated error severity
    croak 'multipart_upload_upload_part failed with error ' . $res->status_line unless $res->is_success;

    # check glacier tree-hash = local tree-hash
    # updated error severity; the part tree-hash must be returned
    croak 'request succeeded, but reported and computed tree-hash for part do not match' unless ( $th->get_final_hash() eq $res->header('x-amz-sha256-tree-hash') );

    # return computed tree-hash for this part
    return $res->header('x-amz-sha256-tree-hash');
}

=head2 multipart_upload_complete( $vault_name, $multipart_upload_id, $tree_hash_array_ref, $archive_size )

Signals completion of multipart upload.

$tree_hash_array_ref must be an ordered list (same order as the final assembled
online archive, as opposed to upload order) of partial tree hashes as returned
by multipart_upload_upload_part.

$archive_size is provided at completion to check that all parts make up an
archive, and not beforehand, to allow for archive streaming, i.e. uploading
archives of unknown size. Beware of dead ends when choosing part size. Use
calculate_multipart_upload_partsize to select a part size that will work.

Returns an archive id that can be used to request a job to retrieve the archive
at a later time on success, and 0 on failure.

On failure multipart_upload_list_parts could be used to determine the missing
parts or recover the partial tree hashes, complete the missing parts,
recalculate the correct archive tree hash and call multipart_upload_complete
again with a successful result.

L.

=cut

sub multipart_upload_complete {
    my ( $self, $vault_name, $multipart_upload_id, $tree_hash_array_ref, $archive_size ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no multipart upload id given" unless $multipart_upload_id;
    croak "no tree hash object given" unless ref $tree_hash_array_ref eq 'ARRAY';
    croak "parameter number mismatch" unless @_ == 5;

    my $archive_tree_hash = $self->_tree_hash_from_array_ref( $tree_hash_array_ref );

    my $res = $self->_send_receive(
        POST => "/-/vaults/$vault_name/multipart-uploads/$multipart_upload_id",
        [
            'x-amz-sha256-tree-hash' => $archive_tree_hash,
            'x-amz-archive-size'     => $archive_size,
        ],
    );
    # updated error severity
    croak 'multipart_upload_complete failed with error ' . $res->status_line unless $res->is_success;

    my $rec_archive_id;
    unless ( $res->header('location') =~ m{^/[^/]+/vaults/[^/]+/archives/(.*)$} ) {
        # update severity of error. This method must return an archive id
        croak 'request succeeded, but reported archive location does not match regex: ' . $res->header('location');
    } else {
        $rec_archive_id = $1;
    }

    return $rec_archive_id;
}

=head2 multipart_upload_abort( $vault_name, $multipart_upload_id )

Aborts multipart upload releasing the id and related online resources of
a partially uploaded archive.

L.

=cut

sub multipart_upload_abort {
    my ( $self, $vault_name, $multipart_upload_id ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no multipart_upload_id given" unless $multipart_upload_id;
    croak "parameter number mismatch" unless @_ == 3;

    my $res = $self->_send_receive(
        DELETE => "/-/vaults/$vault_name/multipart-uploads/$multipart_upload_id",
    );
    # updated error severity
    croak 'multipart_upload_abort failed with error ' . $res->status_line unless $res->is_success;

    # double check the webservice speaks the same language
    # updated error severity
    croak 'request returned an invalid code' unless ( $res->code == 204 );

    return $res->is_success;
}

=head2 multipart_upload_list_parts ( $vault_name, $multipart_upload_id )

Returns an array ref with information on all uploaded parts of the, probably
partially uploaded, online archive.

Useful to recover file part tree hashes and complete a broken multipart upload.

L

A call to multipart_upload_list_parts can result in many calls to the
Amazon API at a rate of 1 per 1,000 uploaded parts in existence.
Calls to List Parts in the API are L.

=cut

sub multipart_upload_list_parts {
    my ( $self, $vault_name, $multipart_upload_id ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no multipart_upload_id given" unless $multipart_upload_id;
    croak "parameter number mismatch" unless @_ == 3;

    my @upload_part_list;

    my $marker;
    do {
        # 1000 is the default limit, send a marker if needed
        my $res = $self->_send_receive( GET => "/-/vaults/$vault_name/multipart-uploads/$multipart_upload_id?limit=1000" . ( $marker ? '&marker=' . $marker : '' ) );
        # updated error severity
        croak 'multipart_upload_list_parts failed with error ' . $res->status_line unless $res->is_success;
        my $decoded = $self->_decode_and_handle_response( $res );

        push @upload_part_list, @{$decoded->{Parts}};
        $marker = $decoded->{Marker};
    } while ( $marker );

    return \@upload_part_list;
}

=head2 multipart_upload_list_uploads( $vault_name )

Returns an array ref with information on all non completed multipart uploads.
Useful to recover multipart upload ids.
L

A call to multipart_upload_list_uploads can result in many calls to the Amazon
API at a rate of 1 per 1,000 in-progress multipart uploads in existence.
Calls to List Multipart Uploads in the API are L.

=cut

sub multipart_upload_list_uploads {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "parameter number mismatch" unless @_ == 2;

    my @upload_list;

    my $marker;
    do {
        # 1000 is the default limit, send a marker if needed
        my $res = $self->_send_receive( GET => "/-/vaults/$vault_name/multipart-uploads?limit=1000" . ( $marker ? '&marker=' . $marker : '' ) );
        # updated error severity
        croak 'multipart_upload_list_uploads failed with error ' . $res->status_line unless $res->is_success;
        my $decoded = $self->_decode_and_handle_response( $res );

        push @upload_list, @{$decoded->{UploadsList}};
        $marker = $decoded->{Marker};
    } while ( $marker );

    return \@upload_list;
}
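
=pod

A minimal sketch that uses these listings to clean up stale uploads (the key
names C<MultipartUploadId> and C<CreationDate> follow Amazon's List Multipart
Uploads response; treat them as an assumption and check the API reference):

    my $uploads = $glacier->multipart_upload_list_uploads( 'a_vault' );
    for my $upload ( @$uploads ) {
        print "aborting $upload->{MultipartUploadId} started $upload->{CreationDate}\n";
        $glacier->multipart_upload_abort( 'a_vault', $upload->{MultipartUploadId} );
    }

=cut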

=head1 JOB OPERATIONS

=head2 initiate_archive_retrieval( $vault_name, $archive_id, [
$description, $sns_topic ] )

Initiates an archive retrieval job. $archive_id is an ID previously
retrieved from Amazon Glacier.

A job description of up to 1,024 printable ASCII characters may be supplied.
Net::Amazon::Glacier does its best to enforce this restriction. When unsure,
pass the string and watch for warnings from Carp.

An SNS Topic to send notifications to upon job completion may also be supplied.

L.

=cut

sub initiate_archive_retrieval {
    my ( $self, $vault_name, $archive_id, $description, $sns_topic ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no archive id given" unless $archive_id;

    my $content_raw = {
        Type      => 'archive-retrieval',
        ArchiveId => $archive_id,
    };

    if ( defined $description ) {
        _enforce_description_limits( \$description );
        $content_raw->{Description} = $description;
    }

    $content_raw->{SNSTopic} = $sns_topic
        if defined($sns_topic);

    my $res = $self->_send_receive(
        POST => "/-/vaults/$vault_name/jobs",
        [ ],
        encode_json($content_raw),
    );
    # updated error severity; method must return a job id
    croak 'initiate_archive_retrieval failed with error ' . $res->status_line unless $res->is_success;

    return $res->header('x-amz-job-id');
}

=head2 initiate_inventory_retrieval( $vault_name, $format, [ $description,
$sns_topic ] )

Initiates an inventory retrieval job. $format is either CSV or JSON.

A job description of up to 1,024 printable ASCII characters may be supplied.
Net::Amazon::Glacier does its best to enforce this restriction. When unsure,
pass the string and watch for warnings from Carp.

An SNS Topic to send notifications to upon job completion may also be supplied.

L.

=cut

sub initiate_inventory_retrieval {
    my ( $self, $vault_name, $format, $description, $sns_topic ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no format given" unless $format;

    my $content_raw = {
        Type => 'inventory-retrieval',
    };

    $content_raw->{Format} = $format
        if defined($format);

    if ( defined $description ) {
        _enforce_description_limits( \$description );
        $content_raw->{Description} = $description;
    }

    $content_raw->{SNSTopic} = $sns_topic
        if defined($sns_topic);

    my $res = $self->_send_receive(
        POST => "/-/vaults/$vault_name/jobs",
        [ ],
        encode_json($content_raw),
    );
    # updated error severity; method must return a job id
    croak 'initiate_inventory_retrieval failed with error ' . $res->status_line unless $res->is_success;

    return $res->header('x-amz-job-id');
}
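
=pod

A minimal sketch of requesting a JSON inventory and collecting it once the job
completes (the polling interval and vault name are illustrative; in practice an
SNS notification is a better trigger):

    my $job_id = $glacier->initiate_inventory_retrieval( 'a_vault', 'JSON' );

    sleep 60 * 60
        while $glacier->describe_job( 'a_vault', $job_id )->{'StatusCode'} ne 'Succeeded';

    my $inventory_json = $glacier->get_job_output( 'a_vault', $job_id );

=cut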

=head2 initiate_job( $vault_name, $archive_id, [ $description, $sns_topic ] )

Effectively calls initiate_inventory_retrieval.

Exists for the sole purpose of implementing the Amazon Glacier Developer Guide (API Version 2012-06-01)
nomenclature.

L.

=cut

sub initiate_job {
    initiate_inventory_retrieval( @_ );
}

=head2 describe_job( $vault_name, $job_id )

Retrieves a hashref with information about the requested JobID.

L.

=cut

sub describe_job {
    my ( $self, $vault_name, $job_id ) = @_;
    my $res = $self->_send_receive( GET => "/-/vaults/$vault_name/jobs/$job_id" );
    # updated error severity
    croak 'describe_job failed with error ' . $res->status_line unless $res->is_success;
    return $self->_decode_and_handle_response( $res );
}

=head2 get_job_output( $vault_name, $job_id, [ $range ] )

Retrieves the output of a job, returns a binary blob. Optional range
parameter is passed as an HTTP header.
L.

If you pass a range parameter, you're going to want the tree-hash for your
chunk. That will be returned in an additional return value, so collect it
like this:

    ($bytes, $tree_hash) = get_job_output(...)

=cut

sub get_job_output {
    my ( $self, $vault_name, $job_id, $range ) = @_;

    croak "no vault name given" unless $vault_name;
    croak "no job id given" unless $job_id;

    my $headers = [];

    push @$headers, ( Range => $range )
        if defined($range);

    my $res = $self->_send_receive( GET => "/-/vaults/$vault_name/jobs/$job_id/output", $headers );
    # updated error severity
    croak 'get_job_output failed with error ' . $res->status_line unless $res->is_success;

    return wantarray ? ( $res->decoded_content, $res->header('x-amz-sha256-tree-hash') ) : $res->decoded_content;
}
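
=pod

A minimal sketch of a ranged download in list context (the one-megabyte range
is illustrative; the tree-hash comes back alongside the bytes so it can be
checked against a locally computed hash):

    my ( $bytes, $tree_hash ) = $glacier->get_job_output(
        'a_vault', $job_id, 'bytes=0-1048575'
    );

=cut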

=head2 list_jobs( $vault_name )

Returns an array ref with information about all recently completed jobs for the
specified vault.
L.

A call to list_jobs can result in many calls to the Amazon API at a rate of
1 per 1,000 recently completed jobs in existence.
Calls to List Jobs in the API are L.

=cut

sub list_jobs {
    my ( $self, $vault_name ) = @_;

    croak "no vault name given" unless $vault_name;

    my @completed_jobs;

    my $marker;
    do {
        # 1000 is the default limit, send a marker if needed
        my $res = $self->_send_receive( GET => "/-/vaults/$vault_name/jobs?limit=1000" . ( $marker ? '&marker=' . $marker : '' ) );
        # updated error severity
        croak 'list_jobs failed with error ' . $res->status_line unless $res->is_success;
        my $decoded = $self->_decode_and_handle_response( $res );

        push @completed_jobs, @{$decoded->{JobList}};
        $marker = $decoded->{Marker};
    } while ( $marker );

    return ( \@completed_jobs );
}

# helper functions

# receives an array ref of hex strings as returned by multipart_upload_upload_part
# the array ref must be in the resulting online archive order as opposed to the
# upload order
# returns a hex string representing the tree hash of the complete archive for
# use in multipart_upload_complete
sub _tree_hash_from_array_ref {
    my ( $self, $tree_hash_array_ref ) = @_;

    croak "no tree hash object given" unless $tree_hash_array_ref;
    croak "tree hash array ref is not an array reference" unless ref $tree_hash_array_ref eq 'ARRAY';
    croak "tree hash array ref does not seem to contain sha256 hex strings" unless
        length join ('', map m/^[0-9a-fA-F]{64}$/, @$tree_hash_array_ref) == scalar @$tree_hash_array_ref;

    # copy array to temporary array mapped to byte values
    my @prevLvlHashes = map( pack("H*", $_), @{$tree_hash_array_ref} );

    # consume parts in pairs A (+) B until we have one part (unrolled recursion)
    while ( @prevLvlHashes > 1 ) {
        my ( $prevLvlIterator, $currLvlIterator );

        my @currLvlHashes;

        # consume two elements from the previous level to make one element of the
        # next level; the last element of an odd-sized array is copied verbatim to the next level
        for ( $prevLvlIterator = 0, $currLvlIterator = 0; $prevLvlIterator < @prevLvlHashes; $prevLvlIterator += 2 ) {
            if ( @prevLvlHashes - $prevLvlIterator > 1 ) {
                # store digest in next level as byte values
                push @currLvlHashes, Digest::SHA::sha256( $prevLvlHashes[ $prevLvlIterator ], $prevLvlHashes[ $prevLvlIterator + 1 ] );
            } else {
                push @currLvlHashes, $prevLvlHashes[ $prevLvlIterator ];
            }
        }

        # advance one level
        @prevLvlHashes = @currLvlHashes;
    }

    # return the resulting root hash as a string of hex values
    return unpack( 'H*', $prevLvlHashes[0] );
}

sub _decode_and_handle_response {
    my ( $self, $res ) = @_;

    if ( $res->is_success ) {
        return decode_json( $res->decoded_content );
    } else {
        return undef;
    }
}

sub _send_receive {
    my $self = shift;
    my $req = $self->_craft_request( @_ );
    return $self->_send_request( $req );
}

sub _craft_request {
    my ( $self, $method, $url, $header, $content ) = @_;
    my $host = 'glacier.' . $self->{region} . '.amazonaws.com';
    my $total_header = [
        'x-amz-glacier-version' => '2012-06-01',
        'Host'                  => $host,
        'Date'                  => POSIX::strftime( '%Y%m%dT%H%M%SZ', gmtime ),
        $header ? @$header : ()
    ];
    my $req = HTTP::Request->new( $method => "https://$host$url", $total_header, $content );
    my $signed_req = $self->{sig}->sign( $req );
    return $signed_req;
}

sub _send_request {
    my ( $self, $req ) = @_;
    my $res = $self->{ua}->request( $req );
    if ( $res->is_error ) {
        # try to decode a Glacier error
        eval {
            my $error = decode_json( $res->decoded_content );
            carp sprintf 'Non-successful response: %s (%s)', $res->status_line, $error->{code};
            carp $error->{message};
        };
        if ( $@ ) {
            # fall back to reporting ua errors
            carp sprintf "[%d] %s %s\n", $res->code, $res->message, $res->decoded_content;
        }
    }
    return $res;
}

sub _enforce_description_limits {
    my ( $description ) = @_;
    croak 'Description should be a reference so that I can enforce limits on it.' unless ref $description eq 'SCALAR';
    # treat a missing description as an empty one to avoid warnings below
    $$description //= '';
    # order is important. We do not want to lose any characters unless needed.
    my $changes = ( $$description =~ tr/\x20-\x7f//cd );
    carp 'Description contains invalid characters; stick to printable ASCII (x20-x7f). Fixed.' if ( $changes );
    if ( length $$description > 1024 ) {
        $$description = substr( $$description, 0, 1024 );
        carp 'Description should not be longer than 1024 characters. Fixed.';
    }

    return $description;
}

=head1 ROADMAP

=over 4

=item * Online tests.

=item * Implement a "simple" interface along the lines of

    use Net::Amazon::Glacier;

    # Bless and upload something
    my $glacier = Net::Amazon::Glacier->new( $region, $aws_key, $aws_secret, $metadata_store );

    # Upload intelligently, i.e. in resumable parts, split very big files.
    $glacier->simple->upload( $path || $scalar_ref || $some_fh );

    # Support automatic archive_id to some description conversion
    # Ask for a job when first called, return while it is not ready,
    # return content when ready.
    $glacier->simple->download( $archive_id || 'description', [ $ranges ] );

    # Request download and spawn something, wait and execute $some_code_ref
    # when content ready.
    $glacier->simple->download_wait( $archive_id || 'description' , $some_code_ref, [ $ranges ] );

    # Delete online archive
    $glacier->simple->delete( $archive_id || 'description' );

=item * Implement a simple command line client with access to the simple interface.

    glacier new us-east-1 AAIKSAKS... sdoasdod... /metadata/file
    glacier upload /some/file
    glacier download /some/file (this would spawn a daemon waiting for download)
    glacier ls

=back

=head1 SUPPORT

You can find documentation for this module with the perldoc command.

    perldoc Net::Amazon::Glacier

You can also look for information at:

=over 4

=item * RT: CPAN's request tracker (report bugs here)

L

=item * AnnoCPAN: Annotated CPAN documentation

L

=item * CPAN Ratings

L

=item * Search CPAN

L

=item * Check the GitHub repo, development branches in particular.

L

=item * Mail Gonzalo Barco

C<< >>

=back

=head1 BUGS

Please report any bugs or feature requests to C,
or through the web interface at L.
I will be notified, and then you'll automatically be notified of progress on
your bug as I make changes.

=head1 SEE ALSO

See also Victor Efimov's MT::AWS::Glacier, an application for AWS Glacier
synchronization. It is available at L.

=head1 AUTHORS

Originally written by Tim Nordenfur, C<< >>.
Maintained by Gonzalo Barco C<< >>.
Support for job operations was contributed by Ted Reed at IMVU.
Support for many file operations and multipart uploads by Gonzalo Barco.
Bugs, suggestions and fixes contributed by Victor Efimov and Kevin Goess.

=head1 LICENSE AND COPYRIGHT

Copyright 2012 Tim Nordenfur.

This program is free software; you can redistribute it and/or modify it
under the terms of either: the GNU General Public License as published
by the Free Software Foundation; or the Artistic License.

See http://dev.perl.org/licenses/ for more information.

=cut

1; # End of Net::Amazon::Glacier