package Yars;

use strict;
use warnings;
use 5.010.1;
use Mojo::Base 'Clustericious::App';
use Yars::Routes;
use Yars::Tools;
use Mojo::ByteStream qw/b/;
use File::Path qw/mkpath/;
use Log::Log4perl qw(:easy);
use Number::Bytes::Human qw( format_bytes parse_bytes );

# ABSTRACT: Yet Another RESTful-Archive Service
our $VERSION = '1.28'; # VERSION

has secret => rand;

sub startup {
    my $self = shift;

    # Raise the per-connection inactivity timeout so that large
    # uploads and downloads are not cut off mid-transfer.
    $self->hook(before_dispatch => sub {
        my($c) = @_;
        my $stream = Mojo::IOLoop->stream($c->tx->connection);
        return unless defined $stream;
        $stream->timeout(3000);
    });

    # Maximum upload size; replaced below by the configured
    # max_message_size_server (default 53687091200 bytes, i.e. 50 GiB).
    my $max_size = 53687091200;
    my $tools;

    # When the client supplies a Content-MD5 header, choose the destination
    # disk for that md5 as soon as the upload is spooled to a temporary file,
    # so the finished file lands on the filesystem it will be stored on.
    $self->hook(
        after_build_tx => sub {
            # my($tx,$app) = @_;
            my ( $tx ) = @_;
            $tx->req->max_message_size($max_size);
            $tx->req->content->on(body => sub {
                my $content = shift;
                my $md5_b64 = $content->headers->header('Content-MD5') or return;
                $content->asset->on(
                    upgrade => sub {
                        #my ( $mem, $file ) = @_;
                        my $md5 = unpack 'H*', b($md5_b64)->b64_decode;
                        my $disk = $tools->disk_for($md5) or return;
                        my $tmpdir = join '/', $disk, 'tmp';
                        -d $tmpdir or do { mkpath $tmpdir; chmod 0777, $tmpdir; };
                        -w $tmpdir or chmod 0777, $tmpdir;
                        $_[1]->tmpdir($tmpdir);
                    }
                );
            });
        }
    );

    $self->SUPER::startup(@_);

    $tools = Yars::Tools->new($self->config);

    # Pick up configuration changes (disk map, etc.) on each request.
    $self->hook(
        before_dispatch => sub {
            $tools->refresh_config($self->config);
        }
    );

    $self->helper( tools => sub { $tools } );

    # Optional self-termination, used for testing.
    if(my $time = $self->config->{test_expiration}) {
        require Clustericious::Command::stop;
        WARN "this process will stop after $time seconds";
        Mojo::IOLoop->timer($time => sub {
            WARN "self terminating after $time seconds";
            eval { Clustericious::Command::stop->run };
            WARN "error in stop: $@" if $@;
        });
    }

    $max_size = parse_bytes($self->config->max_message_size_server(default => 53687091200));
    INFO "max message size = " . format_bytes($max_size) . " ($max_size)";
}

sub sanity_check
{
    my($self) = @_;

    return 0 unless $self->SUPER::sanity_check;

    my $sane = 1;

    my($url) = grep { $_ eq $self->config->url } map { $_->{url} } @{ $self->config->{servers} };

    unless(defined $url)
    {
        say "url for this server is not in the disk map";
        $sane = 0;
    }

    my %buckets;

    foreach my $server (@{ $self->config->{servers} })
    {
        my $name = $server->{url} // 'unknown';
        unless($server->{url})
        {
            say "server $name has no URL";
            $sane = 0;
        }
        if(@{ $server->{disks} } > 0)
        {
            foreach my $disk (@{ $server->{disks} })
            {
                my $name2 = $disk->{root} // 'unknown';
                unless($disk->{root})
                {
                    say "server $name disk $name2 has no root";
                    $sane = 0;
                }
                if(@{ $disk->{buckets} })
                {
                    foreach my $bucket (@{ $disk->{buckets} })
                    {
                        if($buckets{$bucket})
                        {
                            say "server $name disk $name2 has duplicate bucket (also seen at $buckets{$bucket})";
                            $sane = 0;
                        }
                        else
                        {
                            $buckets{$bucket} = "server $name disk $name2";
                        }
                    }
                }
                else
                {
                    say "server $name disk $name2 has no buckets assigned";
                    $sane = 0;
                }
            }
        }
        else
        {
            say "server $name has no disks";
            $sane = 0;
        }
    }

    $sane;
}

sub generate_config {
    my $self = shift;

    my $root = $ENV{CLUSTERICIOUS_CONF_DIR} || $ENV{HOME};

    return {
        dirs => [
            [qw(etc)],
            [qw(var log)],
            [qw(var run)],
            [qw(var lib yars data)],
        ],
        files => { 'Yars.conf' => <<'CONF', 'log4perl.conf' => <<CONF2 } };
---
% my $root = $ENV{HOME};
start_mode : 'hypnotoad'
url : http://localhost:9001
hypnotoad :
  pid_file : <%= $root %>/var/run/yars.pid
  listen :
    - http://localhost:9001
servers :
  - url : http://localhost:9001
    disks :
      - root : <%= $root %>/var/lib/yars/data
        buckets : [ <%= join ',', '0'..'9', 'a' .. 'f' %> ]
CONF
log4perl.rootLogger=TRACE, LOGFILE
log4perl.logger.Mojolicious=TRACE
log4perl.appender.LOGFILE=Log::Log4perl::Appender::File
log4perl.appender.LOGFILE.filename=$root/var/log/yars.log
log4perl.appender.LOGFILE.mode=append
log4perl.appender.LOGFILE.layout=PatternLayout
log4perl.appender.LOGFILE.layout.ConversionPattern=[%d{ISO8601}] [%7Z] %5p: %m%n
CONF2
}

1;

__END__

=pod

=encoding UTF-8

=head1 NAME

Yars - Yet Another RESTful-Archive Service

=head1 VERSION

version 1.28

=head1 SYNOPSIS

Create a configuration in C<~/etc/Yars.conf>:

 ---
 url: http://localhost:9001
 start_mode: hypnotoad
 hypnotoad:
   pid_file: <%= home %>/var/run/yars.pid
   listen: [ 'http://localhost:9001' ]
 servers:
   - url: http://localhost:9001
     disks:
       - root: <%= home %>/var/data/disk1
         buckets: <%= json [ 0..9, 'a'..'f' ] %>

Create the needed directories and run the server:

 % mkdir -p ~/var/run ~/var/data/disk1
 % yars start

Upload a file:

 % md5sum foo.jpg
 469f9b131cce1631ddd449fbef9059ba foo.jpg
 % yarsclient upload foo.jpg

Download a file:

 % yarsclient download foo.jpg 469f9b131cce1631ddd449fbef9059ba

=head1 DESCRIPTION

Yars is a simple RESTful server for data storage.

Properly configured, it provides consistent WRITE availability and
eventual READ availability.  Once files are written to the storage
cluster they are immutable, although new files -- even with the same
filename -- can also be written to the cluster.

It allows files to be PUT and GET based on their md5 sums and filenames,
and uses a distributed hash table to store the files across any number
of hosts and disks.

Files are assigned to disks and hosts based on their md5s in the
following manner:

The first N digits of the md5 are considered the "bucket" for a file;
e.g. for N=2 there are 256 buckets, which are distributed among the
disks in proportion to the size of each disk.  The bucket distribution
is done manually as part of the configuration (with the aid of an
included tool, L<yars_generate_diskmap>).
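
For example, with N=2 the bucket for a file is simply the first two hex
digits of its md5.  This short sketch is illustrative only (it is not
part of the Yars API); the content C<"hi\n"> is the same content used in
the examples below:

 use 5.010;
 use Digest::MD5 qw( md5_hex );

 my $md5    = md5_hex("hi\n");     # 764efa883dda1e11db47671c4a3bbd9e
 my $bucket = substr $md5, 0, 2;   # bucket "76" for N=2
 say "md5 $md5 is assigned to bucket $bucket";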

The server is controlled with the command line tool L<yars>.

The basic operation of a running yars cluster is to support requests of
the form

 PUT http://$host/file/$filename
 GET http://$host/file/$md5/$filename
 HEAD http://$host/file/$md5/$filename
 GET http://$host/bucket_map

to store and retrieve files, where C<$host> may be any of the hosts in
the cluster, C<$md5> is the md5 of the content, and C<$filename> is a
filename for the content to be stored.  See L<Yars::Routes> for
documentation of other routes.
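
For example, any server in the cluster will report the cluster-wide
assignment of buckets to servers via the C<bucket_map> route (the exact
format of the response is documented in L<Yars::Routes>):

 % curl http://localhost:9001/bucket_map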

Failover is handled in the following manner:

If the host to which a file is assigned is not available, then the file
will be "stashed" on the filesystem for the host to which it was sent.
If there is no space there, other hosts and disks will be tried until an
available one is found.  Because of this failover mechanism, the "stash"
must be checked whenever a GET request is handled.  A successful GET
will return quickly, but an unsuccessful one will take longer because
all of the stashes on all of the servers must be checked before a
"404 Not Found" is returned.

Another tool, L<yars_fast_balance>, is provided which takes files from
stashes and returns them to their correct locations.

A client, L<Yars::Client>, is also available (in a separate
distribution) for interacting with a yars server.

=head1 EXAMPLES

=head2 simple single server configuration

This creates a single Yars server using hypnotoad with sixteen buckets.

Create a configuration file in C<~/etc/Yars.conf> with this content:

 ---

 # The first half of the configuration specifies the
 # generic Clustericious / web server settings for
 # the server
 start_mode : 'hypnotoad'
 url : http://localhost:9001
 hypnotoad :
   pid_file : <%= home %>/var/run/yars.pid
   listen :
     - http://localhost:9001

 # The rest defines the servers, disks and buckets
 # used by the Yars cluster.  In this single server
 # example, there is only one server and one disk
 servers :
   - url : http://localhost:9001
     disks :
       - root : <%= home %>/var/data/disk1
         buckets : <%= json [ 0..9, 'a'..'f' ] %>

The configuration file is a L<Mojo::Template> template with helpers
provided by L<Clustericious::Config::Helpers>.

Create the directories needed for the server:

 % mkdir -p ~/var/run ~/var/data

Now you can start the server process:

 % yars start

=head3 check status

Now verify that it works:

 % curl http://localhost:9001/status
 {"server_url":"http://localhost:9001","server_version":"1.11","app_name":"Yars","server_hostname":"iscah"}

You can also verify that it works with L<yarsclient>:

 % yarsclient status
 ---
 app_name: Yars
 server_hostname: iscah
 server_url: http://localhost:9001
 server_version: '1.11'

Or via L<Yars::Client>:

 % perl -MYars::Client -MYAML::XS=Dump -E 'say Dump(Yars::Client->new->status)'
 ---
 app_name: Yars
 server_hostname: iscah
 server_url: http://localhost:9001
 server_version: '1.11'

=head3 uploads and downloads

Now try storing a file:

 % echo "hi" | curl -D headers.txt -T - http://localhost:9001/file/test_file1
 ok
 % grep Location headers.txt
 Location: http://localhost:9001/file/764efa883dda1e11db47671c4a3bbd9e/test_file1

You can use the C<Location> header to fetch the file at a later time:

 % curl http://localhost:9001/file/764efa883dda1e11db47671c4a3bbd9e/test_file1
 hi

With L<yarsclient>:

 % echo "hi" > test_file2
 % md5sum test_file2
 764efa883dda1e11db47671c4a3bbd9e test_file2
 % yarsclient upload test_file2

 ... some time later ...

 % yarsclient download test_file2 764efa883dda1e11db47671c4a3bbd9e

You can see the HTTP requests and responses using the C<--trace> option:

 % yarsclient --trace upload test_file2
 % yarsclient --trace download test_file2 764efa883dda1e11db47671c4a3bbd9e

And from Perl:

 use 5.010;
 use Yars::Client;
 use Digest::MD5 qw( md5_hex );

 my $y = Yars::Client->new;

 # filename as first argument,
 # reference to content as second argument
 $y->upload("test_file3", \"hi\n");

 # you can also skip the content like this:
 # $y->upload("test_file3");
 # to upload content from a local file

 my $md5 = md5_hex("hi\n");

 $y->download("test_file3", $md5);

=head2 Multiple servers

=head3 set up the URL

When configuring a cluster of several hosts, the C<url> value in the
configuration must have the correct hostname or IP address for each host
that the server is running on.  One way to handle this would be to have
a configuration file for each host:

 ---
 # ~/etc/Yars.conf on yars1
 url: http://yars1:9001

 ---
 # ~/etc/Yars.conf on yars2
 url: http://yars2:9001

A less tedious way is to use the C<hostname> or C<hostname_full> helper
from L<Clustericious::Config::Helpers>.  This allows you to use the same
configuration for all servers in the cluster:

 ---
 # works for yars1, yars2 but not for
 # a client host
 url: http://<%= hostname %>:9001

=head3 abstract the webserver configuration

If you have multiple L<Clustericious> services on the same host, or if
you share configurations between multiple hosts, it may be useful to use
the C<extends_config> helper and put the web server configuration in
a separate file.  For example:

 ---
 # ~/etc/Yars.conf
 % my $url = "http://" . hostname . ":9001";
 url: <%= $url %>
 % extends_config 'hypnotoad', url => $url, name => 'yars';

 ---
 # ~/etc/hypnotoad.conf
 hypnotoad :
   pid_file : <%= home %>/var/run/<%= $name %>.pid
   listen :
     - <%= $url %>

Now if you were also going to use L<PlugAuth> on the same host, it could
share the same C<hypnotoad.conf> file with different parameters:

 ---
 # ~/etc/PlugAuth.conf
 % my $url = "http://" . hostname . ":3001";
 url: <%= $url %>
 % extends_config 'hypnotoad', url => $url, name => 'plugauth';

=head3 generate the disk map

Given a file called C<diskmap.txt> with a list of hosts and disks like
this:

 yars1 /disk/1a
 yars1 /disk/1b
 yars2 /disk/2a
 yars2 /disk/2b
 yars3 /disk/3a
 yars3 /disk/3b

You can generate a disk map using the L<yars_generate_diskmap> command:

 % yars_generate_diskmap 2 diskmap.txt > ~/etc/yars_diskmap.conf

This will generate a diskmap configuration with the buckets evenly
allocated to the available disks:

 ---
 servers :
   - url : http://yars1:9001
     disks :
       - root : /disk/1a
         buckets : [ 00, 06, 0c, 12, 18, 1e, 24, 2a, 30, 36, 3c, 42, 48,
                     4e, 54, 5a, 60, 66, 6c, 72, 78, 7e, 84, 8a, 90, 96, 9c,
                     a2, a8, ae, b4, ba, c0, c6, cc, d2, d8, de, e4, ea, f0,
                     f6, fc ]
       - root : /disk/1b
         buckets : [ 01, 07, 0d, 13, 19, 1f, 25, 2b, 31, 37, 3d, 43, 49,
                     4f, 55, 5b, 61, 67, 6d, 73, 79, 7f, 85, 8b, 91, 97, 9d,
                     a3, a9, af, b5, bb, c1, c7, cd, d3, d9, df, e5, eb, f1,
                     f7, fd ]
   - url : http://yars2:9001
     disks :
       - root : /disk/2a
         buckets : [ 02, 08, 0e, 14, 1a, 20, 26, 2c, 32, 38, 3e, 44, 4a,
                     50, 56, 5c, 62, 68, 6e, 74, 7a, 80, 86, 8c, 92, 98, 9e,
                     a4, aa, b0, b6, bc, c2, c8, ce, d4, da, e0, e6, ec, f2,
                     f8, fe ]
       - root : /disk/2b
         buckets : [ 03, 09, 0f, 15, 1b, 21, 27, 2d, 33, 39, 3f, 45, 4b,
                     51, 57, 5d, 63, 69, 6f, 75, 7b, 81, 87, 8d, 93, 99, 9f,
                     a5, ab, b1, b7, bd, c3, c9, cf, d5, db, e1, e7, ed, f3,
                     f9, ff ]
   - url : http://yars3:9001
     disks :
       - root : /disk/3a
         buckets : [ 04, 0a, 10, 16, 1c, 22, 28, 2e, 34, 3a, 40, 46, 4c,
                     52, 58, 5e, 64, 6a, 70, 76, 7c, 82, 88, 8e, 94, 9a, a0,
                     a6, ac, b2, b8, be, c4, ca, d0, d6, dc, e2, e8, ee, f4,
                     fa ]
       - root : /disk/3b
         buckets : [ 05, 0b, 11, 17, 1d, 23, 29, 2f, 35, 3b, 41, 47, 4d,
                     53, 59, 5f, 65, 6b, 71, 77, 7d, 83, 89, 8f, 95, 9b, a1,
                     a7, ad, b3, b9, bf, c5, cb, d1, d7, dd, e3, e9, ef, f5,
                     fb ]

You can now extend this diskmap from the C<Yars.conf> file:

 ---
 # ~/etc/Yars.conf
 % my $url = "http://" . hostname . ":9001";
 url: <%= $url %>
 % extends_config 'hypnotoad', url => $url, name => 'yars';
 % extends_config 'yars_diskmap';

Also, if for whatever reason you are unable to use the C<hostname> or
C<hostname_full> helper in your C<Yars.conf> file, it helps to keep your
diskmap configuration in a separate file that can be shared between the
different Yars server configuration files.

You can now run C<yars start> on each host to start the servers.  L<clad>
may be useful for running C<yars start> on multiple hosts at once.

=head3 client configuration

If you are using the C<hostname> or C<hostname_full> helpers to generate
the URL in the server configuration, then you won't be able to share that
configuration with client systems.  In addition, you can specify one or
more failover hosts for L<Yars::Client> and C<yarsclient> to use when
the primary is not available:

 ---
 # ~/etc/Yars.conf on client systems
 url: http://yars2:9001
 failover_urls:
   - http://yars1:9001

=head3 randomizing the server choices

In order to more evenly spread the load over each node in the Yars
cluster, you can randomize the servers that the client considers the
"primary" and the "failover(s)":

 ---
 # ~/etc/Yars.conf on client systems
 % use List::Util qw( shuffle );
 % my @url = shuffle map { "http://yars$_:9001" } 1..3;
 url: <%= $url[0] %>
 failover_urls:
   - <%= $url[1] %>

=head2 Accelerated downloads with nginx

One of the advantages of Clustericious is that it integrates with a
number of different webservers.  You can do testing with hypnotoad,
which comes with L<Mojolicious> (and is thus a prerequisite of
L<Clustericious> and Yars), and then deploy to production with a more
capable webserver, such as nginx.  The integration with nginx allows for
handing off some of the workload to nginx; hypnotoad is good for serving
dynamic web applications, but nginx is better for serving static files.
So with this next configuration we will show you how to configure Yars
to handle the selection of servers and disks and hand off the actual
serving of the static file to nginx.

Once again we put the nginx configuration in its own file so that we can
reuse it with other L<Clustericious> services.

 ---
 # ~/etc/nginx.conf
 start_mode:
   - hypnotoad
   - nginx

 # we use hypnotoad to serve the dynamic part of the app
 # and listen to the same port on localhost
 hypnotoad:
   listen:
     - http://127.0.0.1:<%= $port %>
   pid_file: <%= home %>/var/run/<%= $name %>-hypnotoad.pid
   proxy: 1

 # and we proxy requests on the main IP address through
 # nginx
 nginx:
   '-p': <%= home %>/var/run/<%= $name %>-nginx
   '-c': <%= home %>/var/run/<%= $name %>-nginx/conf/nginx.conf
   autogen:
     filename: <%= home %>/var/run/<%= $name %>-nginx/conf/nginx.conf
     content: |
       # autogenerated file
       events {
         worker_connections 4096;
       }
       http {
         server {
           listen <%= hostname %>:<%= $port %>;
           location / {
             proxy_pass http://127.0.0.1:<%= $port %>;
             proxy_http_version 1.1;
 % if($name eq 'yars') {
             # to accelerate downloads, for Yars only
             # we set the X-Yars-Use-X-Accel header to
             # any value.  This will trigger Yars to
             # use nginx's X-Accel-Redirect to serve
             # actual static files back to the client.
             proxy_set_header X-Yars-Use-X-Accel yes;
 % }
           }
 % if($name eq 'yars') {
           # we need to make the static files available
           # to nginx.  The /static prefix is to ensure
           # that routes (future and present) do not
           # conflict with physical files on your disk.
           location /static/disk/ {
             # internal makes sure that these files
             # won't be served to external clients
             # without going through the yars interface
             internal;
             alias /disk/;
           }
 % }
         }
       }

And once again, our C<Yars.conf> file is short and sweet:

 ---
 % my $port = 9001;
 url: http://<%= hostname %>:<%= $port %>
 % extends_config 'nginx', port => $port, name => 'yars';
 % extends_config 'yars_diskmap';

If you are storing large files in your Yars cluster, the nginx default
maximum request size will probably not be adequate.  If you see an error
message like this:

 % yarsclient upload large-file.iso
 [ERROR] 2016/09/30 11:23:48 Command.pm (204) (413) Request Entity Too Large

Then you need to set
L<client_max_body_size|http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size>
appropriately:

 http {
   server {
     client_max_body_size 0; # zero for no max
     ...

=head2 Accelerate by not checking the md5 twice

By default, Yars checks the MD5 of files before serving them to the
client.  L<Yars::Client> and L<yarsclient> both also check the MD5 sum
after downloading.  The server-side check saves bandwidth if automated
processes would otherwise repeatedly re-download a file that is
corrupted on the server's disk.  The chance of error is likely much
higher on the network than it is on the disk, so if you prefer to do the
check just on the client side, you can set C<download_md5_verify> to
zero:

 ---
 % my $port = 9001;
 url: http://<%= hostname %>:<%= $port %>
 % extends_config 'nginx', port => $port, name => 'yars';
 % extends_config 'yars_diskmap';
 download_md5_verify: 0

When you download files with other clients like C<curl> or C<wget>, the
MD5 check will still happen on the server side.  You may request this
check be skipped by setting the C<X-Yars-Skip-Verify> header to any
value.
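
For example, using C<curl> to fetch the file from the SYNOPSIS while
skipping the server-side check (any value for the header will do):

 % curl -H 'X-Yars-Skip-Verify: 1' \
     -o foo.jpg http://localhost:9001/file/469f9b131cce1631ddd449fbef9059ba/foo.jpg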

=head1 ACKNOWLEDGEMENT

Thanks to Brian Duggan (BDUGGAN) for doing most of the initial work on
Yars, and to David Golden (XDG, DAGOLDEN) for describing Yars's strength
as "Write availability and eventual read consistency and availability".

=head1 SEE ALSO

=over 4

=item L<Yars::Client>

Perl API interface to Yars.

=item L<yarsclient>

Command line client interface to Yars.

=item L<Yars::Routes>

HTTP REST routes usable for interfacing with Yars.

=item L<yars_exercise>

Automated upload / download of files to Yars for performance testing.

=item L<Clustericious>

Yars is built on the L<Clustericious> framework, itself heavily utilizing
L<Mojolicious>.

=back

=head1 AUTHOR

Original author: Marty Brandon

Current maintainer: Graham Ollis E<lt>plicease@cpan.orgE<gt>

Contributors:

Brian Duggan

Curt Tilmes

=head1 COPYRIGHT AND LICENSE

This software is copyright (c) 2013 by NASA GSFC.

This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.

=cut