package Test2::Aggregate;

use strict;
use warnings;

use File::Find;
use File::Path;
use File::Slurp;

use Test2::V0 'subtest';

=head1 NAME

Test2::Aggregate - Aggregate tests for increased speed

=head1 SYNOPSIS

    use Test2::Aggregate;
    use Test2::V0; # Or 'use Test::More' etc if your suite uses another framework

    Test2::Aggregate::run_tests(
        dirs => \@test_dirs
    );

    done_testing();

=head1 VERSION

Version 0.15

=cut

our $VERSION = '0.15';

=head1 DESCRIPTION

Aggregates all tests specified with C<dirs> (which can even be individual tests)
to avoid forking, reloading etc., which can help with performance (dramatically,
if you have numerous small tests) and also facilitate group profiling. It is quite
common to have tests that take over a second of startup time for milliseconds of
actual runtime - L<Test2::Aggregate> removes that overhead.
Test files are expected to end in B<.t> and are run as subtests of a single
aggregate test.

A bit similar (mainly in intent) to L<Test::Aggregate>, but no inspiration was
drawn from that module, so this one is simpler in concept and execution, which
makes it much more likely to work with your test suite (especially if you use
modern tools like L<Test2::Suite>). It does not even try to package each test
by default (there is an option), which may be good or bad, depending on your
requirements.

Generally, the way to use this module is to try to aggregate sets of quick tests
(e.g. unit tests). Try to iteratively add tests to the aggregator, using the
C<lists> option, so you can easily edit the list and remove those that do not
work. Trying an entire, large suite in one go is not a good idea, as a single
incompatible test can break the run and make the subsequent tests fail
(especially when it does things like globally redefining built-ins) - see the
module usage notes for help.

The module can work with L<Test::Builder> / L<Test::More> suites, but you will
have fewer issues with L<Test2::Suite> (see notes).

=head1 METHODS

=head2 C<run_tests>

    my $stats = Test2::Aggregate::run_tests(
        dirs          => \@dirs,              # optional if lists defined
        lists         => \@lists,             # optional if dirs defined
        exclude       => qr/exclude_regex/,   # optional
        include       => qr/include_regex/,   # optional
        root          => '/testroot/',        # optional
        load_modules  => \@modules,           # optional
        package       => 0,                   # optional
        shuffle       => 0,                   # optional
        sort          => 0,                   # optional
        reverse       => 0,                   # optional
        unique        => 1,                   # optional
        repeat        => 1,                   # optional, requires Test2::Plugin::BailOnFail for < 0
        slow          => 0,                   # optional
        override      => \%override,          # optional, requires Sub::Override
        stats_output  => $stats_output_path,  # optional
        extend_stats  => 0,                   # optional
        test_warnings => 0,                   # optional
        allow_errors  => 0,                   # optional
        pre_eval      => $code_to_eval,       # optional
        dry_run       => 0                    # optional
    );

Runs the aggregate tests. Returns a hashref with stats like this:

    $stats = {
        'test.t' => {
            'test_no'   => 1,                 # numbering starts at 1
            'pass_perc' => 100,               # for single runs pass/fail is 100/0
            'timestamp' => '20190705T145043', # start of test
            'time'      => '0.1732',          # seconds - only with stats_output
            'warnings'  => $STDERR            # only with test_warnings on non-empty STDERR
        }
    };

The parameters to pass:

=over 4

=item * C<dirs> (either this or C<lists> is required)

An arrayref containing directories which will be searched recursively, or even
individual tests. The directories (unless C<shuffle> or C<sort> are true) will
be processed, and the tests run, in the order specified. Test files are expected
to end in C<.t>.

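For example, you could mix whole directories and individual test files (the paths
here are hypothetical):

    Test2::Aggregate::run_tests(
        dirs => ['t/unit', 't/api/quick_checks.t']
    );
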
=item * C<lists> (either this or C<dirs> is required)

Arrayref of flat files from which each line will be pushed to C<dirs> (so they
have a lower precedence - note that C<root> still applies, so don't include it
in the paths inside the list files). If a path does not exist, it will currently
be silently ignored, however the "official" way to skip a line without checking
it as a path is to start it with a C<#> to denote a comment.

This option combines nicely with the C<--exclude-list> option of C<yath> (the
L<Test2::Harness>) to skip the individual runs of the tests you aggregated.

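For instance, a (hypothetical) list file C<aggregate.lst> could look like this:

    # quick unit tests known to aggregate cleanly
    unit/basic.t
    unit/validation.t
    api/fast

and, with the paths above being relative to C<root>, it would be run with:

    Test2::Aggregate::run_tests(
        root  => 't/',
        lists => ['aggregate.lst']
    );
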
=item * C<exclude> (optional)

A regex to filter out tests that you want excluded.

=item * C<include> (optional)

A regex which the tests have to match in order to be included in the test run.
Applied after C<exclude>.

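For example, to aggregate everything under C<t> apart from (hypothetical) database
tests, keeping only the unit tests:

    Test2::Aggregate::run_tests(
        dirs    => ['t'],
        exclude => qr/_db\.t$/,
        include => qr{unit/}
    );
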
=item * C<root> (optional)

If defined, it must be a valid root directory that will prefix all C<dirs> and
C<lists> items. You may want to set it to C<'./'> if you want dirs relative
to the current directory and the dot is not in your C<@INC>.

=item * C<load_modules> (optional)

Arrayref with modules to be loaded (with C<use>) at the start of the test run.
Useful for testing modules with special namespace requirements.

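For example (the module names are hypothetical):

    Test2::Aggregate::run_tests(
        dirs         => ['t/aggregate'],
        load_modules => ['MyApp::Test::Setup', 'MyApp::Schema']
    );
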
=item * C<package> (optional)

Will package each test in its own namespace. While it may help avoid things like
redefine warnings, from experience it can break some tests, so it is disabled
by default.

=item * C<override> (optional)

Pass L<Sub::Override> compatible key/values as a hashref.

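A minimal sketch (the overridden sub is, of course, hypothetical):

    Test2::Aggregate::run_tests(
        dirs     => ['t/aggregate'],
        override => { 'MyApp::Config::env' => sub { 'test' } }
    );
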
=item * C<repeat> (optional)

Number of times to repeat the test(s) (default is 1 for a single run). If
C<repeat> is negative, L<Test2::Plugin::BailOnFail> is required, as the tests
will repeat until they bail on a failure. It can be combined with C<test_warnings>,
in which case a warning will also cause the test run to end.

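For example, a (hypothetical) soak run that keeps going until something fails:

    Test2::Aggregate::run_tests(
        dirs   => ['t/aggregate'],
        repeat => -1    # needs Test2::Plugin::BailOnFail installed
    );
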
=item * C<unique> (optional)

From v0.11, duplicate tests are removed from the running list by default, as
duplicates could mess up the stats output. You can still set this option to
false to allow duplicate tests in the list.

=item * C<sort> (optional)

Sort tests alphabetically if set to true. Provides a way to fix the test order
across systems.

=item * C<shuffle> (optional)

Random order of tests if set to true. Will override C<sort>.

=item * C<reverse> (optional)

Reverse the order of the tests if set to true.

=item * C<slow> (optional)

When true, tests will be skipped if the environment variable C<SKIP_SLOW> is set.

=item * C<test_warnings> (optional)

Tests for warnings over all the tests if set to true - this is added as a final
test which expects zero as the number of tests that had STDERR output.
The STDERR output of each test will be printed at the end of the test run (and
included in the test run result hash), so if you want to see warnings the moment
they are generated, leave this option disabled.

=item * C<allow_errors> (optional)

If enabled, it will allow errors that exit tests prematurely (so they may return
a pass if one of their subtests had passed). The option is available to enable
the old behaviour (version <= 0.12), before the module stopped allowing this.

=item * C<dry_run> (optional)

Instead of running the tests, it will just do an C<ok> for each one. Everything
else (test order, stats files etc.) will still be produced, as if all tests passed.

=item * C<pre_eval> (optional)

String with code to run with C<eval> before each test. You might be inclined to
do this, for example:

    pre_eval => "no warnings 'redefine';"

You might expect it to silence redefine warnings (when you have similarly named
subs in many tests), but even if you don't set warnings explicitly in your tests,
most test bundles will set warnings automatically for you (e.g. for L<Test2::V0>
you'd have to do C<< use Test2::V0 -no_warnings => 1; >>).

=item * C<stats_output> (optional)

C<stats_output> specifies a path where a file will be created to print out the
running time per test (averaged over multiple iterations) and the passing
percentage. Output is sorted from slowest test to fastest. On negative C<repeat>
the stats of each successful run will be written separately instead of the
averages. The name of the file is C<< <calling_script>-<timestamp>.txt >>.
If C<'-'> is passed instead of a path, the output will be written to STDOUT.
The timing stats are useful because the test harness doesn't normally measure
time per subtest (remember, your individual aggregated tests become subtests).
If you prefer to capture the hash output of the function and use that for your
own reports, you still need to define C<stats_output> to enable timing (you can
simply send the output to STDOUT or to a throwaway path).

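The produced file looks roughly like this (hypothetical test names and timings),
with a C<TIMESTAMP> column added when C<extend_stats> is enabled:

    TIME PASS% TEST
    2.41 100 t/unit/heavy.t
    0.13 100 t/unit/basic.t
    TOTAL TIME: 2.5 sec
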
=item * C<extend_stats> (optional)

This option exists so that the default output format of C<stats_output> can stay
fixed, while still allowing additions in future versions that will only be
written with the C<extend_stats> option enabled.
Additions with C<extend_stats> as of the current version:

=over 4

- starting date/time in ISO_8601.

=back

=back

=cut

sub run_tests {
    my %args = @_;
    Test2::V0::plan skip_all => 'Skipping slow tests.'
        if $args{slow} && $ENV{SKIP_SLOW};

    eval "use $_;" foreach @{$args{load_modules}};
    local $ENV{AGGREGATE_TESTS} = 1;

    my $override = $args{override} ? _override($args{override}) : undef;
    my @dirs     = ();
    my $root     = $args{root} || '';
    my @tests;

    @dirs = @{$args{dirs}} if $args{dirs};
    $root .= '/' unless !$root || $root =~ m#/$#;

    if ($root && ! -e $root) {
        warn "Root '$root' does not exist, no tests are loaded."
    } else {
        # Push list file entries to @dirs, skipping comment lines.
        foreach my $file (@{$args{lists}}) {
            push @dirs,
                map { /^\s*#/ ? () : $_ }
                split( /\r?\n/, read_file("$root$file") );
        }

        # Gather all .t files under the (existing) dirs, including individual tests.
        find(
            sub {push @tests, $File::Find::name if /\.t$/},
            grep {-e} map {$root . $_} @dirs
        )
            if @dirs;
    }

    $args{unique} = 1 unless defined $args{unique};
    $args{repeat} ||= 1;

    _process_run_order(\@tests, \%args);

    my @stack = caller();
    $args{caller} = $stack[1] || 'aggregate';
    $args{caller} =~ s#^.*?([^/]+)$#$1#;

    my $warnings = [];
    if ($args{repeat} < 0) {
        # Negative repeat: rerun the suite until a failure bails out.
        eval 'use Test2::Plugin::BailOnFail';
        my $iter = 0;
        while (!@$warnings) {
            $iter++;
            print "Test suite iteration $iter\n";
            if ($args{test_warnings}) {
                $warnings = _process_warnings(
                    Test2::V0::warnings{_run_tests(\@tests, \%args)},
                    \%args
                );
            } else {
                _run_tests(\@tests, \%args);
            }
        }
    } elsif ($args{test_warnings}) {
        $warnings = _process_warnings(
            Test2::V0::warnings { _run_tests(\@tests, \%args) },
            \%args
        );
        Test2::V0::is(
            @$warnings,
            0,
            'No warnings in the aggregate tests.'
        );
    } else {
        _run_tests(\@tests, \%args);
    }

    warn "Test warning output:\n".join("\n", @$warnings)."\n"
        if @$warnings;

    return $args{stats};
}

sub _process_run_order {
    my $tests = shift;
    my $args  = shift;

    @$tests = grep(!/$args->{exclude}/, @$tests) if $args->{exclude};
    @$tests = grep(/$args->{include}/, @$tests)  if $args->{include};

    @$tests = _uniq(@$tests)  if $args->{unique};
    @$tests = reverse @$tests if $args->{reverse};

    if ($args->{shuffle}) {
        require List::Util;
        @$tests = List::Util::shuffle @$tests;
    } elsif ($args->{sort}) {
        @$tests = sort @$tests;
    }
}

sub _process_warnings {
    my $warnings = shift;
    my $args     = shift;
    # The captured STDERR is delimited by the "->Test2::Aggregate" /
    # "<-Test2::Aggregate" markers that _run_tests emits around each test.
    my @warnings = split(/<-Test2::Aggregate\n/, join('', @$warnings));
    my @clean    = ();

    foreach my $warn (@warnings) {
        if ($warn =~ m/(.*)->Test2::Aggregate\n(.*\S.*)/s) {
            push @clean, "<$1>\n$2";
            $args->{stats}->{$1}->{warnings}  = $2;
            $args->{stats}->{$1}->{pass_perc} = 0;
        }
    }
    return \@clean;
}

sub _run_tests {
    my $tests = shift;
    my $args  = shift;

    my $repeat = $args->{repeat};
    $repeat = 1 if $repeat < 0;
    my (%stats, $start);

    require Time::HiRes if $args->{stats_output};

    for my $i (1 .. $repeat) {
        my $iter  = $repeat > 1 ? "Iter: $i/$repeat - " : '';
        my $count = 1;
        foreach my $test (@$tests) {

            # Mark the start of this test's STDERR for _process_warnings.
            warn "$test->Test2::Aggregate\n" if $args->{test_warnings};

            $stats{$test}{test_no} = $count unless $stats{$test}{test_no};
            $start = Time::HiRes::time() if $args->{stats_output};
            $stats{$test}{timestamp} = _timestamp();

            my $exec_error;
            my $result = subtest $iter. "Running test $test" => sub {
                eval $args->{pre_eval} if $args->{pre_eval};

                if ($args->{dry_run}) {
                    Test2::V0::ok($test);
                } else {
                    $args->{package}
                        ? eval "package Test::$i" . '::' . "$count; do '$test';"
                        : do $test;
                    $exec_error = $@;
                }
                Test2::V0::is($exec_error, '', 'Execution should not fail/warn')
                    if !$args->{allow_errors} && $exec_error;
            };

            # Mark the end of this test's STDERR.
            warn "<-Test2::Aggregate\n" if $args->{test_warnings};

            $stats{$test}{time} += (Time::HiRes::time() - $start)/$repeat
                if $args->{stats_output};
            $stats{$test}{pass_perc} += $result ? 100/$repeat : 0;
            $count++;
        }
    }

    _print_stats(\%stats, $args) if $args->{stats_output};
    $args->{stats} = \%stats;
}

sub _override {
    my $replace = shift;

    require Sub::Override;

    my $override = Sub::Override->new;
    $override->replace($_, $replace->{$_}) for (keys %{$replace});

    return $override;
}

sub _print_stats {
    my ($stats, $args) = @_;

    unless (-e $args->{stats_output}) {
        my @create = mkpath($args->{stats_output});
        unless (scalar @create) {
            warn "Could not create ".$args->{stats_output};
            return;
        }
    }

    my $fh;
    if ($args->{stats_output} =~ /^-$/) {
        $fh = *STDOUT
    } else {
        my $file = $args->{stats_output}."/".$args->{caller}."-"._timestamp().".txt";
        open($fh, '>', $file) or die "Can't open > $file: $!";
    }

    my $total = 0;
    my $extra = $args->{extend_stats} ? ' TIMESTAMP' : '';
    print $fh "TIME PASS%$extra TEST\n";

    # Slowest tests first.
    foreach my $test (sort {$stats->{$b}->{time} <=> $stats->{$a}->{time}} keys %$stats) {
        $extra = ' '.$stats->{$test}->{timestamp} if $args->{extend_stats};
        $total += $stats->{$test}->{time};
        printf $fh "%.2f %d$extra $test\n",
            $stats->{$test}->{time}, $stats->{$test}->{pass_perc};
    }

    printf $fh "TOTAL TIME: %.1f sec\n", $total;
    close $fh unless $args->{stats_output} =~ /^-$/;
}

sub _uniq {
    my %seen;
    grep !$seen{$_}++, @_;
}

sub _timestamp {
    my ($s, $m, $h, $D, $M, $Y) = localtime(time);
    return sprintf "%04d%02d%02dT%02d%02d%02d", $Y+1900, $M+1, $D, $h, $m, $s;
}

=head1 USAGE NOTES

Not all tests can be modified to run under the aggregator; it is not intended
for tests that require an isolated environment, do overrides etc. For other tests,
which can potentially run under the aggregator, sometimes very simple changes may
be needed, like giving unique names to subs (or not warning on redefines, or trying
the C<package> option), replacing things that complain, restoring the environment
at the end of the test etc.

Unit tests are usually great for aggregating. You could use the hash that
C<run_tests> returns in a script that tries to add more tests automatically to an
aggregate list, to see which added tests passed and keep them, dropping failures.
See later in the notes for a detailed example.

Trying to aggregate too many tests into a single one can be counter-productive, as
you would ideally want to parallelize your test suite (a super-long aggregated
test continuing after the rest are done will slow down the suite). In general,
more tests will run aggregated if they are grouped so that tests that can't be
aggregated together end up in different groups.

In general you can call C<run_tests> multiple times in a test, and even include in
C<lists> tests that already contain another C<run_tests> call; the only real issue
with multiple calls is that if you use a negative C<repeat> on one call,
L<Test2::Plugin::BailOnFail> is loaded, so any subsequent failure, on any following
C<run_tests> call, will trigger a Bail.

=head2 Test::More

If you haven't switched to the L<Test2::Suite> you are generally advised to do so,
for a number of reasons, compatibility with this module being only a very minor
one. If you are stuck with a L<Test::More> suite, L<Test2::Aggregate> can still
probably help you more than the similarly-named C<Test::Aggregate> modules.

Although the module tries to load C<Test2::V0> with minimal imports so as not to
interfere, it is generally better to C<use Test::More;> in the aggregating test
file alongside C<use Test2::Aggregate;> when your suite uses L<Test::More>.

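A (hypothetical) aggregating C<.t> file for a L<Test::More> based suite could
therefore look like this:

    use Test::More;
    use Test2::Aggregate;

    Test2::Aggregate::run_tests(dirs => ['t/aggregate']);

    done_testing();
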
=head2 BEGIN / END Blocks

C<BEGIN> / C<END> blocks will run at the start/end of each test, and any overrides
etc. you might have set in them will apply to the rest of the tests, so if you use
them you probably need to make changes for aggregation. An example of such a change
is when you have a C<*GLOBAL::CORE::exit> override to test scripts that can call
C<exit>. A solution is to use something like L<Test::Trap>:

    BEGIN {
        unless ($Test::Trap::VERSION) { # Avoid warnings for multiple loads in aggregation
            require Test::Trap;
            Test::Trap->import();
        }
    }

=head2 Test::Class

L<Test::Class> is sort of an aggregator itself. You make your tests into modules
and then load them in the same C<.t> file, so ideally you will not end up with many
C<.t> files that would require further aggregation. If you do, due to the L<Test::Class>
implementation specifics, those C<.t> files won't run under L<Test2::Aggregate>.

=head2 $ENV{AGGREGATE_TESTS}

The environment variable C<AGGREGATE_TESTS> will be set while the tests are running,
for your convenience. Example usage is making a test that you know cannot run under
the aggregator check for it and croak if it was run under it, or handling a module
that can only be loaded once, so you load it in the aggregated test file and then
use something like this in the individual test files:

    eval 'use My::Module' unless $ENV{AGGREGATE_TESTS};

If you have a custom test bundle, you could use the variable to do things like
disable warnings on redefines only for tests that run aggregated:

    use Import::Into;

    sub import {
        ...
        'warnings'->unimport::out_of($package, 'redefine')
            if $ENV{AGGREGATE_TESTS};
    }

Another idea is to make the test die when it is run under the aggregator if, at
design time, you know it is not supposed to run aggregated.

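For example (a minimal sketch - the message is up to you):

    BEGIN {
        die "This test cannot run aggregated.\n" if $ENV{AGGREGATE_TESTS};
    }
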
=head2 Example aggregating strategy

There are many approaches you could take to using C<Test2::Aggregate> with an
existing test suite. For example, you can start by making a list of the test
files you are trying to aggregate:

    find t -name '*.t' > all.lst

If you have a substantial test suite, perhaps try with a portion of it (a subdir?)
instead of the entire suite. In any case, try running the tests aggregated like this:

    use Test2::Aggregate;
    use Test2::V0; # Or Test::More;

    my $stats = Test2::Aggregate::run_tests(
        lists => ['all.lst'],
    );

    open OUT, ">pass.lst";
    foreach my $test (sort {$stats->{$a}->{test_no} <=> $stats->{$b}->{test_no}} keys %$stats) {
        print OUT "$test\n" if $stats->{$test}->{pass_perc};
    }
    close OUT;

    done_testing();

Run the above with C<prove> or C<yath> in verbose mode, so that in case the run
hangs (it can happen), you can see where it did so and edit C<all.lst>, removing
the offending test.

If the run completes, you have a "starting point" - i.e. a list (C<pass.lst>) that
can run under the aggregator.
You can try adding back some of the failed tests - test failures can be cascading,
so some might pass if added back, or have small issues you can address.

Try adding C<< test_warnings => 1 >> to C<run_tests> to fix warnings as well, unless
it is common for your tests to have C<STDERR> output.

To have your entire suite run the aggregated tests together once, and not repeat them
along with the other, non-aggregated, tests, it is a good idea to use the
C<--exclude-list> option of C<yath>.

Hopefully your tests can run in parallel (e.g. with the C<-j> option of C<yath>),
in which case you would split your aggregated tests into multiple lists to have
them run in parallel.
Here is an example of a wrapper around C<yath>, to easily handle multiple lists:

    BEGIN {
        my @args = ();
        foreach (@ARGV) {
            if (/--exclude-lists=(\S+)/) {
                my $all = 't/aggregate/aggregated.tests';
                `awk '{print "t/"\$0}' $1 > $all`;
                push @args, "--exclude-list=$all";
            } else { push @args, $_ if $_; }
        }
        push @args, qw(-P...) # Preload module list (useful for non-aggregated tests)
            unless grep {/--cover/} @args;
        @ARGV = @args;
    }
    exec ('yath', @ARGV);

You would call it with something like C<--exclude-lists=t/aggregate/*.lst>, and
the tests listed will be excluded (you will have them running aggregated through
their own C<.t> files using L<Test2::Aggregate>).

=head1 AUTHOR

Dimitrios Kechagias, C<< >>

=head1 BUGS

Please report any bugs or feature requests to the CPAN RT queue for this
distribution, or through its web interface. I will be notified, and then you'll
automatically be notified of progress on your bug as I make changes. You could
also submit issues or even pull requests to the github repo (see below).

=head1 GIT

L

=head1 COPYRIGHT & LICENSE

Copyright (C) 2019, SpareRoom.com

This program is free software; you can redistribute
it and/or modify it under the same terms as Perl itself.

=cut

1;