package HPC::Runner::Scheduler;

use File::Path qw(make_path remove_tree);
use File::Temp qw/ tempfile tempdir /;
use IO::File;
use IO::Select;
use Cwd;
use IPC::Open3;
use Symbol;
use Template;
use Log::Log4perl qw(:easy);
use DateTime;
use Data::Dumper;
use List::Util qw(shuffle);
use List::MoreUtils qw(firstidx);
use JSON;

use Moose;
use namespace::autoclean;

extends 'HPC::Runner';
with 'MooseX::SimpleConfig';

# For pretty man pages!
$ENV{TERM} = 'xterm-256color';

our $VERSION = '0.07';

=encoding utf-8

=head1 NAME

HPC::Runner::Scheduler - Base Library for HPC::Runner::Slurm and HPC::Runner::PBS

=head1 SYNOPSIS

    pbsrunner.pl/slurmrunner.pl/mcerunner.pl --infile list_of_commands

=head1 DESCRIPTION

HPC::Runner::Scheduler is a base library for creating templates of HPC Scheduler (Slurm, PBS, etc) submission scripts.

All the scheduler variables (memory, cpus, nodes, partitions/queues) are abstracted to a template. Instead of writing an entire submission script, you supply a list of commands:

    slurmrunner.pl --infile list_of_commands #with list of optional parameters

Please see the in-depth usage guide at L<HPC::Runner::Usage>.

=cut

=head1 User Options

User options can be passed to the script with script --opt1 or in a config file. It uses MooseX::SimpleConfig for the commands.

=head2 configfile

Config file to pass to the command line as --configfile /path/to/file. It should be YAML or XML (untested).
This is optional. Parameters can be passed straight on the command line.

=head3 example.yml

    ---
    infile: "/path/to/commands/testcommand.in"
    outdir: "path/to/testdir"
    module:
        - "R2"
        - "shared"

=cut

has '+configfile' => (
    required      => 0,
    documentation =>
        q{If you get tired of putting all your options on the command line create a config file instead.
        ---
        infile: "/path/to/commands/testcommand.in"
        outdir: "path/to/testdir"
        module:
            - "R2"
            - "shared"
        }
);

=head2 infile

infile of commands separated by newline

=head3 example.in

    cmd1
    cmd2 --input --input \
    --someotherinput
    wait
    #Wait tells slurm to make sure previous commands have exited with exit status 0.
    cmd3 ##very heavy job
    newnode
    #cmd3 is a very heavy job so lets start the next job on a new node

=cut

=head2 module

modules to load with slurm
Should use the same names used in 'module load'

Example. R2 becomes 'module load R2'
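
Modules can also be given on the command line. Whether the switch can be repeated depends on how the option parser treats ArrayRef attributes, so treat this as an illustrative sketch rather than the only accepted form:

    slurmrunner.pl --infile list_of_commands --module R2 --module shared

Each entry ends up as a C<module load> line at the top of the generated submission script.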

=cut

has 'module' => (
    is            => 'rw',
    isa           => 'ArrayRef',
    required      => 0,
    documentation => q{List of modules to load ex. R2, samtools, etc},
    default       => sub { [] },
);

=head2 afterok

The afterok switch in slurm. --afterok 123 will tell slurm to start this job after job 123 has completed successfully.
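
As a sketch of how this surfaces in the generated script (job id 123 is only an example value), the default template turns the option into a Slurm dependency line:

    slurmrunner.pl --infile list_of_commands --afterok 123

    # in the rendered submission script
    #SBATCH --dependency=afterok:123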

=cut

has afterok => (
    is       => 'rw',
    isa      => 'ArrayRef',
    required => 0,
    default  => sub {
        return [];
    },
);

=head2 cpus_per_task

slurm item --cpus_per_task defaults to 4, which is probably fine

=cut

has 'cpus_per_task' => (
    is        => 'rw',
    isa       => 'Str',
    required  => 0,
    default   => 4,
    predicate => 'has_cpus_per_task',
    clearer   => 'clear_cpus_per_task'
);

=head2 commands_per_node

--commands_per_node defaults to 8, which is probably fine
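
As a rough worked example (the numbers are only for illustration): with the default of 8 commands per node and no wait/newnode directives, an infile of 20 commands is split into batches of 8, 8, and 4, so three submission scripts are written:

    001_job.sh  # commands 1-8
    002_job.sh  # commands 9-16
    003_job.sh  # commands 17-20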

=cut

has 'commands_per_node' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    default       => 8,
    documentation =>
        q{Commands to run on each node. This is not the same as concurrent_commands_per_node!},
    predicate     => 'has_commands_per_node',
    clearer       => 'clear_commands_per_node'
);

=head2 nodes_count

Number of nodes to use on a job. This is only useful for mpi jobs.

PBS:

    #PBS -l nodes=nodes_count:ppn=16

Slurm:

    #SBATCH --nodes nodes_count

=cut

has 'nodes_count' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    default       => 1,
    documentation =>
        q{Number of nodes requested. You should only use this if submitting parallel jobs.},
    predicate     => 'has_nodes_count',
    clearer       => 'clear_nodes_count'
);

=head2 partition

#Should probably have something at some point that you can specify multiple partitions....

Specify the partition. Defaults to the partition that has the most nodes.

=cut

has 'partition' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    default       => '',
    documentation =>
        q{Slurm partition to submit jobs to. Defaults to the partition with the most available nodes},
    predicate     => 'has_partition',
    clearer       => 'clear_partition'
);

#=head2 nodelist

#Defaults to the nodes on the defq queue

#=cut

#has 'nodelist' => (
#    is => 'rw',
#    isa => 'ArrayRef',
#    required => 0,
#    default => sub { return [] },
#    documentation =>
#        q{List of nodes to submit jobs to. Defaults to the partition with the most nodes.},
#);

=head2 submit_slurm

Bool value whether or not to submit to slurm. If you are looking to debug your files, or this script, you will want to set this to zero.
Don't submit to slurm with --nosubmit_to_slurm from the command line or
$self->submit_to_slurm(0); within your code

=cut

has 'submit_to_slurm' => (
    is            => 'rw',
    isa           => 'Bool',
    default       => 1,
    required      => 1,
    documentation =>
        q{Bool value whether or not to submit to slurm. If you are looking to debug your files, or this script you will want to set this to zero.},
);

=head2 first_pass

Do a first pass of the file to get all the stats

=cut

has 'first_pass' => (
    traits   => ['NoGetopt'],
    is       => 'rw',
    isa      => 'Bool',
    default  => 1,
    required => 1,
);

=head2 template_file

actual template file

One is generated here for you, but you can always supply your own with --template_file /path/to/template
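
If you do supply your own template, it is processed with Template Toolkit and is handed the same variables the built-in template uses (JOBNAME, OUT, PARTITION, CPU, AFTEROK, MODULE, COMMAND, and the object itself as C<self>). A stripped-down sketch of a custom template might look like this; adjust the #SBATCH lines to whatever your site actually needs:

    #!/bin/bash
    #SBATCH --job-name=[% JOBNAME %]
    #SBATCH --output=[% OUT %]
    [% IF CPU %]#SBATCH --cpus-per-task=[% CPU %][% END %]

    [% COMMAND %]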

=cut

has 'template_file' => (
    is      => 'rw',
    isa     => 'Str',
    default => sub {
        my $self = shift;

        my ( $fh, $filename ) = tempfile();

        my $tt = <<EOF;
#!/bin/bash
#
#SBATCH --share
#SBATCH --get-user-env
#SBATCH --job-name=[% JOBNAME %]
#SBATCH --output=[% OUT %]
[% IF PARTITION %]
#SBATCH --partition=[% PARTITION %]
[% END %]
[% IF CPU %]
#SBATCH --cpus-per-task=[% CPU %]
[% END %]
[% IF AFTEROK %]
#SBATCH --dependency=afterok:[% AFTEROK %]
[% END %]

[% IF MODULE %]
[% FOR d = MODULE %]
module load [% d %]
[% END %]
[% END %]

[% COMMAND %]
EOF

        print $fh $tt;
        return $filename;
    },
    predicate => 'has_template_file',
    clearer   => 'clear_template_file',
    documentation =>
        q{Path to Slurm template file if you do not wish to use the default}
);

=head2 serial

Option to run all jobs serially, one after the other, no parallelism
The default is to use 4 procs

=cut

has serial => (
    is            => 'rw',
    isa           => 'Bool',
    default       => 0,
    documentation =>
        q{Use this if you wish to run each job one after another, with each job starting only after the previous has completed successfully},
    predicate     => 'has_serial',
    clearer       => 'clear_serial'
);

=head2 user

user running the script. Passed to slurm for mail information

=cut

has 'user' => (
    is       => 'rw',
    isa      => 'Str',
    default  => sub { return $ENV{LOGNAME} || $ENV{USER} || getpwuid($<); },
    required => 1,
    documentation =>
        q{This defaults to your current user ID. This can only be changed if running as an admin user}
);

=head2 use_threads

Bool value to indicate whether or not to use threads. The default is to use processes.

If using threads your perl must be compiled to use threads!

=cut

has 'use_threads' => (
    is            => 'rw',
    isa           => 'Bool',
    default       => 0,
    required      => 0,
    documentation => q{Use threads to run jobs},
);

=head2 use_processes

Bool value to indicate whether or not to use processes. The default is to use processes.

=cut

has 'use_processes' => (
    is            => 'rw',
    isa           => 'Bool',
    default       => 1,
    required      => 0,
    documentation => q{Use processes to run jobs},
);

=head2 use_gnuparallel

Bool value to indicate whether or not to use gnu-parallel. The default is to use processes.

=cut

has 'use_gnuparallel' => (
    is            => 'rw',
    isa           => 'Bool',
    default       => 0,
    required      => 0,
    documentation =>
        q{Use gnu-parallel to run jobs and manage threads. This is the best option if you do not know how many threads your application uses!}
);

=head2 use_custom

Supply your own command instead of mcerunner/threadsrunner/etc
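
When custom_command is set, the batch is still written to an infile and the same bookkeeping flags are appended to your command, so it should accept them. The name my_runner.pl, the paths, and the --procs value below are placeholders; the flag list mirrors what process_batch_command() builds:

    my_runner.pl --procs 4 --infile /path/to/outdir/001_job.in \
        --outdir /path/to/outdir --logname 001_job --process_table /path/to/process_table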
383
|
|
|
|
|
|
|
|
384
|
|
|
|
|
|
|
=cut |
385
|
|
|
|
|
|
|
|
386
|
|
|
|
|
|
|
has 'custom_command' => ( |
387
|
|
|
|
|
|
|
is => 'rw', |
388
|
|
|
|
|
|
|
isa => 'Str', |
389
|
|
|
|
|
|
|
predicate => 'has_custom_command', |
390
|
|
|
|
|
|
|
clearer => 'clear_custom_command', |
391
|
|
|
|
|
|
|
); |
392
|
|
|
|
|
|
|
|
393
|
|
|
|
|
|
|
=head1 Internal Variables |
394
|
|
|
|
|
|
|
|
395
|
|
|
|
|
|
|
You should not need to mess with any of these. |
396
|
|
|
|
|
|
|
|
397
|
|
|
|
|
|
|
=head2 template |
398
|
|
|
|
|
|
|
|
399
|
|
|
|
|
|
|
template object for writing slurm batch submission script |
400
|
|
|
|
|
|
|
|
401
|
|
|
|
|
|
|
=cut |
402
|
|
|
|
|
|
|
|
403
|
|
|
|
|
|
|
has 'template' => ( |
404
|
|
|
|
|
|
|
traits => ['NoGetopt'], |
405
|
|
|
|
|
|
|
is => 'rw', |
406
|
|
|
|
|
|
|
required => 0, |
407
|
|
|
|
|
|
|
default => sub { return Template->new( ABSOLUTE => 1 ) }, |
408
|
|
|
|
|
|
|
); |
409
|
|
|
|
|
|
|
|
410
|
|
|
|
|
|
|
=head2 cmd_counter |
411
|
|
|
|
|
|
|
|
412
|
|
|
|
|
|
|
keep track of the number of commands - when we get to more than commands_per_node restart so we get submit to a new node. |
413
|
|
|
|
|
|
|
This is the number of commands within a batch. Each new batch resets it. |
414
|
|
|
|
|
|
|
|
415
|
|
|
|
|
|
|
=cut |
416
|
|
|
|
|
|
|
|
417
|
|
|
|
|
|
|
has 'cmd_counter' => ( |
418
|
|
|
|
|
|
|
traits => [ 'Counter', 'NoGetopt' ], |
419
|
|
|
|
|
|
|
is => 'ro', |
420
|
|
|
|
|
|
|
isa => 'Num', |
421
|
|
|
|
|
|
|
required => 1, |
422
|
|
|
|
|
|
|
default => 0, |
423
|
|
|
|
|
|
|
handles => { |
424
|
|
|
|
|
|
|
inc_cmd_counter => 'inc', |
425
|
|
|
|
|
|
|
dec_cmd_counter => 'dec', |
426
|
|
|
|
|
|
|
reset_cmd_counter => 'reset', |
427
|
|
|
|
|
|
|
}, |
428
|
|
|
|
|
|
|
); |
429
|
|
|
|
|
|
|
|
430
|
|
|
|
|
|
|
=head2 node_counter |
431
|
|
|
|
|
|
|
|
432
|
|
|
|
|
|
|
Keep track of which node we are on |
433
|
|
|
|
|
|
|
|
434
|
|
|
|
|
|
|
=cut |
435
|
|
|
|
|
|
|
|
436
|
|
|
|
|
|
|
has 'node_counter' => ( |
437
|
|
|
|
|
|
|
traits => [ 'Counter', 'NoGetopt' ], |
438
|
|
|
|
|
|
|
is => 'ro', |
439
|
|
|
|
|
|
|
isa => 'Num', |
440
|
|
|
|
|
|
|
required => 1, |
441
|
|
|
|
|
|
|
default => 0, |
442
|
|
|
|
|
|
|
handles => { |
443
|
|
|
|
|
|
|
inc_node_counter => 'inc', |
444
|
|
|
|
|
|
|
dec_node_counter => 'dec', |
445
|
|
|
|
|
|
|
reset_node_counter => 'reset', |
446
|
|
|
|
|
|
|
}, |
447
|
|
|
|
|
|
|
); |
448
|
|
|
|
|
|
|
|
449
|
|
|
|
|
|
|
=head2 batch_counter |
450
|
|
|
|
|
|
|
|
451
|
|
|
|
|
|
|
Keep track of how many batches we have submited to slurm |
452
|
|
|
|
|
|
|
|
453
|
|
|
|
|
|
|
=cut |
454
|
|
|
|
|
|
|
|
455
|
|
|
|
|
|
|
has 'batch_counter' => ( |
456
|
|
|
|
|
|
|
traits => [ 'Counter', 'NoGetopt' ], |
457
|
|
|
|
|
|
|
is => 'ro', |
458
|
|
|
|
|
|
|
isa => 'Num', |
459
|
|
|
|
|
|
|
required => 1, |
460
|
|
|
|
|
|
|
default => 1, |
461
|
|
|
|
|
|
|
handles => { |
462
|
|
|
|
|
|
|
inc_batch_counter => 'inc', |
463
|
|
|
|
|
|
|
dec_batch_counter => 'dec', |
464
|
|
|
|
|
|
|
reset_batch_counter => 'reset', |
465
|
|
|
|
|
|
|
}, |
466
|
|
|
|
|
|
|
); |

#=head2 node

#Node we are running on

#=cut

#has 'node' => (
#    traits => ['NoGetopt'],
#    is => 'rw',
#    isa => 'Str|Undef',
#    lazy => 1,
#    default => sub {
#        my $self = shift;
#        return $self->nodelist()->[0] if $self->nodelist;
#        return "";
#    }
#);

=head2 batch

List of commands to submit to slurm

=cut

has 'batch' => (
    traits    => [ 'String', 'NoGetopt', ],
    is        => 'rw',
    isa       => 'Str',
    default   => q{},
    required  => 0,
    handles   => { add_batch => 'append', },
    clearer   => 'clear_batch',
    predicate => 'has_batch',
);

=head2 cmdfile

File of commands for mcerunner/parallelrunner
Is cleared at the end of each slurm submission

=cut

has 'cmdfile' => (
    traits   => [ 'String', 'NoGetopt' ],
    default  => q{},
    is       => 'rw',
    isa      => 'Str',
    required => 0,
    handles  => { clear_cmdfile => 'clear', },
);

=head2 slurmfile

File generated from slurm template

=cut

has 'slurmfile' => (
    traits   => [ 'String', 'NoGetopt' ],
    default  => q{},
    is       => 'rw',
    isa      => 'Str',
    required => 0,
    handles  => { clear_slurmfile => 'clear', },
);

=head2 slurm_decides

Do not specify a node or partition in your sbatch file. Let Slurm decide which nodes/partition to submit jobs.

=cut

has 'slurm_decides' => (
    is      => 'rw',
    isa     => 'Bool',
    default => 0,
);

=head2 job_stats

HashRef of job stats - total jobs submitted, total processes, etc
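
The shape of the hash, as collect_stats() and do_stats() fill it in, looks roughly like the sketch below (the values are invented for illustration):

    {
        total_processes => 10,
        total_batches   => 2,
        jobnames        => { job => [ '001_job', '002_job' ] },
        batches         => {
            '001_job' => {
                jobname         => 'job',
                batch           => 1,
                commands        => 8,
                command_count   => '1-8',
                job_batches     => '1/2',
                batch_count     => '1/2',
                total_processes => 10,
                total_batches   => 2,
            },
            # 002_job is filled in the same way
        },
    }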

=cut

has 'job_stats' => (
    traits  => ['NoGetopt'],
    is      => 'rw',
    isa     => 'HashRef',
    default => sub {
        my $self = shift;
        my $href = {};
        $href->{total_processes} = 0;
        $href->{jobnames}        = {};
        $href->{total_batches}   = 0;
        $href->{batches}         = {};
    }
);

=head2 job_deps

    #HPC jobname=assembly
    #HPC job_deps=gzip,fastqc

=cut

has 'job_deps' => (
    traits   => ['NoGetopt'],
    is       => 'rw',
    isa      => 'HashRef',
    required => 0,
    default  => sub {
        my $self = shift;
        $self->jobname => [];
    },
    lazy => 1,
);

=head2 job_scheduler_id

Job Scheduler ID running the script. Passed to slurm for mail information

=cut

has 'job_scheduler_id' => (
    is       => 'rw',
    isa      => 'Str|Undef',
    default  => sub { return $ENV{SBATCH_JOB_ID} || $ENV{PBS_JOBID} || undef; },
    required => 1,
    documentation =>
        q{This defaults to your current Job Scheduler ID. Ignore this if running on a single node},
    predicate => 'has_job_scheduler_id',
    clearer   => 'clear_job_scheduler_id',
);

=head2 jobname

Specify a job name, and jobs will be jobname_1, jobname_2, jobname_x

=cut

has 'jobname' => (
    is        => 'rw',
    isa       => 'Str',
    required  => 0,
    traits    => ['String'],
    default   => q{job},
    default   => sub { return $ENV{SBATCH_JOB_NAME} || $ENV{PBS_JOBNAME} || 'job'; },
    predicate => 'has_jobname',
    handles   => {
        add_jobname     => 'append',
        clear_jobname   => 'clear',
        replace_jobname => 'replace',
    },
    documentation =>
        q{Specify a job name, each job will be appended with its batch order},
);

=head2 jobref

Array of arrays details slurm/process/scheduler job id. Index -1 is the most recent job submissions, and there will be an index -2 if there are any job dependencies

=cut

has 'jobref' => (
    traits  => ['NoGetopt'],
    is      => 'rw',
    isa     => 'ArrayRef',
    default => sub { [ [] ] },
);

=head1 SUBROUTINES/METHODS

=cut

=head2 run()

First sub called
Calling system module load * does not work within a screen session!
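
In normal use you never call this directly; the wrapper scripts construct the subclass and call run() for you. A minimal sketch of what they do (HPC::Runner::Slurm is one of the subclasses; the option handling shown is an assumption about the wrapper, not part of this module):

    use HPC::Runner::Slurm;

    HPC::Runner::Slurm->new_with_options()->run;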

=cut

sub run {
    my $self = shift;

    if ( $self->serial ) {
        $self->procs(1);
    }

    $self->check_files;

    $self->first_pass(1);
    $self->parse_file_slurm;
    $self->do_stats;

    $DB::single = 2;
    $self->first_pass(0);
    $self->parse_file_slurm;
}

=head2 do_stats

Do some stats on our job stats
Foreach job name get the number of batches, and put that in batches->batch->job_batches

=cut

sub do_stats {
    my $self = shift;

    my @jobs = keys %{ $self->job_stats->{jobnames} };

    foreach my $batch ( keys %{ $self->job_stats->{batches} } ) {
        my $href        = $self->job_stats->{batches}->{$batch};
        my $jobname     = $href->{jobname};
        my @job_batches = @{ $self->job_stats->{jobnames}->{$jobname} };

        my $index = firstidx { $_ eq $batch } @job_batches;
        $index += 1;
        my $lenjobs = $#job_batches + 1;
        $self->job_stats->{batches}->{$batch}->{job_batches} = $index . "/" . $lenjobs;

        $href->{total_processes} = $self->job_stats->{total_processes};
        $href->{total_batches}   = $self->job_stats->{total_batches};
        $href->{batch_count}     = $href->{batch} . "/" . $self->job_stats->{total_batches};
    }
}

=head2 check_files()

Check to make sure the outdir exists.
If it doesn't exist the entire path will be created

=cut

sub check_files {
    my ($self) = @_;
    my ($t);

    $t = $self->outdir;
    $t =~ s/\/$//g;
    $self->outdir($t);

    #make the outdir
    make_path( $self->outdir ) if !-d $self->outdir;

    #$self->get_nodes;
}

=head2 parse_file_slurm

Parse the file looking for the following conditions

    lines ending in `\`
    wait
    newnode

Batch commands in groups of $self->commands_per_node, or smaller as wait and newnode indicate

=cut

sub parse_file_slurm {
    my $self = shift;
    my $fh = IO::File->new( $self->infile, q{<} )
        or print "Error opening file "
        . $self->infile . " "
        . $!;    # even better!

    $self->reset_cmd_counter;
    $self->reset_node_counter;
    $self->reset_batch_counter;
    $self->jobref( [] );

    if ( $self->afterok ) {
        $self->wait(1);
        $self->jobref->[0] = $self->afterok;
        push( @{ $self->jobref }, [] );
    }

    while (<$fh>) {
        my $line = $_;
        next unless $line;
        next unless $line =~ m/\S/;
        $self->process_lines($line);
    }
    $self->work if $self->has_batch;
    push( @{ $self->jobref }, [] ) if $self->serial;
    close($fh);
}

sub process_lines {
    my $self = shift;
    my $line = shift;

    #Do a sanity check for nohup
    if ( $line =~ m/^nohup/ ) {
        die print
            "You cannot submit jobs to the queue using nohup! Please remove nohup and try again.\n";
    }

    #if( $self->cmd_counter > 0 && 0 == $self->cmd_counter % ($self->commands_per_node + 1) && $self->batch ){
    if (   $self->cmd_counter > 0
        && 0 == $self->cmd_counter % ( $self->commands_per_node )
        && $self->batch )
    {
        #Run this batch and start the next
        $self->work;
        push( @{ $self->jobref }, [] ) if $self->serial;
    }

    $self->check_hpc_meta($line);
    return if $line =~ m/^#/;

    if ( $self->has_cmd ) {
        $self->add_cmd($line);
        $self->add_batch($line);
        if ( $line =~ m/\\$/ ) {
            return;
        }
        else {
            $self->add_cmd("\n");
            $self->add_batch("\n");
            $self->clear_cmd;
            $self->inc_cmd_counter;
        }
    }
    else {
        $self->add_cmd($line);

        if ( $line =~ m/\\$/ ) {
            $self->add_batch($line);

            #next;
            return;
        }
        elsif ( $self->match_cmd(qr/^wait$/) ) {

            #submit this batch and get the job id so the next can depend upon it
            $self->clear_cmd;
            $self->wait(1);
            $self->work if $self->has_batch;
            push( @{ $self->jobref }, [] );
        }
        elsif ( $self->match_cmd(qr/^newnode$/) ) {
            $self->clear_cmd;
            $self->work if $self->has_batch;
            push( @{ $self->jobref }, [] ) if $self->serial;
        }
        else {
            #Don't want to increase command count for wait and newnode
            $self->inc_cmd_counter;
        }
        $self->add_batch( $line . "\n" ) if $self->has_cmd;
        $self->clear_cmd;
    }

}

=head2 check_meta

allow for changing parameters midway through the script

    #Job1
    echo "this is job one" && \
    bin/dostuff bblahblahblah

    #HPC cpus_per_task=12

    echo "This is my new job with new HPC params!"

=cut

sub check_hpc_meta {
    my $self = shift;
    my $line = shift;
    my ( @match, $t1, $t2 );

    return unless $line =~ m/^#HPC/;

    @match = $line =~ m/HPC (\w+)=(.+)$/;
    ( $t1, $t2 ) = ( $match[0], $match[1] );

    if ( !$self->can($t1) ) {
        print "Option $t1 is an invalid option!\n";
        return;
    }

    if ($t1) {
        if ( $t1 eq "module" ) {
            $self->$t1( [$t2] );
        }
        else {
            $self->$t1($t2);
        }
    }
    else {
        @match = $line =~ m/HPC (\w+)$/;
        $t1 = $match[0];
        return unless $t1;
        $t1 = "clear_$t1";
        $self->$t1;
    }
}

sub check_note_meta {
    my $self = shift;
    my $line = shift;

    return unless $line =~ m/^#NOTE/;

    $self->add_batch( $line . "\n" );
}

=head2 work

Get the node #may be removed but we'll try it out
Process the batch
Submit to slurm
Take care of the counters

=cut

sub work {
    my $self = shift;

    $DB::single = 2;

    $self->collect_stats if $self->first_pass;

    #if ( $self->node_counter > ( scalar @{ $self->nodelist } ) ) {
    #    $self->reset_node_counter;
    #}
    #$self->node( $self->nodelist()->[ $self->node_counter ] )
    #    if $self->nodelist;
    $self->process_batch unless $self->first_pass;

    $self->inc_batch_counter;
    $self->clear_batch;
    $self->inc_node_counter;

    $self->reset_cmd_counter;
}

=head2 collect_stats

Collect job stats

=cut

sub collect_stats {
    my $self = shift;

    return unless $self->first_pass;

    my $counter = $self->batch_counter;
    $counter = sprintf( "%03d", $counter );

    #Get the total processes
    my $href = $self->job_stats;
    $href->{total_processes} += $self->cmd_counter;

    #Get the command count
    my $command_count = ( $href->{total_processes} - $self->cmd_counter ) + 1;

    #Get number of commands in this batch
    $href->{batches}->{ $counter . "_" . $self->jobname } = {
        commands      => $self->cmd_counter,
        jobname       => $self->jobname,
        batch         => $self->batch_counter,
        command_count => $command_count . "-" . $href->{total_processes},
    };

    my $jobhref = {};
    $jobhref->{ $self->jobname } = [];

    if ( exists $href->{jobnames}->{ $self->jobname } ) {
        my $tarray = $href->{jobnames}->{ $self->jobname };
        push( @{$tarray}, $counter . "_" . $self->jobname );
    }
    else {
        $href->{jobnames}->{ $self->jobname }
            = [ $counter . "_" . $self->jobname ];
    }

    $href->{total_batches} += 1;
    $self->job_stats($href);
}

=head2 process_batch()

Create the slurm submission script from the slurm template
Write out template, submission job, and infile for parallel runner

=cut

sub process_batch {
    my $self = shift;
    my ( $cmdfile, $slurmfile, $slurmsubmit, $fh, $command );

    my $counter = $self->batch_counter;
    $counter = sprintf( "%03d", $counter );

    #$self->cmdfile($self->outdir."/".$self->jobname."_".$self->batch_counter.".in");
    #$self->slurmfile($self->outdir."/".$self->jobname."_".$self->batch_counter.".sh");
    $self->cmdfile(
        $self->outdir . "/$counter" . "_" . $self->jobname . ".in" );
    $self->slurmfile(
        $self->outdir . "/$counter" . "_" . $self->jobname . ".sh" );

    $fh = IO::File->new( $self->cmdfile, q{>} )
        or print "Error opening file " . $self->cmdfile . " " . $!;

    print $fh $self->batch if defined $fh && defined $self->batch;
    $fh->close;

    my $ok;
    if ( $self->wait ) {
        $ok = join( ":", @{ $self->jobref->[-2] } ) if $self->jobref->[-2];
    }

    $command = $self->process_batch_command();
    $DB::single = 2;

    $self->template->process(
        $self->template_file,
        {   JOBNAME   => $counter . "_" . $self->jobname,
            USER      => $self->user,
            CPU       => $self->cpus_per_task,
            PARTITION => $self->partition,
            AFTEROK   => $ok,
            OUT       => $self->logdir
                . "/$counter" . "_"
                . $self->jobname . ".log",
            MODULE  => $self->module,
            self    => $self,
            COMMAND => $command
        },
        $self->slurmfile
    ) || die $self->template->error;

    chmod 0777, $self->slurmfile;

    $self->submit_slurm if $self->submit_to_slurm;
}

=head2 process_batch_command

splitting this off from the main command
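
For the default use_processes path, the string this builds ends up in the [% COMMAND %] slot of the template and looks roughly like the sketch below (the paths, the --procs value, and the metastr contents are only placeholders):

    cd /path/where/you/submitted
    mcerunner.pl --procs 4 --infile /path/to/outdir/001_job.in \
        --outdir /path/to/outdir --logname 001_job \
        --process_table /path/to/process_table --metastr '{"jobname":"job","batch":1}'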

=cut

#TODO add support for custom commands
#TODO Change this all to a plugin system

sub process_batch_command {
    my ($self) = @_;
    my $command;

    #Giving outdir/jobname doesn't work unless a full file path is supplied
    #Need to get absolute path going on...
    #$self->cmdfile($self->jobname."_batch".$self->batch_counter.".in");

    my $counter = $self->batch_counter;
    $counter = sprintf( "%03d", $counter );

    $command = "cd " . getcwd() . "\n";
    if ( $self->has_custom_command ) {
        $command
            .= $self->custom_command
            . " --procs "
            . $self->procs
            . " --infile "
            . $self->cmdfile
            . " --outdir "
            . $self->outdir
            . " --logname "
            . "$counter" . "_"
            . $self->jobname
            . " --process_table "
            . $self->process_table;
    }
    elsif ( $self->use_gnuparallel ) {
        $command
            .= "cat "
            . $self->cmdfile
            . " | parallelparser.pl | parallel --joblog "
            . $self->outdir
            . "/main.log --gnu -N 1 -q gnuparallelrunner.pl --command `echo {}` --outdir "
            . $self->outdir
            . " --logname $counter" . "_"
            . $self->jobname
            . " --seq {#}" . "\n";
    }
    elsif ( $self->use_threads ) {
        $command
            .= "paralellrunner.pl --procs "
            . $self->procs
            . " --infile "
            . $self->cmdfile
            . " --outdir "
            . $self->outdir
            . " --logname $counter" . "_"
            . $self->jobname
            . " --process_table "
            . $self->process_table;
    }
    elsif ( $self->use_processes ) {
        $command
            .= "mcerunner.pl --procs "
            . $self->procs
            . " --infile "
            . $self->cmdfile
            . " --outdir "
            . $self->outdir
            . " --logname $counter" . "_"
            . " --process_table "
            . $self->process_table
            . $self->jobname;
    }
    else {
        die print "None of the job processes were chosen!\n";
    }

    my $metastr = $self->create_meta_str;
    $command .= $metastr if $metastr;

    my $pluginstr = $self->create_plugin_str;
    $command .= $pluginstr if $pluginstr;

    return $command;
}

sub create_meta_str {
    my $self = shift;

    my $counter = $self->batch_counter;
    $counter = sprintf( "%03d", $counter );
    my $batchname = $counter . "_" . $self->jobname;

    my $batch = $self->job_stats->{batches}->{$batchname};

    my $json      = JSON->new->allow_nonref;
    my $json_text = $json->encode($batch);

    $DB::single = 2;
    $json_text = " --metastr \'$json_text\'";
    return $json_text;
}

sub create_plugin_str {
    my $self = shift;

    return unless $self->plugins;
    my $plugins   = $self->plugins;
    my $pluginstr = "";
    if ($plugins) {
        if ( ref($plugins) ) {
            my @plugins = @{$plugins};
            foreach my $plugin (@plugins) {
                $pluginstr .= " --plugins $plugin";
            }
        }
        else {
            $pluginstr = " --plugins $plugins";
        }
    }

    return $pluginstr;
}

__PACKAGE__->meta->make_immutable;

#use namespace::autoclean;

1;

=head1 AUTHOR

Jillian Rowe E<lt>jillian.e.rowe@gmail.comE<gt>

=head1 COPYRIGHT

Copyright 2016- Jillian Rowe

=head1 LICENSE

This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.

=head1 SEE ALSO

L<HPC::Runner::Slurm>
L<HPC::Runner::PBS>
L<HPC::Runner::MCE>

=cut

__END__