line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
1
|
|
|
|
|
|
|
package HPC::Runner::Scheduler; |
2
|
|
|
|
|
|
|
|
3
|
1
|
|
|
1
|
|
12700
|
use File::Path qw(make_path remove_tree); |
|
1
|
|
|
|
|
2
|
|
|
1
|
|
|
|
|
63
|
|
4
|
1
|
|
|
1
|
|
605
|
use File::Temp qw/ tempfile tempdir /; |
|
1
|
|
|
|
|
15597
|
|
|
1
|
|
|
|
|
51
|
|
5
|
1
|
|
|
1
|
|
386
|
use IO::File; |
|
1
|
|
|
|
|
617
|
|
|
1
|
|
|
|
|
87
|
|
6
|
1
|
|
|
1
|
|
369
|
use IO::Select; |
|
1
|
|
|
|
|
1018
|
|
|
1
|
|
|
|
|
34
|
|
7
|
1
|
|
|
1
|
|
4
|
use Cwd; |
|
1
|
|
|
|
|
1
|
|
|
1
|
|
|
|
|
38
|
|
8
|
1
|
|
|
1
|
|
428
|
use IPC::Open3; |
|
1
|
|
|
|
|
1841
|
|
|
1
|
|
|
|
|
40
|
|
9
|
1
|
|
|
1
|
|
4
|
use Symbol; |
|
1
|
|
|
|
|
1
|
|
|
1
|
|
|
|
|
35
|
|
10
|
1
|
|
|
1
|
|
455
|
use Template; |
|
1
|
|
|
|
|
13355
|
|
|
1
|
|
|
|
|
30
|
|
11
|
1
|
|
|
1
|
|
751
|
use Log::Log4perl qw(:easy); |
|
1
|
|
|
|
|
32412
|
|
|
1
|
|
|
|
|
4
|
|
12
|
1
|
|
|
1
|
|
1127
|
use DateTime; |
|
1
|
|
|
|
|
84040
|
|
|
1
|
|
|
|
|
32
|
|
13
|
1
|
|
|
1
|
|
506
|
use Data::Dumper; |
|
1
|
|
|
|
|
4227
|
|
|
1
|
|
|
|
|
53
|
|
14
|
1
|
|
|
1
|
|
4
|
use List::Util qw(shuffle); |
|
1
|
|
|
|
|
2
|
|
|
1
|
|
|
|
|
50
|
|
15
|
1
|
|
|
1
|
|
462
|
use List::MoreUtils qw(firstidx); |
|
1
|
|
|
|
|
6990
|
|
|
1
|
|
|
|
|
4
|
|
16
|
1
|
|
|
1
|
|
923
|
use JSON; |
|
1
|
|
|
|
|
7756
|
|
|
1
|
|
|
|
|
4
|
|
17
|
|
|
|
|
|
|
|
18
|
1
|
|
|
1
|
|
571
|
use Moose; |
|
1
|
|
|
|
|
289056
|
|
|
1
|
|
|
|
|
5
|
|
19
|
1
|
|
|
1
|
|
4648
|
use namespace::autoclean; |
|
1
|
|
|
|
|
2
|
|
|
1
|
|
|
|
|
7
|
|
20
|
|
|
|
|
|
|
extends 'HPC::Runner'; |
21
|
|
|
|
|
|
|
with 'MooseX::SimpleConfig'; |
22
|
|
|
|
|
|
|
|
23
|
|
|
|
|
|
|
# For pretty man pages! |
24
|
|
|
|
|
|
|
$ENV{TERM} = 'xterm-256color'; |
25
|
|
|
|
|
|
|
|
26
|
|
|
|
|
|
|
our $VERSION = '0.09'; |
27
|
|
|
|
|
|
|
|
28
|
|
|
|
|
|
|
=encoding utf-8 |
29
|
|
|
|
|
|
|
|
30
|
|
|
|
|
|
|
=head1 NAME |
31
|
|
|
|
|
|
|
|
32
|
|
|
|
|
|
|
HPC::Runner::Scheduler - Base Library for HPC::Runner::Slurm and HPC::Runner::PBS |
33
|
|
|
|
|
|
|
|
34
|
|
|
|
|
|
|
=head1 SYNOPSIS |
35
|
|
|
|
|
|
|
|
36
|
|
|
|
|
|
|
pbsrunner.pl/slurmrunner.pl/mcerunner.pl --infile list_of_commands |
37
|
|
|
|
|
|
|
|
38
|
|
|
|
|
|
|
=head1 DESCRIPTION |
39
|
|
|
|
|
|
|
|
40
|
|
|
|
|
|
|
HPC::Runner::Scheduler is a base library for creating templates of HPC Scheduler (Slurm, PBS, etc) submission scripts. |
41
|
|
|
|
|
|
|
|
42
|
|
|
|
|
|
|
All the scheduler variables: memory, cpus, nodes, partitions/queues, are abstracted to a template. Instead of writing an entire submission template |
43
|
|
|
|
|
|
|
|
44
|
|
|
|
|
|
|
slurmrunner.pl --infile list_of_commands #with list of optional parameters |
45
|
|
|
|
|
|
|
|
46
|
|
|
|
|
|
|
Please see the indepth usage guide at L<HPC::Runner::Usage> |
47
|
|
|
|
|
|
|
|
48
|
|
|
|
|
|
|
=cut |
49
|
|
|
|
|
|
|
|
50
|
|
|
|
|
|
|
=head1 User Options |
51
|
|
|
|
|
|
|
|
52
|
|
|
|
|
|
|
User options can be passed to the script with script --opt1 or in a configfile. It uses MooseX::SimpleConfig for the commands |
53
|
|
|
|
|
|
|
|
54
|
|
|
|
|
|
|
=head2 configfile |
55
|
|
|
|
|
|
|
|
56
|
|
|
|
|
|
|
Config file to pass to command line as --configfile /path/to/file. It should be a yaml or xml (untested) |
57
|
|
|
|
|
|
|
This is optional. Paramaters can be passed straight to the command line |
58
|
|
|
|
|
|
|
|
59
|
|
|
|
|
|
|
=head3 example.yml |
60
|
|
|
|
|
|
|
|
61
|
|
|
|
|
|
|
--- |
62
|
|
|
|
|
|
|
infile: "/path/to/commands/testcommand.in" |
63
|
|
|
|
|
|
|
outdir: "path/to/testdir" |
64
|
|
|
|
|
|
|
module: |
65
|
|
|
|
|
|
|
- "R2" |
66
|
|
|
|
|
|
|
- "shared" |
67
|
|
|
|
|
|
|
|
68
|
|
|
|
|
|
|
=cut |
69
|
|
|
|
|
|
|
|
70
|
|
|
|
|
|
|
has '+configfile' => ( |
71
|
|
|
|
|
|
|
required => 0, |
72
|
|
|
|
|
|
|
documentation => |
73
|
|
|
|
|
|
|
q{If you get tired of putting all your options on the command line create a config file instead. |
74
|
|
|
|
|
|
|
--- |
75
|
|
|
|
|
|
|
infile: "/path/to/commands/testcommand.in" |
76
|
|
|
|
|
|
|
outdir: "path/to/testdir" |
77
|
|
|
|
|
|
|
module: |
78
|
|
|
|
|
|
|
- "R2" |
79
|
|
|
|
|
|
|
- "shared" |
80
|
|
|
|
|
|
|
} |
81
|
|
|
|
|
|
|
); |
82
|
|
|
|
|
|
|
|
83
|
|
|
|
|
|
|
=head2 infile |
84
|
|
|
|
|
|
|
|
85
|
|
|
|
|
|
|
infile of commands separated by newline |
86
|
|
|
|
|
|
|
|
87
|
|
|
|
|
|
|
=head3 example.in |
88
|
|
|
|
|
|
|
|
89
|
|
|
|
|
|
|
cmd1 |
90
|
|
|
|
|
|
|
cmd2 --input --input \ |
91
|
|
|
|
|
|
|
--someotherinput |
92
|
|
|
|
|
|
|
wait |
93
|
|
|
|
|
|
|
#Wait tells slurm to make sure previous commands have exited with exit status 0. |
94
|
|
|
|
|
|
|
cmd3 ##very heavy job |
95
|
|
|
|
|
|
|
newnode |
96
|
|
|
|
|
|
|
#cmd3 is a very heavy job so lets start the next job on a new node |
97
|
|
|
|
|
|
|
|
98
|
|
|
|
|
|
|
=cut |
99
|
|
|
|
|
|
|
|
100
|
|
|
|
|
|
|
=head2 module |
101
|
|
|
|
|
|
|
|
102
|
|
|
|
|
|
|
modules to load with slurm |
103
|
|
|
|
|
|
|
Should use the same names used in 'module load' |
104
|
|
|
|
|
|
|
|
105
|
|
|
|
|
|
|
Example. R2 becomes 'module load R2' |
106
|
|
|
|
|
|
|
|
107
|
|
|
|
|
|
|
=cut |
108
|
|
|
|
|
|
|
|
109
|
|
|
|
|
|
|
has 'module' => ( |
110
|
|
|
|
|
|
|
is => 'rw', |
111
|
|
|
|
|
|
|
isa => 'ArrayRef', |
112
|
|
|
|
|
|
|
required => 0, |
113
|
|
|
|
|
|
|
documentation => q{List of modules to load ex. R2, samtools, etc}, |
114
|
|
|
|
|
|
|
default => sub { [] }, |
115
|
|
|
|
|
|
|
); |
116
|
|
|
|
|
|
|
|
117
|
|
|
|
|
|
|
=head2 afterok |
118
|
|
|
|
|
|
|
|
119
|
|
|
|
|
|
|
The afterok switch in slurm. --afterok 123 will tell slurm to start this job after job 123 has completed successfully. |
120
|
|
|
|
|
|
|
|
121
|
|
|
|
|
|
|
=cut |
122
|
|
|
|
|
|
|
|
123
|
|
|
|
|
|
|
has afterok => ( |
124
|
|
|
|
|
|
|
is => 'rw', |
125
|
|
|
|
|
|
|
isa => 'ArrayRef', |
126
|
|
|
|
|
|
|
required => 0, |
127
|
|
|
|
|
|
|
default => sub { |
128
|
|
|
|
|
|
|
return []; |
129
|
|
|
|
|
|
|
}, |
130
|
|
|
|
|
|
|
); |
131
|
|
|
|
|
|
|
|
132
|
|
|
|
|
|
|
=head2 cpus_per_task |
133
|
|
|
|
|
|
|
|
134
|
|
|
|
|
|
|
slurm item --cpus_per_task defaults to 4, which is probably fine |
135
|
|
|
|
|
|
|
|
136
|
|
|
|
|
|
|
=cut |
137
|
|
|
|
|
|
|
|
138
|
|
|
|
|
|
|
has 'cpus_per_task' => ( |
139
|
|
|
|
|
|
|
is => 'rw', |
140
|
|
|
|
|
|
|
isa => 'Str', |
141
|
|
|
|
|
|
|
required => 0, |
142
|
|
|
|
|
|
|
default => 4, |
143
|
|
|
|
|
|
|
predicate => 'has_cpus_per_task', |
144
|
|
|
|
|
|
|
clearer => 'clear_cpus_per_task' |
145
|
|
|
|
|
|
|
); |
146
|
|
|
|
|
|
|
|
147
|
|
|
|
|
|
|
=head2 commands_per_node |
148
|
|
|
|
|
|
|
|
149
|
|
|
|
|
|
|
--commands_per_node defaults to 8, which is probably fine |
150
|
|
|
|
|
|
|
|
151
|
|
|
|
|
|
|
=cut |
152
|
|
|
|
|
|
|
|
153
|
|
|
|
|
|
|
has 'commands_per_node' => ( |
154
|
|
|
|
|
|
|
is => 'rw', |
155
|
|
|
|
|
|
|
isa => 'Str', |
156
|
|
|
|
|
|
|
required => 0, |
157
|
|
|
|
|
|
|
default => 8, |
158
|
|
|
|
|
|
|
documentation => |
159
|
|
|
|
|
|
|
q{Commands to run on each node. This is not the same as concurrent_commands_per_node!}, |
160
|
|
|
|
|
|
|
predicate => 'has_commands_per_node', |
161
|
|
|
|
|
|
|
clearer => 'clear_commands_per_node' |
162
|
|
|
|
|
|
|
); |
163
|
|
|
|
|
|
|
|
164
|
|
|
|
|
|
|
=head2 nodes_count |
165
|
|
|
|
|
|
|
|
166
|
|
|
|
|
|
|
Number of nodes to use on a job. This is only useful for mpi jobs. |
167
|
|
|
|
|
|
|
|
168
|
|
|
|
|
|
|
PBS: |
169
|
|
|
|
|
|
|
#PBS -l nodes=nodes_count:ppn=16 this |
170
|
|
|
|
|
|
|
|
171
|
|
|
|
|
|
|
Slurm: |
172
|
|
|
|
|
|
|
#SBATCH --nodes nodes_count |
173
|
|
|
|
|
|
|
|
174
|
|
|
|
|
|
|
=cut |
175
|
|
|
|
|
|
|
|
176
|
|
|
|
|
|
|
has 'nodes_count' => ( |
177
|
|
|
|
|
|
|
is => 'rw', |
178
|
|
|
|
|
|
|
isa => 'Str', |
179
|
|
|
|
|
|
|
required => 0, |
180
|
|
|
|
|
|
|
default => 1, |
181
|
|
|
|
|
|
|
documentation => |
182
|
|
|
|
|
|
|
q{Number of nodes requested. You should only use this if submitting parallel jobs.}, |
183
|
|
|
|
|
|
|
predicate => 'has_nodes_count', |
184
|
|
|
|
|
|
|
clearer => 'clear_nodes_count' |
185
|
|
|
|
|
|
|
); |
186
|
|
|
|
|
|
|
|
187
|
|
|
|
|
|
|
=head2 partition |
188
|
|
|
|
|
|
|
|
189
|
|
|
|
|
|
|
#Should probably have something at some point that you can specify multiple partitions.... |
190
|
|
|
|
|
|
|
|
191
|
|
|
|
|
|
|
Specify the partition. Defaults to the partition that has the most nodes. |
192
|
|
|
|
|
|
|
|
193
|
|
|
|
|
|
|
=cut |
194
|
|
|
|
|
|
|
|
195
|
|
|
|
|
|
|
has 'partition' => ( |
196
|
|
|
|
|
|
|
is => 'rw', |
197
|
|
|
|
|
|
|
isa => 'Str', |
198
|
|
|
|
|
|
|
required => 0, |
199
|
|
|
|
|
|
|
default => '', |
200
|
|
|
|
|
|
|
documentation => |
201
|
|
|
|
|
|
|
q{Slurm partition to submit jobs to. Defaults to the partition with the most available nodes}, |
202
|
|
|
|
|
|
|
predicate => 'has_partition', |
203
|
|
|
|
|
|
|
clearer => 'clear_partition' |
204
|
|
|
|
|
|
|
); |
205
|
|
|
|
|
|
|
|
206
|
|
|
|
|
|
|
=head2 walltime |
207
|
|
|
|
|
|
|
|
208
|
|
|
|
|
|
|
Define PBS walltime |
209
|
|
|
|
|
|
|
|
210
|
|
|
|
|
|
|
=cut |
211
|
|
|
|
|
|
|
|
212
|
|
|
|
|
|
|
has 'walltime' => (
    is        => 'rw',
    isa       => 'Str',
    required  => 1,

    # Default wall clock limit in HH:MM:SS.
    default   => '04:00:00',
    predicate => 'has_walltime',

    # Fixed: the clearer name previously contained a stray trailing comma
    # inside the string ('clear_walltime,'), which generated a misnamed
    # clearer method that callers of clear_walltime() could never reach.
    clearer   => 'clear_walltime',
);
220
|
|
|
|
|
|
|
|
221
|
|
|
|
|
|
|
=head2 mem |
222
|
|
|
|
|
|
|
|
223
|
|
|
|
|
|
|
=cut |
224
|
|
|
|
|
|
|
|
225
|
|
|
|
|
|
|
has 'mem' => ( |
226
|
|
|
|
|
|
|
is => 'rw', |
227
|
|
|
|
|
|
|
isa => 'Str|Undef', |
228
|
|
|
|
|
|
|
predicate => 'has_mem', |
229
|
|
|
|
|
|
|
clearer => 'clear_mem', |
230
|
|
|
|
|
|
|
required => 0, |
231
|
|
|
|
|
|
|
documentation => q{Supply a memory limit}, |
232
|
|
|
|
|
|
|
); |
233
|
|
|
|
|
|
|
|
234
|
|
|
|
|
|
|
|
235
|
|
|
|
|
|
|
#=head2 nodelist |
236
|
|
|
|
|
|
|
|
237
|
|
|
|
|
|
|
#Defaults to the nodes on the defq queue |
238
|
|
|
|
|
|
|
|
239
|
|
|
|
|
|
|
#=cut |
240
|
|
|
|
|
|
|
|
241
|
|
|
|
|
|
|
#has 'nodelist' => ( |
242
|
|
|
|
|
|
|
#is => 'rw', |
243
|
|
|
|
|
|
|
#isa => 'ArrayRef', |
244
|
|
|
|
|
|
|
#required => 0, |
245
|
|
|
|
|
|
|
#default => sub { return [] }, |
246
|
|
|
|
|
|
|
#documentation => |
247
|
|
|
|
|
|
|
#q{List of nodes to submit jobs to. Defaults to the partition with the most nodes.}, |
248
|
|
|
|
|
|
|
#); |
249
|
|
|
|
|
|
|
|
250
|
|
|
|
|
|
|
=head2 submit_slurm |
251
|
|
|
|
|
|
|
|
252
|
|
|
|
|
|
|
Bool value whether or not to submit to slurm. If you are looking to debug your files, or this script you will want to set this to zero. |
253
|
|
|
|
|
|
|
Don't submit to slurm with --nosubmit_to_slurm from the command line or |
254
|
|
|
|
|
|
|
$self->submit_to_slurm(0); within your code |
255
|
|
|
|
|
|
|
|
256
|
|
|
|
|
|
|
=cut |
257
|
|
|
|
|
|
|
|
258
|
|
|
|
|
|
|
has 'submit_to_slurm' => ( |
259
|
|
|
|
|
|
|
is => 'rw', |
260
|
|
|
|
|
|
|
isa => 'Bool', |
261
|
|
|
|
|
|
|
default => 1, |
262
|
|
|
|
|
|
|
required => 1, |
263
|
|
|
|
|
|
|
documentation => |
264
|
|
|
|
|
|
|
q{Bool value whether or not to submit to slurm. If you are looking to debug your files, or this script you will want to set this to zero.}, |
265
|
|
|
|
|
|
|
); |
266
|
|
|
|
|
|
|
|
267
|
|
|
|
|
|
|
=head2 first_pass |
268
|
|
|
|
|
|
|
|
269
|
|
|
|
|
|
|
Do a first pass of the file to get all the stats |
270
|
|
|
|
|
|
|
|
271
|
|
|
|
|
|
|
=cut |
272
|
|
|
|
|
|
|
|
273
|
|
|
|
|
|
|
has 'first_pass' => ( |
274
|
|
|
|
|
|
|
traits => ['NoGetopt'], |
275
|
|
|
|
|
|
|
is => 'rw', |
276
|
|
|
|
|
|
|
isa => 'Bool', |
277
|
|
|
|
|
|
|
default => 1, |
278
|
|
|
|
|
|
|
required => 1, |
279
|
|
|
|
|
|
|
); |
280
|
|
|
|
|
|
|
|
281
|
|
|
|
|
|
|
=head2 template_file |
282
|
|
|
|
|
|
|
|
283
|
|
|
|
|
|
|
actual template file |
284
|
|
|
|
|
|
|
|
285
|
|
|
|
|
|
|
One is generated here for you, but you can always supply your own with --template_file /path/to/template |
286
|
|
|
|
|
|
|
|
287
|
|
|
|
|
|
|
=cut |
288
|
|
|
|
|
|
|
|
289
|
|
|
|
|
|
|
has 'template_file' => ( |
290
|
|
|
|
|
|
|
is => 'rw', |
291
|
|
|
|
|
|
|
isa => 'Str', |
292
|
|
|
|
|
|
|
default => sub { |
293
|
|
|
|
|
|
|
my $self = shift; |
294
|
|
|
|
|
|
|
|
295
|
|
|
|
|
|
|
my ( $fh, $filename ) = tempfile(); |
296
|
|
|
|
|
|
|
|
297
|
|
|
|
|
|
|
my $tt = <<EOF; |
298
|
|
|
|
|
|
|
#!/bin/bash |
299
|
|
|
|
|
|
|
# |
300
|
|
|
|
|
|
|
#SBATCH --share |
301
|
|
|
|
|
|
|
#SBATCH --get-user-env |
302
|
|
|
|
|
|
|
#SBATCH --job-name=[% JOBNAME %] |
303
|
|
|
|
|
|
|
#SBATCH --output=[% OUT %] |
304
|
|
|
|
|
|
|
[% IF PARTITION %] |
305
|
|
|
|
|
|
|
#SBATCH --partition=[% PARTITION %] |
306
|
|
|
|
|
|
|
[% END %] |
307
|
|
|
|
|
|
|
[% IF CPU %] |
308
|
|
|
|
|
|
|
#SBATCH --cpus-per-task=[% CPU %] |
309
|
|
|
|
|
|
|
[% END %] |
310
|
|
|
|
|
|
|
[% IF self.has_mem %] |
311
|
|
|
|
|
|
|
#SBATCH --mem=[% self.mem %] |
312
|
|
|
|
|
|
|
[% END %] |
313
|
|
|
|
|
|
|
[% IF self.has_walltime %] |
314
|
|
|
|
|
|
|
#SBATCH --time=[% self.walltime %] |
315
|
|
|
|
|
|
|
[% END %] |
316
|
|
|
|
|
|
|
[% IF AFTEROK %] |
317
|
|
|
|
|
|
|
#SBATCH --dependency=afterok:[% AFTEROK %] |
318
|
|
|
|
|
|
|
[% END %] |
319
|
|
|
|
|
|
|
|
320
|
|
|
|
|
|
|
[% IF MODULE %] |
321
|
|
|
|
|
|
|
[% FOR d = MODULE %] |
322
|
|
|
|
|
|
|
module load [% d %] |
323
|
|
|
|
|
|
|
[% END %] |
324
|
|
|
|
|
|
|
[% END %] |
325
|
|
|
|
|
|
|
|
326
|
|
|
|
|
|
|
[% COMMAND %] |
327
|
|
|
|
|
|
|
EOF |
328
|
|
|
|
|
|
|
|
329
|
|
|
|
|
|
|
print $fh $tt; |
330
|
|
|
|
|
|
|
return $filename; |
331
|
|
|
|
|
|
|
}, |
332
|
|
|
|
|
|
|
predicate => 'has_template_file', |
333
|
|
|
|
|
|
|
clearer => 'clear_template_file', |
334
|
|
|
|
|
|
|
documentation => |
335
|
|
|
|
|
|
|
q{Path to Slurm template file if you do not wish to use the default} |
336
|
|
|
|
|
|
|
); |
337
|
|
|
|
|
|
|
|
338
|
|
|
|
|
|
|
=head2 serial |
339
|
|
|
|
|
|
|
|
340
|
|
|
|
|
|
|
Option to run all jobs serially, one after the other, no parallelism |
341
|
|
|
|
|
|
|
The default is to use 4 procs |
342
|
|
|
|
|
|
|
|
343
|
|
|
|
|
|
|
=cut |
344
|
|
|
|
|
|
|
|
345
|
|
|
|
|
|
|
has serial => ( |
346
|
|
|
|
|
|
|
is => 'rw', |
347
|
|
|
|
|
|
|
isa => 'Bool', |
348
|
|
|
|
|
|
|
default => 0, |
349
|
|
|
|
|
|
|
documentation => |
350
|
|
|
|
|
|
|
q{Use this if you wish to run each job run one after another, with each job starting only after the previous has completed successfully}, |
351
|
|
|
|
|
|
|
predicate => 'has_serial', |
352
|
|
|
|
|
|
|
clearer => 'clear_serial' |
353
|
|
|
|
|
|
|
); |
354
|
|
|
|
|
|
|
|
355
|
|
|
|
|
|
|
=head2 user |
356
|
|
|
|
|
|
|
|
357
|
|
|
|
|
|
|
user running the script. Passed to slurm for mail information |
358
|
|
|
|
|
|
|
|
359
|
|
|
|
|
|
|
=cut |
360
|
|
|
|
|
|
|
|
361
|
|
|
|
|
|
|
has 'user' => ( |
362
|
|
|
|
|
|
|
is => 'rw', |
363
|
|
|
|
|
|
|
isa => 'Str', |
364
|
|
|
|
|
|
|
default => sub { return $ENV{LOGNAME} || $ENV{USER} || getpwuid($<); }, |
365
|
|
|
|
|
|
|
required => 1, |
366
|
|
|
|
|
|
|
documentation => |
367
|
|
|
|
|
|
|
q{This defaults to your current user ID. This can only be changed if running as an admin user} |
368
|
|
|
|
|
|
|
); |
369
|
|
|
|
|
|
|
|
370
|
|
|
|
|
|
|
=head2 use_threads |
371
|
|
|
|
|
|
|
|
372
|
|
|
|
|
|
|
Bool value to indicate whether or not to use threads. Default is uses processes |
373
|
|
|
|
|
|
|
|
374
|
|
|
|
|
|
|
If using threads your perl must be compiled to use threads! |
375
|
|
|
|
|
|
|
|
376
|
|
|
|
|
|
|
=cut |
377
|
|
|
|
|
|
|
|
378
|
|
|
|
|
|
|
has 'use_threads' => ( |
379
|
|
|
|
|
|
|
is => 'rw', |
380
|
|
|
|
|
|
|
isa => 'Bool', |
381
|
|
|
|
|
|
|
default => 0, |
382
|
|
|
|
|
|
|
required => 0, |
383
|
|
|
|
|
|
|
documentation => q{Use threads to run jobs}, |
384
|
|
|
|
|
|
|
); |
385
|
|
|
|
|
|
|
|
386
|
|
|
|
|
|
|
=head2 use_processes |
387
|
|
|
|
|
|
|
|
388
|
|
|
|
|
|
|
Bool value to indicate whether or not to use processes. Default is uses processes |
389
|
|
|
|
|
|
|
|
390
|
|
|
|
|
|
|
=cut |
391
|
|
|
|
|
|
|
|
392
|
|
|
|
|
|
|
has 'use_processes' => ( |
393
|
|
|
|
|
|
|
is => 'rw', |
394
|
|
|
|
|
|
|
isa => 'Bool', |
395
|
|
|
|
|
|
|
default => 1, |
396
|
|
|
|
|
|
|
required => 0, |
397
|
|
|
|
|
|
|
documentation => q{Use processes to run jobs}, |
398
|
|
|
|
|
|
|
); |
399
|
|
|
|
|
|
|
|
400
|
|
|
|
|
|
|
=head2 use_gnuparallel |
401
|
|
|
|
|
|
|
|
402
|
|
|
|
|
|
|
Bool value to indicate whether or not to use processes. Default is uses processes |
403
|
|
|
|
|
|
|
|
404
|
|
|
|
|
|
|
=cut |
405
|
|
|
|
|
|
|
|
406
|
|
|
|
|
|
|
has 'use_gnuparallel' => ( |
407
|
|
|
|
|
|
|
is => 'rw', |
408
|
|
|
|
|
|
|
isa => 'Bool', |
409
|
|
|
|
|
|
|
default => 0, |
410
|
|
|
|
|
|
|
required => 0, |
411
|
|
|
|
|
|
|
documentation => |
412
|
|
|
|
|
|
|
q{Use gnu-parallel to run jobs and manage threads. This is the best option if you do not know how many threads your application uses!} |
413
|
|
|
|
|
|
|
); |
414
|
|
|
|
|
|
|
|
415
|
|
|
|
|
|
|
=head2 use_custom |
416
|
|
|
|
|
|
|
|
417
|
|
|
|
|
|
|
Supply your own command instead of mcerunner/threadsrunner/etc |
418
|
|
|
|
|
|
|
|
419
|
|
|
|
|
|
|
=cut |
420
|
|
|
|
|
|
|
|
421
|
|
|
|
|
|
|
has 'custom_command' => ( |
422
|
|
|
|
|
|
|
is => 'rw', |
423
|
|
|
|
|
|
|
isa => 'Str', |
424
|
|
|
|
|
|
|
predicate => 'has_custom_command', |
425
|
|
|
|
|
|
|
clearer => 'clear_custom_command', |
426
|
|
|
|
|
|
|
); |
427
|
|
|
|
|
|
|
|
428
|
|
|
|
|
|
|
=head1 Internal Variables |
429
|
|
|
|
|
|
|
|
430
|
|
|
|
|
|
|
You should not need to mess with any of these. |
431
|
|
|
|
|
|
|
|
432
|
|
|
|
|
|
|
=head2 template |
433
|
|
|
|
|
|
|
|
434
|
|
|
|
|
|
|
template object for writing slurm batch submission script |
435
|
|
|
|
|
|
|
|
436
|
|
|
|
|
|
|
=cut |
437
|
|
|
|
|
|
|
|
438
|
|
|
|
|
|
|
has 'template' => ( |
439
|
|
|
|
|
|
|
traits => ['NoGetopt'], |
440
|
|
|
|
|
|
|
is => 'rw', |
441
|
|
|
|
|
|
|
required => 0, |
442
|
|
|
|
|
|
|
default => sub { return Template->new( ABSOLUTE => 1 ) }, |
443
|
|
|
|
|
|
|
); |
444
|
|
|
|
|
|
|
|
445
|
|
|
|
|
|
|
=head2 cmd_counter |
446
|
|
|
|
|
|
|
|
447
|
|
|
|
|
|
|
keep track of the number of commands - when we get to more than commands_per_node restart so we get submit to a new node. |
448
|
|
|
|
|
|
|
This is the number of commands within a batch. Each new batch resets it. |
449
|
|
|
|
|
|
|
|
450
|
|
|
|
|
|
|
=cut |
451
|
|
|
|
|
|
|
|
452
|
|
|
|
|
|
|
has 'cmd_counter' => ( |
453
|
|
|
|
|
|
|
traits => [ 'Counter', 'NoGetopt' ], |
454
|
|
|
|
|
|
|
is => 'ro', |
455
|
|
|
|
|
|
|
isa => 'Num', |
456
|
|
|
|
|
|
|
required => 1, |
457
|
|
|
|
|
|
|
default => 0, |
458
|
|
|
|
|
|
|
handles => { |
459
|
|
|
|
|
|
|
inc_cmd_counter => 'inc', |
460
|
|
|
|
|
|
|
dec_cmd_counter => 'dec', |
461
|
|
|
|
|
|
|
reset_cmd_counter => 'reset', |
462
|
|
|
|
|
|
|
}, |
463
|
|
|
|
|
|
|
); |
464
|
|
|
|
|
|
|
|
465
|
|
|
|
|
|
|
=head2 node_counter |
466
|
|
|
|
|
|
|
|
467
|
|
|
|
|
|
|
Keep track of which node we are on |
468
|
|
|
|
|
|
|
|
469
|
|
|
|
|
|
|
=cut |
470
|
|
|
|
|
|
|
|
471
|
|
|
|
|
|
|
has 'node_counter' => ( |
472
|
|
|
|
|
|
|
traits => [ 'Counter', 'NoGetopt' ], |
473
|
|
|
|
|
|
|
is => 'ro', |
474
|
|
|
|
|
|
|
isa => 'Num', |
475
|
|
|
|
|
|
|
required => 1, |
476
|
|
|
|
|
|
|
default => 0, |
477
|
|
|
|
|
|
|
handles => { |
478
|
|
|
|
|
|
|
inc_node_counter => 'inc', |
479
|
|
|
|
|
|
|
dec_node_counter => 'dec', |
480
|
|
|
|
|
|
|
reset_node_counter => 'reset', |
481
|
|
|
|
|
|
|
}, |
482
|
|
|
|
|
|
|
); |
483
|
|
|
|
|
|
|
|
484
|
|
|
|
|
|
|
=head2 batch_counter |
485
|
|
|
|
|
|
|
|
486
|
|
|
|
|
|
|
Keep track of how many batches we have submited to slurm |
487
|
|
|
|
|
|
|
|
488
|
|
|
|
|
|
|
=cut |
489
|
|
|
|
|
|
|
|
490
|
|
|
|
|
|
|
has 'batch_counter' => ( |
491
|
|
|
|
|
|
|
traits => [ 'Counter', 'NoGetopt' ], |
492
|
|
|
|
|
|
|
is => 'ro', |
493
|
|
|
|
|
|
|
isa => 'Num', |
494
|
|
|
|
|
|
|
required => 1, |
495
|
|
|
|
|
|
|
default => 1, |
496
|
|
|
|
|
|
|
handles => { |
497
|
|
|
|
|
|
|
inc_batch_counter => 'inc', |
498
|
|
|
|
|
|
|
dec_batch_counter => 'dec', |
499
|
|
|
|
|
|
|
reset_batch_counter => 'reset', |
500
|
|
|
|
|
|
|
}, |
501
|
|
|
|
|
|
|
); |
502
|
|
|
|
|
|
|
|
503
|
|
|
|
|
|
|
#=head2 node |
504
|
|
|
|
|
|
|
|
505
|
|
|
|
|
|
|
#Node we are running on |
506
|
|
|
|
|
|
|
|
507
|
|
|
|
|
|
|
#=cut |
508
|
|
|
|
|
|
|
|
509
|
|
|
|
|
|
|
#has 'node' => ( |
510
|
|
|
|
|
|
|
#traits => ['NoGetopt'], |
511
|
|
|
|
|
|
|
#is => 'rw', |
512
|
|
|
|
|
|
|
#isa => 'Str|Undef', |
513
|
|
|
|
|
|
|
#lazy => 1, |
514
|
|
|
|
|
|
|
#default => sub { |
515
|
|
|
|
|
|
|
#my $self = shift; |
516
|
|
|
|
|
|
|
#return $self->nodelist()->[0] if $self->nodelist; |
517
|
|
|
|
|
|
|
#return ""; |
518
|
|
|
|
|
|
|
#} |
519
|
|
|
|
|
|
|
#); |
520
|
|
|
|
|
|
|
|
521
|
|
|
|
|
|
|
=head2 batch |
522
|
|
|
|
|
|
|
|
523
|
|
|
|
|
|
|
List of commands to submit to slurm |
524
|
|
|
|
|
|
|
|
525
|
|
|
|
|
|
|
=cut |
526
|
|
|
|
|
|
|
|
527
|
|
|
|
|
|
|
has 'batch' => ( |
528
|
|
|
|
|
|
|
traits => [ 'String', 'NoGetopt', ], |
529
|
|
|
|
|
|
|
is => 'rw', |
530
|
|
|
|
|
|
|
isa => 'Str', |
531
|
|
|
|
|
|
|
default => q{}, |
532
|
|
|
|
|
|
|
required => 0, |
533
|
|
|
|
|
|
|
handles => { add_batch => 'append', }, |
534
|
|
|
|
|
|
|
clearer => 'clear_batch', |
535
|
|
|
|
|
|
|
predicate => 'has_batch', |
536
|
|
|
|
|
|
|
); |
537
|
|
|
|
|
|
|
|
538
|
|
|
|
|
|
|
=head2 cmdfile |
539
|
|
|
|
|
|
|
|
540
|
|
|
|
|
|
|
File of commands for mcerunner/parallelrunner |
541
|
|
|
|
|
|
|
Is cleared at the end of each slurm submission |
542
|
|
|
|
|
|
|
|
543
|
|
|
|
|
|
|
=cut |
544
|
|
|
|
|
|
|
|
545
|
|
|
|
|
|
|
has 'cmdfile' => ( |
546
|
|
|
|
|
|
|
traits => [ 'String', 'NoGetopt' ], |
547
|
|
|
|
|
|
|
default => q{}, |
548
|
|
|
|
|
|
|
is => 'rw', |
549
|
|
|
|
|
|
|
isa => 'Str', |
550
|
|
|
|
|
|
|
required => 0, |
551
|
|
|
|
|
|
|
handles => { clear_cmdfile => 'clear', }, |
552
|
|
|
|
|
|
|
); |
553
|
|
|
|
|
|
|
|
554
|
|
|
|
|
|
|
=head2 slurmfile |
555
|
|
|
|
|
|
|
|
556
|
|
|
|
|
|
|
File generated from slurm template |
557
|
|
|
|
|
|
|
|
558
|
|
|
|
|
|
|
=cut |
559
|
|
|
|
|
|
|
|
560
|
|
|
|
|
|
|
has 'slurmfile' => ( |
561
|
|
|
|
|
|
|
traits => [ 'String', 'NoGetopt' ], |
562
|
|
|
|
|
|
|
default => q{}, |
563
|
|
|
|
|
|
|
is => 'rw', |
564
|
|
|
|
|
|
|
isa => 'Str', |
565
|
|
|
|
|
|
|
required => 0, |
566
|
|
|
|
|
|
|
handles => { clear_slurmfile => 'clear', }, |
567
|
|
|
|
|
|
|
); |
568
|
|
|
|
|
|
|
|
569
|
|
|
|
|
|
|
=head2 slurm_decides |
570
|
|
|
|
|
|
|
|
571
|
|
|
|
|
|
|
Do not specify a node or partition in your sbatch file. Let Slurm decide which nodes/partition to submit jobs. |
572
|
|
|
|
|
|
|
|
573
|
|
|
|
|
|
|
=cut |
574
|
|
|
|
|
|
|
|
575
|
|
|
|
|
|
|
has 'slurm_decides' => ( |
576
|
|
|
|
|
|
|
is => 'rw', |
577
|
|
|
|
|
|
|
isa => 'Bool', |
578
|
|
|
|
|
|
|
default => 0, |
579
|
|
|
|
|
|
|
); |
580
|
|
|
|
|
|
|
|
581
|
|
|
|
|
|
|
=head2 job_stats |
582
|
|
|
|
|
|
|
|
583
|
|
|
|
|
|
|
HashRef of job stats - total jobs submitted, total processes, etc |
584
|
|
|
|
|
|
|
|
585
|
|
|
|
|
|
|
=cut |
586
|
|
|
|
|
|
|
|
587
|
|
|
|
|
|
|
has 'job_stats' => (
    traits  => ['NoGetopt'],
    is      => 'rw',
    isa     => 'HashRef',

    # Seed the statistics structure gathered during the first pass.
    # Fixed: the original default ended with the assignment
    # "$href->{batches} = {};", so the sub returned only that last empty
    # hashref and every initialized key was silently discarded. We now
    # return the fully populated structure.
    default => sub {
        return {
            total_processes => 0,    # number of individual commands seen
            jobnames        => {},   # jobname => arrayref of batch ids
            total_batches   => 0,    # number of batches submitted
            batches         => {},   # batch id => per-batch stats
        };
    },
);
600
|
|
|
|
|
|
|
|
601
|
|
|
|
|
|
|
=head2 job_deps |
602
|
|
|
|
|
|
|
|
603
|
|
|
|
|
|
|
#HPC jobname=assembly |
604
|
|
|
|
|
|
|
#HPC job_deps=gzip,fastqc |
605
|
|
|
|
|
|
|
|
606
|
|
|
|
|
|
|
=cut |
607
|
|
|
|
|
|
|
|
608
|
|
|
|
|
|
|
has 'job_deps' => (
    traits   => ['NoGetopt'],
    is       => 'rw',
    isa      => 'HashRef',
    required => 0,

    # lazy so $self->jobname is initialized before the default runs.
    lazy     => 1,

    # Map of jobname => arrayref of jobnames it depends on, e.g.
    #   #HPC jobname=assembly
    #   #HPC job_deps=gzip,fastqc
    # Fixed: the original default evaluated the bare list
    # "$self->jobname => [];", whose last value is a plain ArrayRef — that
    # fails the HashRef type constraint. Wrap it in an anonymous hash and
    # return it explicitly.
    default  => sub {
        my $self = shift;
        return { $self->jobname => [] };
    },
);
619
|
|
|
|
|
|
|
|
620
|
|
|
|
|
|
|
|
621
|
|
|
|
|
|
|
=head2 job_scheduler_id |
622
|
|
|
|
|
|
|
|
623
|
|
|
|
|
|
|
Job Scheduler ID running the script. Passed to slurm for mail information |
624
|
|
|
|
|
|
|
|
625
|
|
|
|
|
|
|
=cut |
626
|
|
|
|
|
|
|
|
627
|
|
|
|
|
|
|
has 'job_scheduler_id' => ( |
628
|
|
|
|
|
|
|
is => 'rw', |
629
|
|
|
|
|
|
|
isa => 'Str|Undef', |
630
|
|
|
|
|
|
|
default => sub { return $ENV{SBATCH_JOB_ID} || $ENV{PBS_JOBID} || undef; }, |
631
|
|
|
|
|
|
|
required => 1, |
632
|
|
|
|
|
|
|
documentation => q{This defaults to your current Job Scheduler ID. Ignore this if running on a single node}, |
633
|
|
|
|
|
|
|
predicate => 'has_job_scheduler_id', |
634
|
|
|
|
|
|
|
clearer => 'clear_job_scheduler_id', |
635
|
|
|
|
|
|
|
); |
636
|
|
|
|
|
|
|
|
637
|
|
|
|
|
|
|
=head2 jobname |
638
|
|
|
|
|
|
|
|
639
|
|
|
|
|
|
|
Specify a job name, and jobs will be jobname_1, jobname_2, jobname_x |
640
|
|
|
|
|
|
|
|
641
|
|
|
|
|
|
|
=cut |
642
|
|
|
|
|
|
|
|
643
|
|
|
|
|
|
|
has 'jobname' => (
    is        => 'rw',
    isa       => 'Str',
    required  => 0,
    traits    => ['String'],

    # Fixed: the attribute previously declared "default" twice — the
    # literal q{job} entry was dead code, silently overridden by this
    # later key in the options hash. The surviving (env-aware) default is
    # kept so runtime behavior is unchanged.
    default   => sub { return $ENV{SBATCH_JOB_NAME} || $ENV{PBS_JOBNAME} || 'job'; },
    predicate => 'has_jobname',
    handles   => {
        add_jobname     => 'append',
        clear_jobname   => 'clear',
        replace_jobname => 'replace',
    },
    documentation => q{Specify a job name, each job will be appended with its batch order},
);
658
|
|
|
|
|
|
|
|
659
|
|
|
|
|
|
|
=head2 jobref |
660
|
|
|
|
|
|
|
|
661
|
|
|
|
|
|
|
Array of arrays details slurm/process/scheduler job id. Index -1 is the most recent job submissisions, and there will be an index -2 if there are any job dependencies |
662
|
|
|
|
|
|
|
|
663
|
|
|
|
|
|
|
=cut |
664
|
|
|
|
|
|
|
|
665
|
|
|
|
|
|
|
has 'jobref' => ( |
666
|
|
|
|
|
|
|
traits => ['NoGetopt'], |
667
|
|
|
|
|
|
|
is => 'rw', |
668
|
|
|
|
|
|
|
isa => 'ArrayRef', |
669
|
|
|
|
|
|
|
default => sub { [ [] ] }, |
670
|
|
|
|
|
|
|
); |
671
|
|
|
|
|
|
|
|
672
|
|
|
|
|
|
|
=head1 SUBROUTINES/METHODS |
673
|
|
|
|
|
|
|
|
674
|
|
|
|
|
|
|
=cut |
675
|
|
|
|
|
|
|
|
676
|
|
|
|
|
|
|
=head2 run() |
677
|
|
|
|
|
|
|
|
678
|
|
|
|
|
|
|
First sub called |
679
|
|
|
|
|
|
|
Calling system module load * does not work within a screen session! |
680
|
|
|
|
|
|
|
|
681
|
|
|
|
|
|
|
=cut |
682
|
|
|
|
|
|
|
|
683
|
|
|
|
|
|
|
sub run {
    my ($self) = @_;

    # Serial mode forces a single process so jobs execute one at a time.
    $self->procs(1) if $self->serial;

    # Make sure the output directory exists before we write anything.
    $self->check_files;

    # Pass 1: walk the infile collecting job statistics only.
    $self->first_pass(1);
    $self->parse_file_slurm;
    $self->do_stats;

    $DB::single = 2;    # debugger breakpoint hint

    # Pass 2: walk the infile again, this time building and submitting
    # the actual batches.
    $self->first_pass(0);
    $self->parse_file_slurm;
}
700
|
|
|
|
|
|
|
|
701
|
|
|
|
|
|
|
=head2 do_stats |
702
|
|
|
|
|
|
|
|
703
|
|
|
|
|
|
|
Do some stats on our job stats |
704
|
|
|
|
|
|
|
Foreach job name get the number of batches, and have a put that in batches->batch->job_batches |
705
|
|
|
|
|
|
|
|
706
|
|
|
|
|
|
|
=cut |
707
|
|
|
|
|
|
|
|
708
|
|
|
|
|
|
|
# Annotate each recorded batch with its position inside its jobname group
# ("index/len") and with the overall totals collected during the first pass.
# Mutates $self->job_stats in place; returns nothing meaningful.
sub do_stats {
    my ($self) = @_;

    # Hoist the accessor chain out of the loop; every write below goes
    # through these same references, so in-place mutation is preserved.
    my $stats = $self->job_stats;

    # Fixed: removed the unused local "@jobs" the original computed from
    # keys %{ $stats->{jobnames} } and never read.
    foreach my $batch ( keys %{ $stats->{batches} } ) {
        my $href        = $stats->{batches}->{$batch};
        my $jobname     = $href->{jobname};
        my @job_batches = @{ $stats->{jobnames}->{$jobname} };

        # 1-based position of this batch within its jobname group.
        my $index = ( firstidx { $_ eq $batch } @job_batches ) + 1;
        my $lenjobs = scalar @job_batches;
        $href->{job_batches} = $index . "/" . $lenjobs;

        $href->{total_processes} = $stats->{total_processes};
        $href->{total_batches}   = $stats->{total_batches};
        $href->{batch_count}     = $href->{batch} . "/" . $stats->{total_batches};
    }
}
729
|
|
|
|
|
|
|
|
730
|
|
|
|
|
|
|
=head2 check_files() |
731
|
|
|
|
|
|
|
|
732
|
|
|
|
|
|
|
Check to make sure the outdir exists. |
733
|
|
|
|
|
|
|
If it doesn't exist the entire path will be created |
734
|
|
|
|
|
|
|
|
735
|
|
|
|
|
|
|
=cut |
736
|
|
|
|
|
|
|
|
737
|
|
|
|
|
|
|
# Verify the output directory: normalize the configured path and create
# the full directory tree when it does not yet exist.
sub check_files {
    my $self = shift;

    # Strip any trailing slash from outdir before storing it back.
    my $dir = $self->outdir;
    $dir =~ s{/$}{}g;
    $self->outdir($dir);

    # Create the output directory (including parents) unless present.
    unless ( -d $self->outdir ) {
        make_path( $self->outdir );
    }

    #$self->get_nodes;
}
750
|
|
|
|
|
|
|
|
751
|
|
|
|
|
|
|
=head2 parse_file_slurm |
752
|
|
|
|
|
|
|
|
753
|
|
|
|
|
|
|
Parse the file looking for the following conditions |
754
|
|
|
|
|
|
|
|
755
|
|
|
|
|
|
|
lines ending in `\` |
756
|
|
|
|
|
|
|
wait |
757
|
|
|
|
|
|
|
nextnode |
758
|
|
|
|
|
|
|
|
759
|
|
|
|
|
|
|
Batch commands in groups of $self->cpus_per_task, or smaller as wait and nextnode indicate |
760
|
|
|
|
|
|
|
|
761
|
|
|
|
|
|
|
=cut |
762
|
|
|
|
|
|
|
|
763
|
|
|
|
|
|
|
# Walk the infile line by line, batching commands for submission.
# Recognizes continuation lines ending in "\", plus the "wait" and
# "newnode" directives (handled in process_lines). Resets all counters
# first so the sub is safe to call for both the stats pass and the
# submission pass.
sub parse_file_slurm {
    my ($self) = @_;

    # Fixed: the original used "or print ...", which warned and then kept
    # going with an undefined filehandle — the while loop below would then
    # silently read nothing. Fail loudly instead.
    my $fh = IO::File->new( $self->infile, q{<} )
        or die "Error opening file " . $self->infile . " " . $!;

    $self->reset_cmd_counter;
    $self->reset_node_counter;
    $self->reset_batch_counter;
    $self->jobref( [] );

    # Fixed: afterok defaults to an empty arrayref, which is always true
    # in boolean context, so the original condition fired unconditionally.
    # Only honour --afterok when job ids were actually supplied.
    if ( @{ $self->afterok } ) {
        $self->wait(1);
        $self->jobref->[0] = $self->afterok;
        push( @{ $self->jobref }, [] );
    }

    while ( my $line = <$fh> ) {
        next unless $line;
        next unless $line =~ m/\S/;    # skip blank/whitespace-only lines
        $self->process_lines($line);
    }

    # Flush any partially accumulated batch at EOF.
    $self->work if $self->has_batch;
    push( @{ $self->jobref }, [] ) if $self->serial;

    close($fh);
}
791
|
|
|
|
|
|
|
|
792
|
|
|
|
|
|
|
sub process_lines {
    my $self = shift;
    my $line = shift;

    # Sanity check: nohup makes no sense inside a scheduler submission.
    # (Original used "die print ...", which dies with the message "1".)
    if ( $line =~ m/^nohup/ ) {
        die
            "You cannot submit jobs to the queue using nohup! Please remove nohup and try again.\n";
    }

    # A full batch (commands_per_node commands) has accumulated:
    # submit it and start the next one.
    if (   $self->cmd_counter > 0
        && 0 == $self->cmd_counter % ( $self->commands_per_node )
        && $self->batch )
    {
        $self->work;
        push( @{ $self->jobref }, [] ) if $self->serial;
    }

    # #HPC key=value directives may change parameters mid-script.
    $self->check_hpc_meta($line);
    return if $line =~ m/^#/;    # comments are never commands

    if ( $self->has_cmd ) {

        # We are inside a multi-line command (previous line ended in "\").
        $self->add_cmd($line);
        $self->add_batch($line);
        if ( $line =~ m/\\$/ ) {
            return;              # continuation — keep accumulating
        }
        else {
            $self->add_cmd("\n");
            $self->add_batch("\n");
            $self->clear_cmd;
            $self->inc_cmd_counter;
        }
    }
    else {
        $self->add_cmd($line);

        if ( $line =~ m/\\$/ ) {

            # Start of a multi-line command.
            $self->add_batch($line);
            return;
        }
        elsif ( $self->match_cmd(qr/^wait$/) ) {

            # Submit this batch and get the job id so the next can depend
            # upon it.
            $self->clear_cmd;
            $self->wait(1);
            $self->work if $self->has_batch;
            push( @{ $self->jobref }, [] );
        }
        elsif ( $self->match_cmd(qr/^newnode$/) ) {
            $self->clear_cmd;
            $self->work if $self->has_batch;
            push( @{ $self->jobref }, [] ) if $self->serial;
        }
        else {

            # Don't want to increase command count for wait and newnode.
            $self->inc_cmd_counter;
        }
        $self->add_batch( $line . "\n" ) if $self->has_cmd;
        $self->clear_cmd;
    }
}
859
|
|
|
|
|
|
|
|
860
|
|
|
|
|
|
|
=head2 check_hpc_meta
861
|
|
|
|
|
|
|
|
862
|
|
|
|
|
|
|
allow for changing parameters mid through the script |
863
|
|
|
|
|
|
|
|
864
|
|
|
|
|
|
|
#Job1 |
865
|
|
|
|
|
|
|
echo "this is job one" && \ |
866
|
|
|
|
|
|
|
bin/dostuff bblahblahblah |
867
|
|
|
|
|
|
|
|
868
|
|
|
|
|
|
|
#HPC cpu_per_task=12 |
869
|
|
|
|
|
|
|
|
870
|
|
|
|
|
|
|
echo "This is my new job with new HPC params!" |
871
|
|
|
|
|
|
|
|
872
|
|
|
|
|
|
|
=cut |
873
|
|
|
|
|
|
|
|
874
|
|
|
|
|
|
|
sub check_hpc_meta {

    # Parse an inline "#HPC key=value" (or "#HPC key" to clear) directive
    # and apply it to $self, allowing parameters to change mid-script.
    my $self = shift;
    my $line = shift;
    my ( @match, $t1, $t2 );

    return unless $line =~ m/^#HPC/;

    @match = $line =~ m/HPC (\w+)=(.+)$/;
    ( $t1, $t2 ) = ( $match[0], $match[1] );

    if ($t1) {

        # Validate only once we know a key was captured; the original
        # called ->can(undef) for valueless directives, which made the
        # clear_* branch below unreachable.
        if ( !$self->can($t1) ) {
            print "Option $t1 is an invalid option!\n";
            return;
        }
        if ( $t1 eq "module" ) {

            # module is an array attribute — wrap the value.
            $self->$t1( [$t2] );
        }
        else {
            $self->$t1($t2);
        }
    }
    else {

        # "#HPC key" with no value clears the attribute via clear_<key>.
        @match = $line =~ m/HPC (\w+)$/;
        $t1 = $match[0];
        return unless $t1;
        $t1 = "clear_$t1";
        if ( !$self->can($t1) ) {
            print "Option $t1 is an invalid option!\n";
            return;
        }
        $self->$t1;
    }
}
905
|
|
|
|
|
|
|
|
906
|
|
|
|
|
|
|
sub check_note_meta {

    # Pass "#NOTE" annotation lines straight through into the batch so
    # they survive into the generated submission file.
    my ( $self, $line ) = @_;

    return unless $line =~ m/^#NOTE/;

    $self->add_batch( $line . "\n" );
}
914
|
|
|
|
|
|
|
|
915
|
|
|
|
|
|
|
=head2 work |
916
|
|
|
|
|
|
|
|
917
|
|
|
|
|
|
|
Get the node #may be removed but we'll try it out |
918
|
|
|
|
|
|
|
Process the batch |
919
|
|
|
|
|
|
|
Submit to slurm |
920
|
|
|
|
|
|
|
Take care of the counters |
921
|
|
|
|
|
|
|
|
922
|
|
|
|
|
|
|
=cut |
923
|
|
|
|
|
|
|
|
924
|
|
|
|
|
|
|
sub work {

    # Process one accumulated batch: collect stats on the first pass,
    # write/submit it on the second, then advance all the counters.
    # (Removed a leftover "$DB::single=2;" debugger breakpoint.)
    my $self = shift;

    $self->collect_stats if $self->first_pass;

    $self->process_batch unless $self->first_pass;

    $self->inc_batch_counter;
    $self->clear_batch;
    $self->inc_node_counter;

    $self->reset_cmd_counter;
}
944
|
|
|
|
|
|
|
|
945
|
|
|
|
|
|
|
=head2 collect_stats |
946
|
|
|
|
|
|
|
|
947
|
|
|
|
|
|
|
Collect job stats |
948
|
|
|
|
|
|
|
|
949
|
|
|
|
|
|
|
=cut |
950
|
|
|
|
|
|
|
|
951
|
|
|
|
|
|
|
sub collect_stats {

    # First-pass bookkeeping: record per-batch command counts and the
    # running process totals in $self->job_stats.
    # (Removed $jobhref, which was created and never used.)
    my $self = shift;

    return unless $self->first_pass;

    my $counter = sprintf( "%03d", $self->batch_counter );

    # Get the total processes.
    my $href = $self->job_stats;
    $href->{total_processes} += $self->cmd_counter;

    # First command number of this batch within the overall run.
    my $command_count = ( $href->{total_processes} - $self->cmd_counter ) + 1;

    # Record the number of commands in this batch.
    $href->{batches}->{ $counter . "_" . $self->jobname } = {
        commands      => $self->cmd_counter,
        jobname       => $self->jobname,
        batch         => $self->batch_counter,
        command_count => $command_count . "-" . $href->{total_processes},
    };

    if ( exists $href->{jobnames}->{ $self->jobname } ) {
        my $tarray = $href->{jobnames}->{ $self->jobname };
        push( @{$tarray}, $counter . "_" . $self->jobname );
    }
    else {
        $href->{jobnames}->{ $self->jobname }
            = [ $counter . "_" . $self->jobname ];
    }

    $href->{total_batches} += 1;
    $self->job_stats($href);
}
988
|
|
|
|
|
|
|
|
989
|
|
|
|
|
|
|
=head2 process_batch() |
990
|
|
|
|
|
|
|
|
991
|
|
|
|
|
|
|
Create the slurm submission script from the slurm template |
992
|
|
|
|
|
|
|
Write out template, submission job, and infile for parallel runner |
993
|
|
|
|
|
|
|
|
994
|
|
|
|
|
|
|
=cut |
995
|
|
|
|
|
|
|
|
996
|
|
|
|
|
|
|
sub process_batch {

    # Write the batch's command file, render the scheduler submission
    # script from the template, and (optionally) submit it.
    # Fixes: die (not print-and-continue) on a failed open of the command
    # file; removed a leftover "$DB::single = 2;" debugger breakpoint.
    my $self = shift;
    my ( $cmdfile, $slurmfile, $slurmsubmit, $fh, $command );

    my $counter = sprintf( "%03d", $self->batch_counter );

    $self->cmdfile(
        $self->outdir . "/$counter" . "_" . $self->jobname . ".in" );
    $self->slurmfile(
        $self->outdir . "/$counter" . "_" . $self->jobname . ".sh" );

    $fh = IO::File->new( $self->cmdfile, q{>} )
        or die "Error opening file " . $self->cmdfile . " " . $!;

    print $fh $self->batch if defined $fh && defined $self->batch;
    $fh->close;

    # When a wait/afterok barrier is active, the new job depends on every
    # job id collected in the previous dependency group.
    my $ok;
    if ( $self->wait ) {
        $ok = join( ":", @{ $self->jobref->[-2] } ) if $self->jobref->[-2];
    }

    $command = $self->process_batch_command();

    $self->template->process(
        $self->template_file,
        {   JOBNAME   => $counter . "_" . $self->jobname,
            USER      => $self->user,
            CPU       => $self->cpus_per_task,
            PARTITION => $self->partition,
            AFTEROK   => $ok,
            OUT       => $self->logdir
                . "/$counter" . "_"
                . $self->jobname . ".log",
            MODULE  => $self->module,
            self    => $self,
            COMMAND => $command,
        },
        $self->slurmfile
    ) || die $self->template->error;

    chmod 0777, $self->slurmfile;

    $self->submit_slurm if $self->submit_to_slurm;
}
1045
|
|
|
|
|
|
|
|
1046
|
|
|
|
|
|
|
=head2 process_batch_command |
1047
|
|
|
|
|
|
|
|
1048
|
|
|
|
|
|
|
splitting this off from the main command |
1049
|
|
|
|
|
|
|
|
1050
|
|
|
|
|
|
|
=cut |
1051
|
|
|
|
|
|
|
|
1052
|
|
|
|
|
|
|
#TODO add support for custom commands |
1053
|
|
|
|
|
|
|
#TODO Change this all to a plugin system |
1054
|
|
|
|
|
|
|
|
1055
|
|
|
|
|
|
|
#TODO add support for custom commands
#TODO Change this all to a plugin system

sub process_batch_command {

    # Build the shell command the submission script will run: cd to the
    # current directory, invoke the chosen runner over the command file,
    # and append meta/plugin arguments.
    # Fix: "die print ..." died with the message "1"; now die with the text.
    my ($self) = @_;
    my $command;

    my $counter = sprintf( "%03d", $self->batch_counter );

    $command = "cd " . getcwd() . "\n";
    if ( $self->has_custom_command ) {
        $command
            .= $self->custom_command
            . " --procs "
            . $self->procs
            . " --infile "
            . $self->cmdfile
            . " --outdir "
            . $self->outdir
            . " --logname "
            . "$counter" . "_"
            . $self->jobname
            . " --process_table "
            . $self->process_table;
    }
    elsif ( $self->use_gnuparallel ) {
        $command
            .= "cat "
            . $self->cmdfile
            . " | parallelparser.pl | parallel --joblog "
            . $self->outdir
            . "/main.log --gnu -N 1 -q gnuparallelrunner.pl --command `echo {}` --outdir "
            . $self->outdir
            . " --logname $counter" . "_"
            . $self->jobname
            . " --seq {#}" . "\n";
    }
    elsif ( $self->use_threads ) {

        # NOTE(review): "paralellrunner.pl" looks like a typo for
        # "parallelrunner.pl" — left unchanged because the installed
        # script name may actually match; verify against the distribution.
        $command
            .= "paralellrunner.pl --procs "
            . $self->procs
            . " --infile "
            . $self->cmdfile
            . " --outdir "
            . $self->outdir
            . " --logname $counter" . "_"
            . $self->jobname
            . " --process_table "
            . $self->process_table;
    }
    elsif ( $self->use_processes ) {
        $command
            .= "mcerunner.pl --procs "
            . $self->procs
            . " --infile "
            . $self->cmdfile
            . " --outdir "
            . $self->outdir
            . " --logname $counter" . "_"
            . $self->jobname
            . " --process_table "
            . $self->process_table;
    }
    else {
        die "None of the job processes were chosen!\n";
    }

    my $metastr = $self->create_meta_str;
    $command .= $metastr if $metastr;

    my $pluginstr = $self->create_plugin_str;
    $command .= $pluginstr if $pluginstr;

    return $command;
}
1133
|
|
|
|
|
|
|
|
1134
|
|
|
|
|
|
|
sub create_meta_str {

    # Encode this batch's job_stats entry as JSON and return it as a
    # " --metastr '...'" argument for the runner command line.
    # (Removed a leftover "$DB::single=2;" debugger breakpoint.)
    my $self = shift;

    my $counter = sprintf( "%03d", $self->batch_counter );
    my $batchname = $counter . "_" . $self->jobname;

    my $batch = $self->job_stats->{batches}->{$batchname};

    my $json      = JSON->new->allow_nonref;
    my $json_text = $json->encode($batch);

    $json_text = " --metastr \'$json_text\'";
    return $json_text;
}
1150
|
|
|
|
|
|
|
|
1151
|
|
|
|
|
|
|
sub create_plugin_str {

    # Turn the plugins attribute (scalar or arrayref) into repeated
    # " --plugins NAME" arguments; returns nothing when unset.
    my $self = shift;

    my $plugins = $self->plugins;
    return unless $plugins;

    my $pluginstr = "";
    if ( ref($plugins) ) {
        $pluginstr .= " --plugins $_" for @{$plugins};
    }
    else {
        $pluginstr = " --plugins $plugins";
    }

    return $pluginstr;
}
1171
|
|
|
|
|
|
|
|
1172
|
|
|
|
|
|
|
__PACKAGE__->meta->make_immutable; |
1173
|
|
|
|
|
|
|
|
1174
|
|
|
|
|
|
|
#use namespace::autoclean; |
1175
|
|
|
|
|
|
|
|
1176
|
|
|
|
|
|
|
1; |
1177
|
|
|
|
|
|
|
|
1178
|
|
|
|
|
|
|
=head1 AUTHOR |
1179
|
|
|
|
|
|
|
|
1180
|
|
|
|
|
|
|
Jillian Rowe E<lt>jillian.e.rowe@gmail.comE<gt> |
1181
|
|
|
|
|
|
|
|
1182
|
|
|
|
|
|
|
=head1 COPYRIGHT |
1183
|
|
|
|
|
|
|
|
1184
|
|
|
|
|
|
|
Copyright 2016- Jillian Rowe |
1185
|
|
|
|
|
|
|
|
1186
|
|
|
|
|
|
|
=head1 LICENSE |
1187
|
|
|
|
|
|
|
|
1188
|
|
|
|
|
|
|
This library is free software; you can redistribute it and/or modify |
1189
|
|
|
|
|
|
|
it under the same terms as Perl itself. |
1190
|
|
|
|
|
|
|
|
1191
|
|
|
|
|
|
|
=head1 SEE ALSO |
1192
|
|
|
|
|
|
|
|
1193
|
|
|
|
|
|
|
L<HPC::Runner::Slurm> |
1194
|
|
|
|
|
|
|
L<HPC::Runner::PBS> |
1195
|
|
|
|
|
|
|
L<HPC::Runner::MCE> |
1196
|
|
|
|
|
|
|
|
1197
|
|
|
|
|
|
|
=cut |
1198
|
|
|
|
|
|
|
|
1199
|
|
|
|
|
|
|
__END__ |