package HPC::Runner::Command::submit_jobs::Utils::Scheduler::Directives;

use MooseX::App::Role;
use namespace::autoclean;

use BioSAILs::Utils::Traits qw(ArrayRefOfStrs);

use File::Temp qw/ tempfile /;
use File::Slurp;
use File::Spec;

=head1 HPC::Runner::Command::submit_jobs::Utils::Scheduler::Directives

=cut

=head2 Command Line Options

#TODO Move this over to docs

=cut

=head3 module

Modules to load with Slurm.
Use the same names you would use with 'module load'.

Example: 'R2' becomes 'module load R2'.
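
On the command line the list is comma-separated (per C<cmd_split>
below). A hypothetical invocation — the C<hpcrunner.pl> entry point
name and C<--infile> flag are assumptions here:

    hpcrunner.pl submit_jobs --infile my_jobs.sh --module R2,samtools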

=cut

option 'module' => (
    traits        => ['Array'],
    is            => 'rw',
    isa           => ArrayRefOfStrs,
    coerce        => 1,
    required      => 0,
    documentation => q{List of modules to load ex. R2, samtools, etc},
    default       => sub { [] },
    cmd_split     => qr/,/,
    handles       => {
        has_modules  => 'count',
        all_modules  => 'elements',
        join_modules => 'join',
    },
);

=head3 conda_env

Anaconda environments to load.

Load an Anaconda environment with:

    source activate /path/to/my/env

=cut

option 'conda_env' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    documentation => q{Conda env to activate.},
    predicate     => 'has_conda_env',
);

=head3 cpus_per_task

The Slurm --cpus-per-task directive. Defaults to 4.

=cut

option 'cpus_per_task' => (
    is            => 'rw',
    isa           => 'Int',
    required      => 0,
    default       => 4,
    predicate     => 'has_cpus_per_task',
    clearer       => 'clear_cpus_per_task',
    documentation => '--cpus-per-task switch in slurm',
);

=head3 ntasks

The Slurm --ntasks directive. Defaults to 1.

=cut

option 'ntasks' => (
    is        => 'rw',
    isa       => 'Int',
    required  => 0,
    default   => 1,
    predicate => 'has_ntasks',
    clearer   => 'clear_ntasks',
    documentation =>
        '--ntasks switch in slurm. This is equal to the number of concurrent tasks on each node * the number of nodes, not the total number of tasks in the workflow',
);

=head3 account

The Slurm --account directive. No default is set.

=cut

option 'account' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    predicate     => 'has_account',
    clearer       => 'clear_account',
    documentation => '--account switch in slurm.',
);

=head3 ntasks_per_node

The Slurm --ntasks-per-node directive. Defaults to 1.
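
Setting this also recomputes C<ntasks> through the trigger below. An
illustrative run (the values are made up):

    # --nodes_count 2 --ntasks_per_node 4  =>  ntasks = 4 * 2 = 8
    #SBATCH --nodes=2
    #SBATCH --ntasks-per-node=4
    #SBATCH --ntasks=8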

=cut

option 'ntasks_per_node' => (
    is       => 'rw',
    isa      => 'Int',
    required => 0,
    default  => 1,
    trigger  => sub {
        my $self = shift;
        # Keep ntasks in sync: total tasks = tasks per node * node count
        my $ntasks = $self->ntasks_per_node * $self->nodes_count;
        $self->ntasks($ntasks);
    },
    predicate => 'has_ntasks_per_node',
    clearer   => 'clear_ntasks_per_node',
    documentation =>
        '--ntasks-per-node switch in slurm. Total concurrent tasks on a node.',
);

=head3 commands_per_node

Commands to run per node.

=cut

#TODO Update this for job arrays

option 'commands_per_node' => (
    is       => 'rw',
    isa      => 'Int',
    required => 0,
    default  => 1,
    documentation =>
        q{Commands to run on each node. If the number of jobs you can submit at a time is low, set this much higher.},
    predicate => 'has_commands_per_node',
    clearer   => 'clear_commands_per_node',
);

=head3 nodes_count

Number of nodes to use for a job. This is only useful for MPI jobs.

PBS:

    #PBS -l nodes=nodes_count:ppn=16

Slurm:

    #SBATCH --nodes=nodes_count

=cut

option 'nodes_count' => (
    is       => 'rw',
    isa      => 'Int',
    required => 0,
    default  => 1,
    documentation =>
        q{Number of nodes requested. You should only use this if submitting parallel jobs.},
    predicate => 'has_nodes_count',
    clearer   => 'clear_nodes_count',
);

=head3 partition

Specify the partition.

In PBS this is called the 'queue'.

=cut

option 'partition' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    documentation => q{Slurm partition to submit jobs to.},
    predicate     => 'has_partition',
    clearer       => 'clear_partition',
);

=head3 walltime

Define the scheduler walltime. Defaults to '00:20:00'.
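
Walltimes use the scheduler's duration syntax, e.g. HH:MM:SS under
Slurm. To request two hours:

    --walltime 02:00:00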
197
|
|
|
|
|
|
|
|
|
198
|
|
|
|
|
|
|
=cut |
|
199
|
|
|
|
|
|
|
|
|
200
|
|
|
|
|
|
|

option 'walltime' => (
    is        => 'rw',
    isa       => 'Str',
    required  => 0,
    default   => '00:20:00',
    predicate => 'has_walltime',
    clearer   => 'clear_walltime',
);

=head3 mem

Supply a memory limit. Defaults to '10GB'.
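
The value is passed straight through to the scheduler, so use its
syntax, e.g.:

    --mem 40GB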

=cut

option 'mem' => (
    is            => 'rw',
    isa           => 'Str|Undef',
    predicate     => 'has_mem',
    clearer       => 'clear_mem',
    required      => 0,
    default       => '10GB',
    documentation => q{Supply a memory limit},
);

=head3 user

User running the script. Passed to Slurm for mail information.

=cut

option 'user' => (
    is  => 'rw',
    isa => 'Str',
    # Fall back through the usual environment variables to a UID lookup
    default  => sub { return $ENV{USER} || $ENV{LOGNAME} || getpwuid($<); },
    required => 1,
    documentation =>
        q{This defaults to your current user ID. This can only be changed if running as an admin user},
);

=head3 procs

Total number of concurrently running tasks.

Analogous to parallel --jobs.
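
Setting C<procs> cascades through the triggers (a sketch; the values
are illustrative):

    # --procs 8
    # sets ntasks_per_node to 8, whose own trigger then
    # sets ntasks to 8 * nodes_count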

=cut

option 'procs' => (
    is       => 'rw',
    isa      => 'Int',
    default  => 1,
    required => 0,
    documentation =>
        q{Total number of concurrently running jobs allowed at any time.},
    trigger => sub {
        my $self = shift;
        # procs is a convenience wrapper: push the value into ntasks_per_node
        $self->ntasks_per_node( $self->procs );
    },
);

=head3 template_file

The actual template file.

One is generated here for you, but you can always supply your own with:

    --template_file /path/to/template

The default is for Slurm.
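
A minimal custom template might look like this (a sketch; it assumes a
custom template is rendered with the same Template Toolkit variables
the default below uses):

    #!/usr/bin/env bash
    #PBS -N [% JOBNAME %]
    #PBS -o [% OUT %]
    #PBS -l nodes=[% job.nodes_count %]:ppn=[% job.cpus_per_task %]
    #PBS -l walltime=[% job.walltime %]

    [% COMMAND %]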

=cut

has 'template_file' => (
    is      => 'rw',
    isa     => 'Str',
    default => sub {
        my $self = shift;

        # Write the default Slurm template to a temp file and return its path
        my ( $fh, $filename ) = tempfile();

        my $tt = <<EOF;
#!/usr/bin/env bash
#
#SBATCH --share
#SBATCH --job-name=[% JOBNAME %]
#SBATCH --output=[% OUT %]
[% IF job.has_account %]
#SBATCH --account=[% job.account %]
[% END %]
[% IF job.has_partition %]
#SBATCH --partition=[% job.partition %]
[% END %]
[% IF job.has_nodes_count %]
#SBATCH --nodes=[% job.nodes_count %]
[% END %]
[% IF job.has_ntasks %]
#SBATCH --ntasks=[% job.ntasks %]
[% END %]
[% IF job.has_cpus_per_task %]
#SBATCH --cpus-per-task=[% job.cpus_per_task %]
[% END %]
[% IF job.has_ntasks_per_node %]
#SBATCH --ntasks-per-node=[% job.ntasks_per_node %]
[% END %]
[% IF job.has_mem %]
#SBATCH --mem=[% job.mem %]
[% END %]
[% IF job.has_walltime %]
#SBATCH --time=[% job.walltime %]
[% END %]
[% IF ARRAY_STR %]
#SBATCH --array=[% ARRAY_STR %]
[% END %]
[% IF AFTEROK %]
#SBATCH --dependency=afterok:[% AFTEROK %]
[% END %]

[% IF MODULES %]
module load [% MODULES %]
[% END %]

[% IF job.has_conda_env %]
source activate [% job.conda_env %]
[% END %]

[% COMMAND %]

EOF

        print $fh $tt;
        close($fh);    # flush the template to disk before handing back the path
        return $filename;
    },
    predicate => 'has_template_file',
    clearer   => 'clear_template_file',
    documentation =>
        q{Path to Scheduler template file if you do not wish to use the default.},
);

1;