package HPC::Runner::Command::submit_jobs::Utils::Scheduler::Directives;

use MooseX::App::Role;
use BioSAILs::Utils::Traits qw(ArrayRefOfStrs);
use namespace::autoclean;

=head1 HPC::Runner::Command::submit_jobs::Utils::Scheduler::Directives

=cut

=head2 Command Line Options

#TODO Move this over to docs

=cut

=head3 module

Modules to load with Slurm.

Use the same names you would pass to 'module load'.

Example: R2 becomes 'module load R2'.

=cut

option 'module' => (
    traits   => ['Array'],
    is       => 'rw',
    isa      => ArrayRefOfStrs,
    coerce   => 1,
    required => 0,
    documentation => q{List of modules to load ex. R2, samtools, etc},
    default       => sub { [] },
    cmd_split     => qr/,/,
    handles       => {
        has_modules  => 'count',
        all_modules  => 'elements',
        join_modules => 'join',
    },
);
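
=head4 Example

A minimal sketch, for illustration only and not part of this role: the
comma-split declared above (C<< cmd_split => qr/,/ >>) turns a value such as
C<R2,samtools> into one 'module load' line per module.

    my @modules = split /,/, 'R2,samtools';
    print "module load $_\n" for @modules;    # module load R2 / module load samtools

=cut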

=head3 conda_env

Anaconda environment to load.

Environments are loaded with

    source activate /path/to/my/env

=cut

option 'conda_env' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    documentation => q{Conda env to activate.},
    predicate     => 'has_conda_env',
);
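
=head4 Example

A minimal sketch, for illustration only: rendering the activation command
described in the POD above for a given environment (the path is the
placeholder used there).

    my $conda_env = '/path/to/my/env';
    print "source activate $conda_env\n";

=cut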

=head3 cpus_per_task

Passed to Slurm as --cpus-per-task. Defaults to 4.

=cut

option 'cpus_per_task' => (
    is            => 'rw',
    isa           => 'Int',
    required      => 0,
    default       => 4,
    predicate     => 'has_cpus_per_task',
    clearer       => 'clear_cpus_per_task',
    documentation => '--cpus-per-task switch in slurm',
);
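
=head4 Example

A minimal sketch, for illustration only: rendering this setting as the Slurm
directive named in the documentation string above, using the default of 4.

    my $cpus_per_task = 4;
    print "#SBATCH --cpus-per-task=$cpus_per_task\n";

=cut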

=head3 ntasks

Passed to Slurm as --ntasks. Defaults to 1.

=cut

option 'ntasks' => (
    is            => 'rw',
    isa           => 'Int',
    required      => 0,
    default       => 1,
    predicate     => 'has_ntasks',
    clearer       => 'clear_ntasks',
    documentation =>
        '--ntasks switch in slurm. This is equal to the number of concurrent tasks on each node * the number of nodes, not the total number of tasks',
);

=head3 account

Passed to Slurm as --account. No default.

=cut

option 'account' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    predicate     => 'has_account',
    clearer       => 'clear_account',
    documentation => '--account switch in slurm.',
);

=head3 ntasks_per_node

Passed to Slurm as --ntasks-per-node. Defaults to 1.

=cut

option 'ntasks_per_node' => (
    is       => 'rw',
    isa      => 'Int',
    required => 0,
    default  => 1,
    trigger  => sub {
        my $self   = shift;
        my $ntasks = $self->ntasks_per_node * $self->nodes_count;
        $self->ntasks($ntasks);
    },
    predicate     => 'has_ntasks_per_node',
    clearer       => 'clear_ntasks_per_node',
    documentation =>
        '--ntasks-per-node switch in slurm. total concurrent tasks on a node.',
);
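
=head4 Example

A worked example of the trigger above, for illustration only: with 2 tasks
per node across 3 nodes, C<ntasks> is recomputed as 2 * 3 = 6.

    my ( $ntasks_per_node, $nodes_count ) = ( 2, 3 );
    my $ntasks = $ntasks_per_node * $nodes_count;
    print "$ntasks\n";    # 6

=cut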

=head3 commands_per_node

Commands to run per node.

=cut

#TODO Update this for job arrays

option 'commands_per_node' => (
    is            => 'rw',
    isa           => 'Int',
    required      => 0,
    default       => 1,
    documentation =>
        q{Commands to run on each node. If the number of jobs you can submit at a time is low, set this much higher.},
    predicate => 'has_commands_per_node',
    clearer   => 'clear_commands_per_node',
);
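
=head4 Example

A minimal sketch, for illustration only, assuming commands are batched
C<commands_per_node> at a time: 1000 commands at 50 commands per node come
out to 20 batches.

    use POSIX qw(ceil);
    my $batches = ceil( 1000 / 50 );
    print "$batches\n";    # 20

=cut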

=head3 nodes_count

Number of nodes to use for a job. This is only useful for MPI jobs.

PBS:

    #PBS -l nodes=nodes_count:ppn=16

Slurm:

    #SBATCH --nodes=nodes_count

=cut

option 'nodes_count' => (
    is            => 'rw',
    isa           => 'Int',
    required      => 0,
    default       => 1,
    documentation =>
        q{Number of nodes requested. You should only use this if submitting parallel jobs.},
    predicate => 'has_nodes_count',
    clearer   => 'clear_nodes_count',
);

=head3 partition

Specify the partition to submit jobs to. Defaults to 'serial'.

In PBS this is called the 'queue'.

=cut

option 'partition' => (
    is            => 'rw',
    isa           => 'Str',
    required      => 0,
    default       => 'serial',
    documentation => q{Slurm partition to submit jobs to.},
    predicate     => 'has_partition',
    clearer       => 'clear_partition',
);

=head3 walltime

Define the scheduler walltime. Defaults to '00:20:00' (twenty minutes).

=cut

option 'walltime' => (
    is        => 'rw',
    isa       => 'Str',
    required  => 0,
    default   => '00:20:00',
    predicate => 'has_walltime',
    clearer   => 'clear_walltime',
);
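
=head4 Example

A minimal sketch, for illustration only: the three-field walltime format used
by the default above is hours:minutes:seconds, so '00:20:00' is 20 minutes.

    my ( $h, $m, $s ) = split /:/, '00:20:00';
    printf "%d minutes\n", $h * 60 + $m + $s / 60;    # 20 minutes

=cut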

=head3 mem

Supply a memory limit. Defaults to '10GB'.

=cut

option 'mem' => (
    is            => 'rw',
    isa           => 'Str|Undef',
    predicate     => 'has_mem',
    clearer       => 'clear_mem',
    required      => 0,
    default       => '10GB',
    documentation => q{Supply a memory limit},
);

=head3 user

User running the script. Passed to Slurm for mail information.

=cut

option 'user' => (
    is       => 'rw',
    isa      => 'Str',
    default  => sub { return $ENV{USER} || $ENV{LOGNAME} || getpwuid($<); },
    required => 1,
    documentation =>
        q{This defaults to your current user ID. This can only be changed if running as an admin user},
);
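
=head4 Example

A minimal sketch, for illustration only: how the default above resolves the
user, falling back from $ENV{USER} to $ENV{LOGNAME} to a uid lookup.

    my $user = $ENV{USER} || $ENV{LOGNAME} || getpwuid($<);
    print "$user\n";

=cut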

=head3 procs

Total number of concurrently running tasks.

Analogous to parallel's --jobs option.

=cut

option 'procs' => (
    is            => 'rw',
    isa           => 'Int',
    default       => 1,
    required      => 0,
    documentation =>
        q{Total number of concurrently running jobs allowed at any time.},
    trigger => sub {
        my $self = shift;
        $self->ntasks_per_node( $self->procs );
    },
);
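
=head4 Example

A worked example of the cascade, for illustration only: setting procs updates
ntasks_per_node (trigger above), whose own trigger then recomputes ntasks as
ntasks_per_node * nodes_count.

    my ( $procs, $nodes_count ) = ( 4, 2 );
    my $ntasks_per_node = $procs;                           # procs trigger
    my $ntasks = $ntasks_per_node * $nodes_count;           # ntasks_per_node trigger
    print "$ntasks\n";                                      # 8

=cut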

1;