line |
stmt |
bran |
cond |
sub |
pod |
time |
code |
1
|
|
|
|
|
|
|
package DBA::Backup::mysql; |
2
|
|
|
|
|
|
|
|
3
|
1
|
|
|
1
|
|
25037
|
use 5.006001;
use strict;
use warnings;

use Compress::Zlib;     # replace shelling to gzip?
use File::Copy ();      # File::Copy::move() in _cycle_bin_logs()
use File::Path ();      # File::Path::rmtree() in _backup_databases()

use DBA::Backup;
|
0
|
|
|
|
|
|
|
|
0
|
|
|
|
|
|
|
8
|
|
|
|
|
|
|
|
9
|
|
|
|
|
|
|
our $VERSION = '0.2_1'; |
10
|
|
|
|
|
|
|
|
11
|
|
|
|
|
|
|
=head1 DESCRIPTION
12
|
|
|
|
|
|
|
|
13
|
|
|
|
|
|
|
NOTICE! This is currently a broken partial port from the original working
14
|
|
|
|
|
|
|
MySQL specific module. I hope to have the port finished and a functional |
15
|
|
|
|
|
|
|
version uploaded soon. Email me or the list for more information. |
16
|
|
|
|
|
|
|
|
17
|
|
|
|
|
|
|
The mailing list for the DBA modules is perl-dba@fini.net. See |
18
|
|
|
|
|
|
|
http://lists.fini.net/mailman/listinfo/perl-dba to subscribe. |
19
|
|
|
|
|
|
|
|
20
|
|
|
|
|
|
|
=begin dev |
21
|
|
|
|
|
|
|
|
22
|
|
|
|
|
|
|
Required methods (can be void if appropriate): |
23
|
|
|
|
|
|
|
* _flush_logs # so all logs are current |
24
|
|
|
|
|
|
|
_rotate_logs # rotate specified log types |
25
|
|
|
|
|
|
|
_dump_databases # as server optimized SQL file (locking option) |
26
|
|
|
|
|
|
|
_stop_server |
27
|
|
|
|
|
|
|
_start_server |
28
|
|
|
|
|
|
|
_lock_database |
29
|
|
|
|
|
|
|
|
30
|
|
|
|
|
|
|
|
31
|
|
|
|
|
|
|
=end dev |
32
|
|
|
|
|
|
|
|
33
|
|
|
|
|
|
|
=cut |
34
|
|
|
|
|
|
|
|
35
|
|
|
|
|
|
|
=head1 mysql_flush_logs |
36
|
|
|
|
|
|
|
|
37
|
|
|
|
|
|
|
Uses mysqladmin refresh to flush all logs and tables. |
38
|
|
|
|
|
|
|
|
39
|
|
|
|
|
|
|
=cut |
40
|
|
|
|
|
|
|
sub mysql_flush_logs {
    my $self    = shift;
    my $HR_conf = shift;  # parsed configuration hash

    # Assemble the mysqladmin invocation from the connection settings.
    my $cmd_path = $HR_conf->{mysqladmin}{path};
    my $username = $HR_conf->{connect}{USER};
    my $password = $HR_conf->{connect}{PASSWORD};
    my $db_host  = $HR_conf->{connect}{RDBMS_HOST};
    my $socket   = $HR_conf->{connect}{SOCKET};
    my $cmd = "$cmd_path -u$username -p$password --host=$db_host "
        . "--socket=$socket ";
    $cmd .= join(' ', map { "--$_" } @{$HR_conf->{mysqladmin}{options}});
    $cmd .= " refresh";

    $self->_debug("flushing logs with $cmd");

    # execute the flush command
    if (-x $cmd_path) {
        my $rc = system($cmd);
        # Mask the password (quotemeta'd: it may contain regex
        # metacharacters) before the command line is logged anywhere.
        $cmd =~ s/\Q$password\E/xxxxxx/;
        $self->_error("$rc recieved on $cmd") if $rc;
        # Block form of print: a hash-element filehandle expression must
        # be wrapped in braces or Perl cannot parse it.
        print {$self->{LOG}} "Completed: $cmd";
    } # if we're allowed to execute
    else {
        $self->_error("mysqladmin is not executable.");
    } # else bitch

    return 0;
} # mysql_flush_logs
70
|
|
|
|
|
|
|
|
71
|
|
|
|
|
|
|
=head1 mysql_rotate_logs |
72
|
|
|
|
|
|
|
|
73
|
|
|
|
|
|
|
Rotates the binary update, error and any extra mysql logs specified in the |
74
|
|
|
|
|
|
|
conf file. Rotation of binary and error logs is not optional on runs when the |
75
|
|
|
|
|
|
|
databases get backed up. Error and binary logs are kept as incrementals. |
76
|
|
|
|
|
|
|
Other logs are just appended, and are cleared and restarted once over a certain |
77
|
|
|
|
|
|
|
size (as defined in conf). |
78
|
|
|
|
|
|
|
|
79
|
|
|
|
|
|
|
=cut |
80
|
|
|
|
|
|
|
sub mysql_rotate_logs {
    my $self    = shift;
    my $HR_conf = shift;  # parsed configuration hash

    # Record the currently running mysql queries before touching any logs.
    print {$self->{LOG}} "\n\n*Current processlist*\n";
    $self->_debug("_get_process_list",2);
    print {$self->{LOG}} $self->_get_process_list($HR_conf);

    my $base_log_dir = $HR_conf->{backup_params}{LOG_DIR};
    # Use our own hostname when connecting via localhost.
    my $hostname = $HR_conf->{connect}{RDBMS_HOST} eq 'localhost'
        ? $self->{HOSTNAME} : $HR_conf->{connect}{RDBMS_HOST};

    print {$self->{LOG}} "\n\n*Rotating logs*\n";
    $self->_debug("_rotate_general_query_log?");
    # NOTE(review): the original built this path as "$hostname .log" with
    # an embedded space; assumed to be a typo for "$hostname.log" (matching
    # how the slow-query log path is built) -- confirm.
    $self->_rotate_log("General query",
        "$base_log_dir/$hostname.log",
        $HR_conf->{backup_params}{MAX_GEN_LOG_SIZE},
        $HR_conf->{backup_params}{MAX_GEN_LOG_FILES})
        if $HR_conf->{backup_params}{ROTATE_GEN_QUERY_LOGS} =~ /yes/i;

    $self->_debug("_rotate_slow_query_log?");
    $self->_rotate_slow_query_log()
        if $HR_conf->{backup_params}{ROTATE_SLOW_QUERY_LOGS} =~ /yes/i;

    $self->_debug("_cycle_bin_logs?");
    my $cur_day = substr(localtime,0,3);  # e.g. 'Mon'
    # Read CYCLE_BIN_LOGS_DAILY from the conf hash like every other
    # parameter in this sub (the original read it from $self, a leftover
    # of the unfinished port).
    if (($HR_conf->{backup_params}{CYCLE_BIN_LOGS_DAILY} =~ /^yes$/i )
        or (grep(/$cur_day/i, @{$HR_conf->{days}}))) {
        $self->_cycle_bin_logs();
    } # if bin logs backed up daily or today is a full dump day

    $self->_debug("_rotate_error_log?");
    $self->_rotate_error_log()
        if $HR_conf->{backup_params}{ROTATE_ERROR_LOGS} =~ /yes/i;

    return 0;
} # mysql_rotate_logs
119
|
|
|
|
|
|
|
|
120
|
|
|
|
|
|
|
|
121
|
|
|
|
|
|
|
### |
122
|
|
|
|
|
|
|
### Internal functions |
123
|
|
|
|
|
|
|
### |
124
|
|
|
|
|
|
|
|
125
|
|
|
|
|
|
|
|
126
|
|
|
|
|
|
|
## _get_process_list() ## |
127
|
|
|
|
|
|
|
# |
128
|
|
|
|
|
|
|
# Returns a list of all mysql processes running currently on the server. |
129
|
|
|
|
|
|
|
# |
130
|
|
|
|
|
|
|
# Gets the processlist from dbms and print it to the LOG the fields are |
131
|
|
|
|
|
|
|
# as follows: |
132
|
|
|
|
|
|
|
# Id User Host db Command Time State Info |
133
|
|
|
|
|
|
|
# |
134
|
|
|
|
|
|
|
# The assumption is that these fields will not change. It's hard to make |
135
|
|
|
|
|
|
|
# a dynamic script because LISTFIELDS works only on tables, and retrieval |
136
|
|
|
|
|
|
|
# to a hash does not preserve the order of the fields. |
137
|
|
|
|
|
|
|
# |
138
|
|
|
|
|
|
|
## _get_process_list() ##
#
# Returns the output of `mysqladmin processlist` for the server, so the
# caller can record what was running before logs are rotated. The fields
# are: Id User Host db Command Time State Info.
#
sub _get_process_list {
    my $self    = shift;
    my $HR_conf = shift;  # parsed configuration hash

    # Assemble the mysqladmin invocation from the connection settings.
    my $cmd_path = $HR_conf->{mysqladmin}{path};
    my $username = $HR_conf->{connect}{USER};
    my $password = $HR_conf->{connect}{PASSWORD};
    my $db_host  = $HR_conf->{connect}{RDBMS_HOST};
    my $socket   = $HR_conf->{connect}{SOCKET};
    my $cmd = "$cmd_path -u$username -p$password --host=$db_host "
        . "--socket=$socket ";
    $cmd .= join(' ', map { "--$_" } @{$HR_conf->{mysqladmin}{options}});
    # The original appended " flush-logs flush-hosts" here, which flushes
    # logs instead of listing processes; "processlist" matches this sub's
    # documented purpose and its caller ("*Current processlist*").
    $cmd .= " processlist";

    $self->_debug("Getting process list with $cmd");

    my $mesg = '';
    if (-x $cmd_path) {
        $mesg = `$cmd`;
        # Mask the password (quotemeta'd) before the command is logged.
        $cmd =~ s/\Q$password\E/xxxxxx/;
        # The original referenced an undeclared $rc here, which fails
        # under strict; report the real child status from $?.
        $self->_error("$? recieved on $cmd") if $?;
        $self->_debug("Completed: $cmd");
    } # if we're allowed to execute
    else {
        $self->_error("mysqladmin is not executable.");
    } # else bitch

    return $mesg;
} # end _get_process_list()
169
|
|
|
|
|
|
|
|
170
|
|
|
|
|
|
|
|
171
|
|
|
|
|
|
|
|
172
|
|
|
|
|
|
|
|
173
|
|
|
|
|
|
|
|
174
|
|
|
|
|
|
|
### |
175
|
|
|
|
|
|
|
### unprocessed original subs |
176
|
|
|
|
|
|
|
### |
177
|
|
|
|
|
|
|
|
178
|
|
|
|
|
|
|
|
179
|
|
|
|
|
|
|
# Size-triggered rotation of a single log file: keeps numbered
# generations log.0 (newest) .. log.N (oldest), discarding the oldest
# once $max_log_count files exist.
sub _rotate_log {
    my $self = shift;

    my $logname       = shift;  # human-readable log name, for messages
    my $log_file      = shift;  # full path of the live log file
    my $max_log_size  = shift;  # rotation threshold, in MB
    my $max_log_count = shift;  # number of rotated generations to keep

    # Nothing to do when the log has never been written.
    unless (-f $log_file) {
        print {$self->{LOG}} "$logname log doesn't exist\n";
        return;
    } # only rotate if it exists
    print {$self->{LOG}} "$logname log is $log_file\n";

    # Only rotate once the file exceeds the configured size.
    unless (-s $log_file > ($max_log_size*1024*1024) ) {
        print {$self->{LOG}} "$logname log did not need rotating\n";
        return;
    } # rotate log if larger than max log size

    # Shift the old generations down (.i -> .i+1), newest-kept first so
    # nothing is overwritten; the oldest generation simply falls off.
    for (my $i = $max_log_count - 2; $i >= 0; $i--) {
        my $j = $i + 1;
        if (-f "$log_file.$i") {
            print {$self->{LOG}} "Renaming $log_file.$i to $log_file.$j\n";
            # rename() result was previously ignored; report failures.
            rename("$log_file.$i", "$log_file.$j")
                or $self->_gripe("Can't rename $log_file.$i: $!");
        } # if log at iteration exists, move down
    } # for each possible log iteration

    # rename the current log to .0
    rename($log_file, "$log_file.0")
        or $self->_gripe("Can't rename $log_file: $!");

    # done
    print {$self->{LOG}} "$logname log rotated\n";

} # end _rotate_log()
217
|
|
|
|
|
|
|
|
218
|
|
|
|
|
|
|
# Rotate the slow-query log via the generic rotation helper.
sub _rotate_slow_query_log {
    my $self = shift;

    # NOTE(review): this file defines _rotate_log(logname, log_file,
    # max_size, max_count), not _rotate_generic_log, and the extra
    # ROTATE_SLOW_QUERY_LOGS argument does not fit that signature --
    # looks like an unfinished part of the port; confirm before use.
    # NOTE(review): configuration is read from $self here, while the
    # public methods read it from their $HR_conf argument, and
    # $self->{db_connect}{HOSTNAME} . '.log' is presumably meant to be a
    # slow-query-specific file name -- verify against the conf layout.
    $self->_rotate_generic_log("Slow query",
        $self->{backup_params}{ROTATE_SLOW_QUERY_LOGS},
        $self->{backup_params}{LOG_DIR} . '/'
            . $self->{db_connect}{HOSTNAME} . '.log',
        $self->{backup_params}{MAX_SLOW_LOG_SIZE},
        $self->{backup_params}{MAX_SLOW_LOG_FILES});
} # end _rotate_slow_query_log()
228
|
|
|
|
|
|
|
|
229
|
|
|
|
|
|
|
=head1 _rotate_error_log() |
230
|
|
|
|
|
|
|
|
231
|
|
|
|
|
|
|
The mysql error logs don't operate the same way as the other logs. |
232
|
|
|
|
|
|
|
As of mysql 4.0.10, every flush-logs command will cause the error log |
233
|
|
|
|
|
|
|
to rotate to a file with an "-old" suffix attached. This is |
234
|
|
|
|
|
|
|
regardless of the file's size. Mysql shutdown/startup will *not* |
235
|
|
|
|
|
|
|
rotate the error log to the -old file. Any previous -old file |
236
|
|
|
|
|
|
|
is deleted. |
237
|
|
|
|
|
|
|
|
238
|
|
|
|
|
|
|
This function attempts to restore some sanity to how mysql treats |
239
|
|
|
|
|
|
|
the error log. Call this function after the flush-logs command. |
240
|
|
|
|
|
|
|
We will take new -old file and append it to the end of our own file, |
241
|
|
|
|
|
|
|
(different name) and delete the -old file. We'll then call the usual |
242
|
|
|
|
|
|
|
_rotate_generic_log function on it. |
243
|
|
|
|
|
|
|
|
244
|
|
|
|
|
|
|
=cut |
245
|
|
|
|
|
|
|
|
246
|
|
|
|
|
|
|
# Merge mysqld's "-old" error-log dropping into our own cumulative error
# log, remove the dropping, then size-rotate the cumulative file. See
# the POD above for why mysqld's error-log handling needs this.
sub _rotate_error_log {
    my $self = shift;

    my $log_dir  = $self->{backup_params}{LOG_DIR};
    my $hostname = $self->{db_connect}{HOSTNAME};
    my $log_in   = $log_dir . '/' . $hostname . '.err-old';   # mysqld's dropping
    my $log_out  = $log_dir . '/' . $hostname . '-error.log'; # our cumulative log
    print {$self->{LOG}} "\n";

    # test if file exists
    unless (-f $log_in) {
        print {$self->{LOG}} "mysqld old error log ($log_in) doesn't exist\n";
        return;
    } # if old err log doesn't exist
    print {$self->{LOG}} "mysqld old error log is $log_in\n";
    print {$self->{LOG}} "... merging into cumulative error log $log_out\n";

    # Merge mysql droppings into our own log file. Three-arg open with
    # lexical handles; the original's copy loop had lost its <INFILE>
    # readline ("while ()"), which does not compile.
    open(my $in_fh, '<', $log_in)
        or $self->_error("Problem reading $log_in: $!");
    open(my $out_fh, '>>', $log_out)
        or $self->_error("Problem appending $log_out: $!");
    while (my $line = <$in_fh>) { print {$out_fh} $line; }
    close $out_fh or $self->_gripe("$!");
    close $in_fh  or $self->_gripe("$!");
    unlink($log_in);

    # Perform usual log rotation on the merged file. The original called
    # a nonexistent _rotate_generic_log() as a plain function, passed an
    # extra argument, and rotated "$hostname.log" instead of the file it
    # had just merged into; rotate $log_out via _rotate_log().
    $self->_rotate_log("Cumulative error",
        $log_out,
        $self->{backup_params}{MAX_ERROR_LOG_SIZE},
        $self->{backup_params}{MAX_ERROR_LOG_FILES});
} # end _rotate_error_logs()
278
|
|
|
|
|
|
|
|
279
|
|
|
|
|
|
|
|
280
|
|
|
|
|
|
|
=head1 _cycle_bin_logs() |
281
|
|
|
|
|
|
|
|
282
|
|
|
|
|
|
|
Issues command to mysqld to finish writing to the current binary |
283
|
|
|
|
|
|
|
update log and start writing to a new one. We then push all of |
284
|
|
|
|
|
|
|
the bin-logs (except for the newest one) into [dump_dir]/00/. |
285
|
|
|
|
|
|
|
|
286
|
|
|
|
|
|
|
The flush logs command causes mysqld to close the old (already renamed) |
287
|
|
|
|
|
|
|
general query and slow query logs and reopen the logs of the usual |
288
|
|
|
|
|
|
|
file name. It also causes mysqld to flush the binary update log and |
289
|
|
|
|
|
|
|
begin writing to a new binlog file. It does not affect the error |
290
|
|
|
|
|
|
|
log, only a restart of mysqld will start a new error log. |
291
|
|
|
|
|
|
|
|
292
|
|
|
|
|
|
|
The flush hosts command will clean up the hosts cache. |
293
|
|
|
|
|
|
|
|
294
|
|
|
|
|
|
|
=cut |
295
|
|
|
|
|
|
|
|
296
|
|
|
|
|
|
|
sub _cycle_bin_logs {
    my $self = shift;

    # Short hostname (up to the first dot) names the binlog files.
    my ($hostname) = $self->{db_connect}{HOSTNAME} =~ m/^([^\.]+)/;
    my $data_dir = $self->{backup_params}{LOG_DIR};
    my $dump_dir = $self->{backup_params}{DUMP_DIR} . '/00';

    # NOTE(review): the POD says this sub issues a flush-logs command to
    # mysqld first, but no such command is visible here -- confirm whether
    # the flush happens elsewhere or was lost in the port.

    # Get a list of all existing binlog files to back up. Lexical dir
    # handle; \Q..\E guards against regex metacharacters in the hostname.
    opendir(my $dh, $data_dir)
        or $self->_error("Cannot open directory where bin log files reside\n");
    my @binlog_files = grep { /\Q$hostname\E\-bin\.\d+/ } readdir($dh);
    closedir($dh);
    $self->_debug("Found @binlog_files in $data_dir");

    # back up the binary update logs
    $self->_debug('backing up bin log');
    print {$self->{LOG}} "\nBacking up binary update logs\n";
    print {$self->{LOG}} "Moving binlogs from $data_dir/ to $dump_dir/ ...\n";
    foreach my $file (@binlog_files) {
        my $rc = File::Copy::move("$data_dir/$file", "$dump_dir/$file");
        if ($rc) {
            print {$self->{LOG}} "... moved $file\n";
        } # if move successful
        else {
            $self->_error("Can't move the binary log file $file - $!($rc)\n");
        } # else die
    } # foreach bin log
    print {$self->{LOG}} "Backed up " . int(@binlog_files) . " binary update logs\n";

} # end _cycle_bin_logs()
326
|
|
|
|
|
|
|
|
327
|
|
|
|
|
|
|
|
328
|
|
|
|
|
|
|
=head1 _backup_databases() |
329
|
|
|
|
|
|
|
|
330
|
|
|
|
|
|
|
Backup all databases on the server DBMS which are mentioned |
331
|
|
|
|
|
|
|
explicitly or as a pattern in the [included-databases] section |
332
|
|
|
|
|
|
|
in the config file. |
333
|
|
|
|
|
|
|
|
334
|
|
|
|
|
|
|
This function will dump all specified databases to .sql.gz files |
335
|
|
|
|
|
|
|
in the directory [dump_dir]/new/. If there were no errors during |
336
|
|
|
|
|
|
|
backup, _rotate_dump_dirs will then rename it [dump_dir]/00/. |
337
|
|
|
|
|
|
|
|
338
|
|
|
|
|
|
|
If this function encounters errors during backup, the partial dumps |
339
|
|
|
|
|
|
|
to [dump_dir]/new/ will remain until the next time this function is |
340
|
|
|
|
|
|
|
executed. At that time, the contents of [dump_dir]/new/ will be |
341
|
|
|
|
|
|
|
destroyed and new dumps will be placed there. |
342
|
|
|
|
|
|
|
|
343
|
|
|
|
|
|
|
At no time are binary update logs ever placed in [dump_dir]/new/. |
344
|
|
|
|
|
|
|
|
345
|
|
|
|
|
|
|
Return with the number of errors encountered during backup. |
346
|
|
|
|
|
|
|
|
347
|
|
|
|
|
|
|
=cut |
348
|
|
|
|
|
|
|
|
349
|
|
|
|
|
|
|
sub _backup_databases {
    my $self = shift;

    my $dump_dir = $self->{backup_params}{DUMP_DIR} . '/new';
    my $backup_errors = 0;  # returned to caller

    # create the new/ dump_dir, but delete it if it already exists
    if (-d $dump_dir) {
        print {$self->{LOG}} "Partial/failed dumps in $dump_dir exist, deleting...\n";
        eval { File::Path::rmtree($dump_dir) };
        $self->_error("Cannot delete $dump_dir - $@\n") if ($@);
        $self->_error("$dump_dir deleted, but still exists!\n") if (-d $dump_dir);
    } # if directory exists
    $self->_debug('_test_create_dirs');
    $self->_test_create_dirs($dump_dir);

    # dump a .sql.gz file for each database into the dump_dir
    foreach my $database ( @{$self->{backup}{databases}} ) {
        $self->_debug("Backing up $database");

        # get the date, parsed into its parts
        my ($sec, $min, $hour, $mday, $mon, $year) = localtime();
        $year += 1900; $mon += 1;

        # build part of the output file name from the date parts
        my $date_spec = $year . sprintf("%02d%02d%02d%02d", $mon, $mday,
            $hour, $min);
        my $time_stamp = sprintf("%04d-%02d-%02d %02d:%02d:%02d",
            $year,$mon,$mday,$hour,$min,$sec);
        my $db_host  = $self->{db_connect}{RDBMS_HOST};
        my $hostname = $self->{db_connect}{HOSTNAME};
        # The original used s///, which assigns the substitution count
        # (not $1) and clobbers $hostname; a plain match captures the
        # short host name, as _cycle_bin_logs() does.
        my ($short_hostname) = $hostname =~ m/^([^\.]+)/;
        my $dump_file = $date_spec .'_'. $short_hostname .'_'. $database
            . '.sql.gz';

        print {$self->{LOG}} "[$time_stamp] Dumping $database to $dump_file\n";

        # build the dump command line in steps
        my $gzip      = $self->{bin_dir}{gzip};
        my $mysqldump = $self->{bin_dir}{mysqldump};
        my $username  = $self->{db_connect}{USER};
        my $password  = $self->{db_connect}{PASSWORD};
        my $socket    = $self->{db_connect}{SOCKET};

        # Spaces restored after --socket and before the database name;
        # the original concatenated "--socket=X--opt" and "optDBNAME".
        my $cmd = "$mysqldump -u$username -p$password --host=$db_host "
            . "--socket=$socket ";
        $cmd .= join(' ', map { "--$_" } @{$self->{mysqldump}{options}});
        $cmd .= " $database | $gzip -9 > $dump_dir/$dump_file";

        # make sure that the database backup went fine
        $self->_debug("Dumping with $cmd");
        my $rc = system($cmd);
        if ($rc) {
            # Mask the password (quotemeta'd) before logging the command.
            $cmd =~ s/ -p\Q$password\E / -pPASSWORD_HIDDEN /;
            print {$self->{LOG}} 'An error occured while backing up database '
                . "$database - $rc - '$cmd'\n";
            $backup_errors++;
        } # if there was an error executing command

    } # foreach $database

    # print timestamp one more time when it's all done
    my ($sec,$min,$hour,$mday,$mon,$year) = localtime;
    $year += 1900; $mon += 1;
    my $time_stamp = sprintf("%04d-%02d-%02d %02d:%02d:%02d",
        $year,$mon,$mday,$hour,$min,$sec);
    print {$self->{LOG}} "[$time_stamp] Compressed dumps to $dump_dir/ "
        . "completed with $backup_errors errors\n";

    return $backup_errors;
} # end _backup_databases()
422
|
|
|
|
|
|
|
|
423
|
|
|
|
|
|
|
|
424
|
|
|
|
|
|
|
|
425
|
|
|
|
|
|
|
# NOTE(review): everything from here to the commented memGzip line looks
# like leftover example code (it matches the Compress::Zlib gzopen()
# synopsis) that executes at module load time: it sets STDOUT to binary
# mode and then gzip-copies <> (STDIN/@ARGV) to STDOUT, blocking any
# program that loads this module. Almost certainly unintended; confirm
# and remove.
binmode STDOUT;  # gzopen only sets it on the fd

my $gz = gzopen(\*STDOUT, "wb")
    or die "Cannot open stdout: $gzerrno\n" ;

while (<>) {
    $gz->gzwrite($_)
        or die "error writing: $gzerrno\n" ;
}

$gz->gzclose ;
#my $zipped = Compress::Zlib::memGzip($dumped_db);
437
|
|
|
|
|
|
|
|
438
|
|
|
|
|
|
|
1; |
439
|
|
|
|
|
|
|
__END__ |