forked from omniti-labs/zetaback
-
Notifications
You must be signed in to change notification settings - Fork 0
/
zetaback.in
executable file
·1747 lines (1509 loc) · 54.4 KB
/
zetaback.in
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/perl
# vim: sts=2 sw=2 ts=8 et
# Copyright (c) 2007 OmniTI Computer Consulting, Inc. All rights reserved.
# For information on licensing see:
# https://labs.omniti.com/zetaback/trunk/LICENSE
#
# zetaback: drives full/incremental ZFS backups, restores, archiving and
# retention for remote hosts running the zetaback agent.
use strict;
use Getopt::Long;
use MIME::Base64;
use POSIX qw/strftime/;
use Fcntl qw/:flock/;
use File::Path qw/mkpath/;
use File::Copy;
use IO::File;
use Pod::Usage;
# Script-wide globals.  %conf holds the parsed configuration (filled by
# parse_config), %locks the flock handles currently held by this process.
# The ALL_CAPS scalars are populated by GetOptions below.
# NOTE(review): `use vars` is obsolete; `our` is the modern equivalent.
use vars qw/%conf %locks $version_string $process_lock
  $PREFIX $CONF $BLOCKSIZE $DEBUG $HOST $BACKUP
  $RESTORE $RESTORE_HOST $RESTORE_ZFS $TIMESTAMP
  $LIST $SUMMARY $SUMMARY_EXT $SUMMARY_VIOLATORS
  $SUMMARY_VIOLATORS_VERBOSE $FORCE_FULL $FORCE_INC
  $EXPUNGE $NEUTERED $ZFS $SHOW_FILENAMES $ARCHIVE
  $VERSION $HELP/;
$version_string = '1.0.6';
# __PREFIX__ is a template token substituted at install time (this file
# is zetaback.in); the default config path is derived from it.
$PREFIX = q^__PREFIX__^;
$CONF = qq^$PREFIX/etc/zetaback.conf^;
$BLOCKSIZE = 1024*64;
# Built-in defaults; each can be overridden in the 'default', per-host or
# class stanzas of the config file (see the CONFIGURATION POD below).
$conf{'default'}->{'time_format'} = "%Y-%m-%d %H:%M:%S";
$conf{'default'}->{'retention'} = 14 * 86400;          # two weeks
$conf{'default'}->{'compressionlevel'} = 1;            # gzip -1
$conf{'default'}->{'dataset_backup'} = 0;              # file-based backups
$conf{'default'}->{'violator_grace_period'} = 21600;   # six hours
=pod
=head1 NAME
zetaback - perform backup, restore and retention policies for ZFS backups.
=head1 SYNOPSIS
zetaback -v
zetaback [-l|-s|-sx|-sv|-svv] [--files] [-c conf] [-d] [-h host] [-z zfs]
zetaback -a [-c conf] [-d] [-h host] [-z zfs]
zetaback -b [-ff] [-fi] [-x] [-c conf] [-d] [-n] [-h host] [-z zfs]
zetaback -x [-b] [-c conf] [-d] [-n] [-h host] [-z zfs]
zetaback -r [-c conf] [-d] [-n] [-h host] [-z zfs] [-t timestamp]
[-rhost host] [-rzfs fs]
=cut
# Command line parsing.  Boolean flags select the action; string/int
# options refine its target.  See the OPTIONS section of the POD.
GetOptions(
  "h=s" => \$HOST,                      # restrict to host (or /regex/)
  "z=s" => \$ZFS,                       # restrict to zfs fs (or /regex/)
  "c=s" => \$CONF,                      # alternate config file
  "a" => \$ARCHIVE,                     # archive action
  "b" => \$BACKUP,                      # backup action
  "l" => \$LIST,                        # list action
  "s" => \$SUMMARY,                     # summary action
  "sx" => \$SUMMARY_EXT,                # extended summary
  "sv" => \$SUMMARY_VIOLATORS,          # policy-violator summary
  "svv" => \$SUMMARY_VIOLATORS_VERBOSE, # violators incl. deleted fs
  "r" => \$RESTORE,                     # restore action
  "t=i" => \$TIMESTAMP,                 # restore: backup point (UNIX time)
  "rhost=s" => \$RESTORE_HOST,          # restore: target host
  "rzfs=s" => \$RESTORE_ZFS,            # restore: target zfs fs
  "d" => \$DEBUG,                       # debug output
  "n" => \$NEUTERED,                    # dry run
  "x" => \$EXPUNGE,                     # expunge action (may pair with -b)
  "v" => \$VERSION,                     # print version
  "ff" => \$FORCE_FULL,                 # backup: force full
  "fi" => \$FORCE_INC,                  # backup: force incremental
  "files" => \$SHOW_FILENAMES,          # show on-disk file names
);
# actions allowed together 'x' and 'b' all others are exclusive:
my $actions = 0;
$actions++ if($ARCHIVE);
$actions++ if($BACKUP || $EXPUNGE);
$actions++ if($RESTORE);
$actions++ if($LIST);
$actions++ if($SUMMARY);
$actions++ if($SUMMARY_EXT);
$actions++ if($SUMMARY_VIOLATORS);
$actions++ if($SUMMARY_VIOLATORS_VERBOSE);
$actions++ if($VERSION);
# -ff and -fi together contradict each other; counting the combination as
# an extra "action" deliberately trips the usage check below.
$actions++ if($BACKUP && $FORCE_FULL && $FORCE_INC);
# Exactly one action must be selected; otherwise show the SYNOPSIS of the
# embedded POD and exit with an error.
if($actions != 1) {
  pod2usage({ -verbose => 0 });
  exit -1;
}
=pod
=head1 DESCRIPTION
The B<zetaback> program orchestrates the backup (either full or
incremental) of remote ZFS filesystems to a local store. It handles
frequency requirements for both full and incremental backups as well
as retention policies. In addition to backups, the B<zetaback> tool
allows for the restore of any backup to a specified host and zfs
filesystem.
=head1 OPTIONS
The non-optional action command line arguments define the invocation purpose
of B<zetaback>. All other arguments are optional and refine the target
of the action specified.
=head2 Generic Options
The following arguments have the same meaning over several actions:
=over
=item -c <conf>
Use the specified file as the configuration file. The default file, if
none is specified is /usr/local/etc/zetaback.conf. The prefix of this
file may also be specified as an argument to the configure script.
=item -d
Enable debugging output.
=item -n
Don't actually perform any remote commands or expunging. This is useful with
the -d argument to ascertain what would be done if the command was actually
executed.
=item -t <timestamp>
Used during the restore process to specify a backup image from the desired
point in time. If omitted, the command becomes interactive. This timestamp
is a UNIX timestamp and is shown in the output of the -s and -sx actions.
=item -rhost <host>
Specify the remote host that is the target for a restore operation. If
omitted the command becomes interactive.
=item -rzfs <zfs>
Specify the remote ZFS filesystem that is the target for a restore
operation. If omitted the command becomes interactive.
=item -h <host>
Filters the operation to the host specified. If <host> is of the form
/pattern/, it matches 'pattern' as a perl regular expression against available
hosts. If omitted, no limit is enforced and all hosts are used for the action.
=item -z <zfs>
Filters the operation to the zfs filesystem specified. If <zfs> is of the
form /pattern/, it matches 'pattern' as a perl regular expression against
available zfs filesystems. If omitted, no filter is enforced and all zfs
filesystems are used for the action.
=back
=head2 Actions
=over
=item -v
Show the version.
=item -l
Show a brief listing of available backups.
=item -s
Like -l, -s will show a list of backups but provides additional information
about the backups including timestamp, type (full or incremental) and the
size on disk.
=item -sx
Shows an extended summary. In addition to the output provided by the -s
action, the -sx action will show detail for each available backup. For
full backups, the detail will include any more recent full backups, if
they exist. For incremental backups, the detail will include any
incremental backups that are more recent than the last full backup.
=item -sv
Display all backups in the current store that violate the configured
backup policy. This is where the most recent full backup is older than
full_interval seconds ago, or the most recent incremental backup is older
than backup_interval seconds ago.
If, at the time of the most recent backup, a filesystem no longer exists on
the server (because it was deleted), then backups of this filesystem are not
included in the list of violators. To include these filesystems, use the -svv
option instead.
=item -svv
By default, the violators summary excludes backups of filesystems that are no
longer present on the server. Use this option to include those filesystems in
the list of violators.
=item --files
Display the on-disk file corresponding to each backup named in the output.
This is useful with the -sv flag to name violating files. Often times,
violators are filesystems that have been removed on the host machines and
zetaback can no longer back them up. Be very careful if you choose to
automate the removal of such backups as filesystems that would be backed up
by the next regular zetaback run will often show up as violators.
=item -a
Performs an archive. This option will look at all eligible backup points
(as restricted by -z and -h) and move those to the configured archive
directory. The recommended use is to first issue -sx --files then
carefully review available backup points and prune those that are
unneeded. Then invoke with -a to move only the remaining "desired"
backup points into the archives. Archived backups do not appear in any
listings or in the list of policy violators generated by the -sv option.
In effect, they are no longer "visible" to zetaback.
=item -b
Performs a backup. This option will investigate all eligible hosts, query
the available filesystems from the remote agent and determine if any such
filesystems require a new full or incremental backup to be taken. This
option may be combined with the -x option (to clean up afterwards.)
=item -ff
Forces a full backup to be taken on each filesystem encountered. This is
used in combination with -b. It is recommended to use this option only when
targeting specific filesystems (via the -h and -z options.) Forcing a full
backup across all machines will cause staggered backups to coalesce and
could cause performance issues.
=item -fi
Forces an incremental backup to be taken on each filesystem encountered.
This is used in combination with -b. It is recommended to use this option
only when targeting specific filesystems (via the -h and -z options.) Forcing
an incremental backup across all machines will cause staggered backups
to coalesce and could cause performance issues.
=item -x
Perform an expunge. This option will determine which, if any, of the local
backups may be deleted given the retention policy specified in the
configuration.
=item -r
Perform a restore. This option will operate on the specified backup and
restore it to the ZFS filesystem specified with -rzfs on the host specified
with the -rhost option. The -h, -z and -t options may be used to filter
the source backup list. If the filtered list contains more than one
source backup image, the command will act interactively. If the -rhost
and -rzfs command are not specified, the command will act interactively.
When running interactively, you can choose multiple filesystems from the list
using ranges. For example 1-4,5,10-11. If you do this, zetaback will enter
multi-restore mode. In this mode it will automatically select the most recent
backup, and restore filesystems in bulk.
In multi-restore mode, you have the option to specify a base filesystem to
restore to. This filesystem will be added as a prefix to the original
filesystem name, so if you picked a prefix of data/restore, and one of the
filesystems you are restoring is called data/set/myfilesystem, then the
filesystem will be restored to data/restore/data/set/myfilesystem.
Note that, just like in regular restore mode, zetaback won't create
intermediate filesystems for you when restoring, and these should either exist
beforehand, or you should make sure you pick a set of filesystems that will
restore the entire tree for you, for example, you should restore data as well
as data/set before restoring data/set/foo.
=back
=cut
# -v: report the version (counted as an action above) and exit before any
# configuration is required.
if($VERSION) {
  print "zetaback: $version_string\n";
  exit 0;
}
=pod
=head1 CONFIGURATION
The zetaback configuration file consists of a default stanza, containing
settings that can be overridden on a per-host basis. A stanza begins
either with the string 'default', or a fully-qualified hostname, with
settings enclosed in braces ({}). Single-line comments begin with a hash
('#'), and whitespace is ignored, so feel free to indent for better
readability. Every host to be backed up must have a host stanza in the
configuration file.
=head2 Storage Classes
In addition to the default and host stanzas, the configuration file can also
contain 'class' stanzas. Classes allow you to override settings on a
per-filesystem basis rather than a per-host basis. A class stanza begins with
the name of the class, and has a setting 'type = class'. For example:
myclass {
type = class
store = /path/to/alternate/store
}
To add a filesystem to a class, set a zfs user property on the relevant
filesystem. This must be done on the server that runs the zetaback agent, and
not the zetaback server itself.
zfs set com.omniti.labs.zetaback:class=myclass pool/fs
Note that user properties (and therefore classes) are only available on
Solaris 10 8/07 and newer, and on Solaris Express build 48 and newer. Only the
server running the agent needs to have user property support, not the zetaback
server itself.
The following settings can be included in a class stanza. All other settings
will be ignored, and their default (or per host) settings used instead:
=over
=item *
store
=item *
full_interval
=item *
backup_interval
=item *
retention
=item *
dataset_backup
=item *
violator_grace_period
=back
=head2 Settings
The following settings are valid in both the default and host scopes:
=over
=item store
The base directory under which to keep backups. An interpolated variable
'%h' can be used, which expands to the hostname. There is no default for
this setting.
=item archive
The base directory under which archives are stored. The format is the same
as the store setting. This is the destination to which files are relocated
when issuing an archive action (-a).
=item agent
The location of the zetaback_agent binary on the host. There is no default
for this setting.
=item time_format
All timestamps within zetaback are in UNIX timestamp format. This setting
provides a string for formatting all timestamps on output. The sequences
available are identical to those in strftime(3). If not specified, the
default is '%Y-%m-%d %H:%M:%S'.
=item backup_interval
The frequency (in seconds) at which to perform incremental backups. An
incremental backup will be performed if the current time is more than
backup_interval since the last incremental backup. If there is no full backup
for a particular filesystem, then a full backup is performed. There is no
default for this setting.
=item full_interval
The frequency (in seconds) at which to perform full backups. A full backup will
be performed if the current time is more than full_interval since the last full
backup.
=item retention
The retention time (in seconds) for backups. This can be a simple number, in
which case all backups older than this will be expunged.
The retention specification can also be more complex, and consist of pairs of
values separated by a comma. The first value is a time period in seconds, and
the second value is how many backups should be retained within that period.
For example:
retention = 3600,4;86400,11
This will keep up to 4 backups for the first hour, and an additional 11
backups over 24 hours. The times do not stack. In other words, the 11 backups
would be kept during the period from 1 hour old to 24 hours old, or one every
2 hours.
Any backups older than the largest time given are deleted. In the above
example, all backups older than 24 hours are deleted.
If a second number is not specified, then all backups are kept within that
period.
Note: Full backups are never deleted if they are depended upon by an
incremental. In addition, the most recent backup is never deleted, regardless
of how old it is.
This value defaults to (14 * 86400), or two weeks.
=item compressionlevel
Compress files using gzip at the specified compression level. 0 means no
compression. Accepted values are 1-9. Defaults to 1 (fastest/minimal
compression.)
=item ssh_config
Full path to an alternate ssh client config. This is useful for specifying a
less secure but faster cipher for some hosts, or using a different private
key. There is no default for this setting.
=item dataset_backup
By default zetaback backs zfs filesystems up to files. This option lets you
specify that the backup be stored as a zfs dataset on the backup host.
=item offline
Setting this option to 1 for a host will mark it as being 'offline'. Hosts
that are marked offline will not be backed up, will not have any old backups
expunged and will not be included in the list of policy violators. However,
the host will still be shown when listing backups and archiving.
=item violator_grace_period
This setting controls the grace period used when deciding if a backup has
violated its backup window. It is used to prevent false positives in the case
where a filesystem is still being backed up. For example, if it is 25 hours
since the last daily backup, but the daily backup is in progress, the grace
period will mean that it is not shown in the violators list.
Like all intervals, this period is in seconds. The default is 21600 seconds (6
hours).
=back
=head2 Global Settings
The following settings are only valid in the default scope:
=over
=item process_limit
This setting limits the number of concurrent zetaback processes that can run
at one time. Zetaback already has locks on hosts and datasets to prevent
conflicting backups, and this allows you to have multiple zetaback instances
running in the event a backup takes some time to complete, while still keeping
a limit on the resources used. If this configuration entry is missing, then no
limiting will occur.
=back
=head1 CONFIGURATION EXAMPLES
=head2 Uniform hosts
This config results in backups stored in /var/spool/zfs_backups, with a
subdirectory for each host. Incremental backups will be performed
approximately once per day, assuming zetaback is run hourly. Full backups
will be done once per week. Time format and retention are default.
default {
store = /var/spool/zfs_backups/%h
agent = /usr/local/bin/zetaback_agent
backup_interval = 83000
full_interval = 604800
}
host1 {}
host2 {}
=head2 Non-uniform hosts
Here, host1's and host2's agents are found in different places, and host2's
backups should be stored in a different path.
default {
store = /var/spool/zfs_backups/%h
agent = /usr/local/bin/zetaback_agent
backup_interval = 83000
full_interval = 604800
}
host1 {
agent = /opt/local/bin/zetaback_agent
}
host2 {
store = /var/spool/alt_backups/%h
agent = /www/bin/zetaback_agent
}
=cut
# Make the parser more formal:
# config => stanza*
# stanza => string { kvp* }
# kvp => string = string
# A "string" is either a double-quoted string (with backslash escapes for
# '\' and '"') or a bare run of non-whitespace characters.
my $str_re = qr/(?:"(?:\\\\|\\"|[^"])*"|\S+)/;
# key = value; both sides captured, possibly still quoted at this point.
my $kvp_re = qr/($str_re)\s*=\s*($str_re)/;
# name { key = value ... }; captures the stanza name and the raw body.
my $stanza_re = qr/($str_re)\s*\{((?:\s*$kvp_re)*)\s*\}/;
sub parse_config() {
  # Slurp $CONF and populate the global %conf: one nested hash per
  # stanza, keys lower-cased, surrounding double quotes stripped from
  # stanza names, keys and values.  Dies if the file cannot be opened.
  local $/ = undef;          # slurp mode; restored when we leave scope
  # Three-arg open with a lexical handle; the old two-arg bareword form
  # (open(CONF, "<$CONF")) was open to mode injection via the path.
  open(my $conf_fh, '<', $CONF) || die "Unable to open config file: $CONF";
  my $file = <$conf_fh>;
  close($conf_fh);
  # Rip comments
  $file =~ s/^\s*#.*$//mg;
  while($file =~ m/$stanza_re/gm) {
    my $scope = $1;
    my $filepart = $2;
    $scope =~ s/^"(.*)"$/$1/;        # unquote the stanza name
    $conf{$scope} ||= {};
    while($filepart =~ m/$kvp_re/gm) {
      my $key = $1;
      my $value = $2;
      $key =~ s/^"(.*)"$/$1/;
      $value =~ s/^"(.*)"$/$1/;
      # Keys are case-insensitive; values keep their case.
      $conf{$scope}->{lc($key)} = $value;
    }
  }
}
sub config_get($$;$) {
  # Params: host, key, class
  # Order of precedence: class, host, default
  # Returns the first *defined* value found, or undef if the key is not
  # set anywhere.
  # Bug fix: the old '||' chain treated an explicitly configured false
  # value (e.g. "compressionlevel = 0" on a host) as absent and fell
  # through to the next scope; defined() honors it.
  my ($host, $key, $class) = @_;
  my @scopes = $class ? ($class, $host, 'default') : ($host, 'default');
  foreach my $scope (@scopes) {
    my $value = $conf{$scope}->{$key};
    return $value if defined($value);
  }
  return undef;  # matches the old behavior when nothing is set
}
sub get_store($;$) {
  # Resolve the backup store directory for a host (optionally within a
  # class scope), expanding the %h placeholder to the hostname.
  my ($host, $class) = @_;
  my $path = config_get($host, 'store', $class);
  $path =~ s/%h/$host/g;
  return $path;
}
sub get_classes() {
  # Return every configured class name.  The default/blank class is
  # always present, as the first element.
  my @classes = ("");
  foreach my $stanza (keys %conf) {
    my $type = $conf{$stanza}->{'type'};
    push @classes, $stanza if defined($type) && $type eq 'class';
  }
  return @classes;
}
sub fs_encode($) {
  # Encode a zfs filesystem name (optionally with an @snapshot suffix)
  # into a string safe for use as a zfs dataset name component.  Only
  # the part before '@' is base64-encoded; the snapshot part is kept.
  my ($name) = @_;
  my ($fs_part, $snap_part) = split('@', $name);
  my $encoded = encode_base64($fs_part, '');
  # Swap out base64 characters that zfs names cannot contain.
  $encoded =~ s/\//_/g;  # '/' would create child datasets
  $encoded =~ s/=/-/g;   # '=' padding
  $encoded =~ s/\+/\./g;
  $encoded .= "\@$snap_part" if defined($snap_part);
  return $encoded;
}
sub fs_decode($) {
  # Reverse the character substitutions applied by fs_encode, then
  # base64-decode back to the original filesystem name.
  my ($token) = @_;
  $token =~ s/_/\//g;
  $token =~ s/-/=/g;
  $token =~ s/\./\+/g;
  return decode_base64($token);
}
sub dir_encode($) {
  # Encode an arbitrary string (a zfs fs name) into a base64 token used
  # inside on-disk backup file names.
  my ($dir) = @_;
  my $token = encode_base64($dir, '');
  # NOTE(review): only the FIRST '/' is replaced (no /g), mirrored by
  # dir_decode.  Base64 output containing two or more '/' characters
  # would leave a literal slash in the file name; preserved as-is for
  # compatibility with existing stores — confirm before changing.
  $token =~ s/\//_/;
  return $token;
}
sub dir_decode($) {
  # Undo dir_encode: restore the single substituted '/' (intentionally
  # no /g, matching dir_encode) and base64-decode.
  my ($token) = @_;
  $token =~ s/_/\//;
  return decode_base64($token);
}
sub pretty_size($) {
  # Render a byte count as a human-readable string.  Values at or below
  # each threshold fall through to the next smaller unit; anything not
  # strictly greater than 1024 is reported in raw bytes.
  my ($bytes) = @_;
  foreach my $scale ([1024**3, 'Gb'], [1024**2, 'Mb'], [1024, 'Kb']) {
    my ($threshold, $label) = @$scale;
    return sprintf("%0.2f %s", $bytes / $threshold, $label)
      if $bytes > $threshold;
  }
  return "$bytes b";
}
sub lock($;$$) {
  # Acquire (or re-confirm) an exclusive flock on $store/$file for the
  # given host.  $file defaults to 'master.lock'; $nowait makes the
  # flock non-blocking.  Returns 1 on success, 0 on failure.  The handle
  # is remembered in %locks so unlock() can release it.
  my ($host, $file, $nowait) = @_;
  print "Acquiring lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = get_store($host); # Don't take classes into account - not needed
  mkpath($store) if(! -d $store);
  # Already held by this process.
  return 1 if(exists($locks{"$host:$file"}));
  # Bug fix: use a lexical filehandle.  The old bareword LOCK glob was
  # shared by every lock held in this process, so acquiring a second
  # lock reopened the glob and effectively dropped the first one.
  # (Also switched to three-arg open.)
  open(my $lock_fh, '+>>', "$store/$file") || return 0;
  unless(flock($lock_fh, LOCK_EX | ($nowait ? LOCK_NB : 0))) {
    close($lock_fh);
    return 0;
  }
  $locks{"$host:$file"} = $lock_fh;
  return 1;
}
sub unlock($;$$) {
  # Release a lock taken by lock().  Params: host, lock file name
  # (defaults to 'master.lock'), and an optional flag to also unlink the
  # lock file from the store.  Returns 1 on success, 0 if no such lock
  # is held by this process.
  my ($host, $file, $remove) = @_;
  print "Releasing lock for $host:$file\n" if($DEBUG);
  $file ||= 'master.lock';
  my $store = get_store($host); # Don't take classes into account - not needed
  mkpath($store) if(! -d $store);
  return 0 unless(exists($locks{"$host:$file"}));
  # Bug fix: the handle is stored under "$host:$file" (see lock()), but
  # was fetched here as $locks{$file}, so the wrong (usually undefined)
  # handle was unlocked.  Also delete the entry so a later lock() call
  # re-acquires instead of assuming the lock is still held.
  my $lock_fh = delete $locks{"$host:$file"};
  unlink("$store/$file") if($remove);
  flock($lock_fh, LOCK_UN);
  close($lock_fh);
  return 1;
}
sub limit_running_processes() {
  # Enforce the global process_limit setting: try to grab one of N
  # numbered lock files under /tmp; if every slot is taken, exit.
  # Returns immediately (no limit) when process_limit is unset.
  # NOTE(review): fixed, predictable /tmp paths are a classic symlink
  # footgun on multi-user machines — confirm acceptable here.
  my $limit = $conf{'default'}->{'process_limit'};
  return unless defined($limit);
  print "Aquiring process lock\n" if $DEBUG;
  foreach my $slot (0 .. $limit - 1) {
    my $lockfile = "/tmp/.zetaback_$slot.lock";
    print "$lockfile\n" if $DEBUG;
    # $process_lock is a global so the flock lives for the whole run.
    open($process_lock, '+>>', $lockfile) || next;
    if (flock($process_lock, LOCK_EX | LOCK_NB)) {
      print "Process lock succeeded: $lockfile\n" if $DEBUG;
      return 1;
    }
    close($process_lock);
  }
  print "Too many zetaback processes running. Exiting...\n" if $DEBUG;
  exit 0;
}
sub scan_for_backups($) {
  # Scan a store directory and return a hashref describing every backup
  # found, keyed by decoded filesystem name.  Per fs it records
  # full/incremental backup points (keyed by UNIX time) plus
  # last_full / last_incremental / last_backup timestamps.  Both
  # file-based backups and zfs-dataset backups are discovered.
  my %info = ();
  my $dir = shift;
  # NOTE(review): these top-level keys are initialized here but the
  # loops below only maintain the per-filesystem copies
  # ($info{$fs}->{last_*}); confirm callers use the top-level ones.
  $info{last_full} = $info{last_incremental} = $info{last_backup} = 0;
  # Look for standard file based backups first
  opendir(D, $dir) || return \%info;
  foreach my $file (readdir(D)) {
    # Full backups are named "<time>.<dir_encode(fs)>.full"
    if($file =~ /^(\d+)\.([^\.]+)\.full$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{full}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_full} = $whence if($whence > $info{$fs}->{last_full});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
        $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
    # Incrementals: "<time>.<dir_encode(fs)>.incremental.<base time>"
    # NOTE(review): the dots around "incremental" are unescaped here, so
    # they match any character — harmless for well-formed names.
    elsif($file =~ /^(\d+).([^\.]+)\.incremental.(\d+)$/) {
      my $whence = $1;
      my $fs = dir_decode($2);
      $info{$fs}->{incremental}->{$whence}->{'depends'} = $3;
      $info{$fs}->{incremental}->{$whence}->{'file'} = "$dir/$file";
      $info{$fs}->{last_incremental} = $whence if($whence > $info{$fs}->{last_incremental});
      $info{$fs}->{last_backup} = $info{$fs}->{last_incremental} > $info{$fs}->{last_full} ?
        $info{$fs}->{last_incremental} : $info{$fs}->{last_full};
    }
  }
  closedir(D);
  # Now look for zfs based backups
  my $storefs;
  eval {
    $storefs = get_fs_from_mountpoint($dir);
  };
  # No zfs filesystem behind the store dir => file backups only.
  return \%info if ($@);
  # __ZFS__ is replaced with the zfs binary path at install time.
  my $rv = open(ZFSLIST, "__ZFS__ list -H -r -t snapshot $storefs |");
  return \%info unless $rv;
  while (<ZFSLIST>) {
    my @F = split(' ');
    my ($rawfs, $snap) = split('@', $F[0]);
    # The snapshot name carries the backup timestamp.
    my ($whence) = ($snap =~ /(\d+)/);
    next unless $whence;
    # The last path component is the fs_encode()d filesystem name.
    my @fsparts = split('/', $rawfs);
    my $fs = fs_decode($fsparts[-1]);
    # Treat a dataset backup as a full backup from the point of view of the
    # backup lists
    $info{$fs}->{full}->{$whence}->{'snapshot'} = $snap;
    $info{$fs}->{full}->{$whence}->{'dataset'} = "$rawfs\@$snap";
    # Note - this field isn't set for file backups - we probably should do
    # this
    $info{$fs}->{full}->{$whence}->{'pretty_size'} = "$F[1]";
    $info{$fs}->{last_full} = $whence if ($whence >
      $info{$fs}->{last_full});
    $info{$fs}->{last_backup} = $whence if ($whence >
      $info{$fs}->{last_backup});
  }
  close(ZFSLIST);
  return \%info;
}
# Load the configuration now so config_get/get_store work for everything
# below this point.
parse_config();
sub zetaback_log($$;@) {
  # Append a timestamped, printf-formatted message to the per-host
  # logfile, falling back to STDERR when no logfile is configured or it
  # cannot be opened.
  my ($host, $mess, @args) = @_;
  my $stamp_fmt = config_get($host, 'time_format');
  my $logfile = config_get($host, 'logfile');
  my $out;
  $out = IO::File->new(">>$logfile") if defined($logfile);
  $out ||= IO::File->new(">&STDERR");
  # $mess is used as the printf format, so callers may embed their own
  # % conversions, filled from @args.
  printf $out "%s: $mess", strftime($stamp_fmt, localtime(time)), @args;
  $out->close();
}
sub zfs_remove_snap($$$) {
  # Ask the remote agent (over ssh) to destroy snapshot $snap of $fs on
  # $host.  A no-op when $snap is empty.
  my ($host, $fs, $snap) = @_;
  my $agent = config_get($host, 'agent');
  my $ssh_flags = config_get($host, 'ssh_config');
  $ssh_flags = "-F $ssh_flags" if($ssh_flags);
  print "Using custom ssh config file: $ssh_flags\n" if($DEBUG);
  return unless($snap);
  print "Dropping $snap on $fs\n" if($DEBUG);
  # NOTE(review): interpolating these values into backticks goes through
  # the shell; values come from the local config and backup store.
  `ssh $ssh_flags $host $agent -z $fs -d $snap`;
}
# Lots of args.. internally called.
# Run one backup of $host:$fs.  $type is 'f' (full file), 'i'
# (incremental file) or 's' (zfs send/recv dataset backup); $point is
# the backup timestamp, $store the local store directory, $dumpname the
# destination file or dataset name, and $base (optional) the timestamp
# of the backup this one is incremental against.  The agent is executed
# remotely via ssh in a forked child whose stdout is piped into either a
# (possibly gzipped) local file or `zfs recv`.  Dies on failure; logs
# SUCCESS/FAILED via zetaback_log either way.
sub zfs_do_backup($$$$$$;$) {
  my ($host, $fs, $type, $point, $store, $dumpname, $base) = @_;
  my ($storefs, $encodedname);
  my $agent = config_get($host, 'agent');
  my $ssh_config = config_get($host, 'ssh_config');
  $ssh_config = "-F $ssh_config" if($ssh_config);
  print "Using custom ssh config file: $ssh_config\n" if($DEBUG);
  # compression is meaningless for dataset backups
  if ($type ne "s") {
    # File backup: write to a dot-prefixed temp name, renamed on success.
    my $cl = config_get($host, 'compressionlevel');
    if ($cl >= 1 && $cl <= 9) {
      open(LBACKUP, "|gzip -$cl >$store/.$dumpname") ||
        die "zfs_do_backup $host:$fs $type: cannot create dump\n";
    } else {
      open(LBACKUP, ">$store/.$dumpname") ||
        die "zfs_do_backup $host:$fs $type: cannot create dump\n";
    }
  } else {
    # Dataset backup - pipe received filesystem to zfs recv
    eval {
      $storefs = get_fs_from_mountpoint($store);
    };
    if ($@) {
      # The zfs filesystem doesn't exist, so we have to work out what it
      # would be
      my $basestore = $store;
      $basestore =~ s/\/?%h//g;
      $storefs = get_fs_from_mountpoint($basestore);
      $storefs="$storefs/$host";
    }
    $encodedname = fs_encode($dumpname);
    print STDERR "Receiving to zfs filesystem $storefs/$encodedname\n"
      if($DEBUG);
    zfs_create_intermediate_filesystems("$storefs/$encodedname");
    open(LBACKUP, "|__ZFS__ recv -F $storefs/$encodedname");
  }
  # Do it. yeah.
  eval {
    if(my $pid = fork()) {
      # Parent: close our copy of the pipe and wait for the child ssh.
      close(LBACKUP);
      waitpid($pid, 0);
      die "error: $?" if($?);
    }
    else {
      # Child: build the remote agent command line and exec ssh with
      # stdout redirected into the pipe opened above.
      my @cmd = ('ssh', split(/ /, $ssh_config), $host, $agent, '-z', $fs);
      if ($type eq "i" || ($type eq "s" && $base)) {
        push @cmd, ("-i", $base);
      }
      if ($type eq "f" || $type eq "s") {
        push @cmd, ("-$type", $point);
      }
      # NOTE(review): '||' binds to the filename string here (always
      # true), so a failed open of STDIN/STDOUT is never detected; 'or'
      # was probably intended.  Confirm before changing.
      open STDIN, "/dev/null" || exit(-1);
      open STDOUT, ">&LBACKUP" || exit(-1);
      print STDERR " => @cmd\n" if($DEBUG);
      unless (exec { $cmd[0] } @cmd) {
        print STDERR "$cmd[0] failed: $!\n";
        exit(1);
      }
    }
    if ($type ne "s") {
      # Reject empty dumps, then atomically move into place.
      die "dump failed (zero bytes)\n" if(-z "$store/.$dumpname");
      rename("$store/.$dumpname", "$store/$dumpname") || die "cannot rename dump\n";
    } else {
      # Check everything is ok
      `__ZFS__ list $storefs/$encodedname`;
      die "dump failed (received snapshot $storefs/$encodedname does not exist)\n"
        if $?;
    }
  };
  if($@) {
    # Failure path: clean up the partial file dump and log/re-die.
    if ($type ne "s") {
      unlink("$store/.$dumpname");
    }
    chomp(my $error = $@);
    $error =~ s/[\r\n]+/ /gsm;
    zetaback_log($host, "FAILED[$error] $host:$fs $type\n");
    die "zfs_do_backup $host:$fs $type: $error";
  }
  # Success: report the size of what we stored.
  my $size;
  if ($type ne "s") {
    my @st = stat("$store/$dumpname");
    $size = pretty_size($st[7]);
  } else {
    $size = `__ZFS__ get -Ho value used $storefs/$encodedname`;
    chomp $size;
  }
  zetaback_log($host, "SUCCESS[$size] $host:$fs $type\n");
}
sub zfs_create_intermediate_filesystems($) {
  # Ensure every ancestor of zfs filesystem $fs exists, creating any
  # that are missing.  $fs itself is not created - only the prefixes
  # ending at each '/' (the search starts at offset 1, so the leading
  # pool component is never empty).
  my ($fs) = @_;
  my $slash = index($fs, '/', 1);
  while ($slash != -1) {
    my $ancestor = substr($fs, 0, $slash);
    `__ZFS__ list $ancestor 2>&1`;
    if ($?) {
      print STDERR "Creating intermediate zfs filesystem: $ancestor\n"
        if $DEBUG;
      `__ZFS__ create $ancestor`;
    }
    $slash = index($fs, '/', $slash + 1);
  }
}
sub zfs_full_backup($$$) {
  # Take a full file-based backup of $host:$fs into $store.
  my ($host, $fs, $store) = @_;
  # On-disk dump name: "<unix-time>.<dir_encode(fs)>.full"
  my $now = time();
  my $encoded = dir_encode($fs);
  zfs_do_backup($host, $fs, 'f', $now, $store, "$now.$encoded.full");
}
sub zfs_incremental_backup($$$$) {
  # Take an incremental file-based backup of $host:$fs into $store,
  # based on the full backup taken at time $base.
  # (Removed an unused $agent = config_get(...) lookup; zfs_do_backup
  # resolves the agent itself.)
  my ($host, $fs, $base, $store) = @_;
  # Translate into a proper dumpname
  my $point = time();
  my $efs = dir_encode($fs);
  my $dumpname = "$point.$efs.incremental.$base";
  zfs_do_backup($host, $fs, 'i', $point, $store, $dumpname, $base);
}
sub zfs_dataset_backup($$$$) {
  # Take a dataset (zfs send/recv) backup of $host:$fs into $store.
  # $base, when set, is the snapshot time to send incrementally from.
  # (Removed an unused $agent = config_get(...) lookup; zfs_do_backup
  # resolves the agent itself.)
  my ($host, $fs, $base, $store) = @_;
  my $point = time();
  my $dumpname = "$fs\@$point";
  zfs_do_backup($host, $fs, 's', $point, $store, $dumpname, $base);
}
sub perform_retention($) {
my ($host) = @_;
my $now = time();
if ($DEBUG) {
print "Performing retention for $host\n";
}
foreach my $class (get_classes()) {
if ($DEBUG) {
if ($class) {
print "=> Class: $class\n" if $class;
} else {
print "=> Class: (none)\n";
}
}
my $retention = config_get($host, 'retention', $class);
my $store = get_store($host, $class);
my $backup_info = scan_for_backups($store);
foreach my $disk (sort keys %{$backup_info}) {
my $info = $backup_info->{$disk};
next unless(ref($info) eq 'HASH');
my %must_save;
if ($DEBUG) {
print " $disk\n";
}
# Get a list of all the full and incrementals, sorts newest to oldest
my @backup_points = (keys %{$info->{full}}, keys %{$info->{incremental}});
@backup_points = sort { $b <=> $a } @backup_points;
# We _cannot_ throw away _all_ our backups,
# so save the most recent incremental and full no matter what
push(@{$must_save{$backup_points[0]}}, "most recent backup");
my @fulls = grep { exists($info->{full}->{$_}) } @backup_points;
push(@{$must_save{$fulls[0]}}, "most recent full");
# Process retention policy
my @parts = split(/;/, $retention);
my %retention_map;
foreach (@parts) {
my ($period, $amount) = split(/,/);
if (!defined($amount)) {
$amount = -1;
}
$retention_map{$period} = $amount;
}
my @periods = sort { $a <=> $b } keys(%retention_map);
my %backup_bins;
foreach(@periods) {
$backup_bins{$_} = ();
}
my $cutoff = $now - $periods[0];
# Sort backups into time period sections
foreach (@backup_points) {
# @backup_points is in descending order (newest first)
while ($_ <= $cutoff) {
# Move to the next largest bin if the current backup is not in the
# current bin. However, if there is no larger bin, then don't
shift(@periods);
if (@periods) {
$cutoff = $now - $periods[0];
} else {
last;
}
}
# Throw away all backups older than the largest time period specified
if (!@periods) {
last;
}
push(@{$backup_bins{$periods[0]}}, $_);
}
foreach (keys(%backup_bins)) {
my $keep = $retention_map{$_}; # How many backups to keep
if ($backup_bins{$_}) {
my @backups = @{$backup_bins{$_}};
my $total = @backups; # How many backups we have
# If we didn't specify how many to keep, keep them all
if ($keep == -1) { $keep = $total };
# If we have less backups than we should keep, keep them all
if ($total < $keep) { $keep = $total };
for (my $i = 1; $i <= $keep; $i++) {
my $idx = int(($i * $total) / $keep) - 1;
push(@{$must_save{$backups[$idx]}}, "retention policy - $_");
}
}
}
if ($DEBUG) {
print " => Backup bins:\n";
foreach my $a (keys(%backup_bins)) {
print " => $a\n";
foreach my $i (@{$backup_bins{$a}}) {
my $trans = $now - $i;
print " => $i ($trans seconds old)";
if (exists($must_save{$i})) { print " => keep" };
print "\n";
}