#!/usr/bin/perl
################################################################################
## Suggested crontab entry
#MyBack Parallel - MySQL Backup Script
#04 04 * * * root /usr/local/bin/myback_parallel.pl > /dev/null 2>&1
##
################################################################################
## Monolith Toolkit for MySQL
## NAME: mt-backup-parallel.pl
## DATE: 2008-10-01
## AUTHOR: Matt Reid
## WEBSITE: http://monolith-mysql.com && http://themattreid.com 
## EMAIL: themattreid@gmail.com
## LICENSE: BSD http://www.opensource.org/licenses/bsd-license.php
################################################################################
## Copyright 2008 Matt Reid
## All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
#    derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##################################################################################
#
## WHAT THIS SCRIPT DOES #########################################################
## prunes backup directory files and keeps 7,3,1 daily/weekly/monthly backups
## dumps mysql databases in parallel, 
## checks for completeness, 
## returns error codes if any, 
## logs debug info, 
## reports back to management server if enabled
##################################################################################
#
## REQUIREMENTS ##################################################################
## perl version: 5 or greater 
## perl modules: DBI, DBD::MySQL, Fcntl, POSIX ":sys_wait_h", Time::HiRes
##               Compress::Zlib, Parallel::ForkManager, Date::Format
##
## RH4: up2date install perl-DBI
## RH4: up2date -d perl-DBD-MySQL
## RH4: rpm --nodeps -Uvh /var/spool/up2date/perl-DBD-MySQL-2.9004-3.1.x86_64.rpm
## RH4: cpan -i Parallel::ForkManager Compress::Zlib Date::Format Time::HiRes
##
## RH5: yum install perl-DBI perl-DBD-MySQL
## RH5: cpan -i Parallel::ForkManager Compress::Zlib Date::Format Time::HiRes
##
## MySQL client: 5.0.27 or greater
## MySQL server: 5.0.24 or greater
##################################################################################
#
## ERROR CODES ###################################################################
## Possible types: OK, INFO, ERROR, CRIT
## 00: [OK]/[INFO] successful operation, generic code for various duties
## 01: [CRIT] database connection failure
## 02: [CRIT] backup directory $backup_dir not writeable, cannot be created.
## 03: [CRIT] DATABASE_LIST variable set incorrectly.
## 04: [CRIT] @Databases array empty.
## 05: [OK] Processing mysqldump of schema: $schema
## 06: [OK] Skipping $DB_Skip[$counter] schema for backup.
## 07: [CRIT] Thread $thread_id for $schema: failed to execute: $!
## 08: [CRIT] Thread $thread_id for $schema: child died with signal 127, with coredump
## 09: [CRIT] Thread $thread_id for $schema: child died with signal 128, without coredump
## 10: [INFO] Thread $thread_id for $schema: child exited with value $? >> 8
## 11: [CRIT] mysqldump: Error: Binlogging on server not active, child exited value 2
## 12: [CRIT] file_verify: seek failed: $!
## 13: [INFO] $schema export file verified.
## 14: [CRIT] $file verify failed. Exiting.
## 15: [CRIT] Unable to write $gzfile $!
## 16: [CRIT] Zlib error writing to $gzfile: $gz->gzerror
## 17: [INFO] $file GZipped to $gzfile
## 18: [INFO] Number of schemas exceeds 32, limiting forks to 32.
## 19: [INFO] Created Parallel::ForkManager with Max $db_counter forks for processing.
## 20: [INFO] Export time for $schema was $elapsed_export_now seconds.
## 21: [INFO] Compress time for $schema was $elapsed_compress_now seconds.
## 22: [CRIT] mysqldump process already running. Exiting.
## 23: [OK] Finished backup of schema: $schema
## 24: [INFO] Overall size = $filesize_standard.
## 25: [CRIT] Size processing tmp files not found. Exiting.
## 26: [CRIT] tmp directory $tmpdir is NOT writable, cannot be created.
## 27: [CRIT] MySQL not running. Exiting. 
## 66: [CRIT] Very first connection errors. We email the exit report here.
## 98: [CRIT] PROCESS FAILURE
## 99: [OK] PROCESS END
##################################################################################
## Required Modules - Don't touch!
use warnings;
use strict;
use Fcntl qw(:flock :seek);
use DBI;
use DBD::mysql;
use POSIX ":sys_wait_h";
use Date::Format;
use Compress::Zlib;
use Time::HiRes qw(gettimeofday tv_interval);
use Parallel::ForkManager;
##################################################################################
## Host information
##################################################################################
## Host information
my $client = "themattreid"; # client name for email reporting
my $hostname = "sunfire";   # server hostname
my $CNF_FILE="/etc/my.cnf"; # location of MySQL cnf file
##################################################################################
## Email settings
my @Report_email = ('corp-dba-mysql-mon@opsource.net,c3-mon@alert.opsource.net'); # Email address(es) for final report
                                              # Comma separated, enclosed by ''
my $sendmail = '/usr/sbin/sendmail';          # Path to sendmail binary
my $from_email = 'corp-dba-mysql-mon@opsource.net'; # Email address for the From: field
##################################################################################
## Local Database settings
## NOTE(review): credentials are stored in plain text below; restrict read
## permission on this script or consider a protected MySQL option file.
my $DBHOST = 'localhost';
my $DBUSER = 'root';
my $DBPASS = 'pass';
my $DATABASE_LIST = 'ALL'; # Set this to "ALL" to backup all databases on server.
                           # ^ Otherwise set to 'SOME'
my @Databases = ();        # Quoted and comma separated list of databases to backup
                           # ^ if not backing up ALL databases on server.
                           # example: my @Databases = ('mysql','foo');
my @DB_Skip = ('information_schema','BACKUP','lost+found','test'); # Database names to
                                                          # ^  skip enclosed by ''
my $DBOPTIONS="--hex-blob --routines --triggers --master-data=2 --single-transaction --opt"; #mysqldump options to run for backup
##################################################################################
#
##################################################################################
## Talkback Database settings
my $TBenabled='OFF';        # enable/disable talkback reporting ["ON"|"OFF"]
my $TBhost = '209.34.79.53';      # talkback hostname or IP address
my $TBuser = 'monitor_incoming';  # talkback username
my $TBpass = '33dkksDDikwkks866'; # talkback password
my $TBdatabase = 'monitor';       # talkback database name (usually 'monitor')
my $TBid = 'NULL';                # talkback id number from the
                                  # ^ monitor database.hosts id column
##################################################################################
#
##################################################################################
## Logging settings
my $loglevel = 2; # choices are in sequence of detail 1 =[INFO], 2 =[OK], 3 =[CRIT]
my $tmpdir = "/tmp"; # temporary directory, must be writeable by script user
my $backup_dir = "/home/mysql-backups";  # directory to write backups, no
                                                  # ^ trailing slash
my $vlmessages = "ON";                            # log final status to
                                                  # ^ /var/log/messages ["ON"|"OFF"],
                                                  # ^ will always log to debug
                                                  # ^ logfile below either way
my $master_log = "/var/log/myback_parallel.log";  # master logfile location, script
                                                  # ^ must have permission to
                                                  # ^ create/write to this file
##################################################################################
# End of user defined settings
##################################################################################
#### DO NOT EDIT BELOW HERE ####
##################################################################################
#
## Global Vars
#my $filesize_standard :shared; #Deprecated
#my $filesize_compress :shared; #Deprecated
my $VERSION_NUM="0.6.2-rc"; #script version
my $filesize_standard = 0;   # aggregate uncompressed dump size (bytes)
my $filesize_compress = 0;   # aggregate compressed dump size (bytes)
my $timer0 = [gettimeofday]; # overall script timer
my $thread_id = 1;
my @Threads = ();
my $thread_count = undef;
my $start_date = time2str("%Y%m%d-%H%M%S", time);
my $DMY = time2str("%Y%m%d", time);
my $backup_base = $backup_dir;
my $dayM = time2str("%d",time); #day of the month
my $dayW = time2str("%w",time); #day of the week
# Choose the destination directory: weekly on Sundays, monthly on the 1st of
# the month, otherwise a plain dated daily directory.  (Sunday wins when the
# 1st falls on a Sunday.)  Stray double semicolons from the original removed.
if($dayW eq "0") {
    $backup_dir = $backup_base."/weekly/".$DMY;
}
elsif($dayM eq "01") {
    $backup_dir = $backup_base."/monthly/".$DMY;
}
else {
    $backup_dir = $backup_base."/".$DMY;
}
##
##

#write overall success or failure to /var/log/messages system logfile
#write overall success or failure to /var/log/messages system logfile
sub final_log {
    # Args: ($CODE, $CONTENT) -- status code and message for the summary line.
    # Reads the aggregate byte counts collected by add_standard()/add_compress()
    # from their tmp files, then appends one summary line to /var/log/messages.
    # No-op unless $vlmessages eq "ON".
    my ($CODE, $CONTENT) = @_;
    my $DATE = localtime();
    return unless $vlmessages eq "ON";

    # standard (uncompressed) size -- the file holds a single number, no newline
    open my $sfh, '<', "$tmpdir/myback_parallel.s.log" or die $!;
    my @lines0 = <$sfh>;
    my $filesize_standard = $lines0[0];
    close($sfh);

    # compressed size
    open my $cfh, '<', "$tmpdir/myback_parallel.c.log" or die $!;
    my @lines1 = <$cfh>;
    my $filesize_compressed = $lines1[0];
    close($cfh);

    # 3-arg lexical open replaces the original 2-arg bareword open
    open my $log, '>>', '/var/log/messages'
        or die("Cannot Write to /var/log/messages, Check user permissions.\n");
    flock($log, LOCK_EX);
    seek($log, 0, SEEK_SET);
    print $log "$DATE myback_parallel.pl | code: $CODE | $CONTENT | std_size_agg: $filesize_standard | compressed_size_agg: $filesize_compressed\n";
    close($log);
}

sub add_standard {
    # Accumulate the uncompressed dump size across forked children.
    # Arg: $cursize -- size in bytes of the dump just produced; 0 resets the
    # running total (used by first() to zero the counter at startup).
    # The total is persisted in $tmpdir/myback_parallel.s.log because each
    # schema is dumped in its own fork and cannot share in-memory state.
    my $cursize = $_[0];

    if ($cursize != 0) {
        # read the running total written by a previous fork
        open my $in, '<', "$tmpdir/myback_parallel.s.log" or die $!;
        my @lines = <$in>;
        $filesize_standard = $lines[0];
        close($in);
    }
    else {
        $filesize_standard = 0;
    }

    debug_log("24","[INFO] Overall size: $filesize_standard");
    debug_log("24","[INFO] Curr size: $cursize, adding: ($filesize_standard + $cursize)");
    $filesize_standard = $filesize_standard + $cursize;
    debug_log("24","[INFO] Overall size now: $filesize_standard");

    # NOTE(review): '>' truncates before flock is obtained, so two concurrent
    # forks can still race here (same as the original) -- confirm acceptable.
    open my $out, '>', "$tmpdir/myback_parallel.s.log"
        or die("Cannot Write to $tmpdir/myback_parallel.s.log, Check user permissions.\n");
    flock($out, LOCK_EX);
    seek($out, 0, SEEK_SET);
    print $out "$filesize_standard";
    close($out);
}

sub add_compress {
    # Accumulate the compressed (.gz) dump size across forked children.
    # Arg: $cursize -- size in bytes of the gzip file just produced; 0 resets
    # the running total (used by first() at startup).
    # Persisted in $tmpdir/myback_parallel.c.log for the same reason as
    # add_standard(): workers are separate processes.
    my $cursize = $_[0];

    if ($cursize != 0) {
        # read the running total written by a previous fork
        open my $in, '<', "$tmpdir/myback_parallel.c.log" or die $!;
        my @lines = <$in>;
        $filesize_compress = $lines[0];
        close($in);
    }
    else {
        $filesize_compress = 0;
    }

    debug_log("24","[INFO] Overall compress size: $filesize_compress");
    debug_log("24","[INFO] Curr compress size: $cursize, adding: ($filesize_compress + $cursize)");
    $filesize_compress = $filesize_compress + $cursize;
    debug_log("24","[INFO] Overall compress size now: $filesize_compress");

    open my $out, '>', "$tmpdir/myback_parallel.c.log"
        or die("Cannot Write to $tmpdir/myback_parallel.c.log, Check user permissions.\n");
    flock($out, LOCK_EX);
    seek($out, 0, SEEK_SET);
    print $out "$filesize_compress";
    close($out);
}

sub loglevel {
    # Echo a log line to STDOUT according to the configured $loglevel:
    #   1 => print every message, 2 => print only messages containing "OK".
    # [CRIT] messages are always printed by debug_log() itself, so nothing
    # needs to happen here for level 3.
    my ($code, $content) = @_;
    if ($loglevel == 1) {
        print "$code: $content\n";
    }
    elsif ($loglevel == 2 && $content =~ /OK/) {
        print "$code: $content\n";
    }
}

#write useful information to permanent debug logfile
sub debug_log {
    # takes the following format: debug_log("code","content");
    # see error code list at top of page.
    # Good codes are logged and the sub returns; bad ([CRIT]) codes are logged,
    # recorded via code_reporter("LOGERR") and the process exits; code 66 is
    # the very-first-connection failure and triggers the full exit report.
    my ($CODE, $CONTENT) = @_;
    my $C = $CODE;
    my $DATE = localtime();
    code_collect($CODE, $CONTENT, $DATE); #write code to tmp file for end-of-script-report

    # Open the master log; if that fails, try to create it first, then re-open.
    # (The original "open || system(touch) || die" chain died even when touch
    # succeeded, because system() returns 0 on success, which is false.)
    my $dat;
    unless (open($dat, '>>', $master_log)) {
        system("touch $master_log");
        open($dat, '>>', $master_log)
            or die("[CRIT] Cannot Open/Create Log File $master_log, Check user permissions.\n");
    }
    flock($dat, LOCK_EX);
    seek($dat, 0, SEEK_SET);

    #GOOD STATUS CODES
    my %good = map { $_ => 1 } qw(00 05 06 10 13 17 18 19 20 21 23 24 99);
    #BAD STATUS CODES -- "26" added: verify_dump_location() emits it, but the
    #original list omitted it, routing it to the invalid-code branch below.
    my %bad = map { $_ => 1 } qw(01 02 03 04 07 08 09 11 12 14 15 16 22 25 26 27 98);

    if ($good{$C}) {
        loglevel($CODE, $CONTENT);
        print $dat "$DATE | code:$CODE | $CONTENT\n";
        close($dat);
        return;
    }
    elsif ($bad{$C}) {
        print "$CODE | $CONTENT \n";
        print $dat "$DATE | code:$CODE | $CONTENT\n";
        close($dat);
        code_reporter("LOGERR","$CODE");
        exit 1;
    }
    #Very first connection failure code - initial DB connection
    elsif ($C eq "66") {
        print "$CODE | $CONTENT \n";
        print $dat "$DATE | code:$CODE | $CONTENT\n";
        close($dat);
        code_reporter("EXIT","$CODE");
        exit 1;
    }
    else { #unspecified code
        $CONTENT = $CONTENT . " | FATAL ERROR. INVALID ERROR CODE. EXITING FORK.";
        print $CONTENT;
        print $dat "$DATE | code:$CODE | $CONTENT\n";
        close($dat);
        code_reporter("LOGERR");
        exit 1;
    }
}

sub get_versions {
# Placeholder for future MySQL client/server version reporting -- not implemented yet.
}

#loop through information_schema results for available schemas to backup
sub get_databases {
    # Args: ($DBHOST,$DBUSER,$DBPASS).  Connects to INFORMATION_SCHEMA and
    # returns (and appends to the global @Databases) every schema name that is
    # not listed in @DB_Skip.
    my ($DBHOST, $DBUSER, $DBPASS) = @_;
    my $DATABASE = "INFORMATION_SCHEMA";
    my $dbh = DBI->connect("DBI:mysql:$DATABASE:$DBHOST", $DBUSER, $DBPASS)
        or debug_log("01","[CRIT] Error connecting : $DBI::errstr");
    my $sql;
    if (@DB_Skip) {
        # The original loop indexed from -1, duplicating the last skip entry
        # (negative index) in both the WHERE clause and the skip log messages.
        # join/map below produces exactly one "!=" term per skip entry.
        foreach my $skip (@DB_Skip) {
            debug_log("06","[OK] Skipping $skip schema for backup.");
        }
        $sql = "select SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE "
             . join(" AND ", map { "SCHEMA_NAME != '$_'" } @DB_Skip);
    }
    else {
        $sql = "select SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA;";
    }
    my $sth = $dbh->prepare($sql) or die "mysql connect failed";
    $sth->execute or die "executing: ", $dbh->errstr;
    while (my $row = $sth->fetchrow_hashref) {
        push(@Databases, $row->{'SCHEMA_NAME'});
    }
    $dbh->disconnect;
    debug_log("00","[OK] Got schema names, proceeding to export.");
    return @Databases;
}

#make sure tmpdir exists and is writable
sub verify_tmp_location {
    # Ensure $tmpdir exists and is writable, creating it if necessary.
    # NOTE(review): failure reports code 02, but the error table at the top
    # lists 26 for the tmp-directory message -- confirm which is intended.
    if (-W $tmpdir) {
        debug_log("00","[OK] tmpdir directory is writable.");
        return;
    }
    system("mkdir -p $tmpdir");
    if (-W $tmpdir) {
        debug_log("00","[OK] tmpdir directory is writable.");
        return;
    }
    debug_log("02","[CRIT] tmp directory $tmpdir is NOT writable, cannot be created.");
}

#make sure backup dir exists and is writeable
sub verify_dump_location {
    # Ensure $backup_dir exists and is writable, creating it if necessary.
    # The empty prototype "()" on the original declaration was dropped --
    # prototypes do not validate arguments, they only alter parsing.
    if (-W $backup_dir) {
        debug_log("00","[OK] Backup directory is writable.");
        return;
    }
    system("mkdir -p $backup_dir");
    if (-W $backup_dir) {
        debug_log("00","[OK] Backup directory is writable.");
        return;
    }
    # code changed from 26 to 02: the error table defines 02 as the
    # backup-directory failure, and debug_log's [CRIT] list handles 02
    # (26 previously fell through to the invalid-code branch).
    debug_log("02","[CRIT] Backup directory $backup_dir is NOT writable, cannot be created.");
}

#spawn threads to run mysqldump for each schema in @Databases array
sub thread_exec {
    # Runs inside a Parallel::ForkManager child.
    # Args: ($thread_id, $schema, $thread_max)
    #   $thread_id  -- index of this worker (used in log messages)
    #   $schema     -- schema name to mysqldump
    #   $thread_max -- total worker count, forwarded to file_verify()
    my $timer_start_export = [gettimeofday];
    my ($thread_id, $schema, $thread_max) = @_;
    my $write_date = $start_date;
    debug_log("05","[OK] Processing mysqldump of schema: $schema");
    my $dump_file = $backup_dir."/".$schema."-".$write_date;
    # NOTE(review): password is visible in the process list here; consider
    # --defaults-extra-file instead of --password on the command line.
    system("mysqldump --user=$DBUSER --password=$DBPASS --host=$DBHOST $DBOPTIONS $schema > $dump_file");
    if ($? == -1) {
        debug_log("07","[CRIT] Thread $thread_id for $schema: failed to execute: $!");
    }
    elsif ($? & 127) {
        # Child was killed by a signal; bit 0x80 marks a core dump.
        # (The original tested "$? >> 2" first, which misclassified EVERY
        # nonzero exit status as the binlog error, and its messages reported
        # the literal numbers 127/128 instead of the actual signal.)
        my $sig = $? & 127;
        if ($? & 128) {
            debug_log("08","[CRIT] Thread $thread_id for $schema: child died with signal $sig, with coredump");
        }
        else {
            debug_log("09","[CRIT] Thread $thread_id for $schema: child died with signal $sig, without coredump");
        }
    }
    elsif (($? >> 8) == 2) {
        debug_log("11","[CRIT] mysqldump: Error: Binlogging on server not active, child exited value 2");
    }
    else {
        my $val = $? >> 8;
        my $msg = "[INFO] Thread $thread_id for $schema: child exited with value $val";
        debug_log("10","$msg");
        my $elapsed_export_now = tv_interval($timer_start_export, [gettimeofday]);
        debug_log("20", "[INFO] Export time for $schema was $elapsed_export_now seconds.");
        # other nonzero exits reach here; a truncated dump is caught by file_verify
        file_verify($dump_file,$thread_id,$thread_max,$schema);
    }
    return;
}

sub export_schemas {
    # Fork one worker per schema (capped at 32) and run thread_exec() in each.
    # Args: list of schema names; also refreshes the global @Databases.
    @Databases = @_;
    my $db_counter = @Databases;
    my $fork_limit;
    if ($db_counter < 33) {
        $fork_limit = $db_counter; # one fork per schema
    }
    else {
        $fork_limit = 32;
        debug_log("18","[INFO] Number of schemas exceeds 32, limiting forks to $fork_limit.");
    }
    # direct method call replaces the indirect "new Parallel::ForkManager" syntax
    my $pm = Parallel::ForkManager->new($fork_limit);
    debug_log("19","[INFO] Created Parallel::ForkManager with Max $db_counter forks for processing.");
    my $next_id = 0;
    foreach my $schema (@Databases) {
        # Assign the id in the PARENT before forking; the original incremented
        # it inside the child after thread_exec, so every worker logged id 0.
        my $worker_id = $next_id++;
        $pm->start and next; # parent continues the loop; child falls through
        thread_exec($worker_id, $schema, $db_counter);
        $pm->finish; # terminates the child process
    }
    $pm->wait_all_children; # wait for all forks before ending main process
    # NOTE(review): thread_exec's return value only ever existed in the forked
    # children, so (as in the original) nothing meaningful can be returned here.
    return;
}

#if thread completes successfully, verify the dump file
sub file_verify {
    # Args: ($file, $thread_id, $thread_max, $schema).
    # Reads the LAST line of the dump and checks it begins with
    # "-- Dump completed"; on success records the size and hands the file to
    # file_compress(), otherwise aborts via code 14.
    my ($file, $thread_id, $thread_max, $schema) = @_;

    # Find the last line without File::ReadBackwards: seek backwards from EOF
    # with a growing byte offset until at least one full line is captured.
    # (The original shelled out to `cat | wc -l` and used the LINE count as a
    # BYTE offset -- two extra processes for a number with the wrong units.)
    open my $in, '<', $file or die "Can't open '$file': $!";
    my $size = -s $file;
    my $found;
    my $offset = 256; # starting guess, grown below
    debug_log("00","[INFO] Verifying $file");
    while (!defined $found) {
        # prevent seek from overshooting the start of the file
        $offset = $size if $offset > $size;
        seek($in, -$offset, SEEK_END) or debug_log("12","[CRIT] file_verify: seek failed: $!");
        my @lines = <$in>;
        if (@lines > 1) {
            # more than one line means the last one is certainly complete
            $found = $lines[-1];
        }
        elsif ($offset >= $size) {
            # whole file read and it is a single line (undef if file is empty)
            $found = $lines[0];
            last unless defined $found; # empty file: fall through to failure
        }
        else {
            $offset += 256; # try again with a bigger window
        }
    }
    close $in;

    my $complete = "-- Dump completed";
    if (defined $found && $found =~ /^$complete/i) {
        # dump verified as complete, proceeding
        debug_log("13","[INFO] $schema export file verified.");
        add_standard(-s $file);
        file_compress($file,$thread_id,$thread_max,$schema);
    }
    else {
        debug_log("14","[CRIT] $file verify failed. Exiting.");
    }
}

#if file passes verify, compress it
sub file_compress {
    # Args: ($file, $thread_id, $thread_max, $schema).
    # Gzips the verified dump, chmods it 400, deletes the original SQL file
    # and records the compressed size.
    my $file = $_[0];
    my $thread_id = $_[1];
    my $thread_max = $_[2] - 1; # decremented as in the original; currently unused
    my $schema = $_[3];
    my $gzfile = $file.".gz";
    my $timer_start_compress = [gettimeofday];
    debug_log("00","[INFO] Attempting compress of $file");

    my $gz = gzopen($gzfile, "wb");
    if (!$gz) {
        debug_log("15","[CRIT] Unable to write $gzfile $!");
        return;
    }
    # checked 3-arg open replaces the original unchecked 2-arg bareword open
    open my $in, '<', $file or debug_log("15","[CRIT] Unable to write $gzfile $!");
    binmode $in;
    my $buf;
    while (my $bytes = sysread($in, $buf, 8192)) {
        unless ($gz->gzwrite($buf)) {
            # The original interpolated "$gz->gzerror" inside the string, which
            # prints the object ref plus the literal text "->gzerror"; call the
            # method explicitly so the real zlib error is logged.
            my $zerr = $gz->gzerror();
            debug_log("16","[CRIT] Zlib error writing to $gzfile: $zerr");
        }
    }
    close $in; # the original leaked this filehandle
    $gz->gzclose();
    debug_log("17","[INFO] $schema file GZipped to '$gzfile'");
    system("chmod 400 $gzfile"); #GZip file is readable only by owner
    system("rm -f $file"); #remove original SQL file
    my $curfilesize = -s $gzfile;
    add_compress($curfilesize);
    my $elapsed_compress_now = tv_interval($timer_start_compress, [gettimeofday]);
    debug_log("21", "[INFO] Compress time for $schema file was $elapsed_compress_now seconds.");
    debug_log("23","[OK] Finished backup of schema: $schema");
}

sub sendEmail
{
    # Pipe one message through the sendmail binary.
    # Args: ($to, $from, $subject, $message).
    # NOTE(review): the $from argument is accepted but ignored -- the From:
    # header always comes from the global $from_email, exactly as before.
    my ($to, $from, $subject, $message) = @_;
    my $payload = "From: $from_email\n"
                . "To: $to\n"
                . "Subject: $subject\n\n"
                . "$message\n";
    open(MAIL, "|$sendmail -oi -t");
    print MAIL $payload;
    close(MAIL);
}

sub email_report {
    # Send the end-of-run notification to every address in @Report_email.
    # Args: ($state, $subject) -- $state "0" = success, "1" = failure.
    my ($state, $subject) = @_;
    my $rState;
    my $outcome;
    if ($state eq 0) { #successful report
        $rState = "Ok";
        $outcome = "GOOD";
    }
    elsif ($state eq 1) { #fail report
        $rState = "Critical";
        $subject = "ERROR CODE: $subject";
        $outcome = "FAIL";
    }
    else {
        print "wrong code\n";
        return;
    }
    my $full_subject = "===================================
This is a myback_parallel alert notification
===================================
State:       $rState
Customer:    $client 
Check:       MySQL Backup -> myback_parallel.pl -> $hostname
Begin Time:  $start_date
Message:     
$subject";

    # single loop replaces the two duplicated while-loops of the original
    foreach my $addr (@Report_email) {
        sendEmail("$addr", "myback\@yourserver.com", "MyBack_Parallel: $client-$hostname backup $outcome.", "$full_subject");
    }
}

#write codes for this script's session only
sub code_collect {
    # Append one "$DATE | code:$CODE | $CONTENT" line to the per-session
    # report file, later archived as backup_report.txt by code_reporter().
    my ($CODE, $CONTENT, $DATE) = @_;
    # 3-arg lexical open replaces the original 2-arg bareword open
    open my $fh, '>>', "$tmpdir/myback_parallel.log"
        or die("Cannot [open/create] report file: $tmpdir/myback_parallel.log, Check user permissions.\n");
    flock($fh, LOCK_EX);
    seek($fh, 0, SEEK_SET);
    print $fh "$DATE | code:$CODE | $CONTENT\n";
    close($fh);
    return;
}

sub code_reporter {
    # Final status dispatcher.  $_[0] is one of:
    #   "LOGERR" -- record the failing error code for check_err() to pick up
    #   "EXIT"   -- fatal: archive the session log, alert talkback/email, exit 1
    #   "OK"     -- success: compute size/time totals, report, email, exit 0
    # $_[1] (optional) is the error code being reported.
    my $status = $_[0];
    my $err = undef;
    if ($_[1]) {
        $err = $_[1];
    }

    if ($status eq "LOGERR") { #we log the error
        open my $efh, '>', "$tmpdir/myback_parallel.err.log"
            or die("Cannot Write to $tmpdir/myback_parallel.err.log, Check user permissions.\n");
        flock($efh, LOCK_EX);
        seek($efh, 0, SEEK_SET);
        print $efh "$err";
        close($efh);
    }
    elsif ($status eq "EXIT") { #or we exit and report failure to methods
        print "####END - FATAL ERROR - CHECK DEBUG LOG at $master_log ####\n\n";
        my $tmp_file2 = "$tmpdir/myback_parallel.log";
        my $tmp_file3 = "$backup_dir/backup_report.txt";
        system("mv $tmp_file2 $tmp_file3");
        system("rm -f $tmpdir/myback_parallel.c.log");
        system("rm -f $tmpdir/myback_parallel.s.log");
        if ($TBenabled eq "ON") {
            talkback("2","0","0","0"); #we alert the talkback function about failure
        }
        debug_log("00","[INFO] Emailing critical failure report.");
        email_report("1",$err);
        exit 1;
    }
    elsif ($status eq "OK") {
        # Read aggregate standard size.  Error code fixed from "24" to "25":
        # the table defines 25 as the fatal missing-size-file condition, while
        # 24 is an [INFO] code that would NOT abort, leaving @lines0 undefined.
        open my $sfh, '<', "$tmpdir/myback_parallel.s.log"
            or debug_log("25","[CRIT] Size processing tmp files not found. Exiting. $!");
        my @lines0 = <$sfh>;
        my $filesize_standard = $lines0[0];
        close($sfh);

        # read aggregate compressed size (same code fix as above)
        open my $cfh, '<', "$tmpdir/myback_parallel.c.log"
            or debug_log("25","[CRIT] Size processing tmp files not found. Exiting. $!");
        my @lines1 = <$cfh>;
        my $filesize_compressed = $lines1[0];
        close($cfh);

        my $compressed_bytes = $filesize_compressed; # raw bytes kept for talkback
        $filesize_standard = (($filesize_standard /1024) /1024); #MB conversion
        $filesize_compressed = (($filesize_compressed /1024) /1024); #MB conversion

        my $filesize_standard_hr = sprintf "%.2f", $filesize_standard;
        my $filesize_compressed_hr = sprintf "%.2f", $filesize_compressed;

        debug_log("00","[OK] Aggregate export size: $filesize_standard_hr MB");
        debug_log("00","[OK] Aggregate compressed size: $filesize_compressed_hr MB");

        my $tmp_file0 = "$tmpdir/myback_parallel.log";
        my $tmp_file1 = "$backup_dir/backup_report.txt";

        debug_log("99","[OK] PROCESS END");
        my $elapsed = tv_interval($timer0, [gettimeofday]);
        $elapsed = sprintf "%.2f", $elapsed;
        debug_log("00","[OK] Total elapsed time: $elapsed seconds");
        final_log("00","[OK] Total elapsed time: $elapsed seconds");
        # archive the session log with the backups, then drop the size files
        system("mv $tmp_file0 $tmp_file1");
        system("rm -f $tmpdir/myback_parallel.c.log");
        system("rm -f $tmpdir/myback_parallel.s.log");

        if ($TBenabled eq "ON") {
            talkback("1",$compressed_bytes,$elapsed,$elapsed);
        }
        my $subject0="
            Aggregate export size: $filesize_standard_hr MB
            Aggregate compressed size: $filesize_compressed_hr MB
            Total elapsed time: $elapsed seconds";
        email_report("0",$subject0);
        exit 0;
    }
    else {
        my $subject1 = "Backup FAILED with unknown error. Please see debug logfile on server.";
        email_report("1",$subject1);
        exit 1;
    }
}

sub talkback {
    # Report backup status to the central "talkback" monitoring database.
    # Args: ($STATUS, $SIZE_byte, $DUMP_DELTA, $COMPRESS_DELTA)
    #   $STATUS         -- process_status_id ("1" success, "2" failure)
    #   $SIZE_byte      -- compressed size in bytes
    #   $DUMP_DELTA     -- export time (seconds)
    #   $COMPRESS_DELTA -- compress time (seconds)
    my ($STATUS, $SIZE_byte, $DUMP_DELTA, $COMPRESS_DELTA) = @_;

    # Slurp my.cnf so a copy is stored alongside the status row.  Quote and
    # paren characters are still stripped so the stored value keeps the same
    # format as before, even though placeholders no longer require escaping.
    my $CNF = '';
    if (open my $cnf_fh, '<', $CNF_FILE) {
        while (my $line = <$cnf_fh>) {
            $line =~ tr/'`()"//d;
            $CNF .= $line;
        }
        close $cnf_fh;
    }
    else {
        debug_log("00","[INFO] Could not open $CNF_FILE $!");
    }

    # Bind placeholders replace the original string interpolation -- the raw
    # cnf contents and timings were previously injected into the SQL text.
    my $sql = "INSERT INTO `$TBdatabase`.`hosts_incoming_backup_state` "
            . "(`id`,`host_id`,`process_status_id`,`file_name`,`file_size`,"
            . "`exec_time`,`exec_compress`,`cnf_bkup`,`script_version`,`Creation_time`) "
            . "VALUES (NULL,?,?,?,?,?,?,?,?,NOW())";

    my $dbh = DBI->connect("DBI:mysql:$TBdatabase:$TBhost", $TBuser, $TBpass)
        or debug_log("00","[INFO] Error connecting for talkback: $DBI::errstr");
    # concatenation fixes the original "$dbh->errstr" interpolation bug
    my $sth = $dbh->prepare($sql) or debug_log("00","[INFO] " . $dbh->errstr);
    $sth->execute($TBid, $STATUS, "$backup_dir/$DMY", $SIZE_byte,
                  $DUMP_DELTA, $COMPRESS_DELTA, $CNF, $VERSION_NUM)
        or die "executing: ", $dbh->errstr;
    $dbh->disconnect;
}

sub sequence {
    # Decide which schema list to export based on $DATABASE_LIST:
    #   "ALL"  -- pull every schema name (minus @DB_Skip) from the server
    #   "SOME" -- use the hand-maintained @Databases list from the config
    # Anything else is a configuration error (code 03).
    if ($DATABASE_LIST eq "ALL") {
        @Databases = ();
        @Databases = get_databases($DBHOST,$DBUSER,$DBPASS);
        export_schemas(@Databases);
    }
    elsif ($DATABASE_LIST eq "SOME") {
        if (!@Databases) {
            # "\@Databases" escaped: the original interpolated the (empty)
            # array, logging " array empty." with the variable name missing.
            debug_log("04","[CRIT] \@Databases array empty.");
            exit 1; # unreachable in practice (debug_log exits on code 04); kept as a safety net
        }
        else {
            export_schemas(@Databases);
        }
    }
    else {
        debug_log("03","[CRIT] DATABASE_LIST variable set incorrectly.");
    }
}

sub check_mysql {
    # Verify a local mysqld process exists before attempting any dumps;
    # exits with code 27 when none is found.
    debug_log("00","[INFO] Checking if mysqld process is running...");
    system("pgrep mysqld > /dev/null");
    if (($? >> 8) == 1) { # pgrep exits 1 when nothing matched
        debug_log("27","[CRIT] MySQL not running. Exiting.");
        exit 1;
    }
    debug_log("00","[OK] MySQL process is running.");
    return;
}

sub check_processes {
    # Abort (code 22) if another mysqldump is already running.
    # Output redirected to /dev/null -- the original leaked matching PIDs to
    # STDOUT, inconsistent with check_mysql().
    system("pgrep mysqldump > /dev/null");
    my $val = $? >> 8;
    if ($val == 0) { # pgrep exit 0 == at least one match found
        debug_log("22","[CRIT] mysqldump process already running. Exiting.");
        exit 1;
    }
    return;
}

sub check_err {
    # Return the error code recorded by code_reporter("LOGERR") during this
    # run, or "0" if the run finished without a [CRIT] event.
    # 3-arg lexical open replaces the original 2-arg bareword open.
    open my $fh, '<', "$tmpdir/myback_parallel.err.log" or die $!;
    my @lines0 = <$fh>;
    my $err = $lines0[0];
    close($fh);
    if (!$err) { return "0"; }
    else { return $err; }
}

sub first {
    # Pre-flight: ensure working directories exist, MySQL is up, no other
    # mysqldump is in progress, then zero the size and error accumulators.
    verify_tmp_location();
    verify_dump_location();
    if ($DBHOST eq 'localhost' || $DBHOST eq '127.0.0.1') {
        check_mysql(); # make sure mysql is actually running on this host
    }
    check_processes(); #make sure mysqldump isn't already running before we start
    debug_log("00","[OK] PROCESS START");
    add_standard($filesize_standard); #zero the datasize ($filesize_standard is 0 here)
    add_compress($filesize_compress); #zero the datasize

    #open the error collector (not error log) and zero it out
    open my $efh, '>', "$tmpdir/myback_parallel.err.log"
        or die("Cannot Write to $tmpdir/myback_parallel.err.log, Check user permissions.\n");
    flock($efh, LOCK_EX);
    seek($efh, 0, SEEK_SET);
    print $efh "";
    close($efh);
}

#delete files from proper directories for pruning
sub prune_files {
    # Remove aged backups: the weekly dir keeps 3 weeks (runs on Sundays),
    # the monthly dir keeps 30 days (runs on the 1st), the daily base keeps
    # 7 days (every other day).
    my ($dir, $sec);
    if ($dayW eq "0") { #if today is sunday
        $dir = $backup_base."/weekly";
        $sec = 1814400; #delete older than 3 weeks
    }
    elsif ($dayM eq "01") { #if today is the first of the month
        $dir = $backup_base."/monthly";
        $sec = 2592000; #delete older than 30 days
    }
    else { #every other day
        $dir = $backup_base;
        $sec = 604800; # delete older than 7 days
    }
    opendir(my $dh, $dir) || die "sub routine prune_files can't open $dir : $!\n";
    my @files = readdir($dh);
    closedir($dh); # was close(DIR), which does not close a directory handle
    foreach my $file (@files) {
        next if $file eq '.' || $file eq '..'; # never attempt to unlink these
        debug_log("00","[INFO] PRUNE: Checking file $dir/$file...");
        my $now = time;
        my @stat = stat("$dir/$file");
        next unless defined $stat[9]; # stat can fail if the entry vanished mid-scan
        if ($stat[9] < ($now - $sec)) {
            debug_log("00","[OK] PRUNE: Deleting $dir/$file...");
            # NOTE(review): dated backups are created as DIRECTORIES; unlink
            # cannot remove a directory, so the failure branch fires for those.
            # Confirm whether recursive removal was intended here.
            unlink("$dir/$file") || debug_log("00","[OK] PRUNE: File $dir/$file could not be deleted. Please check manually.");
        }
    }
}

sub welcome {
    # Print the startup banner (version, target host/user) to STDOUT.
    my $banner = <<WELCOME;

Currently running:  myback_parallel.pl
---------------------------------------
version: $VERSION_NUM | date:2008-10-01
by: M.Reid http://themattreid.com
---------------------------------------

host: $DBHOST | user: $DBUSER 
---------------------------------------
ID: STATE NOTE
WELCOME
    print $banner;
}

##################################################################################
## Here's where the clown takes his knife out
## Main driver: banner, pre-flight checks, global read lock, prune + dump,
## unlock, then final status reporting / email.
welcome();
first();
## Global locking for consistent snapshot
## NOTE(review): FLUSH TABLES WITH READ LOCK is held on $lock_dbh for the whole
## run, blocking writers server-wide until UNLOCK TABLES below (or until the
## connection drops).  The dumps themselves also pass --single-transaction;
## confirm this double locking is intended.
my $DATABASE="INFORMATION_SCHEMA";
my $lock_dbh = DBI->connect_cached( "DBI:mysql:$DATABASE:$DBHOST", $DBUSER, $DBPASS) or debug_log("66","[CRIT] Cannot connect to lock tables. Error connecting : $DBI::errstr");
my $lock_sql = "flush tables with read lock;";
my $lock_sth = $lock_dbh->prepare($lock_sql) or debug_log("66","[CRIT] Cannot prepare statement to lock tables. Error connecting : $DBI::errstr");
$lock_sth->execute or debug_log("66","[CRIT] Cannot execute sql to lock tables. Error connecting : $DBI::errstr");
#
## The real work begins below
prune_files();
sequence();
#
## Get rid of the global lock
## NOTE(review): connect_cached with identical arguments is expected to return
## the SAME handle as $lock_dbh, so UNLOCK TABLES runs on the locking session;
## verify, since a fresh session would make this unlock a no-op.
my $unlock_sql = "unlock tables;";
my $unlock_dbh = DBI->connect_cached( "DBI:mysql:$DATABASE:$DBHOST", $DBUSER, $DBPASS) or debug_log("66","[CRIT] Cannot connect to lock tables. Error connecting : $DBI::errstr");
my $unlock_sth = $unlock_dbh->prepare($unlock_sql) or debug_log("66","[CRIT] Cannot connect to unlock tables. Error connecting : $DBI::errstr");
$unlock_sth->execute or debug_log("66","[CRIT] Cannot connect to unlock tables. Error connecting : $DBI::errstr");
$lock_dbh->disconnect;
#
## Get the last error code and decide if we are 0||1
my $code = check_err();
if($code eq "0") {
    code_reporter("OK");
}
else {
    code_reporter("EXIT",$code);
}
## The horse goes home
##################################################################################



