#!/usr/bin/env python
import os
import argparse
import datetime
import csv
import subprocess
from multiprocessing import Process
from time import sleep

"""
This is a python script application to generate the delta data according to the
pre-configured option. Just run it from command line once the config file has been
properly edited. It wi1ll read the corresponding configs from the config file
and apply those to the main script which generate the table strucrture and
then copy the hive table files to the newly created table.
NOTE: 1. To use the script run `python genertate_delta.py -f pathOfTheConfig`, if no params given,the default path of the config is set to './delta_config.properties'
Change the path of the config file should you put it in a different directory.
2. To generate replica tables in a way each table have different partitions to be copied, start multiple processes of
the scrip and specify different config files for them.
"""

# Base beeline connection string used by every Hive invocation below.
# NOTE(review): the kerberos realm 'SHBAKNUAT.COM' looks like a typo for
# 'SHBANKUAT.COM' (cf. the host name) -- confirm against the cluster config.
base_beeline_cmd_str = "beeline -u 'jdbc:hive2://zjhydsz10.shbankuat.com:10000/default;" + "principal=hive/zjhydsz10.shbankuat.com@SHBAKNUAT.COM' --fastConnect=true"
# parse the argument or the config file accepted from the command line
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, default='./bdrsync_conf.csv')
args = parser.parse_args()


def read_conf():
    """
    Read the configuration CSV to load the sync settings for every table.

    Returns:
        conf_list: every row of the config file as a dict.
        mode_list: the sync mode ('initial'/'redo'/'increment'/'full'/
            'doNothing') determined for each row, index-aligned with conf_list.
        source_tables: copies of the rows that actually need syncing, each
            augmented with its 'mode' and the resolved 'date_col' value.
    """
    conf_list = []
    mode_list = []
    source_tables = []
    with open(args.file, 'r') as csvfile:
        conf_reader = csv.DictReader(csvfile)
        for row in conf_reader:
            # Determine the mode once and reuse it (it was computed twice before).
            mode = determine_mode(row)
            conf_list.append(row)
            mode_list.append(mode)
            if mode != 'doNothing':
                row_cp = row.copy()
                row_cp['mode'] = mode
                row_cp['date_col'] = row_cp[determine_date_col(mode)]
                source_tables.append(row_cp)
    # Create the log directory portably instead of shelling out to `mkdir`,
    # which printed an error every run once the directory existed.
    if not os.path.isdir('logs'):
        os.makedirs('logs')
    return conf_list, mode_list, source_tables


def update_conf(conf_list, mode_list):
    """
    Write the (possibly advanced) sync state of every table back to the
    config CSV given by --file, overwriting it in place.

    :param conf_list: all rows of the config table, each row a dict
    :param mode_list: the mode determined for each row, index-aligned
    :return: None
    """
    today_str = str(datetime.date.today()).replace('-', '')  # YYYYMMDD
    with open(args.file, 'w') as csvfile:
        fieldnames = ['database_name', 'table_name', 'sync_strategy', 'last_run_date', 'last_sync_date',
                      'next_sync_date', 'redo', 'full']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row_dict, mode in zip(conf_list, mode_list):
            # 'initial' and 'increment' previously had two identical branches;
            # both simply advance the sync window by one interval.
            if mode in ('initial', 'increment'):
                row_dict['last_sync_date'] = row_dict['next_sync_date']
                row_dict['next_sync_date'] = update_syncdate_range(row_dict['last_sync_date'])
                row_dict['last_run_date'] = today_str
            elif mode == 'redo':
                row_dict['last_run_date'] = today_str
            elif mode == 'full':
                # After a full copy, record the actual first/last partition
                # dates found on HDFS as the synced range (last 8 chars of the
                # partition path are the YYYYMMDD date).
                exact_partitions, _ = capture_partitions([row_dict])
                row_dict['last_sync_date'] = exact_partitions[0][0][-8:] + '-' + exact_partitions[0][-1][-8:]
                row_dict['next_sync_date'] = update_syncdate_range(row_dict['last_sync_date'])
                row_dict['last_run_date'] = today_str
                row_dict['full'] = '0'
            # mode == 'doNothing' leaves the row untouched.
            writer.writerow(row_dict)

def update_syncdate_range(date_range_str):
    """
    Compute the string describing the next sync interval.

    :param date_range_str: either a single 'YYYYMMDD' date or a
        'YYYYMMDD-YYYYMMDD' range
    :return: the same shape of string, shifted forward by 30 days (a range
        keeps its original span)
    """
    parts = date_range_str.split('-')
    if len(parts) == 1:
        shifted = str_to_date(parts[0]) + datetime.timedelta(days=30)
        return str(shifted).replace('-', '')
    first_day = str_to_date(parts[0])
    last_day = str_to_date(parts[1])
    span = last_day - first_day
    next_start = last_day + datetime.timedelta(days=30)
    next_end = next_start + span
    return str(next_start).replace('-', '') + '-' + str(next_end).replace('-', '')

def str_to_date(date_str):
    """
    Convert a compact 'YYYYMMDD' string into a datetime.date.

    :param date_str: date string such as '20191201'
    :return: the corresponding datetime.date object
    """
    year, month, day = date_str[0:4], date_str[4:6], date_str[6:]
    return datetime.date(int(year), int(month), int(day))


def determine_mode(row_dict):
    """
    Decide which sync action a config row requires.

    Priority: a set 'full' flag wins; a row never run before is 'initial';
    a row already run this month is a 'redo' only if flagged, otherwise
    'doNothing'; a row last run in an earlier month is an 'increment'.

    :param row_dict: one row of the config table
    :return: 'full', 'initial', 'redo', 'increment' or 'doNothing'
    """
    if row_dict['full'] == '1':
        return 'full'
    if row_dict['last_run_date'] == '':
        return "initial"
    # Compare months by normalising both dates to the first of the month.
    current_month = datetime.date.today().replace(day=1)
    last_run_month = str_to_date(row_dict['last_run_date']).replace(day=1)
    if current_month == last_run_month:
        return "redo" if row_dict['redo'] == '1' else 'doNothing'
    return "increment"


def validate_confs(conf_list):
    """
    Validate the loaded config rows, raising if a required field is missing.

    Any row not flagged for full replication must carry a non-empty
    `next_sync_date`; a violation is appended to ./logs/script_errors.log
    and an Exception is raised.

    :param conf_list: all config rows as dicts
    :raises Exception: when `next_sync_date` is empty for a non-full row
    """
    for row_dict in conf_list:
        if not row_dict['full'] == '1' and row_dict['next_sync_date'] == '':
            # BUG FIX: row_dict was previously passed to datetime.now() as a
            # timezone argument (a TypeError) and the second placeholder got
            # no argument at all; pass the timestamp and the row separately.
            os.system(
                'echo "{} ERROR:Please check the config csv file,the `next_sync_date` field  of line {} should not be empty" >> ./logs/script_errors.log'.format(
                    datetime.datetime.now(), row_dict))
            raise Exception(
                'ERROR:Please check the config csv file,the `next_sync_date` field  of line {} should not be empty'.format(
                    row_dict))

def determine_date_col(mode):
    """
    Map a sync mode to the config column holding the dates to be copied.

    :param mode: the sync mode string
    :return: 'last_sync_date' for a redo (re-copy the last window),
        otherwise 'next_sync_date'
    """
    return 'last_sync_date' if mode == 'redo' else 'next_sync_date'


def generate_date_range(source_tables):
    """
    Expand each table's configured date (or date range) into the list of
    partition-date strings to copy.

    :param source_tables: rows needing sync; each must carry 'date_col',
        either a single 'YYYYMMDD' or an inclusive 'YYYYMMDD-YYYYMMDD' range
    :return: one list of 'YYYYMMDD' strings per table, index-aligned with
        source_tables
    """
    date_range_all = []
    for row_dict in source_tables:
        date_list = []
        dates = row_dict['date_col'].split('-')
        if len(dates) == 1:
            date_list.append(dates[0])
        else:
            start_date = str_to_date(dates[0])
            end_date = str_to_date(dates[1])
            while start_date <= end_date:
                date_list.append(str(start_date).replace('-', ''))
                start_date += datetime.timedelta(days=1)
        # BUG FIX: single-date rows used to be appended twice (inside the
        # `if` and again unconditionally), mis-aligning the result with
        # source_tables; append exactly once per row.
        date_range_all.append(date_list)
    return date_range_all


# def replicate_tabs_partitions(conf_dict, replica_tables):
#     """
#     Synopysis: Generate the copies of the table structures to the specified destination according to the 'mode' in the config file
#     """
#     hdfs_cmd_base_str = 'hadoop fs -{} /user/hive/warehouse/{}.db/{}'
#     log_cmd_str = ' 2>> copy_errors.log'
#     mode = conf_dict['mode']
#     if mode == 'full':
#         partitions_list = []
#         for i in range(len(replica_tables)):
#             ori_paths = conf_dict['tables'][i].split('.')
#             # need to ignore the _impala_insert_staging folder
#             # First write all folders in the table root directories to a text file
#             os.system(hdfs_cmd_base_str.format('ls', ori_paths[0], ori_paths[1]) + ' > table_root.txt')
#             # then read them out and drop the impala one and then go through the folders
#             with open('./table_root.txt', 'r') as file_reader:
#                 for line in file_reader:
#                     pattern = '_impala_'  # use regular expression to filter out the impala folder
#                     if '-' in line and not re.search(pattern, line):
#                         partitions_list.append(re.search('/.+', line).group(0))
#             for part in partitions_list:
#                 hdfs_cmd_str = 'hadoop fs -cp {} /user/hive/warehouse/bdrsync.db/{}'.format(part, replica_tables[i])
#                 print('Excecuting: ' + hdfs_cmd_str)
#                 os.system(hdfs_cmd_str)
#     elif mode == 'increment_single':
#         for i in range(len(replica_tables)):
#             ori_paths = conf_dict['tables'][i].split('.')
#             if not 'single_partition' in conf_dict:
#                 hdfs_cmd_str = 'hadoop fs -cp /user/hive/warehouse/{}.db/{}/{} /user/hive/warehouse/bdrsync.db/{}{}'.format(
#                     ori_paths[0], ori_paths[1], analyze_partitions(conf_dict['tables']), replica_tables[i],
#                     analyze_partitions(conf_dict['tables']))
#                 print('Excecurting: ' + hdfs_cmd_str)
#                 os.system(hdfs_cmd_str + log_cmd_str)
#             else:  # the single partition has been specified
#                 hdfs_cmd_str = 'hadoop fs -cp /user/hive/warehouse/{}.db/{}/{} /user/hive/warehouse/bdrsync.db/{}{}'.format(
#                     ori_paths[0], ori_paths[1], analyze_partitions(conf_dict['tables']),
#                     analyze_partitions(conf_dict['tables']))
#                 print('Excecuting: ' + hdfs_cmd_str)
#                 os.system(hdfs_cmd_str + log_cmd_str)
#     elif mode == 'increment_range':
#         date_range_list = generate_date_range(conf_dict)
#         for i in range(len(replica_tables)):
#             ori_paths = conf_dict['tables'][i].split('.')
#             for date in date_range_list:
#                 hdfs_cmd_str = 'hadoop fs -cp /user/hive/warehouse/{}.db/{}/{} /user/hive/warehouse/bdrsync.db/{}{}'.format(
#                     ori_paths[0], ori_paths[1], analyze_partitions(conf_dict['tables']), replica_tables[i],
#                     analyze_partitions(conf_dict['tables']))
#                 print('Excecuting: ' + hdfs_cmd_str)
#                 os.system(hdfs_cmd_str + log_cmd_str)


def recover_partitions(replica_tables):
    """
    Repair the partition metadata of every replica table after the HDFS
    files have been copied, by running `MSCK REPAIR TABLE` through beeline.

    :param replica_tables: the replica table names in the bdrsync database
    """
    script_path = './repair_tables.script'
    with open(script_path, 'w') as file_writer:
        for table in replica_tables:
            file_writer.write('MSCK REPAIR TABLE bdrsync.{};\n'.format(table))
            print("Writing 'MSCK REPAIR TABLE bdrsync.{} to ./repair_tables.script\n'".format(table))
    # Run the whole repair script in a single beeline session.
    repair_cmd = base_beeline_cmd_str + ' -f ./repair_tables.script' + ' 2>>logs/recover_part.log'
    os.system(repair_cmd)

def create_hive_tables(filePath):
    """
    Execute the create-table DDLs in Hive through the beeline client. Reads
    the DDL script and runs its statements sequentially in a single beeline
    session; beeline's stderr goes to logs/hive_create_table_errs.log.

    :param filePath: path of the DDL script (the main flow passes
        './create_tables.script')
    :raises Exception: when the invocation itself fails unexpectedly
    """
    try:
        print('Executing: ' + base_beeline_cmd_str + ' -f {}'.format(filePath))
        os.system(base_beeline_cmd_str + ' -f {}'.format(filePath) + ' 2>> logs/hive_create_table_errs.log')
    except Exception:
        # Narrowed from a bare `except:`. BUG FIX: the Exception below was
        # previously constructed but never raised, silently discarding the
        # failure after logging it.
        os.system('echo "{} unexpected exception in create_hive_tables()" >> ./logs/script_errors.log'.format(
            datetime.datetime.now()))
        raise Exception(
            'ERROR:An exception occurs,check your config file,typing and the logs/hive_create_table_errs.log in this directory for details')

def generate_replica_tables(source_tables):
    """
    Build the replica table names from the source database/table pairs.

    :param source_tables: rows to be synced, each carrying 'database_name'
        and 'table_name'
    :return: list of '<database>_<table>' replica names, index-aligned with
        source_tables
    """
    names = []
    for row in source_tables:
        names.append('{}_{}'.format(row['database_name'], row['table_name']))
    return names


def accumalte_beeline_creates(source_tables, replica_tables):
    """
    Accumulate the CREATE TABLE statement of every replica table into
    './create_tables.script', so all of them can be executed in one beeline
    session. Tables whose mode is neither 'initial' nor 'full' already exist
    and are skipped.

    :param source_tables: rows to be synced (each with 'mode' set)
    :param replica_tables: replica table names, index-aligned
    """
    script_path = './create_tables.script'
    with open(script_path, 'w') as file_writer:
        for source, replica in zip(source_tables, replica_tables):
            if source['mode'] in ('initial', 'full'):
                qualified_name = source['database_name'] + '.' + source['table_name']
                create_str = "CREATE TABLE bdrsync.{} LIKE {};".format(replica, qualified_name)
                print('write ddl ' + create_str + ' to {}'.format('./create_tables.script'))
                file_writer.write(create_str + '\n')
            else:
                print("The hive table is already created for {}".format(replica))


def index_generator():
    """
    Infinite generator yielding 0, 1, 2, 3, 0, 1, ... — used by
    table_distribute() to deal the non-full tables round-robin across the
    four worker processes.

    :yield: int in range(4), cycling forever
    """
    counter = 0
    while True:
        yield counter % 4
        counter += 1


def table_distribute(source_tables, replica_tables):
    """
    Partition the source tables and their replica names into five buckets:
    four round-robin buckets for the incremental worker processes, plus a
    fifth bucket holding every table flagged for full replication.

    :param source_tables: rows to be synced (each carrying a 'full' flag)
    :param replica_tables: replica table names, index-aligned
    :return: a list of five [source_subset, replica_subset] pairs; the last
        pair is the full-replication bucket
    """
    # parts[0..3] feed the four incremental workers, parts[4] the full-copy worker.
    parts = [[[], []] for _ in range(5)]
    next_worker = 0
    for source, replica in zip(source_tables, replica_tables):
        if source['full'] == '1':
            bucket = 4
        else:
            # BUG FIX: the original called generator.next(), which exists only
            # on Python 2; this inline round-robin counter is equivalent and
            # works on both Python 2 and 3.
            bucket = next_worker
            next_worker = (next_worker + 1) % 4
        parts[bucket][0].append(source)
        parts[bucket][1].append(replica)
    return parts


def capture_partitions(source_tables_part, partition_list_expect=None, test_outlist=None):
    """
    Capture the partitions according to the config tag values of each table.

    Lists each table's HDFS directory with `hadoop fs -ls` and greps for the
    `dt=` partition folders. Tables whose folders contain no (matching) data
    are logged to ./logs/script_errors.log and reported back for removal
    instead of aborting the run.

    NOTE(review): assumes Popen stdout.read() yields text (Python 2 str); on
    Python 3 it returns bytes and splitting on a str newline would fail --
    confirm the target runtime.

    :param source_tables_part: subset of source-table rows handled by one
        process; only the first row's 'full' flag is inspected, so the whole
        subset is assumed homogeneous in that flag
    :param partition_list_expect: per-table lists of 'YYYYMMDD' strings to
        match (used only in the non-full branch)
    :param test_outlist: a util list for testing (substitutes the hadoop
        output in the full branch)
    :return: (partition_list_exact, to_be_removed_indices) -- the matching
        HDFS partition paths per surviving table, and the indices of tables
        that had no data or no matching partitions
    """
    to_be_removed_indices = []
    partition_list_exact = []
    if not source_tables_part[0]['full'] == '1':
        for i in range(len(source_tables_part)):
            # Build: hadoop fs -ls .../<db>.db/<table>/* | egrep "dt=(d1|d2|...)"
            cmd_str = 'hadoop fs -ls /user/hive/warehouse/{}/* | egrep "dt=('.format(
                source_tables_part[i]['database_name'] + '.db/' + source_tables_part[i]['table_name'])
            cmd_str += partition_list_expect[i][0]
            for date_str in partition_list_expect[i][1:]:
                cmd_str += '|' + date_str
            cmd_str += ')"'
            out_list = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE).stdout.read().split('\n')
            # The split leaves a trailing '' after the final newline, so a
            # single element means there was no real output at all.
            if len(out_list) <= 1:
                os.system(
                    'echo "{} ERROR:There is no data files or no match partitions in the table folder {}" >> ./logs/script_errors.log'.format(
                        datetime.datetime.now(), source_tables_part[i]['table_name']))
                to_be_removed_indices.append(i)
                continue
                # raise Exception('ERROR:There is no data files or no match partitions in the table folder {} , '
                #                 'remove this table or check the hdfs and add data files.'.format(
                #     source_tables_part[i]['table_name']))
            # The partition path is the last whitespace-separated field of each
            # `ls` line; out_list[:-1] drops the trailing empty element.
            single_part_list = [line.split()[-1] for line in out_list[:-1]]
            partition_list_exact.append(single_part_list)
    else:
        # Full replication: take every dt= partition of each table.
        for i in range(len(source_tables_part)):
            cmd_str = 'hadoop fs -ls /user/hive/warehouse/{}/* | egrep "dt="'.format(
                source_tables_part[i]['database_name'] + '.db/' + source_tables_part[i]['table_name'])
            if test_outlist is None:
                out_list = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE).stdout.read().split('\n')
            else:
                out_list = test_outlist
            if len(out_list) <= 1:
                os.system(
                    'echo "{} ERROR:There is no data files in the table folder {}" >> ./logs/script_errors.log'.format(
                        datetime.datetime.now(), source_tables_part[i]['table_name']))
                # raise Exception('ERROR:There is no data files in the table folder {} , remove this table or check the '
                #                 'hdfs and add data files.'.format(source_tables_part[i]['table_name']))
                to_be_removed_indices.append(i)
                continue
            single_part_list = [line.split()[-1] for line in out_list[:-1]]
            partition_list_exact.append(single_part_list)
    return partition_list_exact, to_be_removed_indices


def copy_partitions(partition_list_exact, rep_table_part):
    """
    Copy each captured partition directory into its replica table under
    /user/hive/warehouse/bdrsync.db, creating the destination partition
    directory tree first (hadoop fs -cp needs an existing target).

    :param partition_list_exact: per-table lists of full HDFS partition paths
    :param rep_table_part: replica table names, index-aligned
    """
    for table_idx in range(len(rep_table_part)):
        replica = rep_table_part[table_idx]
        for partition in partition_list_exact[table_idx]:
            segments = partition.split('/')
            # Rebuild everything below the source table directory (path
            # components 6..end, minus the leaf) under the replica table.
            mkdir_str = 'hadoop fs -mkdir -p /user/hive/warehouse/bdrsync.db/{}'.format(replica)
            for segment in segments[6:-1]:
                mkdir_str = mkdir_str + '/' + segment
            destination = mkdir_str.split()[-1]
            cp_str = 'hadoop fs -cp {} {}'.format(partition, destination)
            print('Executing: ' + mkdir_str)
            print('Executing: ' + cp_str)
            os.system(mkdir_str + ' 2>>logs/hdfs_cp_errors.log')
            os.system(cp_str + ' 2>>logs/hdfs_cp_errors.log')


def executor_process(part_of_data):
    """
    Work done by one process for the non-full replications: expand each
    table's expected date range, capture the actual matching HDFS partitions,
    drop tables that cannot be copied (name typo / no data files), then copy
    the surviving partitions.

    :param part_of_data: one [source_subset, replica_subset] pair produced by
        table_distribute()
    :return: None
    """
    try:
        source_tables_part = part_of_data[0]
        rep_tables_part = part_of_data[1]
        partition_list_expect = generate_date_range(source_tables_part)
        partition_list_exact, to_be_removed = capture_partitions(source_tables_part, partition_list_expect)
        rep_tables_part = [rep_tables_part[i] for i in range(len(rep_tables_part)) if i not in to_be_removed]
        copy_partitions(partition_list_exact, rep_tables_part)
        print('finished one part')
    except Exception as exc:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt) and surface the actual error for debugging.
        print("One process exit with errors")
        print('executor_process error: {}'.format(exc))


def exec_full_replication(part_of_data):
    """
    Work done by the dedicated full-replication process: capture every
    partition of each flagged table, drop tables that cannot be replicated,
    then copy all surviving partitions. Full replication is the most
    time-consuming path, hence its own process.

    :param part_of_data: the [source_subset, replica_subset] pair of tables
        flagged for full replication
    :return: None
    """
    try:
        source_tables_part = part_of_data[0]
        rep_tables_part = part_of_data[1]
        partition_list_exact, to_be_removed = capture_partitions(source_tables_part)
        rep_tables_part = [rep_tables_part[i] for i in range(len(rep_tables_part)) if i not in to_be_removed]
        copy_partitions(partition_list_exact, rep_tables_part)
        print('finished full replication part')
    except Exception as exc:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt) and surface the actual error for debugging.
        print("One process exit with errors")
        print('exec_full_replication error: {}'.format(exc))


if __name__ == '__main__':
    """
    The overview of the process:
    The script read the config file to load the configs
    According to the confs the script first creates all replica table structures in the bdrsync database in hive using beeline. 
    It then copies the hdfs files of specified tables and partitions to the bdrsync path in HDFS.
    Finally after the files are already in the place, the script call hive 'msck repair table' command to 
    recognize the newly added partitions to each table.
    """
    print('--The scrip is starting--')
    ##-- The  first half serial part of the whole procedure
    print('\n--Load the configs--')
    conf_list, mode_list, source_tables = read_conf()  # read the confs from file
    print(conf_list)
    print(mode_list)
    sleep(2)
    validate_confs(conf_list)
    print('\n--passed basic config check--')
    sleep(2)

    replica_tables = generate_replica_tables(source_tables)
    print(source_tables)
    print('\n--start writing create table ddls--')
    accumalte_beeline_creates(source_tables, replica_tables)
    sleep(2)
    print('\n--start creating the table structures in HIVE--')
    create_hive_tables('./create_tables.script')

    ##-- The parallel part of the procedure
    ## four processes handle the tables that don't need full partition replication,
    ## plus one dedicated process for the full-replication tables.
    print('\nstart the parallel processes of partition replication')
    all_parts = table_distribute(source_tables, replica_tables)
    # BUG FIX: the processes were previously start()ed and immediately
    # join()ed one at a time, which ran them strictly serially; start them
    # all first, then join them all so they actually run in parallel.
    p_full = Process(target=exec_full_replication, args=[all_parts[-1]])
    workers = [Process(target=executor_process, args=[part]) for part in all_parts[:-1]]
    p_full.start()
    for worker in workers:
        worker.start()
    p_full.join()
    for worker in workers:
        worker.join()

    # -- The second half serial part of the whole process
    print('\n--repairing the partitions--')
    recover_partitions(replica_tables)
    sleep(2)
    print('\n--updating the sync configurations--')
    update_conf(conf_list, mode_list)
    print(
        '\n--The script programm reaches its end, check the logs/ directory for any possible error occurs in the whole process--')

    # print('\n--Load the configs--')
    # validate_confs(conf_dict)# check if the content of the config file is valid
    # print('\n--Print out the configs--')
    # for key,value in conf_dict.items():
    #     print('{} = {}'.format(key,value))
    # print('\n--passed basic config check--')

    # replica_tables = generate_replica_tables(conf_dict['tables'])#generate the replica table names with path with db and table

    # print('\n--start writing create table ddls--')
    # accumalte_beeline_creates(conf_dict['tables'],replica_tables)#to accumulate the ddls in a text file

    # print('\n--start creating the table structures in HIVE--')
    # create_hive_tables('./create_tables.script')#use beeline to connect to HIVE with kerberos and execute the ddls to create the replica table structures

    # print('\n--start copying the date files--')
    # replicate_tabs_partitions(conf_dict,replica_tables)#copy the original data to the newly created tables in the way of copying hdfs files

    # print('\n--repair the partitions--')
    # recover_partitions(replica_tables)#repair the partitions of the newly created tables

    # print('--The script has finished -')
    # print('\n--There may be some date partition missing in the source tables thus the copy of data file may fail')
    # print('The failed replications are:')
    # i = 0
    # with open('./copy_errors.log','r') as file_reader:
    #     for line in file_reader:
    #         print(line) 
    #         i += 1
    # if i == 0:
    #     print('None')
    # print('--Check copy_errors.log file for more details if you need--')
