'''
0. 创建 impala 分区
1. 下载文件
1.1 当需要合成24个文件时 组织小时的路径
1.2 当需要合成1个文件时 组织天的路径
1.3 每下载一个小时的文件 合成一次
2. 合成文件
2.1 合成为1个文件
2.2 合成为24个文件
3. 上传文件
上传到以天为级别的目录下
4. 刷新分区
20171214
'''
import datetime
import functools
import logging.handlers
import os
import shutil
import sys
import time

from hdfs import Client
import impala.dbapi as impala

# WebHDFS endpoint used by hdfs.Client
hdfs_url = 'http://172.16.1.30:50070'
# Impala connection settings (database also determines the target HDFS path)
impala_config = {
    'host': '172.16.1.39',
    'port': 21050,
    'database': 'blackpanther_test',
    'timeout': 100
}
# Day to process, formatted '%Y%m%d' (e.g. 20171214).
# NOTE(review): sys.argv[1] is read unguarded — running the script without an
# argument raises IndexError at import time.
target_time = sys.argv[1]
# Source HDFS path template; placeholders are filled with (day, minute)
source_prefix = '/user/hive/warehouse/blackpanther.db/tbl_man_warning_rt/startdate_day={}/startdate_min={}/'
# Target HDFS directory (day-level partition of the compacted table)
hdfs_target_path = '/user/hive/warehouse/' + impala_config[
    'database'] + '.db/tbl_man_warning_rt_2/startdate_day={}/'.format(target_time)
# Local directory the minute files are downloaded into
target_prefix = './original/{}/'.format(target_time)

# DDL: create the day partition before uploading
create_sql = 'ALTER TABLE {}.tbl_man_warning_rt_2 add if not exists PARTITION (startdate_day="{}");'.format(
    impala_config['database'], target_time)
# Refresh the partition metadata after uploading
refresh_sql = 'refresh {}.tbl_man_warning_rt_2 PARTITION (startdate_day="{}");'.format(impala_config['database'],
                                                                                       target_time)
# Only HDFS files whose name contains this substring are downloaded
filter_flag = 'csv'
# Deletion flags, two characters: [temp files][compound file]
# '01' : keep temp files,   delete compound file
# '10' : delete temp files, keep compound file
# '11' : delete temp files, delete compound file
# '00' : keep both
isdel = '10'

# Download progress file: one line per HDFS source dir already completed
download_plan_file = './plans/{}.db'.format(target_time)
# Full path of the merged output file
target_file = './compounds/{}_{}.csv'.format(time.strftime('%Y%m%d', time.localtime()), target_time)

logging.basicConfig(filename='./logs/{}.log'.format(target_time),
                    format='%(asctime)s,%(levelname)s,%(message)s', datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO)
log = logging.getLogger(__name__)


def init():
    """Create the working directories, wiping any leftover compound files.

    Directories (relative to cwd): compounds/ (merged output, cleared on
    every run), logs/, original/ (downloads), plans/ (download progress).
    """
    # Recreate ./compounds/ from scratch instead of shelling out to
    # `rm -rf ./compounds/*`, which depends on shell glob expansion (and
    # silently skips dot-files).
    shutil.rmtree('./compounds/', ignore_errors=True)
    os.makedirs('./compounds/', exist_ok=True)
    os.makedirs('./logs/', exist_ok=True)
    os.makedirs('./original/', exist_ok=True)
    os.makedirs('./plans/', exist_ok=True)


def usetime(func):
    """Decorator that logs the wall-clock time of every call to *func*.

    :param func: the callable to wrap
    :return: a wrapper forwarding all args/kwargs and the return value
    """
    @functools.wraps(func)  # keep __name__/__doc__ so logs show the real name
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        use_time = time.time() - start_time
        log.info('func {} end use time {}'.format(func.__name__, use_time))
        return result

    return wrapper


class GetClient():
    """Context manager yielding either an HDFS or an Impala client.

    Connecting is retried until it succeeds (with a short pause between
    attempts instead of the original busy-loop).  On normal exit the Impala
    connection is closed; the HDFS client holds no persistent connection.
    """
    HDFS = 'HDFS'
    IMPALA = 'IMPALA'

    def __init__(self, name):
        # name: GetClient.HDFS or GetClient.IMPALA — selects the client type
        self.name = name

    def __enter__(self):
        connected = False
        while not connected:
            try:
                if self.name == GetClient.HDFS:
                    self.cli = Client(url=hdfs_url, timeout=(100, 300), session=False)
                elif self.name == GetClient.IMPALA:
                    self.cli = impala.connect(host=impala_config['host'],
                                              port=impala_config['port'],
                                              database=impala_config['database'],
                                              timeout=impala_config['timeout'])
                connected = True
            except Exception as e:
                # connection failures are treated as transient: log and retry
                log.error(e)
                time.sleep(1)  # avoid busy-spinning while the service is down

        return self.cli

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_tb is None:
            if self.name == GetClient.HDFS:
                # hdfs.Client is stateless over WebHDFS; nothing to close
                log.info('hdfs client close')
            elif self.name == GetClient.IMPALA:
                self.cli.close()
                log.info('impala client close')
        else:
            # an exception escaped the with-body; log it (it still propagates)
            log.error('type={},val={},tb={}'.format(exc_type, exc_val, exc_tb))


@usetime
def download(path):
    '''
    Download CSV part files from HDFS to the local disk.

    :param path: dict {source: target} — source is an HDFS directory,
                 target is the local file-path prefix for that minute
    :return: dict (same shape as *path*) of the entries that failed;
             the caller retries these
    '''
    with GetClient(GetClient.HDFS) as hdfsc, \
            open(download_plan_file, mode='a', encoding='utf-8') as plan:
        error_path = {}
        for source, target in path.items():
            try:
                # Ideally only "directory missing" errors would be caught
                # here, but the dev environment differs slightly from
                # production, so everything is caught and the entry is
                # simply queued for retry by the caller.
                # Keep only the files whose name matches the filter flag.
                source_filename = filter(lambda n: filter_flag in n, hdfsc.list(source))
                for sf in source_filename:
                    # Already-downloaded directories were filtered out by
                    # handle_path via the plan file; overwrite=True replaces
                    # any partial file from an aborted run.
                    tmp_source = source + sf
                    tmp_target = target + '_' + sf
                    hdfsc.download(tmp_source, tmp_target, overwrite=True)
                    log.info('download,{},{}'.format(tmp_source, tmp_target))
                # record the completed directory so re-runs can skip it
                plan.writelines('{},{}\n'.format(source, target))
            except Exception as e:
                log.error('hdfs path {} is error {}'.format(source, e))
                error_path[source] = target
    return error_path


@usetime
def handle_path(source, target, start_time):
    '''
    Build the {hdfs_source_dir: local_target_file} mapping for one day.

    :param source: HDFS path template with two placeholders (day, minute)
    :param target: local download directory (created here if missing)
    :param start_time: the day to process, formatted '%Y%m%d'
    :return: dict with one entry per minute of the day, excluding the
             directories already recorded in the download plan file
    '''
    os.makedirs(target, exist_ok=True)
    result = {}
    current = datetime.datetime.strptime(start_time, '%Y%m%d')
    end_time = current + datetime.timedelta(days=1)
    day = current.strftime('%Y%m%d')
    # minute directories already downloaded by a previous (aborted) run
    old_plan = get_old_download()
    one_minute = datetime.timedelta(minutes=1)
    while current < end_time:
        minute = current.strftime('%Y%m%d%H%M')
        hdfs_dir = source.format(day, minute)  # format once, not twice
        if hdfs_dir not in old_plan:
            result[hdfs_dir] = target + '{}.csv'.format(minute)
        current = current + one_minute
    return result


def get_old_download():
    """Return the HDFS source directories already recorded in the plan file.

    Each plan line is "source,target"; only the source part is kept.  An
    absent plan file simply means nothing has been downloaded yet.
    """
    if not os.path.exists(download_plan_file):
        return []
    finished = []
    with open(download_plan_file, buffering=1024 * 1024, mode='r', encoding='utf-8') as plan:
        for record in plan:
            finished.append(record.split(',')[0])
    return finished


@usetime
def upload():
    """Upload the merged CSV into the day-level HDFS target directory."""
    with GetClient(GetClient.HDFS) as hdfsc:
        # BUG FIX: Client.upload's third positional parameter is n_threads,
        # not overwrite — the original bare `True` was almost certainly
        # intended to let re-runs replace an existing remote file.
        hdfsc.upload(hdfs_target_path, target_file, overwrite=True)


@usetime
def compound():
    '''
    Merge every downloaded minute file of the day into one CSV.

    Concatenates all files under ``target_prefix`` (sorted, for a
    deterministic output order) into ``target_file``.  Total source size and
    result size are logged so a mismatch is easy to spot.
    '''
    source_files = sorted(os.listdir(target_prefix))
    total_size = 0
    # mode='w' truncates any previous compound file, replacing the original
    # `rm -rf` shell call.
    with open(target_file, buffering=1024 * 1024, mode='w', encoding='utf-8') as tf:
        for source_file in source_files:
            start_time = time.time()
            source_file = os.path.join(target_prefix, source_file)
            log.info('compound,{}'.format(source_file))
            total_size += os.path.getsize(source_file)
            with open(source_file, buffering=1024 * 1024, mode='r', encoding='utf-8') as sf:
                # stream copy instead of materializing every line with
                # readlines() + per-line writelines()
                shutil.copyfileobj(sf, tf, 1024 * 1024)
            use_time = time.time() - start_time
            log.info('{} end use time {}'.format(source_file, use_time))
    log.info('compound success source size = {},\t compound size = {}'.format(
        total_size, os.path.getsize(target_file)))


@usetime
def partition(sql):
    """Execute one DDL/refresh statement against Impala.

    :param sql: the statement to run (ALTER TABLE ... / REFRESH ...)
    """
    # local name `conn` — the original `impala` shadowed the imported module
    with GetClient(GetClient.IMPALA) as conn:
        cur = conn.cursor()
        log.info('sql = {}'.format(sql))
        try:
            cur.execute(sql)
            conn.commit()
        finally:
            cur.close()  # close the cursor even when execute raises


@usetime
def main():
    """Full pipeline: download -> merge -> create partition -> upload -> refresh."""
    start_time = time.time()
    init()
    path = handle_path(source_prefix, target_prefix, target_time)
    error_path = download(path)
    # retry the failed paths up to three times
    for _ in range(3):
        if not error_path:
            break  # nothing left to retry — skip the pointless 5 s sleeps
        time.sleep(5)
        error_path = download(error_path)

    if error_path:
        print(error_path)
        return
    compound()
    partition(create_sql)
    upload()
    partition(refresh_sql)
    # isdel flags: [0] temp (downloaded) files, [1] compound file
    if isdel[0] == '1':
        # BUG FIX: the original `'rm -rf '.format(target_prefix)` had no
        # placeholder, so the path was never substituted and nothing was
        # ever deleted
        os.system('rm -rf {}'.format(target_prefix))
    if isdel[1] == '1':
        os.system('rm -rf {}'.format(target_file))
    ust_time = time.time() - start_time
    log.info('all use time {}'.format(ust_time))


# guard the entry point so importing this module does not run the pipeline
if __name__ == '__main__':
    main()
