import os
import sys
import time
import datetime
import shutil
import sqlite3
from hdfs import Client
from impala.dbapi import connect

'''
Purpose: copy data from impala table A into table B, with the transfer path
selectable by time (day).
sqlite: read the local database to obtain saved progress; if there is no data,
prompt for a start/end time; if there is, let the user choose between resuming
the download and starting over.
'''
# HDFS layout of the source (per-day) and target (per-day, per-minute) tables
source_path = '/user/hive/warehouse/blackpanther.db/dw_man_warning_rt/startdate_day={}/'
target_path = '/user/hive/warehouse/blackpanther.db/tbl_man_warning_rt/startdate_day={}/startdate_min={}/'
# Connection settings
impala_host = '192.168.1.203'
impala_port = 21050
impala_database = 'blackpanther'
hdfs_url = 'http://192.168.1.203:50070'
sqlite_database = 'dw2btl.db'
# Prepare a clean local staging directory. ignore_errors lets the very first
# run succeed when ./data/ does not exist yet (a plain rmtree would raise
# FileNotFoundError and abort the script before any work is done).
shutil.rmtree('./data/', ignore_errors=True)
os.mkdir('./data/')


def init_sqlite():
    '''
    Initialize the sqlite environment: create the bookkeeping table that
    records which HDFS paths were transferred and whether they succeeded.
    '''
    sqlite = get_client('sqlite')
    cursor = sqlite.cursor()
    # IF NOT EXISTS makes re-initialization idempotent instead of raising
    # sqlite3.OperationalError on a second run.
    cursor.execute('create table if not exists bw2tbl (path varchar(100), is_success varchar(20))')
    # Python's sqlite3 (since 3.6) no longer implicitly commits before DDL,
    # so without this commit the table creation could be lost.
    sqlite.commit()
    cursor.close()


def get_start_end_time():
    '''
    Prompt for a start and an end date (format YYYYMMDD) and return every day
    in the inclusive range as a list of 'YYYYMMDD' strings.

    The list is printed for confirmation; any input other than a bare Enter
    aborts the program.
    :return: list of 'YYYYMMDD' strings
    '''
    # Read and parse the start date
    start_time = input('start time: ')
    start_time = datetime.datetime.strptime(start_time, '%Y%m%d')
    # Read and parse the end date
    end_time = input('end time: ')
    end_time = datetime.datetime.strptime(end_time, '%Y%m%d')
    # Collect start..end inclusive; strftime formats YYYYMMDD directly instead
    # of round-tripping through str()/split()/replace().
    middle_times = [start_time.strftime('%Y%m%d')]
    while start_time < end_time:
        start_time = start_time + datetime.timedelta(days=1)
        middle_times.append(start_time.strftime('%Y%m%d'))
    print('转存的时间')
    for d in middle_times:
        print(d)
    if input('回车继续') != '':
        sys.exit()
    return middle_times


def get_path():
    '''
    Collect the HDFS minute-partition directories for every day selected by
    the user.
    :return: list of source directory paths, each ending with '/'
    '''
    days = get_start_end_time()
    hdfs_client = Client(url=hdfs_url, timeout=(3000, 3000), session=False)
    # Directories that down_upload() will later walk
    all_paths = []
    for day in days:
        day_dir = source_path.format(day)
        try:
            # A day may simply be absent on HDFS (gaps in the data); swallow
            # the lookup failure and keep scanning the remaining days.
            for entry in hdfs_client.list(day_dir):
                all_paths.append(day_dir + entry + '/')
        except Exception:
            pass
    return all_paths


# Transfer helper extracted so a failure does not abort the whole run.
# du == 1: download | du == 2: upload
# For upload:   source = path on HDFS | target = local file path
# For download: source = path on HDFS | target = local destination path
def download4HDFS(source, target, du, hdfsc):
    '''
    Download from / upload to HDFS, retrying every 3 seconds until it
    succeeds. A fresh Client is built when hdfsc is None.

    :param source: HDFS path
    :param target: local path (destination for download, payload for upload)
    :param du: 1 = download, 2 = upload; any other value is a no-op
    :param hdfsc: existing hdfs Client, or None to create one
    '''
    # A loop (rather than the original recursive retry) keeps the stack flat:
    # a persistently failing transfer would otherwise end in RecursionError.
    while True:
        try:
            if hdfsc is None:
                hdfsc = Client(url=hdfs_url, timeout=(3000, 3000), session=False)
            if du == 1:
                hdfsc.download(source, target, True)
            elif du == 2:
                hdfsc.upload(source, target, True)
            return
        except Exception as e:
            print(e)
            time.sleep(3)


def get_client(type):
    '''
    Return a module-level cached connection for the requested backend,
    creating and caching it on first use.
    :param type: 'hdfs', 'impala' or 'sqlite'
    :return: the client/connection object, or None for an unknown type
    '''
    # Reuse a previously created client stored in module globals
    cached = globals().get(type)
    if cached is not None:
        return cached
    if type == 'hdfs':
        client = Client(url=hdfs_url, timeout=(3000, 3000), session=False)
    elif type == 'impala':
        client = connect(host=impala_host, port=impala_port, timeout=3600, database=impala_database)
    elif type == 'sqlite':
        client = sqlite3.connect(database=sqlite_database)
    else:
        return None
    # Cache under the same key so the next call returns this instance
    globals()[type] = client
    return client


# For each selected minute partition: download the part files, merge them into
# one local csv, create the matching partition on the target table, upload the
# merged file, then refresh the partition metadata.
def down_upload():
    '''
    Transfer dw_man_warning_rt data into tbl_man_warning_rt, one minute
    partition at a time, committing and cleaning up after each partition.
    '''
    source_paths = get_path()
    impala = get_client('impala')
    hdfsc = get_client('hdfs')
    for sp in source_paths:
        # Merged local file named after the minute directory in the path
        # (sp ends with '/', so split('/')[-2] is the deepest directory).
        newFile = './' + sp.split('/')[-2] + '.csv'
        for f in hdfsc.list(sp):
            if 'part-' in f:
                download4HDFS(sp + '/' + f, './data/' + f + '.hdfs', 1, hdfsc)
                print(sp + '/' + f + ' download success')
        # Merge every downloaded part file into one csv. The context managers
        # guarantee the handles are closed even if a read/write fails, where
        # the original bare open()/close() leaked the handle on error.
        with open(newFile, 'w', encoding='utf-8') as pf:
            for tf in os.listdir('./data/'):
                with open('./data/' + tf, 'r', encoding='utf-8') as t:
                    for line in t:
                        pf.write(line)
        print('new file = {}'.format(newFile))
        # Extract the day and minute partition keys from the path
        day = sp.split('/')[-3].split('=')[1]
        minute = sp.split('/')[-2].split('=')[1]  # renamed from 'min': shadowed the builtin
        # Create the partition if it does not exist yet
        sql = "ALTER TABLE tbl_man_warning_rt ADD IF NOT EXISTS PARTITION (startdate_day='{}',startdate_min='{}');".format(
            day, minute)
        cur = impala.cursor()
        cur.execute(sql)
        # Upload the merged file into the new partition directory
        path = target_path.format(day, minute)
        download4HDFS(path, newFile, 2, hdfsc)
        # Make the uploaded data visible to impala
        sql = "REFRESH tbl_man_warning_rt "
        cur.execute(sql)
        impala.commit()
        cur.close()
        print('tbl_man_warning_rt partition create and refresh success')
        # Reset the local staging area for the next partition
        shutil.rmtree('./data/')
        os.mkdir('./data/')
        os.remove(newFile)
        print('day={},min={} is success'.format(day, minute))

    # Close the impala connection once all partitions are processed
    impala.close()


# Guard the entry point so importing this module (e.g. for init_sqlite) does
# not immediately start a transfer.
if __name__ == '__main__':
    down_upload()
