from impala.dbapi import connect
from hdfs import Client
import os
import time
import sys

# Select local data for a given day (e.g. 20180801)
hdfs_url = 'http://172.16.1.30:50070'
impala_host = '192.168.1.203'
impala_port = 21050
# Database the Impala connection uses
impala_database = 'blackpanther'
# Substring a local file name must contain to be selected (CLI arg #1);
# file names look like: start_time-table_name-update_time
local_file_in = str(sys.argv[1])
# Suffix the selected files must have
local_file_suffix = '.csv'
# Directory holding the local files to filter
local_file_path = './data/'

print('start load local file')
st = time.time()
local_file_list = [lfl for lfl in os.listdir(local_file_path) if local_file_in in lfl and lfl.endswith(local_file_suffix)]
print('end load local file {}'.format(time.time()-st))
create_partition = "ALTER TABLE tbl_man_warning_rt ADD IF NOT EXISTS PARTITION (startdate_day='{}',startdate_min='{}');"
refresh_table = 'refresh tbl_man_warning_rt;'
target_path = "/user/hive/warehouse/blackpanther.db/tbl_man_warning_rt/startdate_day={}/startdate_min={}/"
# Number of impala statements executed per commit batch
limit = 20
# (translation of the note below: upload local files into the matching impala partitions)
'''
将本地文件上传到相应的impala分区中
'''
def get_client(type):
    '''
    Return a cached client connection, creating (and caching) it on first use.

    The cache lives in module ``globals()`` under the key ``type``, so repeated
    calls reuse the same connection object.

    :param type: 'hdfs' or 'impala' (any other value returns None)
    :return: the cached or newly created client, or None for an unknown type
    '''
    try:
        cli = globals()[type]
    except KeyError:
        cli = None
    if cli is None:
        if type == 'hdfs':
            cli = _connect_with_retry(
                'hdfs',
                lambda: Client(url=hdfs_url, timeout=(3000, 3000), session=False))
        elif type == 'impala':
            cli = _connect_with_retry(
                'impala',
                lambda: connect(host=impala_host, port=impala_port,
                                timeout=3600, database=impala_database))
    print('{},{}'.format(type, cli))
    return cli


def _connect_with_retry(name, factory):
    '''
    Call *factory* until it succeeds, sleeping 5s between attempts.

    The successful client is stored in ``globals()[name]`` so get_client()
    can find it on later calls.

    :param name: cache key ('hdfs' or 'impala'), also used in log messages
    :param factory: zero-argument callable that builds the connection
    :return: the connected client
    '''
    count = 1
    while True:
        try:
            cli = factory()
            globals()[name] = cli
            return cli
        except Exception as e:
            print(e)
            print('{} count = {}'.format(name, count))
            count = count + 1
            time.sleep(5)


def execute_impala_create_partition_command():
    '''
    Create one partition on tbl_man_warning_rt per matched local file.

    The partition day comes from the CLI filter (local_file_in) and the
    partition minute from the leading start_time field of each file name.
    Commits every ``limit`` statements to bound transaction size.
    '''
    print('start execute impala create')
    st = time.time()
    impala = get_client('impala')
    cur = impala.cursor()
    try:
        # Statements executed since the last commit; the original counter
        # started at 1 but reset to 0, committing after 20 then 21 statements —
        # normalized here so every batch is exactly `limit`.
        pending = 0
        for file_name in local_file_list:
            start_time = file_name.split('-')[0]
            print('tbl_man_warning_rt {} start '.format(start_time))
            # Create the partition for this file
            sql = create_partition.format(local_file_in, start_time)
            cur.execute(sql)
            pending += 1
            if pending >= limit:
                impala.commit()
                pending = 0
            print('tbl_man_warning_rt {} success'.format(start_time))
        impala.commit()
    finally:
        # Ensure the cursor is released even if an execute fails
        cur.close()
    print('end execute impala {}'.format(time.time()-st))


def execute_impala_refresh_partition():
    '''
    Refresh tbl_man_warning_rt metadata, then close cursor and connection.
    '''
    print('start impala refresh')
    began = time.time()
    conn = get_client('impala')
    cursor = conn.cursor()
    cursor.execute(refresh_table)
    conn.commit()
    cursor.close()
    conn.close()
    print('end impala refresh {}'.format(time.time() - began))


def upload2hadoop(source, target):
    '''
    Upload a local file to HDFS, retrying forever with a 3s backoff.

    NOTE(review): the names are inverted relative to hdfs.Client.upload,
    which takes (hdfs_path, local_path): here ``source`` is the HDFS
    destination directory and ``target`` is the local file — confirm
    against the caller before renaming.

    :param source: HDFS destination path
    :param target: local file path
    '''
    # The original retried via self-recursion, which raises RecursionError
    # after ~1000 consecutive failures; an explicit loop retries safely.
    while True:
        try:
            hdfsc = get_client('hdfs')
            hdfsc.upload(source, target, True)
            print('source={},target={}'.format(source, target))
            return
        except Exception as e:
            print(e)
            time.sleep(3)


def execute_upload_hadoop():
    '''
    Push every matched local file into its HDFS partition directory.
    '''
    print('start hdfs upload')
    started = time.time()
    for name in local_file_list:
        # Partition directory is derived from the file's leading start_time:
        # the day is the minute-timestamp with its last four digits stripped.
        minute = name.split('-')[0]
        day = minute[:-4]
        hdfs_dir = target_path.format(day, minute)
        upload2hadoop(hdfs_dir, local_file_path + name)
    print('end hdfs upload {}'.format(time.time() - started))
if __name__ == '__main__':
    start_time = time.time()
    # Pipeline: create partitions, upload the files, then refresh metadata
    # so Impala sees the new data.
    execute_impala_create_partition_command()
    execute_upload_hadoop()
    execute_impala_refresh_partition()
    # Fixed typo in the final log message: 'ues' -> 'use'
    print('use all time = {}'.format(time.time() - start_time))