#!/usr/bin/env python
# coding=utf-8

"""

@author: tongzhenguo

@time: 2021/6/7 上午11:36

@desc:

封装hive/hdfs操作
"""
import codecs
import os
import random
import time

from pyspark import Row


def csv_to_hive(par_dir, input_path, sep, head_line, target_db, target_table, mode, date):
    """Convert a local delimited file to Hive's default field delimiter and
    build the ``LOAD DATA`` statement that imports it into a Hive table.

    The input file is re-written under ``par_dir`` with every ``sep`` replaced
    by Hive's default field delimiter (``\\u0001``); the returned HQL points
    ``load data local inpath`` at that temporary file.

    :param par_dir: parent directory for the temporary converted file
    :param input_path: path of the local input file (read as UTF-8)
    :param sep: field separator used in the input file
    :param head_line: truthy if the first line is a header row to skip
    :param target_db: target Hive database
    :param target_table: target Hive table
    :param mode: "overwrite" replaces the partition; anything else appends
    :param date: value for the ``date_p`` partition column
    :return: str, the HQL ``load data`` statement
    """
    # makedirs (not mkdir) so a nested, not-yet-existing par_dir also works.
    if not os.path.exists(par_dir):
        os.makedirs(par_dir)
    # Timestamp + random suffix reduces clobbering between concurrent runs.
    tmp_file = par_dir + "/tmp_%s_%s" % (int(time.time()), random.randint(1, 100))
    # Stream line by line instead of loading the whole input into memory.
    with codecs.open(input_path, encoding="UTF-8") as src, \
            codecs.open(tmp_file, encoding="UTF-8", mode="wb") as dst:
        for idx, line in enumerate(src):
            # Skip the header row when present.
            if head_line and idx == 0:
                continue
            # Re-join the fields with Hive's default delimiter \u0001.
            converted = u"\u0001".join(line.strip("\n").split(sep)) + u"\n"
            if idx < 3:  # preview the first few converted lines
                print(idx, converted)
            dst.write(converted)
    # The two modes differ only by the "overwrite" keyword in the HQL.
    overwrite_kw = "overwrite " if mode == "overwrite" else ""
    hql = """
        load data local inpath '{path}'
        {overwrite}into table {target_db}.{target_table} partition(date_p = {date_s})
        """.format(path=tmp_file, overwrite=overwrite_kw, date_s=date,
                   target_db=target_db, target_table=target_table)
    return hql


def load_to_table(spark, path, format, columns_tuple, sep, table_name):
    """Load a dataset from *path* and register it as a Spark temp view.

    :param spark: active SparkSession
    :param path: input path readable by Spark
    :param format: "text" parses delimited lines; anything else is read as ORC
    :param columns_tuple: tuple of column names for the text case (unused for ORC)
    :param sep: field separator for the text case
    :param table_name: name of the temporary view to create or replace
    """
    # FIXME: naively assumes the data is clean/ready; add error handling later.
    if format == "text":
        df = spark.read.text(path)
        # BUG FIX: Row(*columns_tuple) builds a row class whose fields are the
        # individual column names; Row(columns_tuple) would create a single
        # field named after the whole tuple, so row(*r) could not match it.
        row = Row(*columns_tuple)
        rdd = df.rdd.map(lambda x: x[0].split(sep)).map(lambda r: row(*r))
        new_df = spark.createDataFrame(rdd)
        new_df.createOrReplaceTempView(table_name)
    else:
        # ORC carries its own schema, so no column names are needed.
        spark.read.orc(path).createOrReplaceTempView(table_name)


def write_table(df, path, format, mode, sep):
    """Persist a DataFrame to *path*.

    :param df: DataFrame to write
    :param path: output path
    :param format: "csv" writes delimited text; anything else writes ORC
    :param mode: Spark save mode (e.g. "overwrite", "append")
    :param sep: field separator, only used for CSV output
    """
    writer = df.write
    if format != "csv":
        writer.orc(path, mode=mode)
    else:
        writer.csv(path, mode=mode, sep=sep)


if __name__ == "__main__":
    pass
