import datetime
import os
import shutil
import zipfile
import pandas as pd
import uuid
import py7zr
import pymssql
import pymysql
import rarfile

from pymysql.cursors import DictCursor


# UNC path of the FTP share that holds the policy files to process.
dir_path = r'//10.99.60.211/PolicyFiles'
# Local server directory where archives get extracted.
extract_path = r'/data/upzipftp/unzip_ftp_source'
# Destination MySQL table for the extracted Excel rows.
table_name = 'ftp_total_data'

from sqlalchemy import create_engine
# SQLAlchemy engine used by pandas.DataFrame.to_sql for bulk inserts.
# NOTE(review): credentials are hard-coded — consider moving them to config/env.
engine = create_engine("mysql+pymysql://root:root@172.17.240.153:3306/ftp_data_source?charset=utf8")

# Shared raw PyMySQL connection used for cursor-level queries and inserts.
conn = pymysql.connect(host="172.17.240.153", user="root", password="root", database="ftp_data_source", charset='utf8',
                       use_unicode=True)

"""
dir_path--ftp路径
new_path--解压后的路径，服务器路径
file_name--文件名
other_url--文件在ftp的绝对路径
"""
def unzip_rb_file(rb_other_url_list, new_path, file_name, other_url, flag):
    for index, file in enumerate(rb_other_url_list):
        print("")
        print("循环到的行数：" + str(index) + "  时间" + str(datetime.datetime.now()))
        if os.path.splitext(file)[-1] in ['.xlsx', '.xls']:
            print("方法名unzip_rb_file:" + file, other_url)
            excel_file_path_dcit = dict()
            excel_file_path_dcit[os.path.splitext(file)[0]] = os.path.join(dir_path, file)
            other_url = os.path.join(dir_path, file)
            excel_to_sql(path=os.path.join(dir_path, file), table_name=table_name, conn=engine, other_url=other_url, file_name=file)
        elif os.path.isdir(os.path.join(dir_path, file)):
            cur_path = os.path.join(dir_path, file)
            unzip_file(dir_path=cur_path, new_path=cur_path, file_name=file, other_url=other_url, flag=True)
        elif os.path.splitext(file)[-1] == '.7z':
            un_7z(dir_path, file, other_url, flag)
        elif os.path.splitext(file)[-1] == '.zip':
            unzip(dir_path, file, other_url, flag)
        elif os.path.splitext(file)[-1] == '.rar':
            un_rar(dir_path, file, other_url, flag)


def unzip_file(dir_path, new_path, file_name, other_url, flag):
    """Recursively process every entry under a local (extracted) directory.

    Excel files are loaded into MySQL; sub-directories are recursed into;
    nested .7z/.zip/.rar archives are extracted and processed in turn.

    :param dir_path: directory to scan
    :param new_path: unused here; kept for interface compatibility
    :param file_name: name of the archive/directory this content came from
    :param other_url: absolute FTP path of the originating file
    :param flag: True when other_url is already the final FTP path;
                 False when it must be derived from the entry itself
    """
    for file in os.listdir(dir_path):
        ext = os.path.splitext(file)[-1]
        full_path = os.path.join(dir_path, file)
        if ext in ['.xlsx', '.xls']:
            print("方法名unzip_file:" + file, other_url)
            if not flag:
                # Top-level file: its FTP path is the file itself.
                other_url = full_path
            excel_to_sql(path=full_path, table_name=table_name, conn=engine, other_url=other_url, file_name=file)
        elif os.path.isdir(full_path):
            unzip_file(dir_path=full_path, new_path=full_path, file_name=file, other_url=other_url, flag=True)
        elif ext == '.7z':
            un_7z(dir_path, file, other_url, flag)
        elif ext == '.zip':
            unzip(dir_path, file, other_url, flag)
        elif ext == '.rar':
            un_rar(dir_path, file, other_url, flag)


def un_7z(dir_path, file, other_url, flag):
    """Extract a .7z archive into a fresh unique directory and recurse into it.

    :param dir_path: directory containing the archive
    :param file: archive file name
    :param other_url: FTP path context (see get_file_ftp_path)
    :param flag: whether other_url is already the absolute FTP path
    """
    print(file)
    # BUGFIX: the original constructed (and discarded) a py7zr.Bad7zFile
    # exception object here — a no-op; removed.
    try:
        # Absolute FTP path recorded with every row from this archive.
        other_url_temp = get_file_ftp_path(dir_path, file, other_url, flag)
        extract_path_subset = unzip_file_path_is_exist(extract_path)
        with py7zr.SevenZipFile(os.path.join(dir_path, file), mode='r') as zip_file:
            zip_file.extractall(extract_path_subset)
        # Process whatever was unpacked (Excel files, nested archives, ...).
        unzip_file(extract_path_subset, extract_path_subset, file, other_url=other_url_temp, flag=True)
    except Exception as e:
        # Best-effort: a corrupt archive must not abort the whole run.
        print(e)


def unzip(dir_path, file, other_url, flag):
    """Extract a .zip archive member-by-member, skipping directory entries
    and doc/pdf files, then recurse into the extracted tree.

    :param dir_path: directory containing the archive
    :param file: archive file name
    :param other_url: FTP path context (see get_file_ftp_path)
    :param flag: whether other_url is already the absolute FTP path
    """
    print(file, other_url)
    # Absolute FTP path recorded with every row from this archive.
    other_url_temp = get_file_ftp_path(dir_path, file, other_url, flag)
    try:
        # Fresh unique extraction directory on the server.
        extract_path_subset = unzip_file_path_is_exist(extract_path)
        os.makedirs(extract_path_subset)
        with zipfile.ZipFile(file=os.path.join(dir_path, file), mode='r') as zf:
            for old_name in zf.namelist():
                name = str(old_name)
                # Skip directory entries (no extension) and non-data documents.
                # The original's '/'-in-name and 'zip'-in-name branches all
                # did exactly the same extract; collapsed into one call.
                if os.path.splitext(name)[-1] == '' or '.doc' in name or '.pdf' in name:
                    continue
                zf.extract(old_name, extract_path_subset)
        unzip_file(extract_path_subset, extract_path_subset, file, other_url=other_url_temp, flag=True)
    except Exception as e:
        # Best-effort: a corrupt archive must not abort the whole run.
        print(e)


def un_rar(dir_path, file, other_url, flag):
    """Extract a .rar archive member-by-member, skipping directory entries
    and doc/docx/pdf files, then recurse into the extracted tree.

    :param dir_path: directory containing the archive
    :param file: archive file name
    :param other_url: FTP path context (see get_file_ftp_path)
    :param flag: whether other_url is already the absolute FTP path
    """
    print(file)
    other_url_temp = get_file_ftp_path(dir_path, file, other_url, flag)
    extract_path_subset = unzip_file_path_is_exist(extract_path)
    os.makedirs(extract_path_subset)
    try:
        with rarfile.RarFile(file=os.path.join(dir_path, file)) as rf:
            for old_name in rf.namelist():
                name = str(old_name)
                # Skip directory entries (no extension) and non-data documents.
                if os.path.splitext(name)[-1] == '' or '.doc' in name or '.docx' in name or '.pdf' in name:
                    continue
                # BUGFIX: the original called rf.extractall() here, which
                # unpacked the ENTIRE archive (skip list included) once per
                # member; extract only the current member instead.
                rf.extract(old_name, extract_path_subset)
        unzip_file(extract_path_subset, extract_path_subset, file, other_url=other_url_temp, flag=True)
    except Exception as e:
        # Best-effort: a corrupt archive must not abort the whole run.
        print(e)


# Resolve the file's absolute FTP path.
def get_file_ftp_path(dir_path, file, other_url, flag):
    """Return the absolute FTP path for *file*.

    When ``flag`` is exactly False the file sits directly under
    ``dir_path``; otherwise it lives below the already-absolute
    ``other_url``.
    """
    base = dir_path if flag is False else other_url
    return os.path.join(base, file)


# Build a unique server-side directory path to extract one archive into.
def unzip_file_path_is_exist(extract_path_para):
    """Return a new, currently non-existent subdirectory path under
    *extract_path_para*, named with a random UUID.

    BUGFIX: the original generated a *different* fresh UUID for the
    existence check than the one it returned (and yet another on a
    "collision"), so the guard never actually protected the returned
    path. Now we re-draw until the candidate itself does not exist.

    :param extract_path_para: base extraction directory on the server
    :return: absolute path of a not-yet-existing subdirectory
    """
    extract_path_subset = os.path.join(extract_path_para, str(uuid.uuid4()))
    while os.path.isdir(extract_path_subset):
        # Astronomically unlikely, but re-draw on a real collision.
        extract_path_subset = os.path.join(extract_path_para, str(uuid.uuid4()))
    return extract_path_subset


def query_field():
    """Fetch the Excel-column → DB-field mapping table.

    :return: (field_list, field_dict) where field_list is the raw Excel
             column names (``field_value``) and field_dict is the full
             mapping rows as dictionaries.
    """
    sql_query_fields = 'select id,field_name,field_value from field_map_relation'
    # Context manager closes the cursor deterministically
    # (the original leaked it).
    with conn.cursor(DictCursor) as cursor:
        cursor.execute(sql_query_fields)
        field_dict = cursor.fetchall()
    field_list = [i.get("field_value") for i in field_dict]
    return field_list, field_dict


# Bulk-load one Excel workbook into MySQL via pandas (one DataFrame per sheet).
def excel_to_sql(path, table_name, conn, other_url, file_name):
    """
    Read every sheet of the Excel file at *path*, keep the columns that
    appear in the field_map_relation mapping, rename them to their DB
    field names, pack all remaining columns into the ``other`` column,
    and append the result to *table_name*.

    :param path: Excel file path
    :param table_name: destination table name
    :param conn: SQLAlchemy engine/connection for DataFrame.to_sql
    :param other_url: FTP path this data came from
    :param file_name: file within that FTP path the data came from
    :return: None
    """
    field_list, field_dict = query_field()
    excel_data = pd.ExcelFile(path)
    for sheet_name in excel_data.sheet_names:
        df = excel_data.parse(sheet_name)
        df = df.dropna(how="all")
        df = df.fillna('')
        # Skip sheets without a proper header row or with fewer than 5 columns.
        if 'Unnamed: 1' in set(df.columns) or 'Unnamed: 2' in set(df.columns) or 'Unnamed: 3' in set(df.columns) or '' in set(df.columns) or len(set(df.columns)) < 5:
            continue
        print("excel插入：开始插入数据" + ";shell:" + sheet_name if sheet_name is not None else '')
        print(" file_name:" + file_name if file_name is not None else '')
        print("工作簿:" + sheet_name + ", 行数和列数:" + str(df.shape))
        # Canonicalise known column-name synonyms first.
        df = columns_update(df)
        # BUGFIX: work on a per-sheet copy — the original consumed the shared
        # field_list across iterations, so every sheet after the first lost
        # its mapped columns.
        sheet_fields = list(field_list)
        intersection_columns = list(set(sheet_fields).intersection(set(df.columns)))
        difference_columns = list(set(df.columns).difference(set(intersection_columns)))
        field_data = df[intersection_columns]
        for columns in field_data.columns:
            if columns in sheet_fields:
                sheet_fields.remove(columns)
        field_data1 = field_data.copy()
        # Any mapped field missing from this sheet is filled with ''.
        for columns in sheet_fields:
            field_data1[columns] = ''
        # Rename Excel column names (field_value) to DB column names (field_name).
        for item in field_dict:
            if item.get("field_value") != '' and item.get("field_value") in field_data1.keys():
                field_data1.rename(columns={item.get("field_value"): item.get("field_name")}, inplace=True)
        field_data1['sheet_name'] = sheet_name
        field_data1['file_name'] = file_name
        field_data1['other_url'] = other_url
        # BUGFIX: the original assigned a fresh pd.DataFrame here, which
        # aligns on index and produced NaN/misplaced values whenever dropna
        # left a non-contiguous index; a plain list assigns positionally.
        field_data1['other'] = [str(i) for i in df[difference_columns].to_dict(orient='records')]
        field_data1.to_sql(name=table_name, con=conn, if_exists='append', index=False)
        print("excel插入：数据插入完成")


# Normalise synonymous column headers (替换列名).
def columns_update(df):
    """Rename synonymous column headers to their canonical form.

    Replaces the original seven near-identical if/for/rename blocks with
    a single alias map; ``DataFrame.rename`` silently ignores keys that
    are not present, which matches the original presence checks. The
    rename is applied in place (as the original ``inplace=True`` calls
    were) and the same DataFrame is returned.

    :param df: sheet DataFrame whose columns may use synonym headers
    :return: the same DataFrame with canonical column names
    """
    aliases = {
        '订单时间': '下单时间',
        '时间点方案1': '时间点',
        '订单编号': '订单号',
        '联想订单号': '订单号',
        '总代理名称': 'T1代理名称',
        '代理': 'T1代理名称',
        '代理名称': 'T1代理名称',
        'T1名称': 'T1代理名称',
        '型号': '产品型号',
        '销量汇总': '产品数量',
        'PO号': '订单PO',
    }
    df.rename(columns=aliases, inplace=True)
    return df


# Insert workbook data row by row (into ftp_total_data1).
def read_file(file_name, path, other_url):
    """Read every sheet of the Excel file at *path* and insert its rows
    one by one into ftp_total_data1, mapping known columns via
    field_map_relation and packing the rest into the ``other`` column.

    :param file_name: source file name (stored with each row)
    :param path: local path of the Excel file
    :param other_url: FTP path this data came from (stored with each row)
    """
    print("other_url:" + str(other_url) if other_url is not None else '')
    print("path:" + path)
    print("file_name:" + file_name)
    conn.ping(reconnect=True)
    field_list, field_dict = query_field()
    try:
        if os.path.isfile(path):
            sheets = pd.read_excel(path, None)
            for sheet in sheets:
                # Skip pivot-table sheets.
                if '透视' in sheet:
                    continue
                df = pd.read_excel(path, sheet)
                # Drop sheets without a proper header row or with < 5 columns.
                if 'Unnamed: 1' in set(df.columns) or 'Unnamed: 2' in set(df.columns) or 'Unnamed: 3' in set(
                        df.columns) or '' in set(df.columns) or len(set(df.columns)) < 5:
                    continue
                print("工作簿:" + sheet + ", 行数和列数:" + str(df.shape))
                # Drop all-empty rows, blank out remaining NaNs.
                df = df.dropna(how="all")
                df = df.fillna('')
                # Canonicalise known column-name synonyms.
                df = columns_update(df)
                df_dict = df.T.to_dict().values()
                print("表数据开始插入")
                # BUGFIX: the original opened a new cursor per row and called
                # conn.close() after the FIRST row, killing the shared
                # connection for every subsequent insert. Use one cursor per
                # sheet and never close the module-level connection here.
                cursor = conn.cursor()
                try:
                    for row in df_dict:
                        new_row = dict({"other": []})
                        for key, value in row.items():
                            if key in field_list:
                                new_row[key] = str(value)
                            else:
                                # Unmapped columns accumulate in 'other'.
                                new_row.get("other").append({key: value})
                        # Nothing usable in this row at all.
                        if len(new_row) == 1 and len(new_row['other']) == 0:
                            continue
                        # Map Excel column names to DB field names.
                        for i in field_dict:
                            if i.get("field_value") != '' and i.get("field_value") in new_row.keys():
                                new_row[i.get("field_name")] = str(new_row.pop(i.get("field_value")))
                        new_row['sheet_name'] = sheet
                        new_row["file_name"] = file_name
                        new_row['other_url'] = other_url
                        new_row['other'] = str(new_row.get("other"))
                        sql = insert_sql('ftp_total_data1', new_row)
                        cursor.execute(sql)
                        conn.commit()
                finally:
                    cursor.close()
                print("表数据插入表成功")
    except Exception as e:
        # Best-effort: one bad workbook must not abort the whole run.
        print(e)


def insert_sql(table_name, item):
    """Build an INSERT statement for *item*, skipping None values.

    NOTE(review): values are inlined via repr() — this is not safe
    against SQL injection if any value contains untrusted text;
    parameterized queries would be preferable at the call sites.

    :param table_name: destination table name
    :param item: column-name → value mapping
    :return: the complete INSERT ... VALUES ...; statement as a string
    """
    pairs = [(key, val) for key, val in item.items() if val is not None]
    columns = ','.join(key for key, _ in pairs)
    values = ','.join(repr(val) for _, val in pairs)
    return 'INSERT INTO %s (' % table_name + columns + ') VALUES (' + values + ');'


def query_other_url():
    """Query SQL Server for the distinct FTP paths of settlement files
    (archives and Excel files) and return them as a list of strings.

    :return: list of OtherURL strings from RBSettlementApply
    """
    sql = "SELECT DISTINCT CONVERT(nvarchar(2000), OtherURL) OtherURL FROM RBSettlementApply where OtherURL " \
          "like '%.7z' or OtherURL like '%.rar' or OtherURL like '%.zip' or OtherURL like '%.xlsx' or OtherURL like '%.xls' and totalamount>0"
    # NOTE(review): SQL AND binds tighter than OR, so "totalamount>0" only
    # constrains the '.xls' predicate — confirm whether the LIKE chain should
    # be parenthesized. Query text left untouched to preserve behavior.
    print(sql)
    conn = pymssql.connect(host='10.99.60.212', user='i_icac_read', password='sA3#kb15', database='RMS')
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql)
            other_url_list = [row[0] for row in cur.fetchall()]
        finally:
            cur.close()
    finally:
        # Always release the connection, even when the query fails
        # (the original leaked it on error).
        conn.close()
    print("总共的行数：" + str(len(other_url_list)))
    return other_url_list



def test_unzip_file():
    """Entry point: reset the extraction directory and the target table,
    then process every file referenced in the settlement table.
    """
    # Start from an empty extraction directory. The original checked
    # os.path.exists twice and relied on the second check's else-branch
    # to recreate the directory; this is the straightforward form.
    if os.path.exists(extract_path):
        shutil.rmtree(extract_path)
    os.makedirs(extract_path)
    # Empty the destination table before reloading.
    sql = 'truncate table ' + table_name + ";"
    cursor = conn.cursor()
    cursor.execute(sql)
    conn.commit()
    cursor.close()
    # BUGFIX: the original called conn.close() here, closing the shared
    # module-level connection that query_field()/the insert path still
    # need later in this very run. Keep the connection open.
    other_url_list = query_other_url()
    unzip_rb_file(other_url_list, None, None, None, False)


# Guard the entry point so importing this module does not trigger the run.
if __name__ == "__main__":
    test_unzip_file()