# -*- encoding: utf-8 -*-
"""
@File    : demo1.py
@Time    : 2021/8/27 13:38
@Author  : zhouys4
"""
import pandas as pd
import zipfile, rarfile, py7zr
import os
from utils.pack_mysql import MySqLHelper
from sqlalchemy import create_engine
from script.field_mapping import columns_update, file_check

# Shared MySQL helper; used by the (currently commented-out) insert statements below.
db = MySqLHelper()
# Extensions treated as readable spreadsheet files inside archives.
LI = ['xls', 'csv', 'xlsx']
# Module-level buckets populated by get_zip_file() while walking a directory tree.
zip_list = []
rar_list = []
seven_z_list = []
xls_list = []
csv_list = []
other_list = []   # anything with an unrecognised extension
xlsb_list = []
# Year tag; referenced by the commented-out DB inserts and path construction below.
year = '2016'


# Classify every file under a directory tree by its extension.
def get_zip_file(path_sss):
    """Walk *path_sss* recursively and bucket file paths by extension.

    :param path_sss: root directory to scan
    :return: six lists of full paths, in order:
             (.zip, .rar, .7z, .xls/.xlsx, .csv, .xlsb)
    """
    # Extension -> bucket key; .xls and .xlsx have always shared one bucket.
    ext_to_bucket = {
        '.zip': 'zip', '.rar': 'rar', '.7z': '7z',
        '.xls': 'xls', '.xlsx': 'xls', '.csv': 'csv', '.xlsb': 'xlsb',
    }
    # Fresh local buckets on every call: the original accumulated into
    # module-level lists, so a second invocation duplicated every entry.
    found = {'zip': [], 'rar': [], '7z': [], 'xls': [], 'csv': [], 'xlsb': []}
    others = []  # unrecognised extensions (previously module-level other_list)
    for root, _dirs, files in os.walk(path_sss):
        for file in files:
            # Compute the lowered extension once per file instead of up to
            # six times as in the original if/elif chain.
            ext = os.path.splitext(file)[1].lower()
            full_path = os.path.join(root, file)
            bucket = ext_to_bucket.get(ext)
            if bucket is not None:
                found[bucket].append(full_path)
            else:
                others.append(full_path)
    return (found['zip'], found['rar'], found['7z'],
            found['xls'], found['csv'], found['xlsb'])


# Read through the members of a .zip archive.
class MZipFile(object):
    """Wrap :mod:`zipfile` to enumerate an archive's members and parse the
    spreadsheet files it contains with pandas.
    """

    # Extensions treated as readable spreadsheets (same values as the
    # module-level LI list; kept here so the class is self-contained).
    READABLE_EXTS = ('xls', 'csv', 'xlsx')

    def __init__(self, zip_path):
        """
        :param zip_path: path of the .zip file to open (read-only)
        """
        self.zip = zipfile.ZipFile(zip_path, 'r')

    def get_filecount(self):
        """
        :return: number of members inside the archive
        """
        return len(self.zip.namelist())

    def get_one_file(self):
        """
        :return: generator yielding read_lines(...) for each spreadsheet member
        """
        for name in self.zip.namelist():
            if name.split('.')[-1] in self.READABLE_EXTS:
                yield self.read_lines(name)

    def read_lines(self, name):
        """Parse every sheet of the spreadsheet member *name* and print its
        column names; prints a placeholder when a sheet has more than 100 rows.

        :param name: member name inside the archive
        """
        demo_excel = pd.ExcelFile(self.zip.open(name))
        for sheet in demo_excel.sheet_names:
            # The original indexed sheet_names by its own .index() result,
            # which is just the sheet name itself.
            df = demo_excel.parse(sheet_name=sheet, header=1)
            columns_list = df.columns.tolist()
            print(columns_list)
            if df.shape[0] > 100:
                # Placeholder: required-column check not yet implemented.
                print('a')

    def get_filenames(self):
        """
        :return: list of all member names inside the archive
        """
        return self.zip.namelist()

    def extract_to(self, path):
        """Extract the whole archive.

        :param path: destination directory
        :return: the destination directory
        """
        self.zip.extractall(path)
        return path


# Read through the members of a .rar archive.
class MRARFile(object):
    """Wrap :mod:`rarfile` to enumerate an archive's members and parse the
    spreadsheet files it contains with pandas (mirror of MZipFile).
    """

    # Extensions treated as readable spreadsheets (same values as the
    # module-level LI list; kept here so the class is self-contained).
    READABLE_EXTS = ('xls', 'csv', 'xlsx')

    def __init__(self, zip_path):
        """
        :param zip_path: path of the .rar file to open (read-only)
        """
        self.zip = rarfile.RarFile(zip_path, 'r')

    def get_filecount(self):
        """
        :return: number of members inside the archive
        """
        return len(self.zip.namelist())

    def get_one_file(self):
        """
        :return: generator yielding read_lines(...) for each spreadsheet member
        """
        for name in self.zip.namelist():
            if name.split('.')[-1] in self.READABLE_EXTS:
                yield self.read_lines(name)

    def read_lines(self, name):
        """Parse every sheet of the spreadsheet member *name* and print its
        column names; prints a placeholder when a sheet has more than 100 rows.

        :param name: member name inside the archive
        """
        demo_excel = pd.ExcelFile(self.zip.open(name))
        for sheet in demo_excel.sheet_names:
            # The original indexed sheet_names by its own .index() result,
            # which is just the sheet name itself.
            df = demo_excel.parse(sheet_name=sheet, header=1)
            columns_list = df.columns.tolist()
            print(columns_list)
            if df.shape[0] > 100:
                # Placeholder: required-column check not yet implemented.
                print('a')

    def get_filenames(self):
        """
        :return: list of all member names inside the archive
        """
        return self.zip.namelist()

    def extract_to(self, path):
        """Extract the whole archive.

        :param path: destination directory
        :return: the destination directory
        """
        self.zip.extractall(path)
        return path


# Inspect the member list of a .7z archive.
def msevenfile(path):
    """Open the 7z archive at *path* and print its member names.

    :param path: path to a .7z file
    """
    # The original also opened an unrelated hard-coded archive
    # (G:\tmp\ftp_file_2016\...AIO.7z) into an unused variable, which
    # crashed on any machine without that exact file; removed.
    with py7zr.SevenZipFile(path, mode='r') as archive:
        # SevenZipFile is not itself iterable — the original
        # ``for f in archive`` raised TypeError; use getnames().
        names = archive.getnames()
        print(names)
        for member in names:
            print(member)


# Read .xls/.xlsx files with pandas and stage validated sheets for DB insertion.
def pandas_xls():
    """Scan the working directory for Excel files and validate each sheet.

    Sheets with more than 100 rows and more than 8 columns whose headers pass
    ``file_check`` are normalised via ``columns_update``, tagged with source
    metadata and printed (the MySQL writes are currently disabled).
    """
    # NOTE(review): the production path appears to be r'G:\tmp\ftp_file_' + year;
    # a test directory is currently hard-coded — confirm before deploying.
    path = r'G:\tmp\test'
    zip_list, rar_list, seven_z_list, xls_list, csv_list, xlsb_list = get_zip_file(path)
    print(len(zip_list), len(rar_list), len(seven_z_list), len(xls_list), len(csv_list), len(xlsb_list))
    list_name = xls_list

    # enumerate() replaces list.index(), which was O(n) per iteration and
    # reports the wrong position when the same path occurs twice.
    for idx, file_name in enumerate(list_name):
        print("文件个数：", str(len(list_name)), "当前第", str(idx))
        other_url = file_name.split('\\')[-1]  # bare file name from the Windows path
        df1 = pd.ExcelFile(file_name)
        for i in range(len(df1.sheet_names)):
            try:
                # First pass discovers the headers; second pass re-reads with
                # every column forced to str so mixed-type cells survive intact.
                file_list = list(pd.read_excel(file_name, sheet_name=i))
                converters_dict = {fi: str for fi in file_list}
                df2 = pd.read_excel(file_name, sheet_name=i, converters=converters_dict)
                df = df2.dropna(axis=0, how='all')  # drop fully-empty rows
                sheet_name = df1.sheet_names[i]
                if df.shape[0] > 100 and df.shape[1] > 8:
                    aaa = file_check(df)
                    # '1111111111' presumably means all ten required fields
                    # were found — TODO confirm against file_check.
                    if aaa == '1111111111':
                        columns_update(df)
                        df['other_url'] = other_url
                        df['file_name'] = other_url
                        df['sheet_name'] = sheet_name
                        try:
                            # NOTE(review): 'order_date' is selected twice —
                            # one occurrence is likely meant to be a different
                            # field; kept as-is to preserve output shape.
                            df3 = df[['agent', 'material', 'order_date', 'order_number', 'total_prices', 'number',
                                      'material_describe', 'order_date', 'agent_name', 'other_url', 'file_name',
                                      'sheet_name']]
                            print(df3)
                            # (to_sql into total_data / write_success_record insert intentionally disabled)
                        except KeyError as key_error:
                            print("KeyError", key_error)
                            # (write_error_record insert intentionally disabled)
                    else:
                        print("2")
                        # (write_error_record insert intentionally disabled)
                else:
                    print('0')
                    # (write_error_record insert for undersized sheets intentionally disabled)
            except ValueError as e:
                print('ValueError', e)
            except OverflowError as e:
                print('OverflowError', e)


# Read .csv files with pandas (validation/DB stage not yet implemented).
def pandas_csv():
    """Scan the working directory for CSV files and load each one with every
    column forced to str.

    Mirrors :func:`pandas_xls`; the ``file_check``/``columns_update``/DB-write
    stage for CSV input is still unimplemented, so this currently only loads
    and cleans the frames.
    """
    # NOTE(review): the production path appears to be r'G:\tmp\ftp_file_' + year;
    # a test directory is currently hard-coded — confirm before deploying.
    path = r'G:\tmp\test'
    zip_list, rar_list, seven_z_list, xls_list, csv_list, xlsb_list = get_zip_file(path)
    print(len(zip_list), len(rar_list), len(seven_z_list), len(xls_list), len(csv_list), len(xlsb_list))
    list_name = csv_list

    # enumerate() replaces list.index(), which was O(n) per iteration and
    # reports the wrong position when the same path occurs twice.
    for idx, file_name in enumerate(list_name):
        print("文件个数：", str(len(list_name)), "当前第", str(idx))
        other_url = file_name.split('\\')[-1]  # bare file name from the Windows path
        # First pass discovers the headers (files are read as GBK with the
        # header on the second row).
        file_list = pd.read_csv(file_name, encoding='gbk', header=1)
        print("file_name", file_list)
        converters_dict = {fi: str for fi in file_list}
        # The second read must use the same encoding/header as the first:
        # the original omitted them, so GBK files raised UnicodeDecodeError
        # and the converter keys did not match the real header row.
        df2 = pd.read_csv(file_name, encoding='gbk', header=1, converters=converters_dict)
        df = df2.dropna(axis=0, how='all')  # drop fully-empty rows
        # TODO: validate with file_check, normalise with columns_update and
        # write to the DB, as pandas_xls does for Excel input.


if __name__ == "__main__":
    # Earlier manual experiments (rar / 7z / zip inspection) kept for reference:
    # path = r'G:\tmp\ftp_file_2016\00d8275f-68c3-40c9-8d40-970a17df97cd结算.rar'
    # zip = MRARFile(zip_path=path)
    # print("文件个数:", zip.get_filecount())
    # print("文件名列表:", zip.get_filenames())
    # list(zip.get_one_file())
    # path = r'G:\tmp\ftp_file_2016\00abd6a7-01b7-437b-9985-83541758a650FJ-ZY-SY-15-008 AIO.7z'
    # zip = msevenfile(path=path)
    # print("文件个数:", zip.get_filecount(path))
    # print("文件名列表:", zip.get_filenames())
    # list(zip.get_one_file())
    # path = r'G:\tmp\ftp_file_2016'
    # get_zip_file(path)
    # zip = MZipFile(zip_path=path)
    # list(zip.get_one_file())
    # Entry point: currently runs only the CSV ingestion pass.
    pandas_csv()
    # start()
