# -*- coding: utf-8 -*-
'''
@Time : 2021/4/30 16:00
@description:
@Author : yin
@Email : yylai24@163.com
@File : Check.py
@Project : check_data
'''
import pandas as pd
import configparser
import numpy as np
import os
import re
from tqdm import tqdm

# Module-wide configuration. "config.ini" is resolved against the current
# working directory and must provide the [path] section (allInfoTablePath,
# infoTablesDir, resultPath) read by Check.__init__. Note: config.read()
# silently returns an empty config when the file is missing; Check.__init__
# will then raise KeyError on the [path] section.
config = configparser.ConfigParser()
config.read("config.ini",encoding='utf-8')

class Check:
    """Cross-check enrollment sub tables against the master enrollment tables.

    Every workbook is normalised to three columns -- ``name`` (姓名),
    ``user_id`` (身份证号) and ``pro_name`` (录取专业) -- and rows that appear
    in a sub table but not in the master data (or vice versa) are written out
    as error reports.
    """

    # Strips a leading run of digits from a major name, e.g. "01体育教育" ->
    # "体育教育".  (The old pattern "[0-9]{0,}(.*?)" only ever captured empty
    # strings, so every major name was blanked out — this is the bug fix.)
    _LEADING_DIGITS = re.compile(r"^[0-9]+")
    # Header variants, in priority order, that may hold the admitted-major
    # column in a master workbook sheet.
    _PRO_COLUMNS = ('录取专业', '录取（系科）专业', '录取专业名称', '专业')

    def __init__(self):
        # Paths come from the [path] section of config.ini (module-level
        # `config`); KeyError here means the ini file is missing/incomplete.
        self.allInformationPath = config["path"]["allInfoTablePath"]  # master tables dir
        self.infoTablesDir = config["path"]["infoTablesDir"]  # sub-table root dir
        self.resultPath = config["path"]["resultPath"]  # result output dir
        # Kept for backward compatibility with the old attribute name.
        self.com = self._LEADING_DIGITS

    def deal_num(self, data):
        """Return *data* with any leading run of digits removed.

        :param data: major name string, possibly prefixed with numbering
        :return: the string without its leading digits
        """
        return self._LEADING_DIGITS.sub("", data)

    def get_all_info(self, path):
        """
        Load and concatenate every master table under *path*.

        :param path: directory containing the master tables
        :return: DataFrame with columns name / user_id / pro_name
        """
        # 运动训练 workbooks use a different layout and need special parsing.
        # The extension dot is escaped; the old code also built the path from
        # match.group(0), which truncated ".xlsx" names to ".xls" and produced
        # a non-existent file path — we now always join the real filename.
        rgx = r"(.*?)运动训练(.*?)\.xls"
        full_info = pd.DataFrame(columns=["name", "user_id", "pro_name"])
        for fname in os.listdir(path):
            data_path = os.path.join(path, fname)
            if re.match(rgx, fname):
                part_data = self.get_not_normal_data(data_path)
            else:
                part_data = self.get_normal_data(data_path)
            full_info = pd.concat([full_info, part_data], ignore_index=True, axis=0)
        # Normalisation: strip blanks from names, upper-case id letters (the
        # trailing X of some id numbers), drop leading numbering from majors.
        full_info['name'] = full_info['name'].apply(lambda x: x.replace(" ", ""))
        full_info['user_id'] = full_info['user_id'].astype(str).str.upper()
        full_info['pro_name'] = full_info['pro_name'].apply(self.deal_num)
        full_info['pro_name'] = full_info['pro_name'].str.replace(" ", "", regex=False)
        return full_info

    def get_normal_data(self, path):
        """
        Extract name / id / major from every sheet of a regular master workbook.

        Sheets with none of the known major columns are skipped; sheets
        without a 身份证号 column get a single-space placeholder user_id.

        :param path: workbook path
        :return: DataFrame with columns name / user_id / pro_name
        """
        sheet_names = list(pd.read_excel(path, sheet_name=None))
        data = pd.DataFrame(columns=["name", "user_id", "pro_name"])
        for sheet in sheet_names:
            # Probe the header first, then re-read with string converters so
            # id numbers are not mangled into floats / scientific notation.
            header = pd.read_excel(path, sheet_name=sheet)
            pro_col = next((c for c in self._PRO_COLUMNS if c in header.columns), None)
            if pro_col is None:
                continue  # sheet carries no major column -> nothing to check
            if '身份证号' in header.columns:
                sheet_data = pd.read_excel(path, sheet_name=sheet,
                                           converters={"身份证号": str, "姓名": str})
                sheet_data.rename(columns={'姓名': "name", '身份证号': "user_id",
                                           pro_col: "pro_name"}, inplace=True)
            else:
                sheet_data = pd.read_excel(path, sheet_name=sheet, converters={"姓名": str})
                sheet_data['user_id'] = " "
                sheet_data.rename(columns={'姓名': "name", pro_col: "pro_name"}, inplace=True)
            # Drop rows without a name (blank spacer / total rows).
            sheet_data.dropna(subset=['name'], inplace=True)
            part_data = sheet_data.loc[:, ["name", "user_id", "pro_name"]]
            data = pd.concat([data, part_data], ignore_index=True, axis=0)
        return data

    def get_not_normal_data(self, path):
        """
        Extract name / id / major from a 运动训练 workbook, whose sheets have
        two banner rows above the real header and an index column.

        :param path: workbook path
        :return: DataFrame with columns name / user_id / pro_name
        """
        sheet_names = list(pd.read_excel(path, sheet_name=None))
        data = pd.DataFrame(columns=["name", "user_id", "pro_name"])
        for sheet in sheet_names:
            sheet_data = pd.read_excel(path, sheet_name=sheet, skiprows=[0, 1],
                                       header=0, index_col=0,
                                       converters={"身份证号": str})
            sheet_data.rename(columns={'身份证号': "user_id", '姓名': "name",
                                       '专业': "pro_name"}, inplace=True)
            if "user_id" in sheet_data.columns:
                sheet_data.dropna(subset=['name'], axis=0, inplace=True)
            else:
                # No id-card column in this sheet: fill user_id with NaN so the
                # dropna below discards every row (these rows cannot be matched
                # by id).  The old code achieved this through a KeyError hack
                # plus a redundant second read of the file.
                sheet_data["user_id"] = np.nan
            part = sheet_data.loc[:, ['user_id', 'name', 'pro_name']]
            part.dropna(axis=0, inplace=True)
            data = pd.concat([data, part], ignore_index=True, axis=0)
        return data

    def _normalize_identity(self, data):
        """Rename the name / id-card columns of *data* in place.

        Sheets with a 出生日期 column carry no id number at all, so user_id is
        filled with a single-space placeholder there.
        """
        if "出生日期" in data.columns:
            data.drop(columns=['出生日期'], inplace=True)
            data.rename(columns={"姓名": "name"}, inplace=True)
            data['user_id'] = " "
        elif "身份证号" in data.columns:
            data.rename(columns={"姓名": "name", "身份证号": "user_id"}, inplace=True)
        elif "身份证" in data.columns:
            data.rename(columns={"姓名": "name", "身份证": "user_id"}, inplace=True)
        else:
            data.rename(columns={"姓名": "name", "身份": "user_id"}, inplace=True)

    def check_data(self, path):
        """
        Read one sub table and normalise it to name / user_id / pro_name.

        Sub tables come in several layouts (different major-column headers,
        major written in the first header cell, or in the last data row), so
        the branches below sniff the layout before renaming.

        :param path: sub-table path
        :return: DataFrame holding the records to verify
        """
        try:
            data = pd.read_excel(path, converters={"身份证号": str})
        except Exception:
            # Some layouts have no 身份证号 header at all.
            data = pd.read_excel(path)
        major_headers = ("录取（系科）专业", "录取专业名称", "所学专业", "进修专业名称")
        if any(col in data.columns for col in major_headers):
            # Layout 1: a recognised major column exists in the header row.
            # NOTE(review): dropna(axis=1) drops any column containing an
            # empty cell, which can remove the major column itself — kept
            # from the original logic; confirm intended.
            data.dropna(axis=1, inplace=True)
            self._normalize_identity(data)
            for col in major_headers:
                if col in data.columns:
                    data.rename(columns={col: "pro_name"}, inplace=True)
                    break
            else:
                # Mirror the old code's unconditional final fallback rename.
                data.rename(columns={'进修专业名称': "pro_name"}, inplace=True)
        elif "毕业专业" in data.columns:
            # Layout 2: graduation-major table — the major is not comparable,
            # so pro_name is left as NaN and matching falls back to name+id.
            data.dropna(axis=1, inplace=True)
            self._normalize_identity(data)
            data.drop(columns=['毕业专业'], inplace=True)
            data['pro_name'] = np.nan
        else:
            if "姓名" in data.columns:
                # Layout 3: one major per sheet, written in the last data row.
                for col in list(data.columns):
                    if data[col].isnull().all():
                        data.drop(columns=col, inplace=True)
                if "身份证号" in data.columns:
                    data.rename(columns={"姓名": "name", "身份证号": "user_id"}, inplace=True)
                elif "身份证" in data.columns:
                    data.rename(columns={"姓名": "name", "身份证": "user_id"}, inplace=True)
                elif "身份" in data.columns:
                    data.rename(columns={"姓名": "name", "身份": "user_id"}, inplace=True)
                else:
                    data.rename(columns={"姓名": "name"}, inplace=True)
                    data['user_id'] = " "
                # Remove blank-name rows, then pull the major out of the last
                # remaining row and drop it.
                data.dropna(subset=['name'], inplace=True)
                pro = data.iloc[-1, 0]
                data.drop(index=data.tail(1).index.tolist(), inplace=True)
                # Assumes the cell reads like "录取专业：XXX" (5-char label
                # prefix) — TODO confirm against the source workbooks.
                data['pro_name'] = pro[5:]
            else:
                # Layout 4: major sits in the first header cell; the real
                # header starts two rows down.  Same 5-char-prefix assumption.
                pro_name = data.columns[0][5:]
                try:
                    data = pd.read_excel(path, skiprows=[0, 1], header=0,
                                         converters={"身份证号": str})
                except Exception:
                    data = pd.read_excel(path)
                data.dropna(axis=1, inplace=True)
                self._normalize_identity(data)
                data['pro_name'] = pro_name
        # Final normalisation shared by all layouts.
        data['name'] = data['name'].apply(lambda x: x.replace(" ", ""))
        data['user_id'] = data['user_id'].astype(str).apply(lambda x: x.replace(" ", "").upper())
        try:
            data['pro_name'] = data['pro_name'].apply(lambda x: x.replace(" ", ""))
        except Exception:
            # pro_name may be NaN (layout 2); leave it untouched.
            pass
        return data

    def checkout_error(self, full_data, check_data):
        """
        Match *check_data* rows against *full_data* and return the rows that
        disagree (each tagged with a flag column naming its origin).

        NOTE: mutates both arguments — matched rows are removed from
        *full_data*, and flag/user_id columns are rewritten on *check_data*.

        :param full_data: master-table data
        :param check_data: sub-table data
        :return: DataFrame of mismatching rows
        """
        # Anything shorter than 10 characters cannot be an id-card number;
        # treat it as missing.
        check_data['user_id'] = check_data['user_id'].astype(str).apply(
            lambda x: x.replace(" ", "") if len(x) >= 10 else np.nan)
        has_missing_id = check_data['user_id'].isnull().any()
        has_missing_pro = check_data['pro_name'].isnull().any()
        if not has_missing_id:
            # Match on id-card number; pull the candidate rows out of the
            # master data and remove them there so later tables cannot
            # re-match the same students.
            user_id_list = check_data['user_id'].tolist()
            full_data_part = full_data[full_data['user_id'].isin(user_id_list)].copy()
            full_data.drop(index=full_data.loc[full_data['user_id'].isin(user_id_list), :].index.tolist(),
                           axis=0, inplace=True)
            full_data_part['flag'] = '总表数据'
            check_data['flag'] = '检测表内的数据'
            check_part_data = pd.concat([check_data, full_data_part], axis=0, ignore_index=True)
            # Rows present in both sources cancel out (keep=False); when the
            # majors are unreliable (some missing), compare on name+id only.
            subset = ['name', 'pro_name', 'user_id'] if not has_missing_pro else ['name', 'user_id']
            check_part_data.drop_duplicates(subset=subset, keep=False, inplace=True)
        else:
            # No usable id numbers: fall back to matching on name + major.
            # (Students without id cards keep their rows in full_data.)
            names_list = check_data['name'].tolist()
            full_part_data_name = full_data[full_data['name'].isin(names_list)].copy()
            full_part_data_name['flag'] = '总表数据'
            check_data['flag'] = '检测表内的数据'
            check_part_data = pd.concat([check_data, full_part_data_name], axis=0, ignore_index=True)
            check_part_data.drop_duplicates(subset=['name', 'pro_name'], keep=False, inplace=True)
        return check_part_data

    def save_error_data(self, path, data):
        """
        Write the mismatch rows to *path* as an Excel file.

        Nothing is written when *data* is empty or when every remaining row
        came from the master table (i.e. the sub table itself had no errors).

        :param path: output file path
        :param data: result of checkout_error
        """
        if data.empty:
            return
        data = data.sort_values(by=['name', 'pro_name'])
        master_rows = data.loc[data['flag'] == '总表数据', :].shape[0]
        if data.shape[0] == master_rows:
            return  # only leftover master rows -> nothing to report
        data.rename(columns={"name": "姓名", "user_id": "身份证", "pro_name": "专业"}, inplace=True)
        # Drop columns that carry no information at all.
        for col in list(data.columns):
            if data.loc[:, col].isnull().all():
                data.drop(columns=col, inplace=True)
        data.to_excel(path, index=False)

    def run(self):
        """Entry point: load master data, check every sub table, save reports."""
        def get_all_excel(rootdir):
            """Collect every .xlsx file path below *rootdir*."""
            excel_path_list = []
            for root, _dirs, files in os.walk(rootdir):
                for file in files:
                    if file.endswith(".xlsx"):
                        excel_path_list.append(os.path.join(root, file))
            return excel_path_list

        if not os.path.exists(self.resultPath):
            os.makedirs(self.resultPath)
        part_excel_path = get_all_excel(self.infoTablesDir)
        full_data = self.get_all_info(self.allInformationPath)  # master data
        for excel_file_path in tqdm(part_excel_path):
            try:
                base = os.path.basename(excel_file_path).split('.')[0]
                save_excel_path = os.path.join(self.resultPath, base + '_ERROR.xlsx')
                orc_data = self.check_data(excel_file_path)
                check_error = self.checkout_error(full_data, orc_data)
                self.save_error_data(save_excel_path, check_error)
            except Exception as e:
                # Log and continue: one malformed workbook must not abort the
                # whole run.
                with open(os.path.join(self.resultPath, "error.log"), 'a+', encoding="utf-8") as tf:
                    tf.write(str(e) + "\t" + excel_file_path + "\n")


