import copy
import re
import logging
from tqdm import tqdm
from service.catboost_model.data_outlier_process import AbnormalDataFilter
from typing import Dict, List, Tuple

import pandas as pd
from pymongo import MongoClient


class DataProcessing:
    def __init__(self, mongo_url: str, database_name: str, tenant_id_list: list):
        """
        :param mongo_url:
        :param database_name:
        :param tenant_id_list:
        :param replace_dict:
        """
        self.mongo_uri = mongo_url
        self.database_name = database_name
        self.data = pd.DataFrame()
        self.tenant_id_list = tenant_id_list
        self.client = self.connect_mongodb()
        self.db = self.client[self.database_name]
        self.pattern = re.compile(r'\([^)]*\)')

    # ================================== mongodb 数据库 ==================================
    def connect_mongodb(self):
        """
        连接mongodb数据库
        """
        client = MongoClient(self.mongo_uri)
        return client

    def close_mongodb(self):
        """
        关闭连接
        """
        self.client.close()

    def get_dictionary_map(self, dictionary_name: str) -> Dict | None:
        """
        从 MongoDB 中获取字典映射
        :param dictionary_name:
        """
        collection = self.db[dictionary_name]
        cursor = collection.find({}, {"_id": 0, "code": 1, "name": 1})
        data = pd.DataFrame(list(cursor))
        if data.empty:
            return None
        else:
            collect_key = data.set_index('code').to_dict()['name']
            for item in collect_key:
                collect_key[item] = self.pattern.sub('', collect_key[item])
                if collect_key[item] == "SO₃":
                    collect_key[item] = "SO3"
                if collect_key[item] == "烧失量":
                    collect_key[item] = "Loss"
                if collect_key[item] == "比表面积":
                    collect_key[item] = "比表"
            return collect_key

    def get_master_data(self, collection_name: str) -> List | None:
        """
        获取对应工厂下的主数据
        """
        collection = self.db[collection_name]
        cursor = collection.find({}, {"_id": 0})
        result = list(cursor)
        if len(result) == 0:
            return None
        else:
            return result

    def deal_dcs_feedback_df(self, df: pd.Series):
        """
        处理DCS反馈数据和搭配比例
        :param df:
        :return:
        """
        expanded_df = pd.json_normalize(df['feedbackIngredient'])
        if df['admixtureRatio'] != {}:
            all_keys = list(df['admixtureRatio'].keys())
            exist_keys = list(df['feedbackIngredient'].keys())
            not_exist_keys = list(set(all_keys) - set(exist_keys))
            for not_item in not_exist_keys:
                del df['admixtureRatio'][not_item]
            all_keys = list(df['admixtureRatio'].keys())
            expanded_df = expanded_df.drop(columns=all_keys)
            for key, value in df['admixtureRatio'].items():
                sum_num = df['feedbackIngredient'][key]
                deno = sum(value.values())
                for k, v in value.items():
                    if k in expanded_df.columns:
                        expanded_df[k] += sum_num * v / deno
                    else:
                        expanded_df[k] = sum_num * v / deno
        if df['gypRatio'] != {} and "石膏" in list(df['feedbackIngredient'].keys()):
            expanded_df = expanded_df.drop(columns=['石膏'])
            sum_num = df['feedbackIngredient']['石膏']
            deno = sum(df['gypRatio'].values())
            for k, v in df['gypRatio'].items():
                if k in expanded_df.columns:
                    expanded_df[k] += sum_num * v / deno
                else:
                    expanded_df[k] = sum_num * v / deno
        return expanded_df.iloc[0]

    def deal_data_for_model(self, data_df: pd.DataFrame, tenant_id: str) -> pd.DataFrame:
        """
        将数据处理成模型训练的格式
        :param data_df:
        :param tenant_id:
        """
        new_df = pd.DataFrame()
        # 处理DCS反馈数据
        expanded_df = data_df.apply(self.deal_dcs_feedback_df, axis=1)
        # expanded_df = self.deal_dcs_feedback_df(data_df.loc[827])
        expanded_df = expanded_df.add_prefix(f"DCS反馈配比平均值-")
        new_df = pd.concat([new_df, expanded_df], axis=1)
        # 处理过程质量数据
        expanded_df = data_df['processQuality'].apply(pd.Series)
        expanded_df = expanded_df.add_prefix(f"过程质量平均值-")
        new_df = pd.concat([new_df, expanded_df], axis=1)
        # 处理化学分析数据
        expanded_df = data_df['materialAnalysis'].apply(pd.Series)
        origin_name_list = expanded_df.columns.tolist()
        for item in origin_name_list:
            expanded_df_1 = expanded_df[item].apply(pd.Series)
            expanded_df_1.dropna(axis=1, how='all', inplace=True)
            expanded_df_1 = expanded_df_1.add_prefix(f"{item}_new_")
            expanded_df = pd.concat([expanded_df, expanded_df_1], axis=1)
        expanded_df.drop(columns=origin_name_list, errors='ignore', inplace=True)
        new_df = pd.concat([new_df, expanded_df], axis=1)
        new_df = new_df.loc[:, [col for col in new_df.columns if "水分" not in col]]
        new_df = new_df.loc[:, [col for col in new_df.columns if "Loss" not in col]]
        try:
            new_df = pd.concat([new_df, data_df[
                ["productCode", "millCode", "productionDate", "clinkerStrengthPredictionActual3d",
                 "clinkerStrengthPredictionActual28d", "checkStrength1d", "checkStrength3d", "checkStrength28d"]]],
                               axis=1)
            new_df.rename(columns={
                "productCode": "品种",
                "millCode": "磨号",
                "productionDate": "时间",
                "checkStrength1d": '水泥1天实测值',
                "checkStrength3d": '水泥3天实测值',
                "checkStrength28d": '水泥28天实测值',
                "clinkerStrengthPredictionActual3d": "熟料3天强度预测",
                "clinkerStrengthPredictionActual28d": "熟料28天强度预测",
            }, inplace=True)
            new_df['tenant_id'] = tenant_id
            return new_df
        except Exception as e:
            print("错误原因：{}，错误工厂：{}".format(e, tenant_id))


    def read_from_mongo_and_deal_data(self) -> pd.DataFrame:
        """
        获取每个工厂的数据和编码字典，在该过程中进行数据的处理
        """
        for tenant_id in tqdm(self.tenant_id_list):
            if tenant_id[0] == 134 or tenant_id[0] == 214:
                continue
            collection_name = "QUALITY_INSPECTION_AVERAGE_{}".format(str(tenant_id[0]))
            dictionary_name = "DICTIONARY_{}".format(str(tenant_id[0]))
            factory_dictionary = self.get_dictionary_map(dictionary_name)
            if factory_dictionary is None:
                logging.warning("==注意：{}映射表下的内容为空，已经跳过该工厂的数据==".format(tenant_id))
                continue
            factory_data = self.get_master_data(collection_name)
            if factory_data is None:
                logging.warning("==注意：{}该表中的数据为空，已经跳过该工厂的数据==".format(tenant_id))
                continue
            # 数据处理
            factory_data_list = self.transform_ci_to_nlp_and_convert_str_to_int_in_train(factory_data,
                                                                                         factory_dictionary)
            # 数据过滤
            factory_df = pd.DataFrame(factory_data_list)
            factory_df = self.data_filtering(factory_df)

            # 将数据处理成模型训练的格式
            factory_df = self.deal_data_for_model(factory_df, tenant_id[0])
            factory_df_group = factory_df.groupby(by=['品种'])
            delete_abnormal = AbnormalDataFilter()
            new_file_data = []
            for item, group_data in factory_df_group:
                filter_columns = ["DCS反馈配比平均值-熟料", "过程质量平均值-比表", "过程质量平均值-SO3"]
                filter_data = delete_abnormal.filter_outliners_by_boxplot(group_data, columns=filter_columns)
                new_file_data.append(filter_data)
            new_file_data = pd.concat(new_file_data)
            self.data = pd.concat([self.data, new_file_data], ignore_index=True)
        return self.data

    def transform_ci_to_nl(self, dictionary, name_dict):
        """
        主数据格式转换
        """
        result_dict = {}
        for key, value in dictionary.items():
            new_key = name_dict.get(key, key) if isinstance(key, str) and key.startswith("CI") else key
            new_value = name_dict.get(value, value) if isinstance(value, str) and value.startswith("CI") else value

            if isinstance(new_value, dict):
                # 递归调用以处理嵌套的字典
                result_dict[new_key] = self.transform_ci_to_nl(new_value, name_dict)
            elif isinstance(new_value, list):
                result_dict[new_key] = [name_dict[item] for item in new_value]
            else:
                result_dict[new_key] = new_value
        return result_dict

    def convert_str_to_float(self, dictionary):
        """
        将字典中的所有嵌套值转换为float
        """
        result_dict = {}
        for key, value in dictionary.items():
            if isinstance(value, dict):
                result_dict[key] = self.convert_str_to_float(value)
            elif isinstance(value, str) and value.replace('.', '', 1).lstrip('-').isdigit():
                result_dict[key] = float(value)
            else:
                # 其他情况保持原样
                result_dict[key] = value

        return result_dict

    def transform_ci_to_nlp_and_convert_str_to_int_in_train(self, factory_data: list, dictionary_dict: dict) -> List:
        """
            将数据中的 CI 编码转换为名称，并将字符串数值转换为浮点数。
            :param factory_data: 工厂主数据
            :param dictionary_dict: 对应工厂的编码映射中文
        """
        new_data = []
        for data_dict in factory_data:
            new_data_dict = self.transform_ci_to_nl(data_dict, dictionary_dict)
            new_data_dict = self.convert_str_to_float(new_data_dict)
            new_data.append(new_data_dict)
        return new_data

    def data_integrity(self, data: pd.Series) -> bool:
        """
        判断数据的完整性以及检查数据是否有效
        1. 检查 feedbackIngredient, materialAnalysis, gypRatio 的键是否一致，
        2. 检查 processQuality 的列是否完整
        3. 检查石灰石的 Loss 是否有效
        4. 检查熟料掺比是否在有效范围内
        """
        # 1. 检查 feedbackIngredient, materialAnalysis, gypRatio 的键是否一致
        feedback_ingredient_set = set(data["feedbackIngredient"].keys())
        material_analysis_set = set(data["materialAnalysis"].keys())
        admixture_ratio_set = set(data['admixtureRatio'].keys())
        gyp_ratio_set = set(data["gypRatio"].keys())
        material_list_set = set(data["materialList"])
        if len(gyp_ratio_set) == 0:
            gyp_name = []
        else:
            gyp_name = ['石膏']
        if (admixture_ratio_set | set(gyp_name)).issubset(feedback_ingredient_set) is False:
            return False

        material_analysis_merge_set = material_analysis_set & (gyp_ratio_set | feedback_ingredient_set)

        # 2. 检查 processQuality 的列是否完整
        process_quality_set = set(data["processQuality"].keys())
        required_columns_1 = {'细度45μm', 'Loss', 'CaO', '比表', 'SO3'}
        required_columns_2 = {'细度45μm', 'Loss', '混合材总掺量', '石灰石掺量', '比表', 'SO3'}

        if "CaO" in process_quality_set:
            flag1 = required_columns_1.issubset(process_quality_set)
        else:
            flag1 = required_columns_2.issubset(process_quality_set)

        # 3. 检查石灰石的 Loss 是否有效
        flag2 = (data["materialAnalysis"].get('石灰石', {}).get('Loss') is not None) and \
                (data["materialAnalysis"].get('石灰石', {}).get('Loss') != 0)

        # 4. 检查熟料掺比是否在有效范围内
        flag3 = 30 < data["feedbackIngredient"].get("熟料", 0) < 90

        return flag1 and flag2 and flag3

    def data_filtering(self, factory_data_df: pd.DataFrame) -> pd.DataFrame:
        """
        数据过滤。
        :param factory_data_df: 工厂数据
        """
        # 1. 过滤 deleted 为 False 的数据
        filtered_data = factory_data_df[factory_data_df.get("deleted") == False]

        # 2. 过滤关键字段为空或无效的数据
        filtered_data = filtered_data[
            filtered_data["feedbackIngredient"].notna() & (filtered_data["feedbackIngredient"] != {}) &
            filtered_data["materialAnalysis"].notna() & (filtered_data["materialAnalysis"] != {}) &
            filtered_data["processQuality"].notna() & (filtered_data["processQuality"] != {}) &
            filtered_data["clinkerStrengthPrediction3d"].notna() &
            filtered_data["clinkerStrengthPrediction28d"].notna() &
            (filtered_data["clinkerStrengthPrediction3d"] > 0) &
            (filtered_data["clinkerStrengthPrediction28d"] > 0)
            ]
        # 3. 检查数据完整性并过滤
        filtered_data = filtered_data[filtered_data.apply(self.data_integrity, axis=1)]
        return filtered_data


if __name__ == '__main__':
    # NOTE(security): credentials are hard-coded in the URI; move them to an
    # environment variable or secrets store before deploying.
    # The pipeline reads ``tenant_id[0]`` for every entry, so each element
    # must be indexable — a bare int like 100 raises TypeError.
    tenant_id_list = [(100,)]
    data_aggregator = DataProcessing(
        mongo_url="mongodb://admin:Admin123qaz@10.50.0.63:27017/admin?authMechanism=SCRAM-SHA-1",
        database_name="cement_ingredient", tenant_id_list=tenant_id_list)
    all_data_df = data_aggregator.read_from_mongo_and_deal_data()
