from .process_data import Process_data
from datetime import datetime
from sklearn.metrics import mean_absolute_error, r2_score
import warnings
from django.conf import settings
from .config import pickle_file_path
import copy
from .log import train_log_config
from . import config
from django.conf import settings
import json
import pandas as pd
from scipy.optimize import curve_fit
from .cement_batching import calc_balance_dry_from_feedback_dry, wet_to_dry, transform_ci_to_nl, \
    convert_str_to_float, integrate_gypsum_into_chemical
import numpy as np
import pickle
import os
import pymongo
from .update20240529 import MyDataIntegrityOrActivities

# Module-level singletons shared by every training helper in this file.
DataIntegrityOrActivities = MyDataIntegrityOrActivities()
LOGGER = train_log_config()

# Silence all warnings globally for this module.
warnings.simplefilter("ignore")


def get_db_handle():
    """Connect to MongoDB and return a handle to the configured database."""
    mongo_client = pymongo.MongoClient(settings.default_mongo_uri)
    return mongo_client[settings.database_name]


def model_function(x, *args):
    """Linear model used by curve_fit: dot(coefficients, x), plus an intercept
    when config.isIntercept is enabled. The last element of *args is the intercept."""
    coefficients, intercept = args[:-1], args[-1]
    weighted_sum = sum(coef * feature for coef, feature in zip(coefficients, x))
    if config.isIntercept:
        return weighted_sum + intercept
    return weighted_sum


def accuracy(y_true, y_pred, acc_threshold):
    """Return the fraction of predictions within acc_threshold of the truth.

    Args:
        y_true: iterable of true values.
        y_pred: iterable of predicted values (paired positionally with y_true).
        acc_threshold: maximum absolute error counted as "accurate".

    Returns:
        float in [0, 1]; 0.0 for empty input (instead of ZeroDivisionError).
    """
    if not len(y_true):
        return 0.0
    correct = sum(1 for true, pred in zip(y_true, y_pred) if abs(true - pred) <= acc_threshold)
    return correct / len(y_true)


def predict(x, y, params_bounds_dict, acc_threshold):
    """Fit the bounded linear model with curve_fit and report its quality.

    Args:
        x: feature rows (one list per sample); transposed before fitting.
        y: target values.
        params_bounds_dict: {param_name: [lower, upper]} bounds, in order.
        acc_threshold: tolerance passed to accuracy().

    Returns:
        (y_pred, mae, r2, acc, fitted params dict rounded to 5 decimals).
    """
    keys = list(params_bounds_dict.keys())
    lower = [params_bounds_dict[k][0] for k in keys]
    upper = [params_bounds_dict[k][1] for k in keys]
    features = np.array(x).T
    # Start the optimizer at the lower bound of every parameter.
    params, covariance = curve_fit(model_function, features, y,
                                   bounds=(lower, upper), p0=lower, maxfev=50000)
    y_pred = model_function(features, *params)
    mae = mean_absolute_error(y, y_pred)
    r2 = r2_score(y, y_pred)
    acc = accuracy(y, y_pred, acc_threshold)

    rounded = np.around(params, 5)
    params_dict = dict(zip(keys, rounded))
    return y_pred, mae, r2, acc, params_dict


def transform_ci_to_nlp_and_convert_str_to_int_in_train(data, master_data):
    """Map CI codes to natural-language keys and coerce string values to floats,
    record by record, returning a new list."""
    return [
        convert_str_to_float(transform_ci_to_nl(record, master_data))
        for record in data
    ]


def save_coef(model_type, model_code, params_3d, params_28d):
    """Persist the fitted 3d/28d coefficient dicts to a pickle file.

    The file is written to
    <pickle_file_path>/<model_code>_<settings.linear_suffix[model_type]>.pkl.
    """
    params = {
        "3d": params_3d,
        "28d": params_28d
    }

    # Ensure the target directory exists (race-free vs. the exists/makedirs pair).
    os.makedirs(pickle_file_path, exist_ok=True)
    full_path = os.path.join(pickle_file_path, model_code + '_{}.pkl'.format(settings.linear_suffix[int(model_type)]))
    with open(full_path, "wb") as pickle_file:
        pickle.dump(params, pickle_file)
    # Bug fix: report the actual file written, not just the parent directory.
    print(f"模型系数已成功保存到文件: {full_path}")


def integrate_gypsum_into_material_list(materialList, gyp_ratio):
    """Collapse the individual gypsum components into one "石膏" entry, in place.

    Removes each key of gyp_ratio from materialList (raises ValueError if one
    is absent, as list.remove does) and appends the abstract "石膏" name.
    """
    for gyp_component in gyp_ratio:
        materialList.remove(gyp_component)
    materialList.append("石膏")


def data_reduction(model_type, is_28d_strength, filtered_data, clinker_strength_name, cement_strength_name):
    """Flatten self-learning records into curve_fit-ready training rows.

    Each record yields one feature row (dry material balance scaled by clinker
    strength, plus process-quality indices and, depending on model_type, an
    earlier-age measured strength) and one target value
    (cement strength - clinker strength).

    Args:
        model_type: 1 adds the measured 1d strength feature; 2 with
            is_28d_strength adds the measured 3d strength feature.
        is_28d_strength: True when reducing the 28d training set.
        filtered_data: pandas DataFrame of pre-filtered records.
        clinker_strength_name / cement_strength_name: column names to read.

    Returns:
        (x_rows, y, production_dates, material_names, processquality_names,
        materials_chemical_dict) — the chemical dict is the one from the LAST
        record processed; NameError if filtered_data is empty.
    """
    x, y, date = [], [], []
    filtered_data = copy.deepcopy(filtered_data.to_dict(orient='records'))

    for data_dict in filtered_data:
        feedback_wet = data_dict["feedbackIngredient"]
        materials_chemical_dict = data_dict["materialAnalysis"]
        gyp_ratio = data_dict["gypRatio"]

        # Abstract gypsum: when the feedback uses the single "石膏" entry, fold
        # the component gypsums into the chemical analysis table first.
        abs_gyp_flag = "石膏" in feedback_wet.keys()
        if abs_gyp_flag:
            integrate_gypsum_into_chemical(materials_chemical_dict, gyp_ratio)

        # Chemical values are stored as percentages; convert to fractions.
        materials_chemical_dict = {material: {key: value / 100 for key, value in properties.items()} for
                                   material, properties in materials_chemical_dict.items()}
        feedback_dry = wet_to_dry(feedback_wet, materials_chemical_dict)
        # Clinker strength uses the homogenized prediction value here.
        clinker_strength = data_dict[clinker_strength_name]
        cement_strength = data_dict[cement_strength_name]
        if model_type == 1:
            cement_strength_1d = data_dict["checkStrength1d"]
        if model_type == 2 and is_28d_strength:
            cement_strength_3d = data_dict["checkStrength3d"]  # measured 3d strength as an extra feature
        process_quality_dict = data_dict["processQuality"]
        material_list = data_dict["materialList"]

        integrate_gypsum_into_material_list(material_list, gyp_ratio)

        balance_dry = calc_balance_dry_from_feedback_dry(feedback_dry, process_quality_dict, materials_chemical_dict)
        balance_dry = {k: v / 100 for k, v in balance_dry.items()}
        # Clinker and gypsum are not trainable mixed materials; drop them.
        # Gypsum may be absent, hence the pop() default to avoid KeyError.
        balance_dry.pop("熟料")
        balance_dry.pop("石膏", "default")

        material = copy.deepcopy(balance_dry)
        material = {k: v * clinker_strength for k, v in material.items()}  # scale by clinker strength
        material["细度45μm"] = process_quality_dict["细度45μm"]
        material["比表"] = process_quality_dict["比表"]
        material["SO3"] = process_quality_dict["SO3"]
        if model_type == 1:
            material["1d"] = cement_strength_1d
        if model_type == 2 and is_28d_strength:
            material["3d"] = cement_strength_3d
        single_x = material
        single_y = cement_strength - clinker_strength
        x.append(single_x)
        y.append(single_y)
        date.append(data_dict["productionDate"])

    if model_type == 2 and is_28d_strength:
        processquality_name_in_train = ["细度45μm", "比表", "SO3", "3d"]
    elif model_type == 1:
        processquality_name_in_train = ["细度45μm", "比表", "SO3", "1d"]
    else:
        processquality_name_in_train = ["细度45μm", "比表", "SO3"]
    df_x = pd.DataFrame(x)
    col = df_x.columns
    # Material columns are whatever is left after removing the process-quality
    # columns; reorder so materials come first, indices last.
    material_name_in_train = [item for item in col if item not in processquality_name_in_train]
    col = material_name_in_train + processquality_name_in_train
    df_x = df_x[col]
    new_x = df_x.values.tolist()

    return new_x, y, date, material_name_in_train, processquality_name_in_train, materials_chemical_dict


def monggo_get_data(db, mill_code_list, product_code_list, whitelist_date_list=None):
    """Load self-learning records and filter by mill, product, date windows
    and the deleted flag.

    Args:
        db: MongoDB database handle.
        mill_code_list / product_code_list: allowed codes.
        whitelist_date_list: list of [start, end] datetime pairs; defaults to
            one wide-open window (2022-06-01 .. 2030-01-01).

    Returns:
        List of record dicts that pass every filter.
    """
    if whitelist_date_list is None:
        whitelist_date_list = [[datetime(2022, 6, 1), datetime(2030, 1, 1)]]
    collection = db[settings.self_learning_collection_name]

    # Pull every document (minus _id) and normalize it for training.
    data = list(collection.find({}, {"_id": 0}))
    data = transform_ci_to_nlp_and_convert_str_to_int_in_train(data, config.master_data)
    process_data = Process_data()
    data = process_data.process_adm_ratio(data)
    data = process_data.process_data(data)

    LOGGER.info(f"自学习表数据总共有{len(data)}条")

    # Keep records matching the mill, the product, and any whitelist window.
    filtered_data = [
        item for item in data
        if item.get('millCode') in mill_code_list
        and item.get('productCode') in product_code_list
        and any(start_date <= item.get('productionDate') <= end_date
                for start_date, end_date in whitelist_date_list)
    ]

    LOGGER.info(
        f"按照磨号{mill_code_list}产品号{product_code_list}时间范围{whitelist_date_list}筛选，剩余{len(filtered_data)}")

    # Keep only records explicitly marked deleted == False
    # (missing or None values are intentionally excluded, as before).
    filtered_data = [item for item in filtered_data if item.get("deleted") == False]
    LOGGER.info(f"deleted筛选，剩余{len(filtered_data)}")

    return filtered_data


def get_x_and_y(model_type, filtered_data):
    """Split filtered records into the 3d and 28d training sets.

    Returns the 3d X/y/dates, the 28d X/y/dates, the material-name list (from
    the 28d reduction), a pair of process-quality name lists [3d, 28d], and
    the chemical table of the last reduced record.
    """
    records_df = pd.DataFrame(filtered_data)
    records_df = DataIntegrityOrActivities.data_filter(records_df)
    if model_type == 1:
        # Model type 1 additionally requires a measured 1d strength.
        records_df = records_df[records_df["checkStrength1d"].notna()]
    LOGGER.info(f"经过这个函数筛选后还剩DataIntegrityOrActivities.data_filter还剩： {len(records_df)}")

    # 3d training set: rows with a measured 3d strength.
    df_3d = records_df[records_df["checkStrength3d"].notna()]
    LOGGER.info(f"3天的数据有{len(df_3d)}")
    new_x_3d, y_3d, date_3d, material_name_in_train, pq_names_3d, materials_chemical_dict = data_reduction(
        model_type, False, df_3d, "clinkerStrengthPrediction3d", "checkStrength3d")

    # 28d training set: rows with a measured 28d strength
    # (model type 2 also needs the measured 3d strength as a feature).
    df_28d = records_df[records_df["checkStrength28d"].notna()]
    if model_type == 2:
        df_28d = df_28d[df_28d["checkStrength3d"].notna()]
    LOGGER.info(f"28d的数据有{len(df_28d)}")
    new_x_28d, y_28d, date_28d, material_name_in_train, pq_names_28d, materials_chemical_dict = data_reduction(
        model_type, True, df_28d, "clinkerStrengthPrediction28d", "checkStrength28d")

    processquality_name_in_train = [pq_names_3d, pq_names_28d]
    return new_x_3d, y_3d, date_3d, new_x_28d, y_28d, date_28d, material_name_in_train, processquality_name_in_train, materials_chemical_dict


def find_best(df, material_type_in_train):
    """Rank the fitted parameter candidates and return the best one.

    The composite "综合指标" score combines the Pearson correlation between the
    fitted material coefficients and the reference material activities with
    r2, acc (favoring high values) and mae (favoring low values), min-max
    normalized, then sorted descending (mae ascending as tie-breaker).

    Args:
        df: DataFrame of candidate fits (one row per parameter set).
        material_type_in_train: material coefficient column names.

    Returns:
        (list of candidate dicts in ranked order, best candidate dict).

    Raises:
        ValueError: if config.is_material_activities_from_general is disabled
            (previously this path crashed later with a NameError).
    """
    from scipy.stats import pearsonr
    from sklearn.preprocessing import MinMaxScaler

    def pearson(x, y):
        x_numeric = np.array(x, dtype=float)
        y_numeric = np.array(y, dtype=float)
        # pearsonr is undefined for a constant vector; treat it as no correlation.
        result = pearsonr(x_numeric, y_numeric)[0] if (np.std(x) != 0) else 0
        return result

    if config.is_material_activities_from_general:
        activities_table = DataIntegrityOrActivities.get_activities(material_type_in_train)
    else:
        # Fail early and clearly: no other activities source is implemented here.
        raise ValueError("find_best requires config.is_material_activities_from_general "
                         "to be enabled; no alternative activities source is available")

    activity = [activities_table[material] for material in material_type_in_train]

    # Coefficient columns may arrive as objects; Pearson needs numeric dtype.
    for material in material_type_in_train:
        df[material] = df[material].astype(float)

    df['pearson'] = df.apply(lambda x: pearson(x[material_type_in_train], activity), axis=1)
    df['综合指标'] = np.power(df['pearson'] + 2, 1 / 6) * (df['r2'] + 1) * (np.power(df['acc'], 2)) / df['mae']
    scaler = MinMaxScaler()
    df['综合指标'] = scaler.fit_transform(df[['综合指标']])
    df = df.sort_values(by=['综合指标', 'mae'], ascending=[False, True])
    all_params = df.iloc[0].to_dict()
    coef_enum_list = df.to_dict(orient='records')
    return coef_enum_list, all_params


def train(model_type, x, y, is_28d_strength, material_type_in_train=["石灰石", "水渣", "建筑垃圾"],
          index_type_in_train=["细度45μm", "比表", "SO3"], acc_threshold=1.2):
    """Grid-search the parameter bounds, fit every candidate with curve_fit,
    and return the best parameter set.

    The grid varies the fineness upper bound, the SO3 lower bound and the
    early-strength upper bound; each combination is fitted via predict() and
    the winner is chosen by find_best().

    Args:
        model_type: 1 adds a "1d" feature; 2 with is_28d_strength adds "3d".
        x, y: training rows/targets from data_reduction().
        is_28d_strength: whether this is the 28d model.
        material_type_in_train / index_type_in_train: feature column names.
            NOTE: mutable defaults are never mutated here, so they are safe.
        acc_threshold: tolerance passed through to accuracy().

    Returns:
        (best params dict, metrics dict {mae, r2, acc}, rounded candidate list).
    """
    fineness_ub_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    SO3_lb_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    strength_1d_ub_list = [100, 250, 500, 750, 1000, 1250, 1500, 1750]
    res_list = []
    for fineness_ub in fineness_ub_list:
        for SO3_lb in SO3_lb_list:
            for strength_1d_ub in strength_1d_ub_list:
                # Every mixed material shares the same negative coefficient range.
                params_bounds_dict = {material: [-1.5, -0.005] for material in material_type_in_train}
                index_bounds = {
                    "细度45μm": [-2, -fineness_ub / 100],
                    "比表": [0.00001, 0.005],
                    "SO3": [-SO3_lb / 10, 1],
                }
                if model_type == 1:
                    index_bounds["1d"] = [-strength_1d_ub / 10000, 1.5]
                elif is_28d_strength and model_type == 2:
                    index_bounds["3d"] = [-strength_1d_ub / 10000, 1.5]
                params_bounds_dict.update(index_bounds)
                # The intercept is pinned to ~0; both branches of the original
                # config.isIntercept conditional used the identical range.
                params_bounds_dict['截距'] = [-1e-6, 1e-6]

                y_pre, mae, r2, acc, params = predict(x, y, params_bounds_dict, acc_threshold)
                single_dict = {"mae": mae, "r2": r2, "acc": acc, "y_pre": y_pre.tolist(), "y_true": y}
                single_dict.update(params)
                res_list.append(single_dict)
    df = pd.DataFrame(res_list)

    # Rank the candidates and keep the best one.
    coef_enum_list, all_params = find_best(df, material_type_in_train)
    coef_enum_list_dec = [{key: round(value, 5) if isinstance(value, float) else value for key, value in d.items()} for
                          d in coef_enum_list]
    if model_type == 1:
        extra_keys = ['截距', '1d']
    elif is_28d_strength and model_type == 2:
        extra_keys = ['截距', '3d']
    else:
        extra_keys = ['截距']
    params = {k: all_params[k] for k in material_type_in_train + index_type_in_train + extra_keys}

    # Renamed from `eval` to avoid shadowing the builtin.
    evaluation = {
        'mae': round(all_params['mae'], 4),
        'r2': round(all_params['r2'], 4),
        'acc': round(all_params['acc'], 4),
    }

    return params, evaluation, coef_enum_list_dec


def read_coef():
    """Load the model-coefficient dict from the pickle at `pickle_file_path`.

    NOTE(review): save_coef() treats `pickle_file_path` as a directory and
    writes per-model files inside it, while this opens `pickle_file_path`
    directly as a file — confirm which layout is current before relying on
    this helper.
    """
    with open(pickle_file_path, "rb") as pickle_file:
        model_coef = pickle.load(pickle_file)
    return model_coef


def auto_train(model_type, filtered_data):
    """Train the 3d and 28d cement-strength models for one model type.

    Returns the coefficient dicts, evaluation metrics and candidate lists for
    both ages, plus the chemical table from the last reduced record.
    """
    LOGGER.info("开始读取自学习表")
    (x_3d, target_3d, date_3d, x_28d, target_28d, date_28d,
     material_names, pq_names, chemical) = get_x_and_y(model_type, filtered_data)

    LOGGER.info("-----水泥3天强度训练-----")
    coef_3d, ei_3d, enum_3d = train(model_type, x_3d, target_3d, False,
                                    material_type_in_train=material_names,
                                    index_type_in_train=pq_names[0], acc_threshold=1.2)
    LOGGER.info("水泥3天强度结果: coef_3d = %s, ei_3d = %s", coef_3d, ei_3d)

    LOGGER.info("-----水泥28天强度训练-----")
    coef_28d, ei_28d, enum_28d = train(model_type, x_28d, target_28d, True,
                                       material_type_in_train=material_names,
                                       index_type_in_train=pq_names[1], acc_threshold=1.5)
    LOGGER.info("水泥28天强度结果: coef_28d = %s, ei_28d = %s", coef_28d, ei_28d)

    return coef_3d, coef_28d, ei_3d, ei_28d, enum_3d, enum_28d, chemical


def get_data_info(df, mill_product_type):
    """Summarize the dataset: total rows, distinct material sets, and per
    mill/product counts of rows with measured 3d and 28d strengths."""
    data_info = {
        "总条数": len(df),
        "涉及到的物料": df['materialList'].apply(lambda x: str(set(x))).unique(),
    }

    for mill, product in mill_product_type:
        pair_mask = (df['millCode'] == mill) & (df["productCode"] == product)
        data_info[mill + "_" + product] = {
            "3d": (pair_mask & df['checkStrength3d'].notna()).sum(),
            "28d": (pair_mask & df['checkStrength28d'].notna()).sum(),
        }
    return data_info


def get_model_type():
    """Enumerate the (mill, product) model combinations found in the
    self-learning collection.

    Key format: type-mill-product (e.g. 智能配料-1#生料磨-PO42.5 as
    CI_10001162-CI_10001073-CI_10001091).

    Returns:
        model_type: list of [mill_code_list, product_code_list] pairs — one
            per combination; per-mill when config.isSeperateMill, otherwise
            every combination shares all mills.
        mas_model_type: matching master-data codes "<algo>-<mill>-<product>".
    """
    db = get_db_handle()
    collection = db[settings.self_learning_collection_name]

    # Fetch every document in the collection (without the _id field).

    cursor = collection.find({}, {"_id": 0})
    data = list(cursor)

    # Normalize CI codes to natural-language keys and coerce strings to floats.
    data = transform_ci_to_nlp_and_convert_str_to_int_in_train(data, config.master_data)
    df = pd.DataFrame(data)

    uni_comb = df[['millCode', 'productCode']].drop_duplicates().to_dict(orient="records")
    uni_mill = df["millCode"].unique().tolist()
    # Each entry is a dict_values pair iterated later as (millCode, productCode).
    mill_product_type = [i.values() for i in uni_comb]
    # First-pass integrity filter, used here only to report data availability.
    data_filter = MyDataIntegrityOrActivities().data_filter(df)

    filtered_data_info = get_data_info(data_filter, mill_product_type)
    LOGGER.info(f"筛选后数据信息为{filtered_data_info}")
    mas_model_type = []
    model_type = []
    algo_type = "CI_10001194"
    # Invert master_data to map natural-language names back to master codes.
    nl_to_mas = {v: k for k, v in config.master_data.items()}
    for mill, product in mill_product_type:
        if config.isSeperateMill:
            # One model per individual mill.
            model_type.append([[mill], [product]])
        else:
            # One model shared across all mills.
            model_type.append([uni_mill, [product]])
        mill_mas, product_mas = nl_to_mas[mill], nl_to_mas[product]
        mas_model_type.append(algo_type + "-" + mill_mas + "-" + product_mas)
    return model_type, mas_model_type


def initialize(model_type, model_code, filtered_data):
    """Run auto-training for one model type and persist the resulting coefficients."""
    result = auto_train(model_type, filtered_data)
    coef_3d, coef_28d = result[0], result[1]
    LOGGER.info("pklandjson_zip_upload_local_dev_simulation开始")
    save_coef(model_type, model_code, coef_3d, coef_28d)
    LOGGER.info("----系数写入pkl")


def train_model(model_code, filtered_data):
    """Train all three model types (0, 1, 2) for one model code.

    A failure for one model type is logged with its traceback and does not
    stop the remaining types.
    """
    for model_type in range(3):
        try:
            initialize(model_type, model_code, filtered_data)  # no measured-value input
        except Exception as e:
            # Use the module logger (with traceback) instead of print, so
            # failures reach the training log like every other message here.
            LOGGER.exception(f"{model_code}的model_type为{model_type}的模型训练失败！报错信息是：{e}")
