import json
import os
import bcrypt
import shutil
import pymysql
from database import *
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import xgboost as xgb
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix,roc_auc_score

def register(name, pwd):
    """Register a new user and create their on-disk workspace.

    :param name: desired username
    :param pwd: plaintext password; must be 8-200 chars and contain
        both letters and digits
    :return: "1" if the username already exists, "2" on bad length,
        "3" if the password lacks letters or digits, True on success
    """
    if select(name):
        return "1"  # username already exists
    if len(pwd) < 8 or len(pwd) > 200:
        return "2"  # bad password length
    if not (any(c.isdigit() for c in pwd) and any(c.isalpha() for c in pwd)):
        return "3"  # password must contain both digits and letters
    insert(name, hash_password(pwd))
    # Create the per-user directory tree. makedirs creates missing
    # parents in one call and exist_ok avoids the check-then-mkdir
    # race of the original; it also repairs a partially created tree.
    base = r"../file/user/" + name
    for sub in ("inputfile", "downloadfile", "json"):
        os.makedirs(os.path.join(base, sub), exist_ok=True)
    return True

def login(name, pwd):
    """Authenticate a user against the database.

    :param name: username to look up
    :param pwd: plaintext password to verify
    :return: 1 if the password matches the stored hash, 0 if it does
        not match or the user does not exist
    """
    # Fetch the stored hash once; the original queried the DB twice.
    stored = select(name)
    if stored:
        return verify_password(stored, pwd)
    return 0


def change_password(oldusername, newusername, password):
    """Update a user's name and/or password in the JSON user store.

    NOTE(review): this path stores the password in plaintext JSON,
    while register()/login() use the hashed database path — confirm
    which store is authoritative.

    :param oldusername: current username (its JSON entry and folder
        are replaced/renamed)
    :param newusername: new username (may equal oldusername)
    :param password: new plaintext password; 8-200 chars, must contain
        both digits and letters
    :return: True on success, False on any validation failure
    :raises ValueError: if a rename is needed but the old user's
        folder is missing
    """
    with open(r"../file/json/user.json", "r", encoding="utf-8") as f:
        data = json.load(f)
    # Renaming onto a different, already-existing user is not allowed.
    if newusername != oldusername and newusername in data:
        return False
    if len(password) < 8 or len(password) > 200:
        return False
    if not (any(c.isdigit() for c in password) and any(c.isalpha() for c in password)):
        return False
    # Guard: the original raised KeyError when the old user was absent.
    if oldusername not in data:
        return False
    del data[oldusername]
    data[newusername] = password
    with open(r"../file/json/user.json", "w", encoding="utf-8") as f:
        json.dump(data, f)  # persist the updated user map
    if oldusername != newusername:
        old_path = rf"../file/user/{oldusername}"
        new_path = rf"../file/user/{newusername}"
        if not os.path.exists(old_path):
            raise ValueError("No such directory")
        shutil.move(old_path, new_path)  # rename the user's folder too
    return True

def hash_password(password):
    """Hash *password* with bcrypt (12 rounds) and return a utf-8 string."""
    generated_salt = bcrypt.gensalt(12)
    digest = bcrypt.hashpw(password.encode("utf-8"), generated_salt)
    return digest.decode("utf-8")


def verify_password(hashed_password, password_to_verify):
    """Check a password against a bcrypt hash; return 1 on match, else 0."""
    def _as_bytes(value):
        # bcrypt requires bytes; callers may pass either str or bytes.
        return value.encode("utf-8") if isinstance(value, str) else value

    matched = bcrypt.checkpw(_as_bytes(password_to_verify),
                             _as_bytes(hashed_password))
    return 1 if matched else 0

def RF_Select_Features(data, k):
    """Select the top-*k* features ranked by random-forest importance.

    Bug fixed: the original ignored *k* and always kept 20 features
    (the loop hard-coded ``ans < 20``); the existing caller passes 20,
    so its behavior is unchanged.

    :param data: preprocessed DataFrame; column 0 is skipped and the
        last column is assumed to be the target, target column 'RES'
    :param k: number of features to keep
    :return: list of the k most important feature names; the list is
        also written to ../file/json/selected_features.json
    """
    x = data.iloc[:, 1:-1]  # feature columns
    y = data['RES']         # target column
    rf = RandomForestClassifier(n_estimators=100, random_state=42)
    rf.fit(x, y)
    # Pair each feature name with its importance and keep the k largest.
    # sorted() is stable, matching the original list.sort() ordering.
    ranked = sorted(zip(x.columns, rf.feature_importances_),
                    key=lambda pair: pair[1], reverse=True)
    selected_features = [name for name, _ in ranked[:k]]
    print("selected_features=", selected_features)
    params = {
        'selected_features': selected_features
    }
    with open("../file/json/selected_features.json", 'w') as f:
        json.dump(params, f)
    return selected_features

def Data_preprocessing(data):
    """Return *data* with every missing value replaced by 0.

    :param data: DataFrame to preprocess
    :return: DataFrame with all NaNs filled with 0
    """
    return data.fillna(0)

def model_lightgbm(X_train, X_test, y_train, y_test):
    """Train a LightGBM binary classifier and predict on the test set.

    Hyperparameters are loaded from ../file/json/lgb_params.json; the
    class-imbalance weight (scale_pos_weight) is recomputed from the
    training labels. The trained model is saved to
    ../file/checkpoint/lgb_model.json.

    :param X_train: training features
    :param X_test: test features
    :param y_train: training labels (0/1)
    :param y_test: test labels (0/1), used only for the eval set
    :return: predicted probabilities (floats in [0, 1]) for X_test
    """
    print("lightgbm training...")
    # Ratio of negative to positive samples compensates for class
    # imbalance via scale_pos_weight.
    negative_samples = np.sum(y_train == 0)
    positive_samples = np.sum(y_train == 1)
    scale_pos_weight = negative_samples / positive_samples
    # Load tuned parameters from JSON, then inject the computed weight.
    with open("../file/json/lgb_params.json", 'r') as f:
        params = json.load(f)
    params['scale_pos_weight'] = scale_pos_weight

    # Convert to LightGBM's Dataset format.
    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

    gbm = lgb.train(params,
                    lgb_train,
                    num_boost_round=580,
                    valid_sets=[lgb_train, lgb_eval],
                    )

    # Predict on the test set at the best iteration.
    y_pred_proba1 = gbm.predict(X_test, num_iteration=gbm.best_iteration)

    gbm.save_model("../file/checkpoint/lgb_model.json")
    return y_pred_proba1

def model_xgboost(X_train, X_test, y_train, y_test):
    """Train an XGBoost binary classifier and predict on the test set.

    Hyperparameters are loaded from ../file/json/xgboost_params.json;
    the class-imbalance weight (scale_pos_weight) is recomputed from
    the training labels. The trained model is saved to
    ../file/checkpoint/xgb_model.json.

    :param X_train: training features
    :param X_test: test features
    :param y_train: training labels (0/1)
    :param y_test: test labels (unused by training; kept so the
        signature parallels model_lightgbm)
    :return: predicted probabilities (floats in [0, 1]) for X_test
    """
    print("xgboost training...")
    # Ratio of negative to positive samples compensates for class
    # imbalance via scale_pos_weight.
    negative_samples = np.sum(y_train == 0)
    positive_samples = np.sum(y_train == 1)
    scale_pos_weight = negative_samples / positive_samples

    # Load tuned parameters from JSON, then inject the computed weight.
    with open("../file/json/xgboost_params.json", "r") as f:
        params = json.load(f)
    params["scale_pos_weight"] = scale_pos_weight

    dtrain = xgb.DMatrix(X_train, y_train)
    xgbM = xgb.train(params=params, dtrain=dtrain, num_boost_round=50,
                     evals=[(dtrain, 'train')], verbose_eval=False)

    # Predict on the test set.
    dtest = xgb.DMatrix(X_test)
    y_pred_proba2 = xgbM.predict(dtest)

    xgbM.save_model("../file/checkpoint/xgb_model.json")
    return y_pred_proba2

def model_lightgbm_xgboost(X_train, X_test, y_train, y_test):
    """Train the LightGBM+XGBoost ensemble and tune its blend/threshold.

    Grid-searches the blend weight p (100 points in [0, 1]) and the
    decision threshold k (100 points in [0.2, 1]) maximizing the AUC
    of the binarized predictions on the test set, then saves both to
    ../file/json/lightgbm_xgboost_threshold.json.

    NOTE(review): roc_auc_score is applied to hard 0/1 predictions,
    which is effectively balanced accuracy — confirm this metric is
    intended rather than AUC on the probabilities.

    :param X_train: training features
    :param X_test: test features
    :param y_train: training labels (0/1)
    :param y_test: test labels (0/1)
    :return: best AUC value found on the test set
    """
    print("lightgbm_xgboost:")
    # Per-model probability predictions.
    y_pred_proba1 = model_lightgbm(X_train, X_test, y_train, y_test)
    y_pred_proba2 = model_xgboost(X_train, X_test, y_train, y_test)
    # Candidate blend weights and decision thresholds.
    p_list = np.linspace(0, 1, 100)
    k_list = np.linspace(0.2, 1, 100)
    p_best = 0
    k_best = 0
    Max_Auc = 0
    best_y_pred_proba = []
    for p in p_list:
        y_pred_proba = y_pred_proba1 * p + y_pred_proba2 * (1 - p)
        for k in k_list:
            # Binarize at threshold k, then score.
            y_pred = (y_pred_proba > k).astype(int)
            auc_score = roc_auc_score(y_test, y_pred)
            if auc_score > Max_Auc:
                Max_Auc = auc_score
                k_best = k
                p_best = p
                best_y_pred_proba = y_pred_proba
    y_pred = (best_y_pred_proba > k_best).astype(int)
    params = {
        "lightgbm_xgboost_threshold": k_best,
        "weight": p_best,
    }
    with open('../file/json/lightgbm_xgboost_threshold.json', 'w') as f:
        json.dump(params, f)
    print("测试集上的最好的AUC值：", Max_Auc, "此时的阈值为：", k_best, "权重：", p_best)
    # Evaluation report on the best configuration.
    accuracy = accuracy_score(y_test, y_pred)
    print("准确率：", accuracy)
    print(classification_report(y_test, y_pred))
    print("混淆矩阵：")
    print(confusion_matrix(y_test, y_pred))
    return Max_Auc

def data_split(X, y):
    """Split (X, y) 80/20 and persist both parts as CSV files.

    :param X: feature data without the label column
    :param y: label data
    :return: None — writes ./data/train.csv and ./data/test.csv
    """
    train_x, test_x, train_y, test_y = train_test_split(
        X, y, test_size=0.2, random_state=42)
    print("----训练集----")
    train_frame = pd.concat([train_x, train_y], axis=1)
    print(train_frame.shape)
    print("----测试集----")
    test_frame = pd.concat([test_x, test_y], axis=1)
    print(test_frame.shape)
    # Persist both splits for later training/evaluation runs.
    train_frame.to_csv("./data/train.csv", index=False)
    test_frame.to_csv("./data/test.csv", index=False)
    print("保存成功！")

def train(path):
    """Run the full training pipeline on the dataset at *path*.

    Steps: load CSV -> fill missing values -> pick the top-20 features
    with a random forest -> split 80/20 -> train the ensemble.

    :param path: path to the training CSV file
    :return: None
    """
    raw = pd.read_csv(path)
    cleaned = Data_preprocessing(raw)
    chosen = RF_Select_Features(cleaned, 20)
    features = cleaned[chosen]
    labels = cleaned["RES"]
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=42)
    model_lightgbm_xgboost(X_train, X_test, y_train, y_test)

def predict(path):
    """Predict fraud labels for the dataset at *path*.

    Loads the feature list, both saved models, and the saved blend
    weight/threshold produced at training time, then blends the two
    probability outputs and binarizes at the threshold.

    Bug fixed: the original ran the XGBoost prediction twice (two
    DMatrix builds) and opened the threshold JSON in binary mode.

    :param path: path to the CSV dataset to score
    :return: array of 0/1 predictions, one per input row
    """
    data = pd.read_csv(path)
    data = Data_preprocessing(data)
    # Features chosen at training time.
    with open("../file/json/selected_features.json", "r") as f:
        selected_features_json = json.load(f)
    selected_features = selected_features_json["selected_features"]
    x = data[selected_features]
    # LightGBM prediction from the saved booster.
    gbm_model = lgb.Booster(model_file="../file/checkpoint/lgb_model.json")
    y_pred_proba1 = gbm_model.predict(x)
    # XGBoost prediction from the saved booster.
    xgb_model = xgb.Booster()
    xgb_model.load_model("../file/checkpoint/xgb_model.json")
    y_pred_proba2 = xgb_model.predict(xgb.DMatrix(x))
    # Blend weight and decision threshold chosen at training time.
    with open("../file/json/lightgbm_xgboost_threshold.json", "r") as f:
        threshold_json = json.load(f)
    threshold = threshold_json["lightgbm_xgboost_threshold"]
    weight = threshold_json["weight"]
    y_pred_proba = y_pred_proba1 * weight + y_pred_proba2 * (1 - weight)
    y_pred = (y_pred_proba > threshold).astype(int)
    return y_pred