import os
import pandas as pd
import numpy as np
from django.http import JsonResponse, HttpResponse
from django.shortcuts import render
from django.views import View
from django.conf import settings
import sklearn.metrics
import lightgbm as lgb
import catboost as cbt
import xgboost as xgb
from statistics import mode
import sklearn.metrics
from cvxopt import matrix, solvers
import json
from sklearn.metrics import f1_score
from collections import Counter

# Create combination views here.
# Uploaded file names are kept as module-level globals so the upload view
# (Upload.post) can hand them to the training view (execute_combination).
train_file_name = ''
predict_file_name = ''


# Upload and receive the training and prediction sets
class Upload(View):
    """Training workflow step 1: accept a training CSV and a prediction CSV.

    GET renders the upload form; POST validates both uploads and stores them
    under MEDIA_ROOT/csv_dir, remembering their names in module globals for
    the later training step.
    """

    def get(self, request):
        """Render the upload form."""
        return render(request, 'combination/combination-upload.html')

    def post(self, request):
        """Receive both CSV files, validate them, and save them to disk.

        Returns JSON ``{'code': 1}`` on success, ``{'code': 0}`` when a file
        is missing or is not a CSV.
        """
        global train_file_name
        global predict_file_name

        train_csv_file = request.FILES.get('train_csv_file')
        predict_csv_file = request.FILES.get('predict_csv_file')

        # Robustness: reject the request when either file was not supplied
        # (the original crashed with AttributeError on .name).
        if train_csv_file is None or predict_csv_file is None:
            return JsonResponse({'code': 0})

        train_file_name = train_csv_file.name
        predict_file_name = predict_csv_file.name

        # Bug fix: the original used `and`, so an upload was accepted as long
        # as at least ONE of the two files was a CSV — both must be CSVs.
        # os.path.splitext also handles names with extra dots ("a.b.csv"),
        # which split('.')[1] got wrong.
        if (os.path.splitext(train_file_name)[1].lower() != '.csv'
                or os.path.splitext(predict_file_name)[1].lower() != '.csv'):
            return JsonResponse({'code': 0})

        # Portable path construction (the original hard-coded '\\').
        train_file_path = os.path.join(settings.MEDIA_ROOT, 'csv_dir', train_file_name)
        predict_file_path = os.path.join(settings.MEDIA_ROOT, 'csv_dir', predict_file_name)

        # Stream each upload to disk chunk by chunk so large files are never
        # held in memory at once.
        for uploaded, path in ((train_csv_file, train_file_path),
                               (predict_csv_file, predict_file_path)):
            with open(path, 'wb+') as f:
                for chunk in uploaded.chunks():
                    f.write(chunk)

        return JsonResponse({'code': 1})


# Run the training / prediction pipeline
def execute_combination(request):
    """Run the full transfer-learning ensemble pipeline and render results.

    Loads the uploaded training/prediction CSVs plus a fixed validation set,
    reweights the training data with Kernel Mean Matching, trains three
    gradient-boosting classifiers, elects a per-class "leader" model by
    validation F1 score, predicts the test set with the combined model,
    saves the predictions as JSON, and renders the visualization page.
    """
    global train_file_name
    global predict_file_name

    # ============================ Preprocessing ============================ #
    # Read the CSV files (portable paths; the original hard-coded '\\').
    df = pd.read_csv(os.path.join(settings.MEDIA_ROOT, 'csv_dir', train_file_name))
    df1 = pd.read_csv(os.path.join(settings.STATIC_ROOT, 'csv_dir', 'validate_1000.csv'))
    df2 = pd.read_csv(os.path.join(settings.MEDIA_ROOT, 'csv_dir', predict_file_name))

    # Fill empty / NaN cells with the mean of their column.
    df = df.apply(lambda x: x.fillna(x.mean()), axis=0)
    df1 = df1.apply(lambda x: x.fillna(x.mean()), axis=0)
    df2 = df2.apply(lambda x: x.fillna(x.mean()), axis=0)

    # Columns 1..107 are features; the last column is the label.
    # NOTE(review): the CSV layout (sample_id first, 107 features, label
    # last) is assumed from this slicing — confirm against the data files.
    Xtrain = df.iloc[:, 1:108]
    Ytrain = df.iloc[:, -1]
    Xva = df1.iloc[:, 1:108]
    Yva = df1.iloc[:, -1]
    Xtest = df2.iloc[:, 1:108]

    # Dataset statistics shown on the visualization page.
    sample_size_train = len(Xtrain)            # rows: number of samples
    feature_quantity_train = len(Xtrain.columns)  # columns: number of features
    sample_size_test = len(Xtest)
    feature_quantity_test = len(Xtest.columns)

    # Kernel Mean Matching: reweight training samples toward the validation
    # distribution (covariate-shift correction); beta is ns x 1.
    kmm = KMM(kernel_type='rbf', B=1)
    beta = kmm.fit(Xtrain, Xva)
    Xs_new = beta * pd.DataFrame(Xtrain)

    # ========================= Train base classifiers ====================== #
    xg = xgb.XGBClassifier(seed=0)
    xg.fit(Xs_new, Ytrain)
    xg_f1 = f1_score(Yva, xg.predict(Xva), average=None)

    cb = cbt.CatBoostClassifier(eval_metric='AUC')
    cb.fit(Xs_new, Ytrain)
    cb_f1 = f1_score(Yva, cb.predict(Xva), average=None)

    lg = lgb.LGBMClassifier()
    lg.fit(Xs_new, Ytrain)
    lg_f1 = f1_score(Yva, lg.predict(Xva), average=None)

    # ================================ Predict ============================== #
    # For every class pick the classifier with the best validation F1 as the
    # leader model for that class.
    model = []
    for i in range(len(lg_f1)):
        best = max(lg_f1[i], xg_f1[i], cb_f1[i])
        if best == lg_f1[i]:
            model.append(lg)
        elif best == xg_f1[i]:
            model.append(xg)
        else:
            model.append(cb)

    yp = our_model(Xtest, m1=lg, m2=xg, m3=cb, model=model)

    # ============================== Visualization ========================== #
    counter = Counter(yp)
    unique = sorted(counter)                   # sorted class labels
    counts = [counter[k] for k in unique]      # per-class counts, same order
    pie_data = dict(counter)                   # echarts-friendly mapping

    # Bug fix: the predictions belong to the TEST set (df2), so the sample
    # ids must come from df2 — the original zipped yp against the training
    # frame's ids, silently mismatching/truncating the output.
    sample_ids = df2['sample_id'].astype(int).tolist()
    output_dict = {str(sample_id): prediction
                   for sample_id, prediction in zip(sample_ids, yp)}

    output_file = 'output_combination.json'
    # encoding='utf-8' is required since ensure_ascii=False may emit
    # non-ASCII characters.
    with open(os.path.join(settings.MEDIA_ROOT, 'result', output_file),
              'w', encoding='utf-8') as f:
        json.dump(output_dict, f, ensure_ascii=False)

    # Hand the results to the visualization template.
    context = {
        'unique': unique,
        'counts': counts,
        'pie_data': pie_data,
        'sample_size_train': sample_size_train,
        'feature_quantity_train': feature_quantity_train,
        'sample_size': sample_size_test,
        'feature_quantity': feature_quantity_test,
        'describe': '测试集',
    }

    response = render(request, 'combination/combination-visualization.html', context=context)
    # Robustness: default to 0 when the counter cookie is absent instead of
    # raising TypeError on int(None).
    combination_count = int(request.COOKIES.get('combination', 0)) + 1
    response.set_cookie('combination', combination_count)

    return response


# Download the prediction results
def download(request):
    """Serve the saved prediction JSON as a file-download response."""
    # Build the path portably (the original hard-coded a Windows backslash).
    filepath = os.path.join(settings.MEDIA_ROOT, 'result', 'output_combination.json')

    with open(filepath, 'rb') as f:
        filedata = f.read()

    # Content-Disposition: attachment makes the browser pop a save dialog.
    response = HttpResponse(filedata, content_type='application/json')
    response['Content-Disposition'] = 'attachment; filename="output_combination.json"'

    return response


def kernel(ker, X1, X2, gamma):
    """Compute a pairwise kernel matrix between X1 and X2.

    :param ker: 'linear' or 'rbf'; any other value yields None
    :param X1: first sample matrix
    :param X2: second sample matrix, or None to compare X1 with itself
    :param gamma: bandwidth for the rbf kernel (unused for linear)
    :return: the kernel matrix, or None for an unknown kernel type
    """
    A = np.asarray(X1)
    B = None if X2 is None else np.asarray(X2)
    if ker == 'linear':
        if B is None:
            return sklearn.metrics.pairwise.linear_kernel(A)
        return sklearn.metrics.pairwise.linear_kernel(A, B)
    if ker == 'rbf':
        # rbf_kernel treats Y=None as "X1 vs itself", matching the linear case.
        return sklearn.metrics.pairwise.rbf_kernel(A, B, gamma)
    return None


class KMM:
    """Kernel Mean Matching: estimate density ratios Pt(x)/Ps(x).

    Solves the quadratic program of Huang et al., "Correcting Sample
    Selection Bias by Unlabeled Data" (NIPS 2006), so that the reweighted
    source distribution matches the target distribution in RKHS mean.
    """

    def __init__(self, kernel_type='linear', gamma=1.0, B=1.0, eps=None):
        """
        Initialization function
        :param kernel_type: 'linear' | 'rbf'
        :param gamma: kernel bandwidth for rbf kernel
        :param B: upper bound for each beta coefficient
        :param eps: slack on the mean of beta; defaults to B / sqrt(ns)
                    computed at fit time
        """
        self.kernel_type = kernel_type
        self.gamma = gamma
        self.B = B
        self.eps = eps

    def fit(self, Xs, Xt):
        """
        Fit source and target using KMM (compute the coefficients)
        :param Xs: ns * dim source samples
        :param Xt: nt * dim target samples
        :return: ns * 1 array of coefficients (Pt / Ps), "beta" in the paper
        """
        ns = Xs.shape[0]
        nt = Xt.shape[0]
        # Idiom fix: `is None` instead of `== None`. NOTE(review): the
        # computed default is cached on self, so a second fit() with a
        # different ns reuses the first eps — kept for backward compatibility.
        if self.eps is None:
            self.eps = self.B / np.sqrt(ns)

        # Quadratic term: kernel matrix over the source samples.
        K = kernel(self.kernel_type, Xs, None, self.gamma)
        # Linear term: cross-kernel with the target, scaled by ns / nt.
        kappa = np.sum(kernel(self.kernel_type, Xs, Xt, self.gamma)
                       * float(ns) / float(nt), axis=1)

        K = matrix(K.astype(np.double))
        kappa = matrix(kappa.astype(np.double))
        # Inequality constraints G beta <= h encode:
        #   |sum(beta) - ns| <= ns * eps   and   0 <= beta_i <= B.
        G = matrix(np.r_[np.ones((1, ns)), -
                   np.ones((1, ns)), np.eye(ns), -np.eye(ns)])
        h = matrix(np.r_[ns * (1 + self.eps), ns * (self.eps - 1),
                   self.B * np.ones((ns,)), np.zeros((ns,))])

        # Minimize (1/2) beta' K beta - kappa' beta subject to G beta <= h.
        sol = solvers.qp(K, -kappa, G, h)
        beta = np.array(sol['x'])
        return beta

def our_model(X_test, m1, m2, m3, model):
    """Combine three classifiers per-sample using per-class leader models.

    :param X_test: DataFrame of test samples (one row per sample)
    :param m1: first base model (LightGBM)
    :param m2: second base model (XGBoost; its predict takes validate_features)
    :param m3: third base model (CatBoost)
    :param model: list mapping class index -> leader model for that class
    :return: list of final predicted class labels, one per row of X_test
    """
    yp = []

    for _, row in X_test.iterrows():
        # Generalization: row.values is already 1-D, so any feature count
        # works (the original hard-coded reshape((107,))).
        xi = row.values.reshape(1, -1)

        y_pred1 = int(m1.predict(xi)[0])
        y_pred2 = int(m2.predict(xi, validate_features=False)[0])
        y_pred3 = int(m3.predict(xi)[0])

        # Highest class probability (confidence) of each model for this row.
        y_pred_p1 = np.max(m1.predict_proba(xi))
        y_pred_p2 = np.max(m2.predict_proba(xi))
        y_pred_p3 = np.max(m3.predict_proba(xi))

        if y_pred1 == y_pred2 == y_pred3:
            # Unanimous vote: take the shared prediction.
            y_pred = y_pred1

        # Bug fix: the original chained `y_pred1 != y_pred2 != y_pred3`,
        # which is True for e.g. (0, 1, 0); use a set to test "all distinct".
        elif len({y_pred1, y_pred2, y_pred3}) == 3:
            # Bug fix: these lists were created once OUTSIDE the row loop in
            # the original, so leaders accumulated across samples and the
            # len()==1 logic degraded after the first disagreement. Reset
            # them for every row.
            leaders = []
            pred_l = []
            pro_l = []

            # Keep each prediction whose class's leader model is exactly the
            # model that produced it.
            if model[y_pred1] is m1:
                leaders.append(m1)
                pred_l.append(y_pred1)
                pro_l.append(y_pred_p1)
            if model[y_pred2] is m2:
                leaders.append(m2)
                pred_l.append(y_pred2)
                pro_l.append(y_pred_p2)
            if model[y_pred3] is m3:
                leaders.append(m3)
                pred_l.append(y_pred3)
                pro_l.append(y_pred_p3)

            if len(leaders) == 1:
                # Exactly one leader backs its own class: trust it.
                y_pred = pred_l[0]
            else:
                # No leader, or several: pick the most confident prediction.
                # Bug fix: the original's zero-leader branch assigned pro_l
                # but never set y_pred, leaving a stale (or undefined) value.
                if not pro_l:
                    pro_l = [y_pred_p1, y_pred_p2, y_pred_p3]
                max_p = max(pro_l)
                if max_p == y_pred_p1:
                    y_pred = y_pred1
                elif max_p == y_pred_p2:
                    y_pred = y_pred2
                else:
                    y_pred = y_pred3

        else:
            # Exactly two models agree: defer to the leader model of the
            # majority class (XGBoost's predict needs validate_features=False).
            n = mode([y_pred1, y_pred2, y_pred3])
            if model[n] is m2:
                y_pred = model[n].predict(xi, validate_features=False)
            else:
                y_pred = model[n].predict(xi)
            y_pred = int(y_pred[0])

        yp.append(y_pred)  # collect the final prediction for this sample
    return yp


