from django.shortcuts import render

from django.http.response import HttpResponse
from django.http import JsonResponse
from django.conf import settings
import os
import matplotlib
from sklearn.preprocessing import MinMaxScaler
from sklearn.exceptions import ConvergenceWarning
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import time
import random

import pandas as pd  # 加载 pandas 模块
import warnings
from matplotlib import pyplot as plt  # 加载绘图模块
# 集成方法分类器
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
# 高斯过程分类器
from sklearn.gaussian_process import GaussianProcessClassifier
# 广义线性分类器
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
# K近邻分类器
from sklearn.neighbors import KNeighborsClassifier
# 朴素贝叶斯分类器
from sklearn.naive_bayes import GaussianNB
# 神经网络分类器
from sklearn.neural_network import MLPClassifier
# 决策树分类器
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
# 支持向量机分类器
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split  # 导入数据集切分模块

from sklearn.metrics import accuracy_score

from matplotlib.colors import ListedColormap  # 加载色彩模块
import numpy as np  # 导入数值计算模块

from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

from sklearn.datasets import make_moons, make_circles, make_classification
# Create your views here.
def LinearRegression(request):
    """Fit an ordinary-least-squares model on one randomly chosen feature
    of the diabetes dataset, plot test points against the fitted line,
    save the figure under ./static/ and return its filename as JSON.

    The feature index is drawn from 2..9 so repeated requests show
    different feature/target relationships.
    """
    # Load the diabetes dataset
    num = random.randint(2, 9)
    print(num)
    diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)

    # Use only one (randomly chosen) feature; np.newaxis keeps X 2-D
    # as required by scikit-learn estimators.
    diabetes_X = diabetes_X[:, np.newaxis, num]

    # Split the data into training/testing sets (last 20 samples held out)
    diabetes_X_train = diabetes_X[:-20]
    diabetes_X_test = diabetes_X[-20:]

    # Split the targets the same way
    diabetes_y_train = diabetes_y[:-20]
    diabetes_y_test = diabetes_y[-20:]

    # Create and train the linear regression model
    regr = linear_model.LinearRegression()
    regr.fit(diabetes_X_train, diabetes_y_train)

    # Make predictions using the testing set
    diabetes_y_pred = regr.predict(diabetes_X_test)
    timestamp = str(time.time())
    # The coefficients
    print('Coefficients: \n', regr.coef_)
    # The mean squared error
    print('Mean squared error: %.2f'
          % mean_squared_error(diabetes_y_test, diabetes_y_pred))
    # The coefficient of determination: 1 is perfect prediction
    print('Coefficient of determination: %.2f'
          % r2_score(diabetes_y_test, diabetes_y_pred))
    plt.figure(figsize=(8, 6))
    # Plot outputs
    plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
    plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)

    # Hide tick marks BEFORE saving — the original called savefig first,
    # so the ticks were still visible in the exported image.
    plt.xticks(())
    plt.yticks(())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()

    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)


def Ridge(request):
    """Contrast ordinary least squares with ridge regression (alpha=0.1)
    on a tiny two-point training set: gray lines show fits on six noisy
    perturbations of the data (estimator variance), the blue line the fit
    on the clean data.  Saves the side-by-side figure under ./static/ and
    returns its filename as JSON.
    """
    X_train = np.c_[.5, 1].T
    y_train = [.5, 1]
    X_test = np.c_[0, 2].T

    classifiers = dict(ols=linear_model.LinearRegression(),
                       ridge=linear_model.Ridge(alpha=.1))
    fig, ax = plt.subplots(1, 2, figsize=(16, 6))
    # enumerate replaces the manual `i = i + 1;` counter of the original
    for i, (name, clf) in enumerate(classifiers.items()):
        # Fit on six noisy variants of the training data to visualize
        # how sensitive each estimator is to perturbations.
        for _ in range(6):
            this_X = .1 * np.random.normal(size=(2, 1)) + X_train

            clf.fit(this_X, y_train)

            ax[i].plot(X_test, clf.predict(X_test), color='gray')
            ax[i].scatter(this_X, y_train, s=3, c='gray', marker='o', zorder=10)

        # Reference fit on the clean data (blue line, red markers)
        clf.fit(X_train, y_train)
        ax[i].plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
        ax[i].scatter(X_train, y_train, s=30, c='red', marker='+', zorder=10)

        ax[i].set_title(name)
        ax[i].set_xlim(0, 2)
        ax[i].set_ylim((0, 1.6))
        ax[i].set_xlabel('X')
        ax[i].set_ylabel('y')

    # tight_layout once after both axes are fully drawn (the original
    # re-ran it on every loop iteration to the same end)
    fig.tight_layout()
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()

    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)

def Linear_discriminant_analysis(request):
    """Project the iris dataset to 2-D with PCA and with LDA, draw the
    two scatter plots side by side, save the figure under ./static/ and
    return its filename as JSON.
    """
    iris = datasets.load_iris()
    features = iris.data
    labels = iris.target
    target_names = iris.target_names

    # Unsupervised projection: principal component analysis
    pca = PCA(n_components=2)
    pca_points = pca.fit(features).transform(features)

    # Supervised projection: linear discriminant analysis
    lda = LinearDiscriminantAnalysis(n_components=2)
    lda_points = lda.fit(features, labels).transform(features)

    # Report how much variance the two PCA axes capture
    print('explained variance ratio (first two components): %s'
          % str(pca.explained_variance_ratio_))

    palette = ['navy', 'turquoise', 'darkorange']
    lw = 2
    plt.figure(figsize=(16, 6))

    plt.subplot(1, 2, 1)
    for cls, (color, target_name) in enumerate(zip(palette, target_names)):
        mask = labels == cls
        plt.scatter(pca_points[mask, 0], pca_points[mask, 1], color=color,
                    alpha=.8, lw=lw, label=target_name)
    plt.legend(loc='best', shadow=False, scatterpoints=1)
    plt.title('PCA of IRIS dataset')

    plt.subplot(1, 2, 2)
    for cls, (color, target_name) in enumerate(zip(palette, target_names)):
        mask = labels == cls
        plt.scatter(lda_points[mask, 0], lda_points[mask, 1], alpha=.8,
                    color=color, label=target_name)
    plt.legend(loc='best', shadow=False, scatterpoints=1)
    plt.title('LDA of IRIS dataset')

    plt.tight_layout()
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()
    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)

def k_means(request):
    """Compare full-batch KMeans with MiniBatchKMeans on a synthetic
    3-blob dataset: two panels show each clustering (with train time and
    inertia), a third highlights points the two algorithms labeled
    differently.  Saves the figure under ./static/ and returns its
    filename as JSON.
    """
    batch_size = 45
    centers = [[1, 1], [-1, -1], [1, -1]]
    n_clusters = len(centers)
    # NOTE(review): make_blobs is unseeded here, so every request draws a
    # fresh dataset.
    X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)

    # #############################################################################
    # Compute clustering with Means

    # NOTE: this local deliberately shadows the view function's own name;
    # it is only used within this request.
    k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
    t0 = time.time()
    k_means.fit(X)
    t_batch = time.time() - t0

    # #############################################################################
    # Compute clustering with MiniBatchKMeans

    mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
                          n_init=10, max_no_improvement=10, verbose=0)
    t0 = time.time()
    mbk.fit(X)
    t_mini_batch = time.time() - t0

    # #############################################################################
    # Plot result

    fig = plt.figure(figsize=(8, 3))
    fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
    colors = ['#4EACC5', '#FF9C34', '#4E9A06']

    # We want to have the same colors for the same cluster from the
    # MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
    # closest one.
    k_means_cluster_centers = k_means.cluster_centers_
    order = pairwise_distances_argmin(k_means.cluster_centers_,
                                      mbk.cluster_centers_)
    mbk_means_cluster_centers = mbk.cluster_centers_[order]

    # Re-derive labels from the (aligned) centers so both label sets use
    # the same cluster indexing.
    k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
    mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)

    # KMeans
    ax = fig.add_subplot(1, 3, 1)
    for k, col in zip(range(n_clusters), colors):
        my_members = k_means_labels == k
        cluster_center = k_means_cluster_centers[k]
        ax.plot(X[my_members, 0], X[my_members, 1], 'w',
                markerfacecolor=col, marker='.')
        ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
                markeredgecolor='k', markersize=6)
    ax.set_title('KMeans')
    ax.set_xticks(())
    ax.set_yticks(())
    plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
        t_batch, k_means.inertia_))

    # MiniBatchKMeans
    ax = fig.add_subplot(1, 3, 2)
    for k, col in zip(range(n_clusters), colors):
        my_members = mbk_means_labels == k
        cluster_center = mbk_means_cluster_centers[k]
        ax.plot(X[my_members, 0], X[my_members, 1], 'w',
                markerfacecolor=col, marker='.')
        ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
                markeredgecolor='k', markersize=6)
    ax.set_title('MiniBatchKMeans')
    ax.set_xticks(())
    ax.set_yticks(())
    plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
             (t_mini_batch, mbk.inertia_))

    # Initialise the different array to all False
    # (no label equals 4, so this is an all-False boolean mask)
    different = (mbk_means_labels == 4)
    ax = fig.add_subplot(1, 3, 3)

    # Mark every point whose cluster membership disagrees between the
    # two algorithms for any cluster index.
    for k in range(n_clusters):
        different += ((k_means_labels == k) != (mbk_means_labels == k))

    identic = np.logical_not(different)
    ax.plot(X[identic, 0], X[identic, 1], 'w',
            markerfacecolor='#bbbbbb', marker='.')
    ax.plot(X[different, 0], X[different, 1], 'w',
            markerfacecolor='m', marker='.')
    ax.set_title('Difference')
    ax.set_xticks(())
    ax.set_yticks(())
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()
    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)

def svc(request):
    """Illustrate SVC's ``break_ties`` option on a random blob dataset:
    two panels show the predicted class regions and the pairwise
    one-vs-one separating lines with break_ties off and on.  Saves the
    figure under ./static/ and returns its filename as JSON.
    """
    # NOTE(review): make_blobs() is unseeded, so each request draws a new
    # dataset (default: 3 centers, 100 samples).
    X, y = make_blobs()

    fig, sub = plt.subplots(1, 2, figsize=(8, 3))
    titles = ("break_ties = False",
              "break_ties = True")

    for break_ties, title, ax in zip((False, True), titles, sub.flatten()):

        # Linear one-vs-rest SVC; only break_ties differs between panels.
        svm = SVC(kernel="linear", C=1, break_ties=break_ties,
                  decision_function_shape='ovr').fit(X, y)

        xlim = [X[:, 0].min(), X[:, 0].max()]
        ylim = [X[:, 1].min(), X[:, 1].max()]

        # 1000x1000 evaluation grid covering the data range
        xs = np.linspace(xlim[0], xlim[1], 1000)
        ys = np.linspace(ylim[0], ylim[1], 1000)
        xx, yy = np.meshgrid(xs, ys)

        pred = svm.predict(np.c_[xx.ravel(), yy.ravel()])

        # One fixed color per class, sampled from the Accent colormap
        colors = [plt.cm.Accent(i) for i in [0, 4, 7]]

        points = ax.scatter(X[:, 0], X[:, 1], c=y, cmap="Accent")
        # Class index pairs for the three pairwise separating lines below
        classes = [(0, 1), (0, 2), (1, 2)]
        line = np.linspace(X[:, 1].min() - 5, X[:, 1].max() + 5)
        # Background class map; extent lists ylim reversed to match
        # imshow's top-down row order.
        ax.imshow(-pred.reshape(xx.shape), cmap="Accent", alpha=.2,
                  extent=(xlim[0], xlim[1], ylim[1], ylim[0]))

        # Each pairwise boundary: solve coef . (x, y) + intercept = 0 for
        # x given y, then draw it solid in one class color, dashed in the
        # other.
        for coef, intercept, col in zip(svm.coef_, svm.intercept_, classes):
            line2 = -(line * coef[1] + intercept) / coef[0]
            ax.plot(line2, line, "-", c=colors[col[0]])
            ax.plot(line2, line, "--", c=colors[col[1]])
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        ax.set_title(title)
        ax.set_aspect("equal")
    fig.tight_layout()
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()
    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)
def mlpclassifier(request):
    """Train 16 different scikit-learn classifiers on a 2-feature CSV
    dataset (columns X, Y, CLASS), draw each model's decision boundary
    with its test accuracy in a 4x4 grid, save the figure under
    ./static/ and return its filename as JSON.
    """
    filePath = os.path.join(settings.MEDIA_ROOT, "excel/MLPClassifier_data.csv")
    data = pd.read_csv(
        filePath, header=0)

    # Build the candidate classifier models
    models = [
        AdaBoostClassifier(),
        BaggingClassifier(),
        ExtraTreesClassifier(),
        GradientBoostingClassifier(),
        RandomForestClassifier(),
        GaussianProcessClassifier(),
        PassiveAggressiveClassifier(),
        RidgeClassifier(),
        SGDClassifier(),
        KNeighborsClassifier(),
        GaussianNB(),
        MLPClassifier(),
        DecisionTreeClassifier(),
        ExtraTreeClassifier(),
        SVC(),
        LinearSVC()
    ]

    # Display names, in the same order as the models above
    classifier_Names = ['AdaBoost', 'Bagging', 'ExtraTrees',
                        'GradientBoosting', 'RandomForest', 'GaussianProcess',
                        'PassiveAggressive', 'Ridge', 'SGD',
                        'KNeighbors', 'GaussianNB', 'MLP',
                        'DecisionTree', 'ExtraTree', 'SVC', 'LinearSVC']

    feature = data[['X', 'Y']]  # feature columns
    target = data['CLASS']  # label column
    X_train, X_test, y_train, y_test = train_test_split(
        feature, target, test_size=.3)  # split the dataset (70/30, unseeded)

    # Plotting setup
    i = 1  # running subplot index for the 4x4 grid
    cm = plt.cm.Reds  # colormap for the decision-boundary contours
    cm_color = ListedColormap(['red', 'yellow'])  # colormap for train/test scatter

    # Rasterize the feature plane into a grid (step 0.1, padded by 0.5)
    x_min, x_max = data['X'].min() - .5, data['X'].max() + .5
    y_min, y_max = data['Y'].min() - .5, data['Y'].max() + .5

    xx, yy = np.meshgrid(np.arange(x_min, x_max, .1),
                         np.arange(y_min, y_max, .1))

    # Iterate over the models
    fig = plt.figure(figsize=(18, 12))

    for name, model in zip(classifier_Names, models):
        ax = plt.subplot(4, 4, i)  # draw into the next cell of the 4x4 grid

        model.fit(X_train, y_train)  # train the model
        pre_labels = model.predict(X_test)  # predict on the test split
        score = accuracy_score(y_test, pre_labels)  # test accuracy

        # Choose the decision-surface score depending on what the model
        # exposes: margin (decision_function) or positive-class probability.
        if hasattr(model, "decision_function"):
            Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Draw the decision boundary as filled contours
        Z = Z.reshape(xx.shape)

        ax.contourf(xx, yy, Z, cmap=cm, alpha=.6)

        # Scatter the training set (plain) and test set (black edges)
        ax.scatter(X_train['X'], X_train['Y'], c=y_train, cmap=cm_color)
        ax.scatter(X_test['X'], X_test['Y'], c=y_test,
                   cmap=cm_color, edgecolors='black')

        # Per-subplot cosmetics: limits, no ticks, "name | accuracy" title
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title('%s | %.2f' % (name, score))
        i += 1
    fig.tight_layout()
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()
    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)
def nlpclassifier_alphas(request):
    """Show how MLPClassifier's L2 penalty (alpha, log-spaced 1e-5..1e3)
    shapes decision boundaries on three toy datasets (moons, circles, a
    linearly separable set).  Each grid row is a dataset: raw data first,
    then one subplot per alpha with its test accuracy.  Saves the figure
    under ./static/ and returns its filename as JSON.
    """
    h = .02  # step size in the mesh

    alphas = np.logspace(-5, 3, 5)
    names = ['alpha ' + str(i) for i in alphas]

    # One scaler+MLP pipeline per alpha value
    classifiers = []
    for i in alphas:
        classifiers.append(make_pipeline(
            StandardScaler(),
            MLPClassifier(solver='lbfgs', alpha=i,
                          random_state=1, max_iter=2000,
                          early_stopping=True,
                          hidden_layer_sizes=[100, 100])
        ))

    # Linearly separable dataset: seeded classification data plus a bit
    # of uniform noise.
    X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                               random_state=0, n_clusters_per_class=1)
    rng = np.random.RandomState(2)
    X += 2 * rng.uniform(size=X.shape)
    linearly_separable = (X, y)

    # NOTE: this local shadows the `datasets` module imported at file top
    # for the rest of this function.
    datasets = [make_moons(noise=0.3, random_state=0),
                make_circles(noise=0.2, factor=0.5, random_state=1),
                linearly_separable]

    figure = plt.figure(figsize=(17, 9))
    i = 1
    # Iterate over the datasets
    for X, y in datasets:
        # Standardize the dataset and split it into train/test parts
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

        x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
        y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))

        # First column: plot just the dataset itself
        cm = plt.cm.RdBu
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        # Plot the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and the testing points (semi-transparent)
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        i += 1

        # Iterate over the classifiers (one subplot per alpha)
        for name, clf in zip(names, classifiers):
            ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
            clf.fit(X_train, y_train)
            score = clf.score(X_test, y_test)

            # Plot the decision boundary: assign a color to each point of
            # the mesh grid [x_min, x_max] x [y_min, y_max].
            if hasattr(clf, "decision_function"):
                Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
            else:
                Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

            # Put the result into a color plot
            Z = Z.reshape(xx.shape)
            ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

            # Plot the training points
            ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
                       edgecolors='black', s=25)
            # and the testing points
            ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                       alpha=0.6, edgecolors='black', s=25)

            ax.set_xlim(xx.min(), xx.max())
            ax.set_ylim(yy.min(), yy.max())
            ax.set_xticks(())
            ax.set_yticks(())
            ax.set_title(name)
            # Accuracy in the bottom-right corner, leading zero stripped
            ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                    size=15, horizontalalignment='right')
            i += 1

    figure.tight_layout()
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()
    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)
def nlpclassifier_loss(request):
    """Compare MLPClassifier training-loss curves for seven SGD/Adam
    learning strategies across four small datasets (iris, digits,
    circles, moons) in a 2x2 grid.  Saves the figure under ./static/ and
    returns its filename as JSON.
    """
    # One hyperparameter dict per training strategy, paired by position
    # with `labels` and `plot_args` below.
    params = [
        {
            "solver": "sgd",
            "learning_rate": "constant",
            "momentum": 0,
            "learning_rate_init": 0.2,
        },
        {
            "solver": "sgd",
            "learning_rate": "constant",
            "momentum": 0.9,
            "nesterovs_momentum": False,
            "learning_rate_init": 0.2,
        },
        {
            "solver": "sgd",
            "learning_rate": "constant",
            "momentum": 0.9,
            "nesterovs_momentum": True,
            "learning_rate_init": 0.2,
        },
        {
            "solver": "sgd",
            "learning_rate": "invscaling",
            "momentum": 0,
            "learning_rate_init": 0.2,
        },
        {
            "solver": "sgd",
            "learning_rate": "invscaling",
            "momentum": 0.9,
            "nesterovs_momentum": False,
            "learning_rate_init": 0.2,
        },
        {
            "solver": "sgd",
            "learning_rate": "invscaling",
            "momentum": 0.9,
            "nesterovs_momentum": True,
            "learning_rate_init": 0.2,
        },
        {"solver": "adam", "learning_rate_init": 0.01}
    ]

    # Legend labels, one per entry in `params`
    labels = [
        "constant learning-rate",
        "constant with momentum",
        "constant with Nesterov's momentum",
        "inv-scaling learning-rate",
        "inv-scaling with momentum",
        "inv-scaling with Nesterov's momentum",
        "adam"
    ]

    # Line style per strategy: color encodes momentum variant, dashes
    # encode the learning-rate schedule.
    plot_args = [
        {"c": "red", "linestyle": "-"},
        {"c": "green", "linestyle": "-"},
        {"c": "blue", "linestyle": "-"},
        {"c": "red", "linestyle": "--"},
        {"c": "green", "linestyle": "--"},
        {"c": "blue", "linestyle": "--"},
        {"c": "black", "linestyle": "-"}
    ]

    def plot_on_dataset(X, y, ax, name):
        # for each dataset, plot learning for each learning strategy
        print("\nlearning on dataset %s" % name)
        ax.set_title(name)

        # Scale features to [0, 1] so all strategies start comparably
        X = MinMaxScaler().fit_transform(X)
        mlps = []
        if name == "digits":
            # digits is larger but converges fairly quickly
            max_iter = 15
        else:
            max_iter = 400

        for label, param in zip(labels, params):
            print("training: %s" % label)
            mlp = MLPClassifier(random_state=0, max_iter=max_iter, **param)

            # some parameter combinations will not converge as can be seen on the
            # plots so they are ignored here
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore", category=ConvergenceWarning, module="sklearn"
                )
                print(len(mlps))
                mlp.fit(X, y)

            mlps.append(mlp)
            print("Training set score: %f" % mlp.score(X, y))
            print("Training set loss: %f" % mlp.loss_)

        # One loss curve per trained strategy, styled per `plot_args`
        for mlp, label, args in zip(mlps, labels, plot_args):
            ax.plot(mlp.loss_curve_, label=label, **args)

    fig, axes = plt.subplots(2, 2, figsize=(13, 9))
    # load / generate some toy datasets
    iris = datasets.load_iris()
    X_digits, y_digits = datasets.load_digits(return_X_y=True)
    data_sets = [
        (iris.data, iris.target),
        (X_digits, y_digits),
        datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
        datasets.make_moons(noise=0.3, random_state=0),
    ]

    # Draw one panel per dataset
    for ax, data, name in zip(
            axes.ravel(), data_sets, ["iris", "digits", "circles", "moons"]
    ):
        plot_on_dataset(*data, ax=ax, name=name)

    fig.subplots_adjust(wspace=0.1)
    # Shared legend taken from the last axis drawn
    fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center")
    plt.subplots_adjust(left=0.04, right=0.96, bottom=0.04, top=0.88)
    timestamp = str(time.time())
    plt.savefig("./static/" + timestamp + ".png")
    plt.clf()
    return JsonResponse({"code": 200, "message": "success", "data": timestamp + ".png"}, safe=False)
def Login(request):
    """Mock login endpoint: unconditionally returns a hard-coded
    super-admin profile with static tokens (no credential check)."""
    profile = {
        "id": "1",
        "userName": "super",
        "nickname": "超级管理员大人",
        "avatar": "https://s2.loli.net/2023/08/26/6Xh3wuCV2eMZnEp.png",
        "role": "super",
        "accessToken": "C4e9aa49-599a-e6AD-CdaD-F6Dcbc7F038c",
        "refreshToken": "7eFFdf99-c4b9-A8CF-f5DA-482D80e6A9dD",
    }
    return JsonResponse({"code": 200, "message": "sss", "data": profile}, safe=False)
