import base64
import datetime
import os
from io import BytesIO

import requests
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
import matplotlib
import numpy
from sklearn.cluster import KMeans
import seaborn as sns
import missingno as msno
from django.http import HttpResponse, FileResponse
from django.shortcuts import render
from django.utils.encoding import escape_uri_path
from pandas import read_csv
from math import log
import operator
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

from blog.models import Catagory

matplotlib.use('Agg')  # headless backend: render charts without a display server

# Annotation styles for matplotlib decision-tree drawings (not used in this chunk).
# NOTE(review): fc='10' is outside matplotlib's '0'..'1' grayscale string range --
# looks like a typo for '0.1'; confirm before any tree-plot code relies on it.
decisionNode = dict(boxstyle='sawtooth', fc='10')
leafNode = dict(boxstyle='round4', fc='0.8')
arrow_args = dict(arrowstyle='<|-')


# Upload page for missing-value handling.
def upload_missfile(request):
    """Render the missing-value upload page.

    locals() is the template context, so the name 'allcategory' is part of
    the template contract.
    """
    allcategory = Catagory.objects.all()  # all categories from the Catagory table
    return render(request, 'miss_values.html', locals())


# Save the uploaded CSV and profile its missing values.
def show_missdata(request):
    """Persist an uploaded CSV to media/upload and render its missing-value profile.

    Renders miss_values.html with locals() as context, so the local variable
    names below (test_data, miss_data, ...) are relied on by the template.
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        with open(file_path, 'wb') as fp:  # 'with' closes the file; no manual close needed
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        # Self-request only logs a rough processing time; never let it break the
        # upload (it can 500 or deadlock a single-threaded dev server).
        try:
            r = requests.get("http://127.0.0.1:8000{}".format(request.path), timeout=5)
            print('处理时间', r.elapsed.microseconds / 1000)
        except requests.RequestException:
            pass
        data_set = read_csv(file_path, encoding='UTF-8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Header + rows as plain lists for the template table.
        test_data = [list(line) for line in new_data]
        miss_data = dict(data_set.isnull().sum())  # per-column missing counts
        # Number of rows containing at least one missing value.
        miss_rows = data_set.isnull().any(axis=1).sum()
        dataSize = data_set.shape
        # Per-column flag: does the column contain any missing value?
        miss_is_or_not = dict(data_set.isnull().any(axis=0))
        # Per-column missing ratio.
        miss_ratios = dict(data_set.isnull().sum(axis=0) / data_set.shape[0])
        # Share of rows with any missing value, as a truncated percent string.
        miss_ratio = str((data_set.isnull().any(axis=1).sum() / data_set.shape[0]) * 100)[:5] + '%'
        return render(request, 'miss_values.html', locals())
    err_message = '上传失败'
    # BUG FIX: the non-POST branch previously returned None (HTTP 500 in Django).
    return render(request, 'miss_values.html', locals())


# Missing-value handling for the most recently uploaded CSV.
def miss_handle(request):
    """Apply a missing-value strategy to the latest uploaded CSV and save the result.

    POST parameters:
      miss_values  -- strategy: 'mean' | 'median' | 'mode' | 'delete_miss' | 'inter'
      chart_values -- optional missingness chart: 'matrix' | 'bar' | 'heatmap'
      title        -- chart title

    Renders miss_values.html with locals(), so the local variable names below
    are part of the template contract -- do not rename them.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # Most recently *written* file sorts last; mtime, not atime, because access
    # times are unreliable on relatime-mounted filesystems.
    filename.sort(key=lambda fn: os.path.getmtime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='utf8')
    dataSize = df.shape
    if request.method == "POST":
        select = request.POST.get("miss_values", None)
        chart_type = request.POST.get("chart_values", None)
        chart_title = request.POST.get('title', None)
        # Optional missingness chart as a base64 data URI for the template.
        if chart_type == 'matrix':
            src = matrix(df, chart_title)
        elif chart_type == 'bar':
            src = bar(df, chart_title)
        elif chart_type == 'heatmap':
            src = heatmap(df, chart_title)
        if select == 'inter':
            # Interpolation: no summary table, just a preview refresh.
            data = df.interpolate()
            new_data = data.isnull().sum()
            data1 = data.head()
            # BUG FIX: this branch previously rendered with a stale failure
            # message left over from the preceding else-clauses.
            msg = '缺失值处理成功'
            return render(request, 'miss_values.html', locals())
        # One filler per strategy; all four share the identical summary code below.
        fillers = {
            'mean': lambda d: d.fillna(d.mean(numeric_only=True)),
            'median': lambda d: d.fillna(d.median(numeric_only=True)),
            # BUG FIX: fillna(df.mode()) aligns the mode frame's RangeIndex
            # against df and only filled the first row(s); use the first modal
            # row so every missing cell is filled.
            'mode': lambda d: d.fillna(d.mode().iloc[0]),
            'delete_miss': lambda d: d.dropna(axis=0),
        }
        if select in fillers:
            data = fillers[select](df)
            miss_data = dict(data.isnull().sum())  # per-column missing counts
            # Rows still containing a missing value (should be 0 after filling).
            miss_rows = data.isnull().any(axis=1).sum()
            miss_is_or_not = dict(data.isnull().any(axis=0))  # per-column missing flag
            miss_ratios = dict(data.isnull().sum(axis=0) / data.shape[0])  # per-column ratio
            miss_ratio = str((data.isnull().any(axis=1).sum() / data.shape[0]) * 100)[:5] + '%'
            csv_file = time_stamp = '{0:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
            data2 = data.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # persist cleaned CSV
            data_title = np.array(data.columns.values)  # header row
            data_1 = data.values[:, :]
            new_data = np.vstack((data_title, data_1))
            test_data = [list(line) for line in new_data]
            msg = '缺失值处理成功'
        else:
            msg = '缺失值处理失败'
    return render(request, 'miss_values.html', locals())


# Download the most recently generated CSV.
def download(request):
    """Stream the newest file in media/csv as an attachment.

    Raises IndexError if the directory is empty (no file has been generated yet).
    """
    local_filename = "media/csv/"
    filename = os.listdir(local_filename)
    # BUG FIX: intent is "most recently generated", so sort by modification
    # time; access times are unreliable on relatime-mounted filesystems.
    filename.sort(key=lambda fn: os.path.getmtime(os.path.join(local_filename, fn))
                  if not os.path.isdir(os.path.join(local_filename, fn)) else 0)
    latest = filename[-1]
    file = open(os.path.join(local_filename, latest), 'rb')  # FileResponse closes it
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    # RFC 5987 encoding so non-ASCII filenames survive the download dialog.
    response['Content-Disposition'] = "attachment; filename*=utf-8''{}".format(escape_uri_path(latest))
    return response


# Upload page for duplicate-value handling.
def upload_repeatfile(request):
    """Render the duplicate-value upload page.

    locals() is the template context, so 'allcategory' is the name the
    template relies on.
    """
    allcategory = Catagory.objects.all()  # all categories from the Catagory table
    return render(request, 'repeat_values.html', locals())


# Save the uploaded CSV and profile its duplicate rows.
def show_repdata(request):
    """Persist an uploaded CSV and render its duplicate-row profile.

    Renders repeat_values.html with locals(); local names below are part of
    the template contract.
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        with open(file_path, 'wb') as fp:  # 'with' closes the file automatically
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='UTF-8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Header + rows as plain lists for the template table.
        test_data = [list(line) for line in new_data]
        data_size = data_set.shape  # data set shape
        # True/False counts of duplicated rows.
        repeat_is_or_not = dict(data_set.duplicated().value_counts())
        # Duplicated vs unique rows as a share of all rows.
        repeat_ratios = dict(data_set.duplicated().value_counts() / data_set.shape[0])
        return render(request, 'repeat_values.html', locals())
    message = '上传失败'
    # BUG FIX: the non-POST branch previously returned None (HTTP 500 in Django).
    return render(request, 'repeat_values.html', locals())


# Duplicate-row handling for the most recently uploaded CSV.
def repeat_handle(request):
    """Drop duplicate rows from the latest uploaded CSV, save and render the result.

    Renders repeat_values.html with locals(); local names below are part of
    the template contract.
    """
    if request.method == "POST":
        local_filename = "media/upload/"
        filename = os.listdir(local_filename)
        # mtime, not atime: we want the most recently written upload.
        filename.sort(
            key=lambda fn: os.path.getmtime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
        df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='UTF-8')
        df1 = df.drop_duplicates()
        data_size = df1.shape
        # True/False counts of duplicated rows (all False after drop_duplicates).
        repeat_is_or_not = dict(df1.duplicated().value_counts())
        # BUG FIX: repeat_is_or_not[False] raised KeyError on an empty frame;
        # after deduplication the duplicate count is by definition 0.
        true = 0
        false = repeat_is_or_not.get(False, 0)
        repeat_ratio = round((true / false * 100), 2) if false else 0.0
        repeat_ratios = dict(df1.duplicated().value_counts() / df1.shape[0]) if df1.shape[0] else {}
        csv_file = time_stamp = '{0:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
        data2 = df1.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # persist cleaned CSV
        data_title = np.array(df1.columns.values)  # header row
        data_1 = df1.values[:, :]
        new_data = np.vstack((data_title, data_1))
        test_data = [list(line) for line in new_data]
        # BUG FIX: msg was assigned inside the row loop (unset for empty data).
        msg = '重复值处理成功'
    return render(request, 'repeat_values.html', locals())


# Upload page for outlier handling.
def upload_outfile(request):
    """Render the outlier upload page.

    locals() is the template context, so 'allcategory' is the name the
    template relies on.
    """
    allcategory = Catagory.objects.all()  # all categories from the Catagory table
    return render(request, 'out_values.html', locals())


# Save the uploaded CSV and render it for outlier inspection.
def show_outdata(request):
    """Persist an uploaded CSV and render it as a table on the outlier page.

    Renders out_values.html with locals(); local names below are part of the
    template contract.
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        with open(file_path, 'wb') as fp:  # 'with' closes the file automatically
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='UTF-8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Header + rows as plain lists for the template table.
        test_data = [list(line) for line in new_data]
        data_size = data_set.shape  # data set shape
        return render(request, 'out_values.html', locals())
    message = '上传失败'
    # BUG FIX: the non-POST branch previously returned None (HTTP 500 in Django).
    return render(request, 'out_values.html', locals())


# Outlier handling for the most recently uploaded CSV.
def outdata_handle(request):
    """Remove outliers from one column of the latest uploaded CSV (1.5*IQR rule).

    POST parameters:
      outer_data   -- column to filter
      chart_values -- 'box' to draw a box plot
      title, vars  -- chart title and the column to plot

    Renders out_values.html with locals(); local names below are part of the
    template contract.
    """
    if request.method == "POST":
        local_filename = "media/upload/"
        filename = os.listdir(local_filename)
        # mtime, not atime: we want the most recently written upload.
        filename.sort(
            key=lambda fn: os.path.getmtime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
        df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='UTF-8')
        column_value = request.POST.get('outer_data', None)
        mean1 = df[column_value].quantile(q=0.25)  # lower quartile
        mean2 = df[column_value].quantile(q=0.75)  # upper quartile
        mean3 = mean2 - mean1  # interquartile range
        topnum2 = mean2 + 1.5 * mean3  # upper fence
        bottomnum2 = mean1 - 1.5 * mean3  # lower fence
        extent_normal = any(df[column_value] > topnum2)  # any value above the fence?
        less_normal = any(df[column_value] < bottomnum2)  # any value below the fence?
        # Keep only rows inside the fences.
        df1 = df[(df[column_value] >= bottomnum2) & (df[column_value] <= topnum2)]
        data_size = df1.shape
        chart_type = request.POST.get("chart_values", None)
        chart_title = request.POST.get('title', None)
        var = request.POST.get('vars', None)  # column to visualise
        if chart_type == 'box':
            src = explor_box(df, var, chart_title)
        csv_file = time_stamp = '{0:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
        data2 = df1.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # persist cleaned CSV
        data_title = np.array(df1.columns.values)  # header row
        data_1 = df1.values[:, :]
        new_data = np.vstack((data_title, data_1))
        test_data = [list(line) for line in new_data]
        # BUG FIX: these three were indented into the row loop above, so they
        # were re-assigned per row and never set for an empty frame.
        msg = '异常值处理成功'
        # The filtered frame has no outliers left; reset the flags shown to the user.
        extent_normal = False
        less_normal = False
    return render(request, 'out_values.html', locals())


# Upload page for the exploratory-analysis data set.
def upload_explor(request):
    """Render the exploratory-analysis upload page.

    locals() is the template context, so 'allcategory' is the name the
    template relies on.
    """
    allcategory = Catagory.objects.all()  # all categories from the Catagory table
    return render(request, 'exploratory.html', locals())


# Save the uploaded CSV and render its exploratory summary.
def show_explorData(request):
    """Persist an uploaded CSV and render summary statistics for exploration.

    Renders exploratory.html with locals(); local names below are part of the
    template contract.
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        with open(file_path, 'wb') as fp:  # 'with' closes the file automatically
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='UTF-8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Header + rows as plain lists for the template table.
        test_data = [list(line) for line in new_data]
        describes = data_set.describe()  # numeric-column summary
        miss_data = dict(data_set.isnull().sum())  # per-column missing counts
        miss_is_or_not = dict(data_set.isnull().any(axis=0))  # per-column missing flag
        miss_ratios = dict(data_set.isnull().sum(axis=0) / data_set.shape[0])  # per-column ratio
        # numeric_only=True: modern pandas raises on non-numeric columns otherwise.
        means = dict(data_set.mean(numeric_only=True))  # column means
        vars = dict(data_set.var(numeric_only=True))  # column variances
        stds = dict(data_set.std(numeric_only=True))  # column standard deviations
        return render(request, 'exploratory.html', locals())
    message = '上传失败'
    # BUG FIX: the non-POST branch previously returned None (HTTP 500 in Django).
    return render(request, 'exploratory.html', locals())


# Exploratory analysis on the most recently uploaded CSV.
def explor_handle(request):
    """Draw the requested chart for the latest uploaded CSV and render summaries.

    POST parameters:
      miss_values -- chart kind: 'box_chart' | 'bar' | 'violinplot' | 'var'
                     | 'var1' | 'relativity'
      title       -- chart title
      vars        -- column name, or comma-separated names for 'var'/'relativity'

    Renders exploratory.html with locals(); local names below are part of the
    template contract.  The six原 branches shared an identical 25-line summary
    stanza -- it is computed once here after the chart dispatch.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # mtime, not atime: we want the most recently written upload.
    filename.sort(key=lambda fn: os.path.getmtime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='UTF-8')
    if request.method == "POST":
        select = request.POST.get("miss_values", None)
        chart_title = request.POST.get('title', None)
        variable = request.POST.get("vars", None)
        src = None  # base64 data URI of the chart, if one was produced
        if select == 'box_chart':
            src = box(df, variable, chart_title)
        elif select == 'bar':
            sns_title = request.POST.get('title', None)
            src = imgs(df, variable, sns_title)
        elif select == 'violinplot':
            src = violinplot(df, variable)
        elif select == 'var':
            variable1 = variable.split(',')  # pairplot over several columns
            src = var_relation(df, variable1)
        elif select == 'var1':
            src = var_relation1(df, variable)  # pairplot hued by one column
        elif select == 'relativity':
            variable1 = variable.split(',')  # correlation heatmap columns
            src = relativity(df, variable1)
        if src is not None:
            # Shared summary statistics for every successful chart.
            describes = df.describe()  # numeric-column summary
            miss_data = dict(df.isnull().sum())  # per-column missing counts
            miss_is_or_not = dict(df.isnull().any(axis=0))  # per-column missing flag
            miss_ratios = dict(df.isnull().sum(axis=0) / df.shape[0])  # per-column ratio
            # numeric_only=True: modern pandas raises on non-numeric columns otherwise.
            means = dict(df.mean(numeric_only=True))  # column means
            vars = dict(df.var(numeric_only=True))  # column variances (template expects 'vars')
            stds = dict(df.std(numeric_only=True))  # column standard deviations
            csv_file = time_stamp = '{0:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
            data2 = df.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # persist CSV copy
            data_title = np.array(df.columns.values)  # header row
            data_1 = df.values[:, :]
            new_data = np.vstack((data_title, data_1))
            test_data = [list(line) for line in new_data]
            msg = '图表创建成功'
        else:
            msg = '图表创建失败'
    return render(request, 'exploratory.html', locals())


# Histogram of a single column, returned as an inline base64 PNG.
def imgs(df, vars, title='直方图'):
    """Draw a seaborn histogram of df[vars] and return it as a data URI."""
    matplotlib.use('Agg')  # headless backend: no plot window
    # CJK-capable font and proper minus-sign rendering.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    sns.distplot(df[vars])
    plt.title(title, loc='center')
    buf = BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # prevent figures stacking up between requests
    encoded = base64.encodebytes(buf.getvalue()).decode()
    return 'data:image/png;base64,' + encoded


# Missing-value bar chart rendered with missingno.
def bar(df, title='缺失值检测柱状图'):
    """Return a missingno completeness bar chart of df as a base64 data URI."""
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    # Plot over a full-size shuffled sample of the frame.
    msno.bar(df.sample(df.shape[0]))
    plt.title(title, loc='center')
    image = BytesIO()
    plt.savefig(image, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    payload = base64.encodebytes(image.getvalue()).decode()
    return 'data:image/png;base64,' + payload


# Missing-value matrix chart rendered with missingno.
def matrix(df, title='缺失值检测矩阵图'):
    """Return a missingno nullity matrix of df as a base64 data URI."""
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    # Full-size shuffled sample; wide data panel relative to the sparkline.
    msno.matrix(df.sample(df.shape[0]), width_ratios=(15, 4), fontsize=18)
    plt.title(title, loc='center')
    stream = BytesIO()
    plt.savefig(stream, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    b64 = base64.encodebytes(stream.getvalue()).decode()
    return 'data:image/png;base64,' + b64


# Missing-value correlation heatmap rendered with missingno.
def heatmap(df, title='缺失值检测热力图'):
    """Return a missingno nullity-correlation heatmap of df as a base64 data URI."""
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    msno.heatmap(df.sample(df.shape[0]))  # full-size shuffled sample
    plt.title(title, loc='center')
    out = BytesIO()
    plt.savefig(out, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    encoded = base64.encodebytes(out.getvalue()).decode()
    return 'data:image/png;base64,' + encoded


# Box plot of a single column for exploratory analysis.
def box(df, vars, title='探索性数据分析箱型图'):
    """Return a seaborn box plot of df[vars] as a base64 data URI."""
    print(vars)
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    sns.boxplot(df[vars])
    plt.title(title, loc='center')
    buffer = BytesIO()
    plt.savefig(buffer, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    encoded = base64.encodebytes(buffer.getvalue()).decode()
    return 'data:image/png;base64,' + encoded


# Box plot used for outlier detection.
def explor_box(df, vars=None, title='异常值检测箱型图'):
    """Return a matplotlib box plot of df[vars] as a base64 data URI."""
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    plt.boxplot(x=df[vars])
    plt.title(title, loc='center')
    stream = BytesIO()
    plt.savefig(stream, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    encoded = base64.encodebytes(stream.getvalue()).decode()
    return 'data:image/png;base64,' + encoded


# Violin plot of a single column.
def violinplot(df, vars):
    """Return a seaborn violin plot of df[vars] as a base64 data URI."""
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    sns.violinplot(df[vars])
    buf = BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    b64 = base64.encodebytes(buf.getvalue()).decode()
    return 'data:image/png;base64,' + b64


# Pairwise relationships between several columns.
def var_relation(df, vars):
    """Return a seaborn pairplot of df[vars] as a base64 data URI."""
    print(vars)
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    sns.pairplot(df[vars])
    buffer = BytesIO()
    plt.savefig(buffer, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    encoded = base64.encodebytes(buffer.getvalue()).decode()
    return 'data:image/png;base64,' + encoded


# Pairwise relationships across the whole frame, colored by one column.
def var_relation1(df, vars):
    """Return a seaborn pairplot of df hued by column `vars` as a base64 data URI."""
    print(vars)
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    sns.pairplot(df, hue=vars)
    stream = BytesIO()
    plt.savefig(stream, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    b64 = base64.encodebytes(stream.getvalue()).decode()
    return 'data:image/png;base64,' + b64


# Correlation heatmap over a subset of columns.
def relativity(df, vars):
    """Return a seaborn heatmap of df[vars].corr() as a base64 data URI."""
    print(vars)
    matplotlib.use('Agg')  # headless backend
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK font
    plt.rcParams['axes.unicode_minus'] = False
    sns.heatmap(df[vars].corr())
    buf = BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0.0)
    plt.close()  # avoid duplicated figures on the next request
    encoded = base64.encodebytes(buf.getvalue()).decode()
    return 'data:image/png;base64,' + encoded


# Download the most recently generated CSV (duplicate of `download`).
def download_1(request):
    """Stream the newest file in media/csv as an attachment.

    Raises IndexError if the directory is empty (no file has been generated yet).
    """
    local_filename = "media/csv/"
    filename = os.listdir(local_filename)
    # BUG FIX: intent is "most recently generated", so sort by modification
    # time; access times are unreliable on relatime-mounted filesystems.
    filename.sort(key=lambda fn: os.path.getmtime(os.path.join(local_filename, fn))
                  if not os.path.isdir(os.path.join(local_filename, fn)) else 0)
    latest = filename[-1]
    file = open(os.path.join(local_filename, latest), 'rb')  # FileResponse closes it
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    # RFC 5987 encoding so non-ASCII filenames survive the download dialog.
    response['Content-Disposition'] = "attachment; filename*=utf-8''{}".format(escape_uri_path(latest))
    return response


# Upload page for the KNN csv file
def upload_knn(request):
    """Render the KNN upload page."""
    allcategory = Catagory.objects.all()  # all categories, exposed to the template via locals()
    return render(request, 'knn.html', locals())


# Save the uploaded KNN csv and echo its contents back to the page
def show_knn(request):
    """Persist the posted csv under media/upload and render it as a preview.

    Exposes ``message`` plus the header and raw rows (``test_data``) to the
    template through locals().
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        # Stream the upload to disk chunk by chunk; the with-block closes the handle
        with open(file_path, 'wb') as fp:
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='utf8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Plain nested lists so the template can iterate the table
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        return render(request, 'knn.html', locals())
    else:
        # BUG FIX: this branch previously fell through and returned None,
        # which makes Django raise a 500 on any non-POST request.
        message = '上传失败'
        return render(request, 'knn.html', locals())


def knn(trainData, testData, labels, k):
    """Classify ``testData`` by majority vote among its k nearest neighbours.

    trainData -- (n, d) numpy array of training samples
    testData  -- length-d sequence: the sample to classify
    labels    -- length-n sequence of class labels aligned with trainData
    k         -- number of neighbours to consult
    """
    # Euclidean distance from the test sample to every training row
    deltas = np.tile(testData, (trainData.shape[0], 1)) - trainData
    distances = (deltas ** 2).sum(axis=1) ** 0.5
    nearest = distances.argsort()  # indices ordered closest -> farthest
    # Tally the labels of the k closest samples
    votes = {}
    for idx in nearest[:k]:
        cls = labels[idx]
        votes[cls] = votes.get(cls, 0) + 1
    # First label to reach the highest count wins (insertion order breaks ties,
    # matching the original stable reverse sort)
    return max(votes, key=votes.get)


# KNN handler: train on the latest uploaded csv and classify a user sample
def knn_handle(request):
    """Run KNN over the newest file in media/upload.

    POST fields: ``test_data`` (comma separated feature values), ``k_value``
    (neighbour count) and ``title``.  The csv's last column is treated as the
    class label.  All results reach the template through locals(), so local
    variable names are part of the template contract and are kept unchanged.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # Newest (by access time) file sorts last; directories get key 0
    filename.sort(key=lambda fn: os.path.getatime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='utf8')
    if request.method == "POST":
        test_data = request.POST.get("test_data", None)
        k_value = int(request.POST.get("k_value", None))
        test_data1 = test_data.split(',')
        # The sample to classify, as floats
        test_data2 = [float(test_data1[i]) for i in range(len(test_data1))]
        chart_title = request.POST.get('title', None)
        # Snapshot the dataset under a timestamped name for later download
        csv_file = time_stamp = '{0:%Y%m%d%H%M}'.format(datetime.datetime.now())
        data2 = df.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # BUG FIX: was index=0; to_csv expects a boolean
        data_title = np.array(df.columns.values)  # header row
        data_1 = df.values[:, :]
        new_data = np.vstack((data_title, data_1))
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        df1 = df.drop(df.columns.values[-1], axis=1)  # features: all but the label column
        df2 = numpy.array(df1)
        last = df.columns.values[-1]  # label column name
        label = df[last].tolist()
        trainData = df2  # training samples
        labels = label  # class labels
        Train_set = str(len(trainData))
        Test_set = str(len(test_data2))
        knn_result = knn(trainData, test_data2, labels, k_value)
        msg = 'KNN分类成功'
        return render(request, 'knn.html', locals())
    else:
        msg = '分类失败'
    return render(request, 'knn.html', locals())


# Upload page for the linear-regression csv file
def upload_linear(request):
    """Render the linear-regression upload page."""
    allcategory = Catagory.objects.all()  # all categories, exposed to the template via locals()
    return render(request, 'linear_reg.html', locals())


# Save the uploaded regression csv and echo its contents back to the page
def show_linear(request):
    """Persist the posted csv under media/upload and render it as a preview.

    Exposes ``message`` plus the header and raw rows (``test_data``) to the
    template through locals().
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        # Stream the upload to disk chunk by chunk; the with-block closes the handle
        with open(file_path, 'wb') as fp:
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='utf8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Plain nested lists so the template can iterate the table
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        return render(request, 'linear_reg.html', locals())
    else:
        # BUG FIX: this branch previously fell through and returned None,
        # which makes Django raise a 500 on any non-POST request.
        message = '上传失败'
        return render(request, 'linear_reg.html', locals())


# Linear-regression handler
def linear_handle(request):
    """Fit Y = a + bX over two columns of the newest upload and chart it.

    POST fields: ``x_values`` / ``y_values`` (column names), ``title``,
    ``x_label``, ``y_label``.  The fitted model string, regression statistics
    (R², F, p-values, n) and the chart data-URI ``src`` reach the template
    through locals(), so local names are kept unchanged.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # Newest (by access time) file sorts last; directories get key 0
    filename.sort(key=lambda fn: os.path.getatime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='utf8')
    if request.method == "POST":
        chart_type = request.POST.get("chart_values", None)
        if chart_type == 'scatter':
            y_values = request.POST.get("y_values", None)
            x_values = request.POST.get("x_values", None)
            chart_title = request.POST.get('title', None)  # was read twice; single read kept
            y_label = request.POST.get('y_label', None)
            x_label = request.POST.get('x_label', None)
            sns.set_style("whitegrid")
            matplotlib.use('Agg')  # headless backend
            plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
            plt.rcParams['axes.unicode_minus'] = False
            # Fit with scikit-learn for the model string and the fitted line
            X = df[x_values].values.reshape(-1, 1)
            y = df[y_values].values.reshape(-1, 1)
            reg = LinearRegression()
            reg.fit(X, y)
            model = "Y = {:.5} + ({:.5})X".format(reg.intercept_[0], reg.coef_[0][0])
            predictions = reg.predict(X)
            plt.figure(figsize=(16, 8))
            plt.scatter(df[x_values], df[y_values], c='black')
            plt.plot(df[x_values], predictions, c='blue', linewidth=2)
            plt.title(chart_title)
            plt.xlabel(x_label)
            plt.ylabel(y_label)
            sio = BytesIO()
            plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
            data = base64.encodebytes(sio.getvalue()).decode()
            src = 'data:image/png;base64,' + str(data)
            # BUG FIX: removed plt.show() (a no-op under Agg) before close
            plt.close()
            # Re-fit with statsmodels OLS to obtain significance statistics
            X1 = df[x_values]
            y = df[y_values]
            X2 = sm.add_constant(X1)
            est = sm.OLS(y, X2)
            est2 = est.fit()
            R_value = est2.rsquared  # coefficient of determination
            F_value = est2.fvalue  # F statistic
            P_value = list(est2.pvalues)[1:]  # p-values, intercept dropped
            sample_size = est2.nobs  # number of observations
            # Snapshot the dataset under a timestamped name for later download
            csv_file = time_stamp = '{0:%Y%m%d%H%M}'.format(datetime.datetime.now())
            data2 = df.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # BUG FIX: was index=0
            data_title = np.array(df.columns.values)  # header row
            data_1 = df.values[:, :]
            new_data = np.vstack((data_title, data_1))
            test_data = []
            for line in new_data:
                ls = []
                for j in line:
                    ls.append(j)
                test_data.append(ls)
            msg = '回归分析成功'
        return render(request, 'linear_reg.html', locals())
    else:
        msg = '回归分析失败'
    return render(request, 'linear_reg.html', locals())


# Upload page for the decision-tree csv file
def upload_decision(request):
    """Render the decision-tree upload page."""
    allcategory = Catagory.objects.all()  # all categories, exposed to the template via locals()
    return render(request, 'decision_tree.html', locals())


# Save the uploaded decision-tree csv and echo its contents back to the page
def show_decision(request):
    """Persist the posted csv under media/upload and render it as a preview.

    Exposes ``message`` plus the header and raw rows (``test_data``) to the
    template through locals().
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        # Stream the upload to disk chunk by chunk; the with-block closes the handle
        with open(file_path, 'wb') as fp:
            for info in f.chunks():
                fp.write(info)
        message = '文件上传成功'
        data_set = read_csv(file_path, encoding='utf8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Plain nested lists so the template can iterate the table
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        return render(request, 'decision_tree.html', locals())
    else:
        # BUG FIX: this branch previously fell through and returned None,
        # which makes Django raise a 500 on any non-POST request.
        message = '文件上传失败'
        return render(request, 'decision_tree.html', locals())


# Decision-tree handler
def decision_handle(request):
    """Build an ID3 tree from the newest upload and chart it.

    POST fields: ``label`` (comma separated feature names matching the csv's
    feature columns) and ``title``.  The tree image data-URI ``src`` and the
    dict model reach the template through locals(), so local names are kept.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # Newest (by access time) file sorts last; directories get key 0
    filename.sort(key=lambda fn: os.path.getatime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='utf8')
    if request.method == "POST":
        label = request.POST.get("label", None)
        label1 = label.split(',')  # feature names for the tree
        chart_title = request.POST.get("title", None)
        data_size = df.shape
        df1 = np.array(df)
        df2 = df1.tolist()
        dataSet, labels = createDataSet1(df2, label1)
        model_result = createTree(dataSet, labels)  # nested-dict ID3 model
        myTree = retrieveTree(0, model_result)
        src = createPlot(myTree, chart_title)  # base64 PNG of the tree
        # Snapshot the dataset under a timestamped name for later download
        csv_file = time_stamp = '{0:%Y%m%d%H%M}'.format(datetime.datetime.now())
        data2 = df.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # BUG FIX: was index=0
        data_title = np.array(df.columns.values)  # header row
        data_1 = df.values[:, :]
        new_data = np.vstack((data_title, data_1))
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        msg = '决策树分析成功'
        return render(request, 'decision_tree.html', locals())
    else:
        msg = '决策树分析失败'
        return render(request, 'decision_tree.html', locals())


# Decision-tree implementation
def calcShannonEnt(dataSet):
    """Shannon entropy (base 2) of the class labels in ``dataSet``.

    Each row's last element is taken as its class label.
    """
    numEntries = len(dataSet)
    # Frequency of each class label
    labelCounts = {}
    for row in dataSet:
        labelCounts[row[-1]] = labelCounts.get(row[-1], 0) + 1
    # H = -sum(p * log2(p)) over the label distribution
    shannonEnt = 0
    for freq in labelCounts.values():
        prob = float(freq) / numEntries
        shannonEnt -= prob * log(prob, 2)
    print('信息熵', shannonEnt)
    return shannonEnt


def createDataSet1(dataSet, labels):
    """Return the training rows and feature names as a pair.

    Kept as a pass-through so callers share a single entry point for
    assembling (dataSet, labels); the former self-assignments were no-ops
    and have been removed.
    """
    return dataSet, labels


def splitDataSet(dataSet, axis, value):
    """Rows of ``dataSet`` whose feature at index ``axis`` equals ``value``,
    with that feature column removed from each returned row.
    """
    # Concatenating the two slices reproduces the original copy-then-extend
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]


def chooseBestFeatureToSplit(dataSet):
    """Index of the feature whose split yields the largest information gain.

    Returns -1 when no split improves on the base entropy.  The last column
    of each row is the class label and is never considered a feature.
    """
    featureCount = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)  # entropy before any split
    bestFeature, bestInfoGain = -1, 0
    for idx in range(featureCount):
        # Weighted entropy after partitioning on feature ``idx``
        uniqueVals = set(example[idx] for example in dataSet)
        newEntropy = 0
        for value in uniqueVals:
            subset = splitDataSet(dataSet, idx, value)
            weight = len(subset) / float(len(dataSet))
            newEntropy += weight * calcShannonEnt(subset)
        gain = baseEntropy - newEntropy  # information gain of this feature
        if gain > bestInfoGain:
            bestInfoGain, bestFeature = gain, idx
    return bestFeature


def majorityCnt(classList):
    """Most frequent label in ``classList``; first-seen label wins ties
    (same tie behaviour as a stable reverse sort by count).
    """
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    return max(tally, key=tally.get)


def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    NOTE(review): ``labels`` is mutated in place (the chosen feature name is
    deleted), exactly as before — callers that reuse the list must pass a copy.
    """
    classList = [row[-1] for row in dataSet]
    # All rows share one class: emit a leaf
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # No features left: emit a majority-vote leaf
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    tree = {bestFeatLabel: {}}
    del labels[bestFeat]
    for value in set(row[bestFeat] for row in dataSet):
        # Each branch recurses on its own copy of the remaining labels
        tree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), labels[:])
    return tree


# Decision-tree plotting ========================================================

def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    """Draw one annotated node, with an arrow from its parent, on the shared
    axes created by createPlot (createPlot.ax1)."""
    createPlot.ax1.annotate(
        nodeTxt,
        xy=parentPt, xycoords='axes fraction',
        xytext=centerPt, textcoords='axes fraction',
        va='center', ha='center',
        bbox=nodeType, arrowprops=arrow_args,
    )


def getNumLeafs(myTree):
    """Count the leaf nodes of a nested-dict decision tree."""
    root = list(myTree.keys())[0]
    total = 0
    for child in myTree[root].values():
        # Dicts are internal nodes; anything else is a leaf
        total += getNumLeafs(child) if isinstance(child, dict) else 1
    return total


def getTreeDepth(myTree):
    """Depth (number of decision levels) of a nested-dict decision tree."""
    root = list(myTree.keys())[0]
    depth = 0
    for child in myTree[root].values():
        # A dict child adds a level; a leaf contributes exactly one
        branch = 1 + getTreeDepth(child) if isinstance(child, dict) else 1
        depth = max(depth, branch)
    return depth


def retrieveTree(i, model_result):
    """Return the ``i``-th stored tree.

    Only one tree (``model_result``) is currently stored; the list wrapper
    preserves the historical index-based interface.  The leftover debug
    ``print`` of the list's type has been removed.
    """
    listOfTree = [model_result]
    return listOfTree[i]


def createPlot(inTree, title):
    """Render decision tree ``inTree`` and return it as a base64 PNG data URI."""
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    # Shared axes used by plotNode / plotMidText / plotTree
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    # Layout bookkeeping lives as attributes on plotTree
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
    plt.rcParams['axes.unicode_minus'] = False
    plt.title(title)
    sio = BytesIO()
    plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
    data = base64.encodebytes(sio.getvalue()).decode()
    src = 'data:image/png;base64,' + str(data)
    # Close so the next chart starts on a clean figure.
    # BUG FIX: removed the plt.show() that followed plt.close() — it referenced
    # an already-closed figure and is a no-op under the Agg backend anyway.
    plt.close()
    return src


def plotMidText(cntrPt, parentPt, txtString):
    """Write ``txtString`` at the midpoint of the parent-to-child edge."""
    midX = cntrPt[0] + (parentPt[0] - cntrPt[0]) / 2.0
    midY = cntrPt[1] + (parentPt[1] - cntrPt[1]) / 2.0
    createPlot.ax1.text(midX, midY, txtString)


def plotTree(myTree, parentPt, nodeTxt):
    """Recursively draw ``myTree`` below ``parentPt``, labelling the incoming
    edge with ``nodeTxt``.

    Layout state lives in function attributes initialised by createPlot:
    totalW/totalD (overall leaf count / depth) and xOff/yOff (the moving
    pen position in axes-fraction coordinates).
    """
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]
    # Centre this subtree horizontally over the leaves it will occupy
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    # Step one level down before drawing the children
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD
    for key in secondDict:
        if type(secondDict[key]).__name__ == 'dict':
            # Internal node: recurse with this node as the new parent
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            # Leaf: advance the x cursor, draw the leaf and its edge label
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    # Restore the y cursor for the caller's level
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD


# Upload page for the k-means csv file (header previously mislabelled "knn")
def upload_k_means(request):
    """Render the k-means upload page."""
    allcategory = Catagory.objects.all()  # all categories, exposed to the template via locals()
    return render(request, 'k-means.html', locals())


# Save the uploaded k-means csv and echo its contents back to the page
def show_k_means(request):
    """Persist the posted csv under media/upload and render it as a preview.

    Exposes ``message`` plus the header and raw rows (``test_data``) to the
    template through locals().
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        # Stream the upload to disk chunk by chunk; the with-block closes the handle
        with open(file_path, 'wb') as fp:
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='utf8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Plain nested lists so the template can iterate the table
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        return render(request, 'k-means.html', locals())
    else:
        # BUG FIX: this branch previously fell through and returned None,
        # which makes Django raise a 500 on any non-POST request.
        message = '上传失败'
        return render(request, 'k-means.html', locals())


# k-means handler: elbow chart or full clustering on the latest upload
def k_means_handle(request):
    """Handle the k-means page.

    ``chart_values`` == 'k_chart' renders an elbow (SSE vs k) chart for the
    comma separated columns in ``test_data``; 'scatter' runs a hand-rolled
    Lloyd's-algorithm k-means with ``k_value`` clusters over those columns
    and plots each cluster.  Results reach the template through locals(),
    so local variable names are kept unchanged.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # Newest (by access time) file sorts last; directories get key 0
    filename.sort(key=lambda fn: os.path.getatime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='utf8')
    if request.method == "POST":
        chart_type = request.POST.get("chart_values", None)
        if chart_type == 'k_chart':
            test_data = request.POST.get("test_data", None)
            col_n = test_data.split(',')  # candidate feature columns
            chart_title = request.POST.get('title', None)
            src = show_k(df, chart_title, col_n)  # elbow chart as a data URI
            # Snapshot the dataset under a timestamped name for later download
            csv_file = time_stamp = '{0:%Y%m%d%H%M}'.format(datetime.datetime.now())
            data2 = df.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # BUG FIX: was index=0
            data_title = np.array(df.columns.values)  # header row
            data_1 = df.values[:, :]
            new_data = np.vstack((data_title, data_1))
            test_data = []
            for line in new_data:
                ls = []
                for j in line:
                    ls.append(j)
                test_data.append(ls)
            msg = 'k值分析成功'
            return render(request, 'k-means.html', locals())
        else:
            msg = 'k值分析失败'
        if chart_type == 'scatter':
            chart_title = request.POST.get('title', None)
            k_value = int(request.POST.get("k_value", None))
            test_data = request.POST.get("test_data", None)
            data_size = df.shape
            row = df.shape[0]  # sample count
            ndim = df.ndim  # dataset dimensionality
            col_n = test_data.split(',')  # feature column names
            a = pd.DataFrame(df, columns=col_n)
            data = a.values
            # Pick k_value random row indices as the initial cluster centres
            rarray = np.random.random(size=k_value)
            rarray = np.floor(rarray * row)
            rarray = rarray.astype(int)
            center = data[rarray]
            center1 = center.tolist()
            # Per-sample cluster assignment.
            # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
            cls = np.zeros([row], int)
            print('初始center=\n', center)
            run = True
            time = 0
            # Lloyd's algorithm: assign each point, then move each centre,
            # until no centre moves more than the tolerance
            while run:
                time = time + 1
                for i in range(row):
                    tmp = data[i] - center
                    tmp = np.square(tmp)
                    tmp = np.sum(tmp, axis=1)  # squared distance to each centre
                    cls[i] = np.argmin(tmp)  # nearest centre claims the point
                run = False
                for i in range(k_value):
                    club = data[cls == i]  # samples currently in cluster i
                    newcenter = np.mean(club, axis=0)
                    ss = np.abs(center[i] - newcenter)
                    # Centre moved measurably: update and iterate again
                    if np.sum(ss, axis=0) > 1e-4:
                        center[i] = newcenter
                        run = True
            print('程序结束，迭代次数：', time)
            # One chart per cluster; ``src`` ends up holding the last cluster's chart
            for i in range(k_value):
                club = data[cls == i]
                src = showtable(club)
            # NOTE(review): the centres chart's return value is discarded here —
            # confirm whether the template should receive it instead of ``src``
            showtable(center)
            # Snapshot the dataset under a timestamped name for later download
            csv_file = time_stamp = '{0:%Y%m%d%H%M}'.format(datetime.datetime.now())
            data2 = df.to_csv("media/csv/{}.csv".format(csv_file), index=False)  # BUG FIX: was index=0
            data_title = np.array(df.columns.values)  # header row
            data_1 = df.values[:, :]
            new_data = np.vstack((data_title, data_1))
            test_data = []
            for line in new_data:
                ls = []
                for j in line:
                    ls.append(j)
                test_data.append(ls)
            msg = 'k-means聚类成功'
            return render(request, 'k-means.html', locals())
        else:
            msg = 'k-means聚类失败'
    return render(request, 'k-means.html', locals())


# def kmeans(data, n, m, k):
#     # 获取4个随机数
#     rarray = np.random.random(size=k)
#     # 乘以数据集大小——>数据集中随机的4个点
#     rarray = np.floor(rarray*n)
#     # 转为int
#     rarray = rarray.astype(int)
#     print('数据集中随机索引', rarray)
#     # 随机取数据集中的4个点作为初始中心点
#     center = data[rarray]
#     # 测试比较偏、比较集中的点，效果依然完美，测试需要删除以上代码
#     # center = np.array([[4.6,-2.5],[4.4,-1.7],[4.3,-0.7],[4.8,-1.1]])
#     # 1行80列的0数组，标记每个样本所属的类(k[i])
#     cls = np.zeros([n], np.int)
#     print('初始center=\n', center)
#     run = True
#     time = 0
#     while run:
#         time = time + 1
#         for i in range(n):
#             # 求差
#             tmp = data[i] - center
#             # 求平方
#             tmp = np.square(tmp)
#             # axis=1表示按行求和
#             tmp = np.sum(tmp, axis=1)
#             # 取最小（最近）的给该点“染色”（标记每个样本所属的类(k[i])）
#             cls[i] = np.argmin(tmp)
#         # 如果没有修改各分类中心点，就结束循环
#         run = False
#         # 计算更新每个类的中心点
#         for i in range(k):
#             # 找到属于该类的所有样本
#             club = data[cls==i]
#             # axis=0表示按列求平均值，计算出新的中心点
#             newcenter = np.mean(club, axis=0)
#             # 如果新旧center的差距很小，看做他们相等，否则更新之。run置true，再来一次循环
#             ss = np.abs(center[i]-newcenter)
#             if np.sum(ss, axis=0) > 1e-4:
#                 center[i] = newcenter
#                 run = True
#         # print('new center=\n', center)
#     # print('程序结束，迭代次数：', time)
#     # times = time # 迭代次数
#     # 按类打印图表，因为每打印一次，颜色都不一样，所以可区分出来
#     for i in range(k):
#         club = data[cls == i]
#         showtable(club)
#     # 打印最后的中心点
#     showtable(center)

def showtable(data):
    """Scatter-plot the first two columns of ``data`` (a 2-D numpy array)
    and return the chart as a base64 PNG data URI.

    Each call saves and closes its own figure, so successive calls produce
    independent charts.
    """
    x = data.T[0]
    y = data.T[1]
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
    plt.rcParams['axes.unicode_minus'] = False
    plt.title('k-means聚类')
    plt.scatter(x, y)
    sio = BytesIO()
    plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
    # BUG FIX: the encoded string previously shadowed the ``data`` parameter
    encoded = base64.encodebytes(sio.getvalue()).decode()
    src = 'data:image/png;base64,' + encoded
    # Close so the next chart starts clean; the former plt.show() after
    # plt.close() was dead code (no-op under Agg) and was removed, as was
    # the leftover debug print of the input array.
    plt.close()
    return src


def show_k(x, title, col):
    """Elbow-method chart: k-means SSE for k = 1..8, as a base64 PNG data URI.

    x     -- DataFrame containing the candidate feature columns
    title -- chart title
    col   -- list of column names to cluster on
    """
    SSE = []  # within-cluster sum of squares (inertia) per k
    for k in range(1, 9):
        estimator = KMeans(n_clusters=k)
        estimator.fit(x[col])
        SSE.append(estimator.inertia_)
    plt.xlabel('k')
    plt.ylabel('SSE')
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
    plt.rcParams['axes.unicode_minus'] = False
    plt.plot(range(1, 9), SSE, 'o-')
    plt.title(title)
    sio = BytesIO()
    plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
    data = base64.encodebytes(sio.getvalue()).decode()
    src = 'data:image/png;base64,' + str(data)
    # Close the figure; the former plt.show() after plt.close() was dead code,
    # and the no-op ``col = col`` plus the debug print were removed.
    plt.close()
    return src


# Upload page for the PCA csv file
def upload_pca(request):
    """Render the PCA upload page."""
    allcategory = Catagory.objects.all()  # all categories, exposed to the template via locals()
    return render(request, 'PCA.html', locals())


# Save the uploaded PCA csv and echo its contents back to the page
def show_pca(request):
    """Persist the posted csv under media/upload and render it as a preview.

    Exposes ``message`` plus the header and raw rows (``test_data``) to the
    template through locals().
    """
    if request.method == "POST":
        f = request.FILES['csv_file']
        file_path = os.path.join('media/upload', f.name)
        # Stream the upload to disk chunk by chunk; the with-block closes the handle
        with open(file_path, 'wb') as fp:
            for info in f.chunks():
                fp.write(info)
        message = '上传成功'
        data_set = read_csv(file_path, encoding='utf8')
        data_title = np.array(data_set.columns.values)  # header row
        data = data_set.values[:, :]
        new_data = np.vstack((data_title, data))
        # Plain nested lists so the template can iterate the table
        test_data = []
        for line in new_data:
            ls = []
            for j in line:
                ls.append(j)
            test_data.append(ls)
        return render(request, 'PCA.html', locals())
    else:
        # BUG FIX: this branch previously fell through and returned None,
        # which makes Django raise a 500 on any non-POST request.
        message = '上传失败'
        return render(request, 'PCA.html', locals())


# PCA handler
def pca_handle(request):
    """Reduce the newest upload to ``pca_value`` principal components.

    Renders a scatter of the first two components, saves the reduced data
    to media/csv, and exposes the explained-variance statistics to the
    template through locals(), so local variable names are kept unchanged.
    """
    local_filename = "media/upload/"
    filename = os.listdir(local_filename)
    # Newest (by access time) file sorts last; directories get key 0
    filename.sort(key=lambda fn: os.path.getatime(local_filename + fn) if not os.path.isdir(local_filename + fn) else 0)
    df = pd.read_csv('media/upload/{}'.format(filename[-1]), encoding='utf8')
    if request.method == "POST":
        chart_type = request.POST.get("chart_values", None)
        if chart_type == 'scatter1':
            pca_value = int(request.POST.get("pca_value", None))
            chart_title = request.POST.get('title', None)
            data_size = df.shape
            ndim = df.ndim  # dataset dimensionality
            # First pass: full PCA to report the complete spectrum
            pca = PCA()
            pca.fit(df)
            components = pca.components_  # eigenvectors
            variance = pca.explained_variance_  # eigenvalues
            variance_ratio = pca.explained_variance_ratio_  # explained-variance shares
            print('特征向量', components)
            # BUG FIX: this line printed variance_ratio under the eigenvalue label
            print('特征值', variance.tolist())
            print('方差百分比', variance_ratio.tolist())
            variance_ratio1 = variance_ratio.tolist()
            variance1 = variance.tolist()
            for variance_ratio2 in variance_ratio1:
                print('遍历数据', variance_ratio2)
            # Second pass: keep only pca_value components
            pca = PCA(pca_value)
            pca.fit(df)
            low_df = pca.transform(df)  # reduced data
            df1 = pd.DataFrame(low_df)
            plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
            plt.rcParams['axes.unicode_minus'] = False
            plt.title(chart_title)
            plt.scatter(low_df[:, 0], low_df[:, 1], marker='o')
            sio = BytesIO()
            plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
            data = base64.encodebytes(sio.getvalue()).decode()
            src = 'data:image/png;base64,' + str(data)
            # Close the figure; the former plt.show() after plt.close() was dead code
            plt.close()
            # Save the reduced data under a timestamped name for download
            csv_file = time_stamp = '{0:%Y%m%d%H%M}'.format(datetime.datetime.now())
            data2 = df1.to_csv("media/csv/{}.csv".format(csv_file), index=False, header=False)
            data_title = np.array(df1.columns.values)  # component indices as headers
            data_1 = df1.values[:, :]
            new_data = np.vstack((data_title, data_1))
            test_data = []
            for line in new_data:
                ls = []
                for j in line:
                    ls.append(j)
                test_data.append(ls)
            msg = '数据降维成功'
            return render(request, 'PCA.html', locals())
        else:
            msg = '数据降维失败'
    return render(request, 'PCA.html', locals())


# Scatter plot of the data before dimensionality reduction
def pre_pca(df, title, x_col, y_col):
    """Scatter ``x_col`` against ``y_col`` with randomized marker sizes and
    return the chart as a base64 PNG data URI.

    NOTE(review): the marker-size array is fixed at 100 entries, which
    assumes the frame has a compatible row count — confirm against callers.
    """
    rng = np.random.RandomState(10)  # fixed seed: reproducible marker sizes
    sizes = 100 * rng.rand(100)
    a = pd.DataFrame(df, columns=x_col)
    b = pd.DataFrame(df, columns=y_col)
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
    plt.rcParams['axes.unicode_minus'] = False
    plt.title(title)
    plt.scatter(x=a, y=b, marker='o', s=sizes, alpha=0.3)
    sio = BytesIO()
    plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
    data = base64.encodebytes(sio.getvalue()).decode()
    src = 'data:image/png;base64,' + str(data)
    # Close the figure; the former plt.show() after plt.close() was dead code
    plt.close()
    return src


# Scatter plot of the data after dimensionality reduction
def after_pca(df, title, x_col, y_col, pca_value):
    """Reduce ``df`` to ``pca_value`` principal components and plot the
    first two.

    Returns (src, low_df1, low_df): the chart as a base64 data URI, the data
    projected back into the original space, and the reduced data.
    ``x_col`` / ``y_col`` are kept for interface compatibility but unused.
    """
    # A preliminary full PCA() fit whose eigen-spectrum was never used has
    # been removed — it only duplicated work.
    pca = PCA(pca_value)
    pca.fit(df)
    low_df = pca.transform(df)  # reduced data
    low_df1 = pca.inverse_transform(low_df)  # reconstruction in the original space
    plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK glyph support
    plt.rcParams['axes.unicode_minus'] = False
    plt.title(title)
    plt.scatter(low_df[:, 0], low_df[:, 1], marker='o')
    sio = BytesIO()
    plt.savefig(sio, format='png', bbox_inches='tight', pad_inches=0.0)
    data = base64.encodebytes(sio.getvalue()).decode()
    src = 'data:image/png;base64,' + str(data)
    # Close the figure; the former plt.show() after plt.close() was dead code
    plt.close()
    return src, low_df1, low_df
