from django.shortcuts import render

# Create your views here.
# views.py
from django.http import HttpResponse,JsonResponse

import db.models
from .charts import data_all
from .models import *
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier

from django.apps import apps

# views.py


# Global entropy-weight method (computes per-indicator weights)
def entropy(data):
    """Compute indicator weights with the entropy-weight method.

    Parameters
    ----------
    data : pd.DataFrame
        Rows are indicators, columns are observations (years).

    Returns
    -------
    pd.Series
        One weight per indicator, summing to 1, named '指标权重'.
    """
    # Work on a copy so the caller's frame is never mutated through the
    # transpose.
    d = data.T.copy()

    # Min-max normalise each indicator column into [0, 1].
    for col in d.columns:
        col_min = np.min(d[col])
        col_max = np.max(d[col])
        d[col] = (d[col] - col_min) / (col_max - col_min)

    def _proportions(df):
        # Convert each column to proportions; exact zeros are replaced with
        # a tiny epsilon so np.log() below stays finite.
        for column in df.columns:
            sigma_xij = sum(df[column])
            df[column] = df[column].apply(
                lambda x_ij: x_ij / sigma_xij if x_ij / sigma_xij != 0 else 1e-6)
        return df

    p = _proportions(d)

    # k = 1 / ln(n) where n is the actual number of observations.
    # (Was hard-coded to 1/ln(3) with a comment claiming 5 years, which
    # silently produces wrong entropy values for any other year count.)
    k = 1 / np.log(p.shape[0])

    # Entropy of each indicator, then its information redundancy (1 - h).
    h_j = (-k) * np.array([sum(pij * np.log(pij) for pij in p[column])
                           for column in p.columns])
    redundancy = pd.Series(1 - h_j, index=p.columns, name='信息熵冗余度')

    # Normalise the redundancies into weights.
    weights = redundancy / sum(redundancy)
    weights.name = '指标权重'
    return weights

# Ecology
def get_ecology(request):
    """Return entropy weights for Chongqing's ecology indicators as JSON.

    Response shape: {'status': 200, 'message': [{'value': w, 'name': field}, ...]}
    """
    fields = ['dirty_water','water_resource','forest_area','forest_cover']
    # One row per indicator; columns are the observation years.
    rows = [ecology.objects.filter(city='chongqing').values_list(f, flat=True) for f in fields]
    frame = pd.DataFrame(data=rows, index=fields)  # a time dimension could be added here
    weights = entropy(frame).to_dict()
    # Build the payload once (the original built the same list twice, once
    # only to print it).
    result = [{'value': v, 'name': k} for k, v in weights.items()]
    return JsonResponse({'status':200,'message':result})

# Economy
def get_economy(request):
    """Return entropy weights for Chongqing's economy indicators as JSON.

    Response shape: {'status': 200, 'message': [{'value': w, 'name': field}, ...]}
    """
    fields = ['gdp','per_income','cpi','eva']
    # One row per indicator; columns are the observation years.
    rows = [economy.objects.filter(city='chongqing').values_list(f, flat=True) for f in fields]
    frame = pd.DataFrame(data=rows, index=fields)  # a time dimension could be added here
    weights = entropy(frame).to_dict()
    # Build the payload once (the original built the same list twice, once
    # only to print it).
    result = [{'value': v, 'name': k} for k, v in weights.items()]
    return JsonResponse({'status':200,'message':result})

# Carbon emissions
def get_carbon(request):
    """Return entropy weights for Chongqing's carbon-emission indicators as JSON.

    Response shape: {'status': 200, 'message': [{'value': w, 'name': field}, ...]}
    """
    fields = ['raw_coal','natural_gas','oil','carbon_dioxide']
    # One row per indicator; columns are the observation years.
    rows = [carbon.objects.filter(city='chongqing').values_list(f, flat=True) for f in fields]
    frame = pd.DataFrame(data=rows, index=fields)  # a time dimension could be added here
    weights = entropy(frame).to_dict()
    # Build the payload once (the original built the same list twice, once
    # only to print it).
    result = [{'value': v, 'name': k} for k, v in weights.items()]
    return JsonResponse({'status':200,'message':result})

# Energy structure
def get_energy(request):
    """Return entropy weights for Chongqing's energy-structure indicators as JSON.

    Response shape: {'status': 200, 'message': [{'value': w, 'name': field}, ...]}
    """
    fields = ['energy_c','coal_c','gas_c','oil_c','electricity_c']
    # One row per indicator; columns are the observation years.
    rows = [energy.objects.filter(city='chongqing').values_list(f, flat=True) for f in fields]
    frame = pd.DataFrame(data=rows, index=fields)  # a time dimension could be added here
    weights = entropy(frame).to_dict()
    # Build the payload once (the original built the same list twice, once
    # only to print it).
    result = [{'value': v, 'name': k} for k, v in weights.items()]
    return JsonResponse({'status':200,'message':result})


# ----------------------------------- end of entropy-weight section: data for 4 pie charts -----------------------------------


# BP (back-propagation neural network) prediction
# Use PCA to fuse the carbon-emission indicators into a one-dimensional index
def pca(data):
    """Project *data* onto its first principal component.

    Returns an (n_samples, 1) ndarray of component scores.
    """
    reducer = PCA(n_components=1)   # keep a single component
    reducer = reducer.fit(data.values)
    scores = reducer.transform(data.values)
    return scores


def bp(data):
    """Train a BP (multi-layer perceptron) classifier and return its
    predictions on the held-out half of *data*.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain the 12 indicator columns listed below plus a
        'target' column (the PCA-fused index, scaled to int labels here).

    Returns
    -------
    np.ndarray
        Predicted (target * 10000) integer class labels for the test split.
    """
    # Feature matrix and target.
    x = data[['gdp', 'per_income', 'cpi', 'eva',
              'dirty_water', 'water_resource', 'forest_area', 'forest_cover',
              'energy_c', 'coal_c', 'gas_c','electricity_c']]
    y = data['target']
    # 50/50 train/test split, fixed seed for a stable split.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5, random_state=42)

    # Standardise: fit the scaler on the TRAINING data only, then apply the
    # same transform to both splits.  (The original fitted a scaler on the
    # test set too, which leaks test statistics and scales the two splits
    # inconsistently.)
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_train_Standard = scaler.transform(x_train)
    x_test_Standard = scaler.transform(x_test)

    bp = MLPClassifier(
        hidden_layer_sizes=(50, 50),  # two hidden layers of 50 neurons each
        activation='relu',
        solver='lbfgs',  # quasi-Newton optimiser for the weights
        alpha=0.0001,  # L2 regularisation term (default)
        batch_size='auto',
        learning_rate='constant',  # learning-rate schedule (default)
        random_state=42,  # make the random weight initialisation reproducible
    )
    # Targets are scaled to integers because MLPClassifier expects class labels.
    bp.fit(x_train_Standard, (y_train * 10000).astype('int'))
    y_predict = bp.predict(x_test_Standard)
    return y_predict

def get_bp(request):
    """Build per-city carbon-neutrality targets via PCA, run the BP model,
    and return a JSON payload for the charts.

    NOTE(review): the computed predictions r1/r2 are NOT returned — the
    response body below is hard-coded data.  Confirm whether this is a
    placeholder left in by mistake.
    """
    # Pull all carbon-emission indicators (columns 14:21 of the merged frame).
    data1 = data_all().iloc[:, 14:21]


    # Min-max normalisation helper.
    def max_min_sca(data):
        a = (data - data.min()) / (data.max() - data.min())
        return a

    # Reduce each city's carbon indicators to one dimension; the PCA scores
    # become the model target.
    p1 = pca(max_min_sca(data1[data1['city'] == 'chongqing'][['raw_coal', 'natural_gas', 'oil', 'carbon_dioxide']]))
    p2 = pca(max_min_sca(data1[data1['city'] == 'chengdu'][['raw_coal', 'natural_gas', 'oil', 'carbon_dioxide']]))
    # +1 shifts scores positive (PCA output can be negative); keep 21 rows.
    p1 = [i[0]+1 for i in p1][0:21]
    p2 = [i[0] + 1 for i in p2][0:21]


    # Shift the index by 1 where dimensionality reduction produced negatives.

    # Fetch the remaining indicator groups (economy, ecology, energy).
    jj = data_all().iloc[0:45, 0:7][['gdp', 'per_income', 'cpi', 'eva', 'city']]
    st = data_all().iloc[0:45, 7:14][['dirty_water', 'water_resource', 'forest_area', 'forest_cover']]
    ny = data_all().iloc[0:45, 21:][['energy_c', 'coal_c', 'gas_c', 'oil_c', 'electricity_c']]
    # print(st)
    data2 = pd.concat([jj, st, ny], axis=1)
    # data2.to_excel('111.xlsx')
    # print(data2)


    # Normalise the feature columns and attach the PCA target per city.
    d1 = max_min_sca(data2[data2['city'] == 'chongqing'][['gdp', 'per_income', 'cpi', 'eva', 'dirty_water', 'water_resource', 'forest_area', 'forest_cover','energy_c', 'coal_c', 'gas_c', 'electricity_c']])
    d2 = max_min_sca(data2[data2['city'] == 'chengdu'][['gdp', 'per_income', 'cpi', 'eva', 'dirty_water', 'water_resource', 'forest_area', 'forest_cover','energy_c', 'coal_c', 'gas_c', 'electricity_c']])
    d1 = d1[0:21]
    d1['target'] = p1
    d2 = d2[0:21]
    d2['target'] = p2
    # print(d2.iloc[:,7:])
    # d1.to_excel('chong.xlsx')
    # d2.to_excel('cheng.xlsx')


    # Run the BP model on previously exported spreadsheets.
    # NOTE(review): absolute Windows paths — these break on any other
    # machine, and the freshly built d1/d2 above are ignored in favour of
    # the files.  Confirm intent.
    data = pd.read_excel('D:/study/ana_envs/djangoProject/cheng.xlsx')
    data1 = pd.read_excel('D:/study/ana_envs/djangoProject/chong.xlsx')
    # Chengdu
    cheng = bp(data)
    # print(cheng)
    # Chongqing
    chong = bp(data1)

    # zip() truncates to these 11 years even though 21 rows were kept above.
    year = ['2022', '2021', '2020', '2019', '2018', '2017', '2016', '2015', '2014', '2013', '2012']
    r1 = [{'value': v, 'name': k} for v, k in zip(chong,year)]
    # print(r1)
    r2 = [{'value': v, 'name': k} for v, k in zip(cheng,year)]


    # Return the chart payload.
    # NOTE(review): hard-coded values below, not r1/r2 — TODO confirm.
    return JsonResponse({'chongqing' : [{'value': 15688, 'name': '2022'}, {'value': 2279, 'name': '2021'}, {'value': 2827, 'name': '2020'}, {'value': 19560, 'name': '2019'}, {'value': 16037, 'name': '2018'}, {'value': 16037, 'name': '2017'}, {'value': 3512, 'name': '2016'}, {'value': 18347, 'name': '2015'}, {'value': 2827, 'name': '2014'}, {'value': 2827, 'name': '2013'}, {'value': 5416, 'name': '2012'}],
                         'chengdu' : [{'value': 11235, 'name': '2022'},{'value': 7259, 'name': '2021'},{'value': 7982, 'name': '2020'},{'value': 11235, 'name': '2019'},{'value': 9247, 'name': '2018'},{'value': 12500, 'name': '2017'},{'value': 9247, 'name': '2016'},{'value': 10151, 'name': '2015'},{'value': 7259, 'name': '2014'},{'value': 7982, 'name': '2013'},{'value': 8343, 'name': '2012'}]})

# ------------------------------------------------------- end of BP prediction -------------------------------------------------------