import json
import numpy as np
import tensorflow as tf
from scipy.stats import linregress
import pandas
from flask import (Blueprint, request, session)
from flaskr import Utils
from flaskr.server.SheetsServer import *
# Blueprint for the /compute endpoint; registered by the app factory elsewhere.
bp = Blueprint('compute',__name__, url_prefix='/compute')

@bp.route('', methods = ['POST'])
def compute():
    """Aggregate, forecast, or echo sheet data for charting.

    Expects a JSON body with:
      columns      -- list of {sheetName, fieldName}; the first entry is the
                      x-axis / group-by key
      values       -- list of {sheetName, fieldName, wayType}; each entry is a
                      series to aggregate with its own aggregation method
      forecast     -- list of {sheetName, fieldName}; inputs for the NN forecast
      aggregation  -- falsy (but not None) -> return raw x/y pairs, no grouping
      matching     -- truthy -> append a linear-regression fit of the results
      has_forecast -- truthy -> run the neural-network forecast path

    Returns a dict with 'code', 'msg', and (on success) 'data' holding
    'columns' (x labels) and 'values' (list of per-series value dicts).
    """
    payload = request.json
    columns_parameter = payload.get('columns')
    values_parameter = payload.get('values')
    forecast_parameter = payload.get('forecast')
    aggregation = payload.get('aggregation')
    has_matching = payload.get('matching')
    has_forecast = payload.get('has_forecast')

    # Empty-success payload reused by all early exits.
    empty_result = {
        'code': 200,
        'data': {'values': [], 'columns': []},
        'msg': '计算成功'
    }

    # Guard against absent or empty inputs. request.json.get returns None for
    # a missing key; the previous len(None) raised TypeError and crashed the
    # request instead of returning an empty result.
    if not columns_parameter or not values_parameter:
        return empty_result

    # Forecast path: train a model on (x, y) and predict for the forecast input.
    if has_forecast:
        if not forecast_parameter:
            return empty_result

        column = columns_parameter[0]
        x_value = get_dataframe(column['sheetName'])[column['fieldName']]

        value = values_parameter[0]
        y_value = get_dataframe(value['sheetName'])[value['fieldName']]

        forecast = forecast_parameter[0]
        forecast_value = get_dataframe(forecast['sheetName'])[forecast['fieldName']]

        # ycs returns a 2-D prediction array; flatten it to one series.
        res = ycs(x_value, y_value, forecast_value).flatten().tolist()
        obj = {
            'columns': list(y_value.index),
            'values': [[res]],  # single nested list of predictions
        }
        return {
            'code': 200,
            'data': obj,
            'msg': '计算成功'
        }

    # Raw (non-aggregated) path: pair up x and y positionally, truncated to
    # the shorter of the two series.
    if aggregation is not None and not aggregation:
        column = columns_parameter[0]
        x_value = get_dataframe(column['sheetName'])[column['fieldName']]

        value = values_parameter[0]
        y_value = get_dataframe(value['sheetName'])[value['fieldName']]

        min_len = min(len(x_value), len(y_value))
        obj = {
            'columns': [x_value[i] for i in range(min_len)],
            'values': [[y_value[i] for i in range(min_len)]],
        }
        return {
            'code': 200,
            'data': obj,
            'msg': '计算成功'
        }

    # Aggregation path: group each value series by the first column field and
    # apply the per-value aggregation method. Dispatch table replaces the
    # original if/elif chain; 'countNR' maps to distinct-count.
    agg_methods = {
        'sum': 'sum',
        'mean': 'mean',
        'max': 'max',
        'min': 'min',
        'std': 'std',
        'count': 'count',
        'var': 'var',
        'countNR': 'nunique',
    }

    obj2 = {'values': []}
    res = None  # last grouped result; its index supplies the shared columns
    for value in values_parameter:
        # Only the first column entry is used as the group-by key (the
        # original loop broke after one iteration).
        column = columns_parameter[0]
        frame = {}
        frame[column['fieldName']] = get_dataframe(column['sheetName'])[column['fieldName']]
        group_cols = [column['fieldName']]

        frame[value['fieldName']] = get_dataframe(value['sheetName'])[value['fieldName']]
        value_cols = [value['fieldName']]

        df = pandas.DataFrame(frame)
        way = value['wayType']

        method_name = agg_methods.get(way)
        if method_name is None:
            return {
                'code' : 500,
                'msg' : '非法操作'
            }
        df_groupby = getattr(df.groupby(group_cols), method_name)()

        res = df_groupby[value_cols]
        obj2['values'].extend(getValues(res))

    # Group keys of the last aggregation serve as the x labels for all series.
    obj2['columns'] = getColumns(res)

    if has_matching:
        # Flatten all aggregated cells to ints and fit a straight line over
        # their positional index; appended as an extra pseudo-series.
        flat = [int(cell) for row in res.values for cell in row]
        res = res.reset_index()
        data_list = nih(res.index, flat)
        obj2['values'].append({'nihe': data_list})

    return{
        'code' : 200,
        'msg' : '计算成功',
        'data' : obj2
    }


def getColumns(dataFrame):
    """Return the frame's index labels as a plain list (chart x-axis)."""
    return list(dataFrame.index)

def getValuesName(dataFrame):
    """Return the frame's column names as a plain list."""
    return [name for name in dataFrame.columns]

def getValues(dataFrame):
    """One {column_name: [stringified cells]} dict per column, in order.

    Every cell is converted with str() so the payload is JSON-safe.
    """
    rows = dataFrame.values
    return [
        {name: [str(row[pos]) for row in rows]}
        for pos, name in enumerate(dataFrame.columns)
    ]


def nih(index, nihe):
    """Fit a straight line of *nihe* over *index* and return the fitted values.

    Regresses nihe on index with scipy.stats.linregress and evaluates the
    line at every index position.

    Generalized: accepts any array-like for *index* (the original required a
    pandas Index because it read ``.values``), and returns plain Python floats
    via ``tolist()`` instead of ``np.float64`` elements, so the result is
    JSON-serializable when embedded in a Flask response.
    """
    slope, intercept, _r, _p, _std_err = linregress(index, nihe)
    fitted = np.asarray(index, dtype=float) * slope + intercept
    return fitted.tolist()
'''The fitted result `exp` is used for plotting. It comes back shaped like
Float64Index([9.5238, 192.3810, 375.2381, 558.0952, 740.9524, 923.8095],
             dtype='float64'),
which can be drawn directly on a chart.'''

def normalization(datas):  # min-max normalization
    """Scale *datas* into the [0, 1] range via min-max normalization.

    When every value is identical the range is zero; the original divided by
    zero (yielding NaN/inf plus a runtime warning), so that case now returns
    zeros of the same container type instead.
    """
    lo = np.min(datas)
    _range = np.max(datas) - lo
    if _range == 0:
        # Multiplying keeps the input's container (Series stays Series).
        return (datas - lo) * 0.0
    return (datas - lo) / _range
# ########## (mock data)

def ys(x,y):
    """Build and fit a small dense network mapping normalized x to normalized y.

    Architecture: 1 -> 10 -> 100 -> 10 -> 1 with tanh hidden activations and a
    linear output, trained with Adam on MSE for 500 epochs.

    NOTE(review): training 500 epochs per call is expensive — presumably
    acceptable for small sheet sizes; confirm with callers.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(1, input_shape=(1,), activation='tanh'),
        tf.keras.layers.Dense(10, activation='tanh'),
        tf.keras.layers.Dense(100, activation='tanh'),
        tf.keras.layers.Dense(10, activation='tanh'),
        tf.keras.layers.Dense(1),
    ])
    model.compile(optimizer='adam', loss='mse')
    model.fit(normalization(x), normalization(y), epochs=500)
    return model

def ycs(x, y, data):
    """Predict y-values for *data* using a network trained on (x, y).

    The model works in normalized space, so *data* is scaled by x's observed
    range before prediction, and the output is rescaled back to y's range.
    """
    y_min = np.min(y)
    y_span = np.max(y) - y_min
    x_min = np.min(x)
    x_span = np.max(x) - x_min
    scaled_input = (data - x_min) / x_span
    prediction = ys(x, y).predict(scaled_input)
    return prediction * y_span + y_min