import os
import time

import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST, CIFAR10
from torchvision.utils import save_image
import numpy as np
import pandas as pd
import torch
import os
from datetime import datetime
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch import nn
from torch import optim
from torchvision import datasets
import torchvision.transforms as transforms
import math
import xlrd
import sys
import seaborn as sns
from pylab import *


def dataProcess(step=10):
    """Load the 20000-row training CSV and build sliding-window samples.

    Reads ``data/data_trn_20000.csv`` (13 process variables + 1 quality
    variable y per row), forward-fills NaNs, min-max normalizes every column
    to [0, 1], then stacks ``step`` consecutive rows of x into one sample.

    :param step: sliding-window length (consecutive x rows per sample)
    :return: (xsDf, ysDf) — xsDf has shape (N - step, step * 13); ysDf holds
        the y value immediately after each window, shape (N - step, 1).
    """
    paramSize = 13  # dimensionality of x
    # os.path.join builds a portable path; the original
    # ``os.getcwd() + "./data/..."`` concatenation yields a malformed
    # "…cwd./data/…" path on POSIX systems.
    csvPath = os.path.join(os.getcwd(), "data", "data_trn_20000.csv")
    # Context manager guarantees the handle is closed (it was leaked before).
    with open(csvPath) as f:
        data = f.readlines()

    l = []
    for i in range(1, len(data)):          # skip the header row
        temp1 = data[i].split(',')
        for j in range(1, len(temp1)):     # skip the leading index column
            l.append(temp1[j])
    totalData = np.array(l).reshape([int(len(l) / (paramSize + 1)), (paramSize + 1)])  # (totalLen, paramSize + 1)

    columns = ["V0", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "Y"]
    totalDf = pd.DataFrame(totalData, columns=columns, dtype="float32")
    # Forward-fill NaNs with the previous row's value
    # (``fillna(method="ffill")`` is deprecated in modern pandas).
    totalDf = totalDf.ffill()
    # Min-max normalize every column to [0, 1].
    totalDf = (totalDf - totalDf.min()) / (totalDf.max() - totalDf.min())

    xDf = totalDf.iloc[:, 0:paramSize]  # (20000, 13)
    yDf = totalDf.iloc[:, paramSize]    # length 20000

    # Sliding window: concatenate `step` time-shifted copies of x column-wise.
    xsDf = pd.DataFrame(xDf.iloc[0: xDf.shape[0] - step, :])
    xsDf = xsDf.reset_index(drop=True)  # (19990, 130) for step=10
    for i in range(1, step):
        temDf = pd.DataFrame(xDf.iloc[i: xDf.shape[0] - step + i, :])
        temDf = temDf.reset_index(drop=True)
        xsDf = pd.concat([temDf, xsDf], axis=1)

    # Target is the y value right after each window.
    ysDf = pd.DataFrame(yDf.iloc[step:yDf.shape[0]])  # (19990, 1)

    return xsDf, ysDf






def dataProcessDe(step=10, posi="./data/debutanizer.xls"):
    """Load the debutanizer-column dataset and build sliding-window samples.

    The workbook holds 2394 rows of 7 process variables plus 1 quality
    variable. NaNs are forward-filled, every column is min-max normalized
    to [0, 1], then ``step`` consecutive x rows form one sample.

    :param step: sliding-window length
    :param posi: workbook path relative to the current working directory
    :return: (xsDf, ysDf) — xsDf has shape (N - step, step * 7); ysDf holds
        the y value immediately after each window.
    """
    paramSize = 7

    # os.path.join keeps the path valid on every OS; the original
    # ``os.getcwd() + posi`` concatenation with a "./…" suffix produced a
    # malformed "…cwd./data/…" path on POSIX systems.
    workbook = xlrd.open_workbook(os.path.join(os.getcwd(), posi))
    worksheet = workbook.sheet_by_index(0)
    nrows = worksheet.nrows  # total row count
    l = []
    for i in range(nrows):
        l.append(worksheet.row_values(i))
    totalData = np.array(l)

    columns = ["V0", "V1", "V2", "V3", "V4", "V5", "V6", "Y"]
    totalDf = pd.DataFrame(totalData, columns=columns, dtype="float32")
    # Forward-fill NaNs with the previous row's value
    # (``fillna(method="ffill")`` is deprecated in modern pandas).
    totalDf = totalDf.ffill()
    # Min-max normalize every column to [0, 1].
    totalDf = (totalDf - totalDf.min()) / (totalDf.max() - totalDf.min())

    xDf = totalDf.iloc[:, 0:paramSize]
    yDf = totalDf.iloc[:, paramSize]

    # Sliding window: concatenate `step` time-shifted copies of x column-wise.
    xsDf = pd.DataFrame(xDf.iloc[0: xDf.shape[0] - step, :])
    xsDf = xsDf.reset_index(drop=True)
    for i in range(1, step):
        temDf = pd.DataFrame(xDf.iloc[i: xDf.shape[0] - step + i, :])
        temDf = temDf.reset_index(drop=True)
        xsDf = pd.concat([temDf, xsDf], axis=1)

    # Target is the y value right after each window.
    ysDf = pd.DataFrame(yDf.iloc[step:yDf.shape[0]])

    return xsDf, ysDf


# 脱丁烷塔, 返回带噪声的数据
# Debutanizer column: return sliding-window data with injected outliers.
def dataProcessDeNoise(step=10, posi="./data/debutanizer.xls", noiseScale=0.03, outlierPercentage=1):
    """Like :func:`dataProcessDe`, but injects random outliers into x.

    :param step: sliding-window length
    :param posi: workbook path relative to the current working directory
    :param noiseScale: currently UNUSED — the Gaussian white-noise injection
        was disabled; only outliers are added below.
    :param outlierPercentage: roughly the percentage of rows that receive one
        outlier-corrupted feature.
    :return: (xsDf, ysDf) with xsDf cast to float32.
    """
    # Make matplotlib render CJK characters in figure titles.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly in saved figures

    paramSize = 7
    # os.path.join keeps the path valid on every OS (plain concatenation with
    # a "./…" suffix breaks on POSIX).
    workbook = xlrd.open_workbook(os.path.join(os.getcwd(), posi))
    worksheet = workbook.sheet_by_index(0)
    nrows = worksheet.nrows  # total row count
    l = []
    for i in range(nrows):
        l.append(worksheet.row_values(i))
    totalData = np.array(l)

    columns = ["V0", "V1", "V2", "V3", "V4", "V5", "V6", "Y"]
    totalDf = pd.DataFrame(totalData, columns=columns, dtype="float32")
    # Forward-fill NaNs with the previous row's value
    # (``fillna(method="ffill")`` is deprecated in modern pandas).
    totalDf = totalDf.ffill()
    # Min-max normalize every column to [0, 1].
    totalDf = (totalDf - totalDf.min()) / (totalDf.max() - totalDf.min())

    xDf = totalDf.iloc[:, 0:paramSize]
    yDf = totalDf.iloc[:, paramSize]

    xsDf = pd.DataFrame(xDf.iloc[0: xDf.shape[0] - step, :])  # (2368, 7)
    sns.set_style("whitegrid")

    # Inject outliers at reproducible positions.
    np.random.seed(1)
    noiseLoc = np.random.randint(0, xsDf.shape[0], (xsDf.shape[0]//100 * outlierPercentage))
    for i in noiseLoc:
        # Offset magnitude uniform in [0, 0.5).
        bias = np.random.random(1)[0] / 2
        featureLoc = np.random.randint(0, 7, (1))[0]
        xsDf.iloc[i, featureLoc] = addBias(xsDf.iloc[i, featureLoc], bias)

    # Sliding window: concatenate `step` time-shifted copies column-wise.
    # NOTE(review): outliers were added only to xsDf (the oldest slice); the
    # shifted slices below come from the clean xDf — confirm this asymmetry
    # is intended.
    xsDf = xsDf.reset_index(drop=True)
    for i in range(1, step):
        temDf = pd.DataFrame(xDf.iloc[i: xDf.shape[0] - step + i, :])
        temDf = temDf.reset_index(drop=True)
        xsDf = pd.concat([temDf, xsDf], axis=1)

    ysDf = pd.DataFrame(yDf.iloc[step:yDf.shape[0]])

    return xsDf.astype("float32"), ysDf

# 加一个outlier偏移量, 保证其在0, 1之间
# Apply an outlier offset while keeping the value inside [0, 1].
def addBias(origin, bias):
    """Shift ``origin`` by +bias or -bias (direction chosen uniformly at
    random) and clamp the result into the interval [0, 1]."""
    shifted = np.random.choice([origin + bias, origin - bias])
    if shifted > 1:
        return 1
    if shifted < 0:
        return 0
    return shifted


if __name__ == "__main__":
    # Smoke-run the noisy debutanizer pipeline when executed as a script.
    # NOTE(review): this guard sits mid-file, so the definitions below it
    # (rmse, trainSAE, ...) do not exist yet when it runs — it only needs
    # dataProcessDeNoise, so execution still works, but it would normally
    # live at the end of the file.
    dataProcessDeNoise(step=26, noiseScale=10)


def rmse(a, b):
    """Return the root-mean-square error between ``a`` and ``b``.

    Both inputs are flattened first, so any matching shapes are accepted.
    """
    flatA = a.reshape(-1)
    flatB = b.reshape(-1)
    diff = flatA - flatB
    return math.sqrt(np.sum(diff * diff) / len(flatA))


def flatTorch(inputTensor):
    """Collapse all trailing dimensions of a tensor, keeping the batch dim."""
    batch = inputTensor.size(0)
    return inputTensor.reshape(batch, -1)


class RelatLoss(nn.Module):
    """Weighted RMSE-style reconstruction loss.

    forward(x, y, weight):
        x: (batchSize, step*varNum) tensor, e.g. (128, 30*7)
        y: same shape as x — the prediction
        weight: numpy array broadcastable to (x - y), per-feature weights
    Returns sqrt( sum(weight * (x - y)^2) / batchSize^2 ).
    """
    def __init__(self):
        super(RelatLoss, self).__init__()

    def forward(self, x, y, weight):
        n = x.shape[0]
        w = torch.from_numpy(weight)
        # Flatten everything past the batch dimension.
        flatX = x.reshape(x.size(0), -1)
        flatY = y.reshape(y.size(0), -1)
        squaredErr = (flatX - flatY) ** 2
        return torch.sqrt(torch.sum(squaredErr * w) / (n * n))

class MccLoss(nn.Module):
    """Negated, weighted Gaussian-kernel (correntropy-style) loss.

    forward(x, y, sigma, weight):
        x: (batchSize, step*varNum) tensor, e.g. (128, 30*7)
        y: same shape as x — the prediction
        sigma: Gaussian kernel width
        weight: numpy array broadcastable to (x - y), per-feature weights
    Returns -sum( exp(-weight * (x - y)^2) / (2 * sigma^2) ).
    """
    def __init__(self):
        super(MccLoss, self).__init__()

    def forward(self, x, y, sigma, weight):
        w = torch.from_numpy(weight)
        # Flatten everything past the batch dimension.
        flatX = x.reshape(x.size(0), -1)
        flatY = y.reshape(y.size(0), -1)
        squaredErr = (flatX - flatY) ** 2  # element-wise squared error
        return -torch.sum(torch.exp(-squaredErr * w) / (2 * sigma * sigma))


def getWeight(step=30):
    """Per-feature weights: |corr(x_i, y)| repeated for each of `step` windows.

    Computes the absolute Pearson correlation between every process variable
    and y on the un-windowed debutanizer data, then tiles that 7-element
    vector `step` times so it lines up with a (step * varNum) input row.
    """
    xArr, yArr = dataProcessDe(step=1)
    xArr = xArr.values
    yArr = yArr.values

    corrs = []
    for col in range(xArr.shape[1]):
        r = np.corrcoef(xArr[:, col], yArr[:, 0])[0, 1]
        corrs.append(np.abs(r))

    # Repeat the per-variable correlations once per window position.
    tiled = []
    for _ in range(step):
        tiled = np.hstack((tiled, corrs))
    return tiled




def test(SAEModel):
    """Plot the model's y prediction and the reconstruction quality of the
    first two auto-encoder layers on a slice of the dataProcess() dataset.

    :param SAEModel: trained stacked auto-encoder exposing ``ae1``/``ae2``
        sub-modules that accept an ``isHidden`` flag (True -> hidden code,
        False -> reconstruction) — assumed from usage below, confirm against
        the model definition.
    """
    # Fix the RNG for reproducibility.
    torch.manual_seed(233)
    torch.set_printoptions(profile="full")
    # NOTE(review): "default" immediately overrides the "full" profile set
    # above, so the net effect is default (elided) printing — confirm intent.
    torch.set_printoptions(profile="default")
    historyRecordNum = 5000  # start offset of the evaluation window
    showNum = 1000           # number of consecutive samples to plot
    xsDf, ysDf = dataProcess()
    xPredi = torch.from_numpy(
        np.array(xsDf.iloc[historyRecordNum:historyRecordNum + showNum, :]))  # torch.Size([14990, 130])
    yPredi = torch.from_numpy(
        np.array(ysDf.iloc[historyRecordNum:historyRecordNum + showNum, :]))  # torch.Size([14990, 1])

    # Ground truth (blue) vs. full-model prediction (red).
    plt.plot(np.linspace(0, showNum - 1, showNum), yPredi.data, c="blue")
    plt.plot(np.linspace(0, showNum - 1, showNum), SAEModel(xPredi).data, c="red")
    plt.grid()
    plt.title("最终y的预测")  # "final y prediction"
    plt.show()

    # Layer-1 reconstruction of feature 9: input (blue) vs. reconstruction (red).
    plt.plot(np.linspace(0, showNum-1, showNum), xPredi[:, 9], c="blue")
    plt.plot(np.linspace(0, showNum-1, showNum), SAEModel.ae1(xPredi, isHidden=False).
             detach().numpy()[:, 9], c="red")
    plt.title("第一层的重构能力")  # "layer-1 reconstruction quality"
    plt.grid()
    plt.show()

    # Layer-2 reconstruction of layer-1's hidden code, feature 9.
    plt.plot(np.linspace(0, showNum - 1, showNum),
             SAEModel.ae1(xPredi, isHidden=True).
             detach().numpy()[:, 9], c="blue")
    plt.plot(np.linspace(0, showNum - 1, showNum),
             SAEModel.ae2(SAEModel.ae1(xPredi, isHidden=True), isHidden=False).
             detach().numpy()[:, 9], c="red")
    plt.title("第二层的重构能力")  # "layer-2 reconstruction quality"
    plt.grid()
    plt.show()



def trainSAE(model, xTrainDataloader, yTrainDataloader, batchSize, SAEEpoch, isTuning, lr):
    """Train the stacked auto-encoder model for ``SAEEpoch`` epochs.

    Two phases, selected by ``isTuning``:
      * isTuning=False: freeze both auto-encoder layers (``model.ae1`` /
        ``model.ae2``) and train only the regression head with Adam.
      * isTuning=True: unfreeze every parameter and fine-tune end-to-end with
        SGD, combining both reconstruction losses with the regression loss.

    :param model: network exposing ``ae1`` and ``ae2`` sub-modules; calling
        ``model(x)`` yields the regression output.
    :param xTrainDataloader: sized iterable yielding input batches.
    :param yTrainDataloader: sized iterable yielding target batches in
        lockstep with the inputs.
    :param batchSize: unused here; kept for caller compatibility.
    :param SAEEpoch: number of training epochs.
    :param isTuning: phase selector (see above).
    :param lr: learning rate for the chosen optimizer.
    """
    if isTuning:
        print("****微调阶段****")
        # Unfreeze every parameter for end-to-end fine-tuning.
        for param in model.parameters():
            param.requires_grad = True
        optimizer = optim.SGD(model.parameters(), lr=lr)
    else:
        print("****训练回归层****")
        # Freeze both auto-encoders so only the regression head learns.
        for param in model.ae1.parameters():
            param.requires_grad = False
        for param in model.ae2.parameters():
            param.requires_grad = False
        optimizer = optim.Adam(model.parameters(), lr=lr)

    criterion = nn.MSELoss()

    for epoch in range(SAEEpoch):
        sum_loss = 0.0
        for batchIdx, (x, y) in enumerate(zip(xTrainDataloader, yTrainDataloader)):
            optimizer.zero_grad()
            if isTuning:
                # The three terms can differ wildly in magnitude; over-weighting
                # the regression term degrades both reconstructions in practice.
                loss = criterion(model.ae1(x), x)
                ae1_out = model.ae1(x, isHidden=True)
                loss += 5 * criterion(model.ae2(ae1_out), ae1_out)
                loss += 6 * criterion(model(x), y)
            else:
                loss = 5 * criterion(model(x), y)
            loss.backward()
            optimizer.step()

            sum_loss += loss.item()
            # BUG FIX: the original condition ``batchIdx + 1 % 100 == 0``
            # parses as ``batchIdx + (1 % 100) == 0`` and never fires; it also
            # averaged with ``sum_loss/batchIdx`` (ZeroDivisionError on the
            # first batch). Log every 100 batches and on the last batch.
            if (batchIdx + 1) % 100 == 0 or (batchIdx + 1) == len(xTrainDataloader):
                print("=>epoch:{}, batchInd:{}, trainLoss:{:.4f}".format(
                    epoch, batchIdx, sum_loss / (batchIdx + 1)))






