import os
import time

import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST, CIFAR10
from torchvision.utils import save_image
import numpy as np
import pandas as pd
import torch
import os
from datetime import datetime
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch import nn
from torch import optim
from torchvision import datasets
from opening._MyCode.Modules import *
from opening._MyCode.Utils import *
from opening._MyCode.PredictionUtils import *


def trainAE(encoderList, xTrainDataLoader, trainLayer, AEEpoch):
    """Greedily train one layer of a stacked autoencoder.

    Parameters
    ----------
    encoderList : list
        AutoEncoder modules; only ``encoderList[trainLayer]`` is optimized,
        all layers below it are frozen.
    xTrainDataLoader : DataLoader
        Yields input batches ``x``.
    trainLayer : int
        Index of the layer to train (0 or 1 in this script).
    AEEpoch : int
        Number of training epochs (0 skips training entirely).

    NOTE(review): reads the module-level global ``step`` through
    ``getWeight(step=step)`` — the ``__main__`` section must define it
    before calling this function.
    """
    # Put the target layer in training mode.
    encoderList[trainLayer].train()
    optimizer = optim.Adam(encoderList[trainLayer].parameters(), lr=0.01)

    # Per-feature weights, printed for inspection (roughly 0.039..0.261).
    # Currently unused by the active loss; kept for the weighted-loss
    # experiments that were tried earlier (see commented calls below).
    testWeight = getWeight(step=step)
    print("weight: ", testWeight)

    # MccLoss (maximum correntropy criterion) is the active reconstruction
    # loss; nn.MSELoss() and RelatLoss() were earlier experiments.
    criterion = MccLoss()

    # Freeze every layer below the one being trained (greedy layer-wise
    # training). Hoisted out of the epoch loop: re-freezing each epoch,
    # as the original did, is idempotent and therefore redundant.
    for j in range(trainLayer):
        for param in encoderList[j].parameters():
            param.requires_grad = False

    for i in range(AEEpoch):
        sumLoss = 0
        for batchIdx, x in enumerate(xTrainDataLoader):
            optimizer.zero_grad()
            out = x
            if trainLayer == 1:
                # Feed through the (frozen) first layer to obtain its
                # hidden code as the second layer's input.
                out = encoderList[0](out, isHidden=True)
            pred = encoderList[trainLayer](out, isHidden=False)

            # RMSE-style objective: square root of the criterion value.
            loss = torch.sqrt(criterion(pred, out))
            sumLoss += loss.item()
            loss.backward()
            optimizer.step()
            # BUGFIX: the original referenced ``xTrainDataloader`` (a name
            # that only exists as an accidental global in __main__) and
            # computed ``sumLoss / batchIdx``, which raises
            # ZeroDivisionError when the first batch is also the last.
            # Use the actual parameter and the 1-based batch count.
            if (i < 7 or i % 5 == 0) and ((batchIdx + 1) % 20 == 0 or (batchIdx + 1) == len(xTrainDataLoader)):
                print('=>epo:{}, batchIndex: {}, trainLoss: {:.3f}, currentLoss: {:.3f}'
                      .format(i, batchIdx + 1, sumLoss / (batchIdx + 1), loss.item()))

# NOTE(review): the triple-quoted string below is a disabled earlier variant
# of getDataLoader/__main__ (train/predict split at historyRecordNum,
# 40 epochs, different output .pth name). It is dead code kept alive as a
# discarded module-level string literal; the active versions follow after
# it. Consider deleting it or recovering it from version control instead.
'''

# 加载数据参数
def getDataLoader(step = 20, historyRecordNum = 1500, isDe = True, isNoise = False):
    batchSize = 128  # 128

    # 用来训练初始网络的历史数据数量
    torch.set_printoptions(profile="default")
    torch.set_printoptions(precision=5)
    torch.manual_seed(233)


    #xsDf, ysDf = dataProcess(step=step)
    if isNoise:
        xsDf, ysDf = dataProcessDeNoise(step=step, noiseScale = 0.1, outlierPercentage = 20)
    else:
        xsDf, ysDf = dataProcessDe(step=step)

    xTrain = torch.from_numpy(np.array(xsDf.iloc[:historyRecordNum, :]))  # torch.Size([5000, 130])
    xPredi = torch.from_numpy(np.array(xsDf.iloc[historyRecordNum:, :]))  # torch.Size([14990, 130])
    yTrain = torch.from_numpy(np.array(ysDf.iloc[:historyRecordNum, :]))  # torch.Size([5000, 1])
    yPredi = torch.from_numpy(np.array(ysDf.iloc[historyRecordNum:, :]))  # torch.Size([14990, 1])

    xTrainDataloader = DataLoader(xTrain, batch_size=batchSize, shuffle=True, num_workers=8, drop_last=True)
    yTrainDataloader = DataLoader(yTrain, batch_size=batchSize, shuffle=True, num_workers=8, drop_last=True)
    xPrediDataloader = DataLoader(xPredi, batch_size=batchSize, shuffle=True, num_workers=8, drop_last=True)
    yPrediDataloader = DataLoader(yPredi, batch_size=batchSize, shuffle=True, num_workers=8, drop_last=True)

    return xTrainDataloader, yTrainDataloader, xPrediDataloader, yPrediDataloader

if __name__ == "__main__":
    # AE的超参数
    step = 26
    hiddenSize_1, hiddenSize_2 = 12, 6
    historyRecord = 1594
    isDe = True
    isRX = True # x间的关系考虑

    varNum = 7 if isDe else 13
    for step in range(step, step+1, 1):
        for hiddenSize1 in range(hiddenSize_1, hiddenSize_1+1, 1):
            for hiddenSize2 in range(hiddenSize_2, hiddenSize_2+1, 1):
                if isDe:
                    label = "DeSAE"
                else:
                    label = "SAE"
                print("\n", label+str(step)+"To"+str(hiddenSize1)+"to"+str(hiddenSize2))
                # 不重要的参数
                AEEpoch1 = 40

                AEEpoch2 = 0
                xTrainDataloader, yTrainDataloader, xPrediDataloader, yPrediDataloader = \
                    getDataLoader(step = step, historyRecordNum=historyRecord - step, isDe=isDe, isNoise=True)

                encoder1 = AutoEncoder(varNum*step, hiddenSize1)
                encoder2 = AutoEncoder(hiddenSize1, hiddenSize2)
                encoderList = [encoder1, encoder2]
                trainAE(encoderList, xTrainDataloader, 0, AEEpoch1)
                trainAE(encoderList, xTrainDataloader, 1, AEEpoch2)

                SAEModel = FeatureSAE(encoderList, hiddenSize2)
                #pathName = "./modelStorage/"+ label+str(step)+"To"+str(hiddenSize1)+"to"+str(hiddenSize2)+".pth"
                folderPath = "C:/work/pycharm/previous/opening/_MyCode/"
                pathName = folderPath + "essayModel/VRE-AE_脱丁烷塔_离群20%.pth"
                torch.save(SAEModel, pathName)
                print(pathName)

'''
def getDataLoader(step = 20, historyRecordNum = 1500, isDe = True, isNoise = False):
    """Build the four shuffled DataLoaders (xTrain, yTrain, xPredi, yPredi).

    NOTE(review): ``historyRecordNum`` and ``isDe`` are accepted but never
    used — every loader is built from the same fixed slice ``[2100:, :]``.
    This looks like a one-off experiment variant (see the "1800~2100" model
    name in __main__); confirm before reusing elsewhere.
    """
    batchSize = 128  # 128

    # Printing / reproducibility setup.
    torch.set_printoptions(profile="default")
    torch.set_printoptions(precision=5)
    torch.manual_seed(233)

    # Choose the preprocessing pipeline (with or without injected noise).
    if isNoise:
        xsDf, ysDf = dataProcessDeNoise(step=step, noiseScale = 0.1, outlierPercentage = 20)
    else:
        xsDf, ysDf = dataProcessDe(step=step)

    # Train and predict tensors are the SAME slice here (see note above);
    # order matches the return contract: xTrain, yTrain, xPredi, yPredi.
    sliceStart = 2100
    tensors = [
        torch.from_numpy(np.array(frame.iloc[sliceStart:, :]))
        for frame in (xsDf, ysDf, xsDf, ysDf)
    ]

    loaders = [
        DataLoader(t, batch_size=batchSize, shuffle=True, num_workers=8, drop_last=True)
        for t in tensors
    ]
    return loaders[0], loaders[1], loaders[2], loaders[3]

if __name__ == "__main__":
    # Autoencoder hyperparameters.
    # NOTE(review): ``step`` is also read as a module-level global by
    # trainAE (via getWeight(step=step)) — it must be assigned here before
    # trainAE runs.
    step = 26
    hiddenSize_1, hiddenSize_2 = 12, 6
    historyRecord = 1594
    isDe = True
    isRX = True # whether relationships among the x variables are considered

    varNum = 7 if isDe else 13
    # Single-iteration loops: scaffolding left over from wider
    # hyperparameter sweeps; each range covers exactly one value.
    for step in range(step, step+1, 1):
        for hiddenSize1 in range(hiddenSize_1, hiddenSize_1+1, 1):
            for hiddenSize2 in range(hiddenSize_2, hiddenSize_2+1, 1):
                if isDe:
                    label = "DeSAE"
                else:
                    label = "SAE"
                print("\n", label+str(step)+"To"+str(hiddenSize1)+"to"+str(hiddenSize2))
                # Less important parameters.
                AEEpoch1 = 200

                # 0 epochs: the second AE layer is effectively not trained.
                AEEpoch2 = 0
                xTrainDataloader, yTrainDataloader, xPrediDataloader, yPrediDataloader = \
                    getDataLoader(step = step, historyRecordNum=historyRecord - step, isDe=isDe, isNoise=True)

                # Greedy layer-wise training of the two-layer stacked AE.
                encoder1 = AutoEncoder(varNum*step, hiddenSize1)
                encoder2 = AutoEncoder(hiddenSize1, hiddenSize2)
                encoderList = [encoder1, encoder2]
                trainAE(encoderList, xTrainDataloader, 0, AEEpoch1)
                trainAE(encoderList, xTrainDataloader, 1, AEEpoch2)

                # Assemble the stacked model and save it to a hard-coded
                # absolute Windows path.
                SAEModel = FeatureSAE(encoderList, hiddenSize2)
                #pathName = "./modelStorage/"+ label+str(step)+"To"+str(hiddenSize1)+"to"+str(hiddenSize2)+".pth"
                folderPath = "C:/work/pycharm/previous/opening/_MyCode/"
                pathName = folderPath + "essayModel/VC-AE_1800~2100.pth"
                torch.save(SAEModel, pathName)
                print(pathName)



























