import pandas as pd
import statsmodels.api as sm
import numpy as np
import matplotlib.pyplot as plt
from _code.Utils import *
from _code.Modules import *


def trainAE(encoderList, xTrainDataLoader, trainLayer, AEEpoch):
    """Greedily train one denoising-autoencoder layer of a stacked AE.

    Args:
        encoderList: list of autoencoder modules. Layer 0 consumes raw
            input; layer 1 is trained on layer 0's hidden representation.
        xTrainDataLoader: iterable yielding 2-D input tensors of shape
            (batch, features) — assumed 2-D since the noise tensor is
            built from shape[0] x shape[1].
        trainLayer: index (0 or 1) of the layer in ``encoderList`` to train.
        AEEpoch: number of training epochs.

    Side effects: updates ``encoderList[trainLayer]``'s parameters in
    place and prints progress. Returns None.
    """
    print("start train layer: ", trainLayer)

    # Put only the selected layer into training mode and optimize it alone.
    encoderList[trainLayer].train()
    optimizer = optim.Adam(encoderList[trainLayer].parameters(), lr=0.01)
    criterion = nn.MSELoss()

    # When training layer 1, freeze layer 0 so it acts as a fixed feature
    # extractor. Freezing once is sufficient: nothing below re-enables
    # grads (the original redundantly re-froze every epoch).
    if trainLayer == 1:
        for param in encoderList[0].parameters():
            param.requires_grad = False

    for i in range(AEEpoch):
        sumLoss = 0
        for batchIdx, x in enumerate(xTrainDataLoader):
            optimizer.zero_grad()

            # Denoising AE: corrupt the input with N(0, 0.1^2) noise.
            noiseX = x + torch.randn(x.shape[0], x.shape[1]) * 0.1
            if trainLayer == 1:
                # Map the noisy input through the frozen first layer.
                noiseX = encoderList[0](noiseX, isHidden=True)
            pred = encoderList[trainLayer](noiseX, isHidden=False)

            if trainLayer == 0:
                # Layer 0 reconstructs the clean input from the noisy one
                # (RMSE rather than plain MSE).
                loss = torch.sqrt(criterion(pred, x))
            else:
                # Deeper layers reconstruct their own (hidden) input.
                loss = torch.sqrt(criterion(pred, noiseX))
            sumLoss += loss.item()
            loss.backward()
            optimizer.step()

            # BUG FIX: the original referenced the module-level name
            # ``xTrainDataloader`` (NameError outside this script's main)
            # and averaged by ``batchIdx`` (off by one; ZeroDivisionError
            # when the first batch is also the last).
            if (i < 7 or i % 5 == 0) and ((batchIdx + 1) % 20 == 0 or (batchIdx + 1) == len(xTrainDataLoader)):
                print('=>epo:{}, batchIndex: {}, trainLoss: {:.3f}, currentLoss: {:.3f}'
                      .format(i, batchIdx + 1, sumLoss / (batchIdx + 1), loss.item()))


if __name__ == "__main__":
    # Network dimensions: input -> first hidden -> second hidden.
    varSize = 4
    hiddenSize1 = 3
    hiddenSize2 = 2
    historyRecordNum = 1000

    # Epochs per greedily-trained layer; should eventually be 70, 70.
    AEEpoch1, AEEpoch2 = 30, 6

    # BUG FIX: pass historyRecordNum instead of duplicating the literal
    # 1000 — the variable was defined above but never used.
    xTrainDataloader, yTrainDataloader, xPrediDataloader, yPrediDataloader = getDataLoader(historyRecordNum)

    # Greedy layer-wise pretraining of the stacked autoencoder. Only
    # layer 0 is trained here; layer 1's call is left disabled as in
    # the original.
    encoder1 = AutoEncoder(varSize, hiddenSize1)
    encoder2 = AutoEncoder(hiddenSize1, hiddenSize2)
    encoderList = [encoder1, encoder2]
    trainAE(encoderList, xTrainDataloader, 0, AEEpoch=AEEpoch1)
    #trainAE(encoderList, xTrainDataloader, 1, AEEpoch=AEEpoch2)

    # Assemble the pretrained layers into the stacked autoencoder.
    mySAE = SAE(encoderList=encoderList)
    #torch.save(mySAE, "./modelStorage/"+str(varSize)+"to"+str(hiddenSize1)+"to"+str(hiddenSize2)+".pth")



















