import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt

from sklearn.svm import LinearSVC
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader


import math

from tqdm import tqdm

import itertools

from static.datasets import MyDataset
from network import SelfAttention

# Number of target categories. Not referenced anywhere in this file —
# presumably consumed by code elsewhere or left over; TODO confirm.
n_category=3

def r2_loss(output, target, weights=None):
    """Return ``ss_res / ss_tot`` (i.e. ``1 - R^2``) as a minimizable loss.

    Minimizing this quantity maximizes the coefficient of determination;
    0.0 means a perfect fit.

    Args:
        output: predicted values (tensor).
        target: ground-truth values (tensor), same shape as ``output``.
        weights: unused; kept for interface compatibility with callers
            that pass a weights argument.

    Returns:
        Scalar tensor: residual sum of squares over total sum of squares.
    """
    target_mean = torch.mean(target)
    ss_tot = torch.sum((target - target_mean) ** 2)
    ss_res = torch.sum((target - output) ** 2)
    # Guard against a constant target batch (ss_tot == 0), which would
    # otherwise produce NaN/inf values and gradients.
    return ss_res / ss_tot.clamp_min(1e-12)

def self_loss(output, target, weights=None):
    # NOTE(review): unfinished stub — `gap` is computed but never used,
    # `if output>=1` raises a RuntimeError for any multi-element tensor,
    # and the function implicitly returns None. It is only referenced in
    # commented-out code inside train(); confirm intent before wiring it
    # up as a loss function.
    gap=output>1
    if output>=1:
        return

def train(dataPath, modelSavePath):
    """Train a SelfAttention regressor on the CSV at *dataPath*.

    The last CSV column is the regression target; all other columns are
    features. Features are standardized, the target is min-max scaled,
    and the data is split 60/20/20 into train/valid/test. Training
    minimizes ``r2_loss`` (1 - R^2); per-epoch MSE/MAE/R2 are tracked,
    plotted, and the trained model is pickled to *modelSavePath*, then
    re-loaded for a final evaluation on the test split.

    Args:
        dataPath: path to the input CSV file.
        modelSavePath: path where the trained model object is saved
            (via ``torch.save`` of the whole model).
    """
    print('\ntraining: ', dataPath)

    dataset = pd.read_csv(dataPath)
    X = dataset.iloc[:, :-1]
    y = dataset.iloc[:, -1]

    # NOTE(review): both scalers are fit on the FULL dataset before the
    # train/test split, which leaks test-set statistics into training.
    # Kept as-is to preserve existing results; consider fitting on the
    # training split only and transforming valid/test with it.
    X_scaled = preprocessing.StandardScaler().fit_transform(X)
    y = preprocessing.MinMaxScaler().fit_transform(y.values.reshape(-1, 1))

    # 60/20/20 split: carve off 20% as test, then 2/8 of the remaining
    # 80% (i.e. 20% of the whole) as validation.
    X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=2/10, random_state=3)
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=2/8, random_state=0)

    print('\ntrain image shape : ', X_train.shape)
    print('train label shape : ', y_train.shape)
    print('valid image shape : ', X_valid.shape)
    print('valid label image : ', y_valid.shape)
    print('test image shape  : ', X_test.shape)
    print('test label image : ', y_test.shape)

    train_dataset = MyDataset(X=X_train, y=y_train)
    valid_dataset = MyDataset(X=X_valid, y=y_valid)
    test_dataset = MyDataset(X=X_test, y=y_test)

    train_loader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=True)
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=128, shuffle=False)
    test_loader = DataLoader(dataset=test_dataset, batch_size=128, shuffle=False)

    # Positional args look like (n_features, n_samples, heads, dropout) —
    # presumably; confirm against network.SelfAttention's signature.
    model = SelfAttention(X.shape[1], X.shape[0], 8, 0.5)
    print('\nmodel: ')
    print(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # Per-epoch mean metric histories, used for the loss-curve plots below.
    mean_train_losses = {'MSE': [], 'MAE': [], 'R2': []}
    mean_valid_losses = {'MSE': [], 'MAE': [], 'R2': []}

    epochs = 20000

    y_train_pred = []  # predictions collected on the final epoch only
    for epoch in range(epochs):
        model.train()

        train_losses = {'MSE': [], 'MAE': [], 'R2': []}
        valid_losses = {'MSE': [], 'MAE': [], 'R2': []}
        for i, (data, labels) in enumerate(train_loader):
            data = data.float()
            labels = labels.float()

            optimizer.zero_grad()

            output = model(data)
            if epoch == epochs - 1:
                # Keep the final epoch's predictions for the fit plot.
                y_train_pred += output.detach().numpy().tolist()

            # Optimize 1 - R^2 (see r2_loss) rather than plain MSE.
            loss = r2_loss(output, labels)
            loss.backward()
            optimizer.step()

            # sklearn metrics expect (y_true, y_pred): labels must come
            # first — r2_score is not symmetric in its arguments.
            out_np = output.detach().numpy()
            lab_np = labels.detach().numpy()
            train_losses['MSE'].append(mean_squared_error(lab_np, out_np))
            train_losses['MAE'].append(mean_absolute_error(lab_np, out_np))
            train_losses['R2'].append(r2_score(lab_np, out_np))

        if (epoch + 1) % 100 == 0:
            print('== loss-func: ', loss)

        model.eval()
        with torch.no_grad():
            for i, (data, labels) in enumerate(valid_loader):
                data = data.float()
                labels = labels.float()

                outputs = model(data)

                out_np = outputs.numpy()
                lab_np = labels.numpy()
                valid_losses['MSE'].append(mean_squared_error(lab_np, out_np))
                valid_losses['MAE'].append(mean_absolute_error(lab_np, out_np))
                valid_losses['R2'].append(r2_score(lab_np, out_np))

        mean_train_losses['MSE'].append(np.mean(train_losses['MSE']))
        mean_train_losses['MAE'].append(np.mean(train_losses['MAE']))
        mean_train_losses['R2'].append(np.mean(train_losses['R2']))
        mean_valid_losses['MSE'].append(np.mean(valid_losses['MSE']))
        mean_valid_losses['MAE'].append(np.mean(valid_losses['MAE']))
        mean_valid_losses['R2'].append(np.mean(valid_losses['R2']))

        if epoch % 100 == 0:
            print('epoch : {}, train loss : {:.4f}, {:.4f}, {:.4f}; valid loss : {:.4f}, {:.4f}, {:.4f}'.format(epoch + 1, np.mean(
            train_losses['MSE']), np.mean(train_losses['MAE']), np.mean(train_losses['R2']), np.mean(
            valid_losses['MSE']), np.mean(valid_losses['MAE']), np.mean(valid_losses['R2'])))

    # Plot final-epoch training predictions against the ground truth.
    preds = np.array(y_train_pred)
    plt.plot(y_train, label='y_train')
    plt.plot(preds, label='y_train_pred')
    plt.title('ours_train', fontsize='large', fontweight='bold')
    plt.legend()
    plt.show()

    # Plot MSE / MAE / R2 learning curves side by side.
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(30, 10))
    for ax, metric in zip((ax1, ax2, ax3), ('MSE', 'MAE', 'R2')):
        ax.plot(mean_train_losses[metric], label='train')
        ax.plot(mean_valid_losses[metric], label='valid')
        lines, labels = ax.get_legend_handles_labels()
        ax.legend(lines, labels, loc='best')

    plt.show()

    torch.save(model, modelSavePath)

    # Reload from disk so the final evaluation exercises the saved artifact.
    model = torch.load(modelSavePath)
    test_losses = {'MSE': [], 'MAE': [], 'R2': []}
    with torch.no_grad():
        for i, (data, labels) in enumerate(test_loader):
            data = data.float()
            labels = labels.float()

            outputs = model(data)

            out_np = outputs.numpy()
            lab_np = labels.numpy()
            test_losses['MSE'].append(mean_squared_error(lab_np, out_np))
            test_losses['MAE'].append(mean_absolute_error(lab_np, out_np))
            test_losses['R2'].append(r2_score(lab_np, out_np))

    print('\ntest: ')
    print("MSE: ", np.mean(test_losses['MSE']))
    print("MAE: ", np.mean(test_losses['MAE']))
    print("R2_Score: ", np.mean(test_losses['R2']))






if __name__ == '__main__':
    # Script entry point: train on the normalized ingot-rate dataset and
    # save the model alongside this script. Alternate dataset variants are
    # kept commented for reference.
    # train('../../data/ingotRate.csv', './ingotRatePrediction.pkl')
    train('../../data/ingotRate_normal.csv', './ingotRatePrediction_normal.pkl')
    # train('../../data/ingotRate_average.csv','./ingotRatePrediction_average.pkl')



