import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from tqdm import tqdm
import numpy as np
import argparse

# Fully connected neural network model definition
class FCNet(nn.Module):
    def __init__(self, input_size, output_size):
        super(FCNet, self).__init__()
        self.fc1 = nn.Linear(input_size, 32)
        self.fc2 = nn.Linear(32, 32)
        self.fc3 = nn.Linear(32, output_size)

    def forward(self, x):
        out = torch.relu(self.fc1(x))
        out = torch.relu(self.fc2(out) + out)
        out = torch.relu(self.fc2(out) + out)
        out = self.fc3(out)
        return out
    
# Class saved to disk as the deployable model: min-max scaling + standardization + PCA + neural network
class ModelWrapper(nn.Module):
    """Bundle the fitted preprocessing chain with the trained network.

    The exported TorchScript model then accepts raw, unscaled feature rows
    and reproduces the training-time pipeline: min-max scale ->
    standardize -> PCA projection -> network.

    Args:
        model: trained network mapping PCA space to the target.
        standard: fitted StandardScaler (provides ``mean_`` / ``scale_``).
        pca: fitted PCA (provides ``components_`` / ``mean_``).
        minmax: fitted MinMaxScaler (provides ``data_min_`` / ``data_max_``).
    """

    def __init__(self, model, standard, pca, minmax):
        super(ModelWrapper, self).__init__()
        self.model = model
        # Register the preprocessing constants as buffers rather than plain
        # tensor attributes so they follow the module across .to(device)/.cpu()
        # and are persisted in state_dict() and the TorchScript export.
        self.register_buffer('mean_', torch.tensor(standard.mean_, dtype=torch.float32))
        self.register_buffer('scale_', torch.tensor(standard.scale_, dtype=torch.float32))
        self.register_buffer('pca_components_', torch.tensor(pca.components_, dtype=torch.float32))
        self.register_buffer('pca_mean_', torch.tensor(pca.mean_, dtype=torch.float32))
        self.register_buffer('min_', torch.tensor(minmax.data_min_, dtype=torch.float32))
        self.register_buffer('max_', torch.tensor(minmax.data_max_, dtype=torch.float32))

    def forward(self, x):
        # Mirror the training-time order exactly: min-max, standardize,
        # then project onto the PCA components before the network.
        x = (x - self.min_) / (self.max_ - self.min_)
        x = (x - self.mean_) / self.scale_
        x = torch.matmul(x - self.pca_mean_, self.pca_components_.T)
        return self.model(x)

class ModelTrain():
    """End-to-end training driver.

    Loads a CSV dataset, fits the preprocessing chain
    (min-max -> standardize -> PCA), trains an FCNet regressor with optional
    early stopping, and writes TorchScript checkpoints (best.pt / last.pt),
    a parameters.txt summary and a loss curve into a fresh run folder.
    """

    def __init__(self, parser):
        # Tunable hyper-parameters. Parse the CLI exactly once instead of
        # calling parser.parse_args() repeatedly for every option.
        args = parser.parse_args()
        self.pca_n_components = args.pca
        self.batch_size_set = args.batch
        self.output_size = 1  # number of output neurons
        self.num_epochs = args.epoch
        self.learning_rate = 0.0002  # learning rate

        self.file_path = args.input
        self.feature_name = args.target
        self.best_test_loss = float('inf')
        self.train_losses = []
        self.test_losses = []
        # early == 0 disables early stopping (patience equals the full run).
        self.early_stop = args.early if args.early != 0 else self.num_epochs
        self.wait_epoch = 0  # epochs since the test loss last improved
        self.train_flag = True

        self.main()

    def main(self):
        """Load the dataset, then run preprocessing and training."""
        self.dataset = pd.read_csv(self.file_path)
        self.data_preprocess()
        self.train()
        print(f"file is saved in {self.folder_path}")

    def data_preprocess(self):
        """Split, scale and project the raw dataframe into DataLoaders."""
        X = self.dataset.drop(['header_stamp', self.feature_name], axis=1)
        y = self.dataset[self.feature_name].values
        self.input_size = X.shape[1]

        # Chronological 8:2 train/test split (no shuffling of the split).
        train_size = int(0.8 * len(y))
        X_train, X_test = X[:train_size], X[train_size:]
        y_train, y_test = y[:train_size], y[train_size:]

        # Min-max scaling to [0, 1]; fit on train only to avoid leakage.
        self.train_minmax = MinMaxScaler()
        X_train_minmax = self.train_minmax.fit_transform(X_train)
        X_test_minmax = self.train_minmax.transform(X_test)

        # Standardization (zero mean, unit variance per feature).
        self.train_standard = StandardScaler()
        X_train_standard = self.train_standard.fit_transform(X_train_minmax)
        X_test_standard = self.train_standard.transform(X_test_minmax)

        # Dimensionality reduction.
        self.train_pca = PCA(n_components=self.pca_n_components)
        X_train_pca = self.train_pca.fit_transform(X_train_standard)
        X_test_pca = self.train_pca.transform(X_test_standard)

        # Convert to tensors (also fixes the former `X_trian_tensor` typo).
        X_train_tensor = torch.tensor(X_train_pca, dtype=torch.float32)
        X_test_tensor = torch.tensor(X_test_pca, dtype=torch.float32)
        y_train_tensor = torch.tensor(y_train, dtype=torch.float32).view(-1, 1)
        y_test_tensor = torch.tensor(y_test, dtype=torch.float32).view(-1, 1)

        # Data loaders. NOTE(review): shuffle=False preserves the original
        # behaviour; consider shuffle=True on the training loader if the
        # samples are i.i.d. rather than a time series.
        train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
        test_dataset = TensorDataset(X_test_tensor, y_test_tensor)
        self.train_loader = DataLoader(train_dataset, batch_size=self.batch_size_set, shuffle=False)
        self.test_loader = DataLoader(test_dataset, batch_size=self.batch_size_set, shuffle=False)

    def train(self):
        """Run the optimization loop with per-epoch checkpointing."""
        # Model, loss and optimizer. The network input is the PCA dimension.
        input_size = self.pca_n_components
        model = FCNet(input_size, self.output_size)
        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=self.learning_rate)

        # Print a model summary.
        print(model)
        total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(f"Total Parameters: {total_params}")

        self.wrapped_model = ModelWrapper(model, self.train_standard, self.train_pca, self.train_minmax)

        # Train the model, recording per-epoch losses for the curve.
        for epoch in range(self.num_epochs):
            with tqdm(total=len(self.train_loader)+len(self.test_loader),
                      desc=f'Epoch {epoch+1}/{self.num_epochs}', unit='batch') as pbar:
                # Training pass.
                model.train()
                train_loss = 0
                for inputs, labels in self.train_loader:
                    optimizer.zero_grad()
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    loss.backward()
                    optimizer.step()
                    pbar.update(1)
                    train_loss += loss.item()
                # Track RMSE (sqrt of mean per-batch MSE).
                self.train_losses.append(np.sqrt(train_loss / len(self.train_loader)))
                # Evaluation pass.
                model.eval()
                test_loss = 0
                with torch.no_grad():
                    for X_batch, y_batch in self.test_loader:
                        outputs = model(X_batch)
                        loss = criterion(outputs, y_batch)
                        test_loss += loss.item()
                        pbar.update(1)
                self.test_losses.append(np.sqrt(test_loss / len(self.test_loader)))
            print(f'           Train Loss: {self.train_losses[epoch]:.5f}, Test Loss: {self.test_losses[epoch]:.5f}')
            self._save_model_and_params(epoch, test_loss)
            self.plot_loss()
            if not self.train_flag:
                break

    def _save_model_and_params(self, epoch, test_loss):
        """Persist hyper-parameters and TorchScript checkpoints; update the early-stop counter."""
        if epoch == 0:
            # Create a fresh run folder: train, train-1, train-2, ...
            folder_name = "train"
            current_file_path = os.path.abspath(__file__)
            directory_path = os.path.dirname(current_file_path)
            directory_path = os.path.join(directory_path, "../../weights/runs/FCNet")
            self.folder_path = os.path.join(directory_path, folder_name)
            counter = 1
            while os.path.exists(self.folder_path):
                self.folder_path = os.path.join(directory_path, f"{folder_name}-{counter}")
                counter += 1
            os.makedirs(self.folder_path)

        # Re-write the hyper-parameter summary every epoch so `last_epochs`
        # always reflects how far training actually got.
        self.parameters = {
            'input_size' : self.input_size,
            'batch_size_set': self.batch_size_set,
            'output_size': self.output_size,
            'total_epochs': self.num_epochs,
            'last_epochs' : epoch+1,
            'learning_rate': self.learning_rate,
            'feature_name': self.feature_name,
        }
        params_file_name = os.path.join(self.folder_path, 'parameters.txt')
        with open(params_file_name, 'w') as f:
            for key, value in self.parameters.items():
                f.write(f'{key}: {value}\n')

        # Export via TorchScript; save `best.pt` only on improvement,
        # `last.pt` every epoch.
        model_scripted = torch.jit.script(self.wrapped_model)
        if test_loss < self.best_test_loss:
            self.best_test_loss = test_loss
            model_file_name = os.path.join(self.folder_path, 'best.pt')
            model_scripted.save(model_file_name)
            self.wait_epoch = 0
        else:
            self.wait_epoch += 1
        if self.wait_epoch >= self.early_stop:
            print("Test loss has stopped improving; stopping training early")
            self.train_flag = False

        last_model_file_name = os.path.join(self.folder_path, 'last.pt')
        model_scripted.save(last_model_file_name)

    def plot_loss(self):
        """Save (and show) the train/test loss curves for the run so far."""
        # The original always sliced from epoch 50, which produced an empty
        # plot for runs of 50 epochs or fewer; fall back to epoch 0 then.
        start = 50 if len(self.train_losses) > 50 else 0
        plt.plot(range(start, len(self.train_losses)), self.train_losses[start:], label='Training Loss')
        plt.plot(range(start, len(self.test_losses)), self.test_losses[start:], label='Test Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('Training and Test Loss')
        plt.legend()
        plt.savefig(os.path.join(self.folder_path, 'loss.png'))
        # NOTE(review): plt.show() inside the epoch loop blocks with
        # interactive backends — consider removing it.
        plt.show()
        plt.clf()


if __name__ == "__main__":
    # Build the command-line interface and hand it to the training driver.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input', default="../../data/prediction_data.csv", help='path to load dataset')
    arg_parser.add_argument('-t', '--target', default="bucket_deg", help='target feature name')
    arg_parser.add_argument('--epoch', type=int, default=300, help='train epochs')
    arg_parser.add_argument('-b', '--batch', type=int, default=5, help='batch size')
    arg_parser.add_argument('--pca', type=int, default=10, help='pca components')
    arg_parser.add_argument('-e', '--early', type=int, default=0, help='Stop early and wait for epochs, 0 is not early')

    ModelTrain(arg_parser)