#  导入包和定义参数
import sys
import os
import torch
import torch.utils.data as tud
import matplotlib.pyplot as plt
import torch.nn as nn
from tqdm import tqdm
import pandas as pd
import argparse
import numpy as np
from network import MLP


def parse_option():
    """Build the CLI parser for MLP training and return the parsed args."""
    parser = argparse.ArgumentParser('MLP')
    parser.add_argument('--epochs', type=int, default=100,
                        help='the number of epoch')
    parser.add_argument('--save_path', type=str, default='..\\models',
                        help='the path of model saved')
    parser.add_argument('--train_file', type=str,
                        default='E:\\Downloads\\deep_learning_for_cv-master\\deep_learning_for_cv-master\\datasets\\train_dataset.csv',
                        help='the path of train file')
    parser.add_argument('--val_file', type=str,
                        default='E:\\Downloads\\deep_learning_for_cv-master\\deep_learning_for_cv-master\\datasets\\val_dataset.csv',
                        help='the path of val file')
    # parser.parse_known_args() would tolerate unknown flags if ever needed
    return parser.parse_args()


def load_data(train_file, val_file):
    """Load the training and validation CSV files into DataFrames.

    Both files are read with no header (the first line is skipped), so
    columns are indexed numerically by pandas.

    Returns:
        (train_df, val_df) on success, (None, None) if either file is missing.
    """
    try:
        train_df = pd.read_csv(train_file, header=None, skiprows=1)
        print(f"训练集形状: {train_df.shape}")
        val_df = pd.read_csv(val_file, header=None, skiprows=1)
        print(f"验证集形状: {val_df.shape}")
    except FileNotFoundError as e:
        # Best-effort: the caller checks for None instead of catching.
        print(f"文件未找到: {e}")
        return None, None
    return train_df, val_df

def prepare_features(data, n_features=9):
    """Split a DataFrame into feature and target PyTorch tensors.

    Args:
        data: DataFrame whose first ``n_features`` columns are the model
            inputs and whose last column is the regression target.
        n_features: number of leading columns used as features
            (default 9, matching the project's CSV layout).

    Returns:
        (X_tensor, y_tensor) as float32 tensors, or (None, None) if the
        frame cannot be sliced or converted.
    """
    try:
        # Both features AND target are float32: the model is trained with
        # MSELoss (regression), so the target is a real value, not an
        # int64 class index.
        X = data.iloc[:, :n_features].values.astype(np.float32)
        y = data.iloc[:, -1].values.astype(np.float32)

        # from_numpy shares memory with the (freshly copied) arrays and
        # keeps the float32 dtype.
        X_tensor = torch.from_numpy(X)
        y_tensor = torch.from_numpy(y)

        print(f"特征张量形状: {X_tensor.shape}")
        print(f"标签张量形状: {y_tensor.shape}")

        return X_tensor, y_tensor
    except Exception as e:
        # Best-effort: callers check for None rather than catching.
        print(f"准备特征时出错: {e}")
        return None, None


def main(args):
    """Train the MLP on the CSV datasets and keep the best checkpoint.

    The validation metric is ``1 - mean(|pred - target| / target)``
    (one minus the mean relative error), so higher is better; the model
    is saved to ``args.save_path`` whenever the metric improves.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # Fixed seed so shuffling and weight init are reproducible.
    torch.manual_seed(1)
    train_data, val_data = load_data(args.train_file, args.val_file)
    if train_data is None or val_data is None:
        return

    X_train, y_train = prepare_features(train_data)
    X_val, y_val = prepare_features(val_data)
    if X_train is None or X_val is None:
        return

    train_dataset = tud.TensorDataset(X_train, y_train)
    val_dataset = tud.TensorDataset(X_val, y_val)
    train_loader = tud.DataLoader(dataset=train_dataset,
                                  batch_size=16,
                                  shuffle=True)
    # Shuffling validation data is pointless: the metric is order-independent.
    val_loader = tud.DataLoader(dataset=val_dataset,
                                batch_size=16,
                                shuffle=False)

    net = MLP()
    net.to(device)

    optimizer = torch.optim.SGD(net.parameters(), lr=0.00001)
    # Decay the LR by 10x every 5 epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
    criterion = nn.MSELoss()

    # Honor the CLI-configured save directory (previously the script saved
    # to a hard-coded absolute path and created the wrong directory).
    os.makedirs(args.save_path, exist_ok=True)
    model_file = os.path.join(args.save_path, 'model.pth')

    best_acc = 0.0
    for epoch in range(args.epochs):
        # ---- train ----
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, batch in enumerate(train_bar):
            train_x, labels = batch
            optimizer.zero_grad()

            outputs = net(train_x.to(device))
            # Squeeze the trailing dim so a (B, 1) model output matches the
            # (B,) target -- otherwise nn.MSELoss broadcasts to (B, B) and
            # silently computes the wrong loss. A no-op if output is (B,).
            loss = criterion(outputs.squeeze(-1), labels.to(device))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if step % 2000 == 1999:  # print every 2000 mini-batches
                print(f'train_data:[{epoch + 1}, {step + 1:5d}] loss: {running_loss / 2000:.3f}')
                running_loss = 0.0

        scheduler.step()  # advance the LR schedule once per epoch

        # ---- validate ----
        net.eval()
        total_rel_err = 0.0  # sum of |pred - target| / target over all samples
        total_samples = 0
        with torch.no_grad():
            val_bar = tqdm(val_loader, file=sys.stdout)
            for val_x, val_labels in val_bar:
                val_labels = val_labels.float().to(device)
                predict_y = net(val_x.to(device)).squeeze(-1).float()
                # Accumulate across batches: the previous `acc = ...` reset
                # the sum each batch, so the metric only reflected the last
                # (possibly short) batch and was divided by its size alone.
                # NOTE(review): assumes targets are never zero -- otherwise
                # the relative error divides by zero; confirm with the data.
                batch_err = torch.abs(predict_y - val_labels) / val_labels
                total_rel_err += batch_err.sum().item()
                total_samples += val_labels.size(0)

        val_accurate = 1 - (total_rel_err / total_samples)
        print(f'[epoch {epoch+1}] , val_accuracy: {val_accurate:.3f}')
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net, model_file)
    print('Finished Training')


if __name__ == '__main__':
    args_ = parse_option()
    main(args_)
