from scipy.io import arff
import os
import numpy as np
import torch
import pandas as pd
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.optim as optim
import math  
from DataSet import ArffDataset
from GetDataFromArff import ArffDataProcessor
from torch.utils.data import DataLoader
from Moudle import PreTrainModel, LinearMapping, TransformerModel
from optimizers import RAdam,PlainRAdam,AdamW
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import random
import os
from glob import glob


# --- Reproducibility & device selection ---
# Pin every RNG the pipeline touches (stdlib random, numpy, torch) so that
# runs are repeatable across invocations.
seed = 3407
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)

# cuDNN: trade autotuned kernel speed for deterministic behavior.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# --- Dataset construction ---
# Parse the train and test ARFF splits into processor objects.
train_path = 'Data/SpokenArabicDigits_TRAIN.arff'
TrainDataProcessor = ArffDataProcessor(filePath=train_path, DataDistribution=None)
test_path = 'Data/SpokenArabicDigits_TEST.arff'
TestDataProcessor = ArffDataProcessor(filePath=test_path, DataDistribution=None)

print("----------数据文件读取完毕------------")
# Wrap each processor in a Dataset; sequences are padded to a fixed
# length of 128 using 0.0 as the padding value.
TrainDataSet = ArffDataset(dataProcessor=TrainDataProcessor, maxLenth=128, paddingValue=0.0)
inputDim = TrainDataProcessor.getSampleDim()     # per-sample feature dimension
numClasses = TrainDataProcessor.getNumClasses()  # number of target classes
TestDataSet = ArffDataset(dataProcessor=TestDataProcessor, maxLenth=128, paddingValue=0.0)
print("--------Dataloader加载完毕-----------")

# --- Model construction ---
# Transformer hyper-parameters.
modelDim = 16          # embedding width inside the transformer
numHeads = 8           # number of attention heads
numEncoderLayers = 4   # stacked encoder layers
dimFeedforward = 24    # hidden width of each encoder feed-forward sublayer
dropout = 0.1

# inputLayer projects raw features (inputDim) into the model width,
# mainModel is the transformer encoder stack, and PreTrainModel adds the
# classification head over numClasses labels.
# (The unused `outputDim = 24` constant and the commented-out outputLayer
# were removed: the output size actually comes from numClasses.)
inputLayer = LinearMapping(inputDim, modelDim)
mainModel = TransformerModel(modelDim, numHeads, numEncoderLayers,
                             dimFeedforward, maxLen=128, dropout=dropout)
model = PreTrainModel(numClasses, inputLayer, mainModel)

# --- Checkpoint restore & run-mode selection ---
# Resume from the best checkpoint if one exists. map_location makes a
# checkpoint saved on GPU loadable when running on CPU (and vice versa);
# without it torch.load tries to restore tensors onto the device they
# were saved from and fails when that device is unavailable.
model_path = 'best_model.pth'
if os.path.exists(model_path):
    model.load_state_dict(torch.load(model_path, map_location=device))
    print("模型参数加载成功")

# Select the job to run: 'train' or 'evaluate'.
work = 'evaluate'

if work == 'train':
    # --- Training loop ---
    model = model.to(device)

    # Cross-entropy over the numClasses logits produced by the model.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    model.train()  # enable dropout etc. for training
    num_epochs = 1500
    losses = []    # average loss per epoch, for the plot below

    # Build the DataLoader once instead of once per epoch; with
    # shuffle=True a fresh iterator still reshuffles every epoch.
    dataloader = DataLoader(TrainDataSet, batch_size=32, shuffle=True)

    best_loss = float('inf')  # best epoch loss seen so far, for checkpointing

    for epoch in range(num_epochs):
        start_time = time.time()
        total_loss = 0
        for samples, labels, masks in tqdm(dataloader, desc=f"Epoch {epoch+1}/{num_epochs}"):
            samples, labels, masks = samples.to(device), labels.to(device), masks.to(device)
            # Flatten (batch, 1) targets to (batch,). view(-1) is safe even
            # for a final batch of size 1, where squeeze() would produce a
            # 0-d tensor and break CrossEntropyLoss.
            labels = labels.view(-1)
            # Masks arrive batch-first; the transformer expects them
            # transposed (True marks positions to be ignored).
            masks = masks.transpose(0, 1)
            outputs = model(samples, masks)
            optimizer.zero_grad()
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        avg_loss = total_loss / len(dataloader)
        losses.append(avg_loss)
        elapsed_time = time.time() - start_time

        # Checkpoint whenever the epoch-average loss improves.
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(model.state_dict(), 'best_model.pth')
            improved = 'Improved'
        else:
            improved = 'Not improved'

        print(f"Epoch {epoch+1}/{num_epochs} - Duration: {elapsed_time:.2f}s - Loss: {avg_loss:.8f} - {improved}")

    # Plot the training curve.
    plt.plot(losses, label='Training Loss')
    plt.title('Loss during Training')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('training_loss.png')
    plt.show()

elif work == 'evaluate':

    correct = 0
    total = 0
    # 加载您的测试集或验证集
    dataloader = DataLoader(TestDataSet, batch_size=32, shuffle=False)

    with torch.no_grad():
        for samples, labels, masks in dataloader:
            samples, labels, masks = samples.to(device), labels.to(device), masks.to(device)
            masks = masks.transpose(0, 1)
            # print(model.bn1.running_mean)
            outputs = model(samples, masks)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    print(f'准确率: {accuracy:.2f}%')