import os
import pickle
import time
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
from data_proc.dataset import SegmentationDataset
import matplotlib.pyplot as plt
import pandas as pd
from utils import save_checkpoint,load_checkpoint,load_dataset_splits



def _run_epoch(model, loader, loss_fn, device, optim=None, desc='Training'):
    """Run one full pass over *loader* and return the average per-batch loss.

    When *optim* is given the pass is a training epoch (gradients enabled,
    optimizer stepped per batch); otherwise the model is put in eval mode
    and the pass runs under ``torch.no_grad()``.

    Returns 0.0 for an empty loader instead of raising ZeroDivisionError.
    """
    training = optim is not None
    if training:
        model.train()
    else:
        model.eval()

    total_loss = 0.0
    # len(loader) is the true number of batches (including a partial last
    # batch) — the original code used len(dataset) // batch_size, which
    # both truncated the progress bar and inflated the averaged loss.
    bar = tqdm(loader, total=len(loader), desc=desc, leave=False)
    grad_ctx = torch.enable_grad() if training else torch.no_grad()
    with grad_ctx:
        for images, targets in bar:
            images, targets = images.to(device), targets.to(device)
            pred = model(images)
            loss = loss_fn(pred, targets)
            if training:
                optim.zero_grad()
                loss.backward()
                optim.step()
            total_loss += loss.item()
            bar.set_postfix(loss=loss.item())
    return total_loss / max(len(loader), 1)


def _plot_and_export_history(history, output_path):
    """Plot train/valid loss curves and write them to PNG and CSV in *output_path*."""
    plt.style.use('ggplot')
    plt.figure(figsize=(10, 5))
    plt.plot(history['train_loss'], color='mediumvioletred', linewidth=2, label='Train Loss')
    plt.plot(history['valid_loss'], color='deepskyblue', linewidth=2, label='Validation Loss')
    plt.title('Training and Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)
    plt.savefig(os.path.join(output_path, 'loss_plot.png'))
    plt.show()

    # Persist the raw numbers alongside the figure for later analysis.
    pd.DataFrame(history).to_csv(os.path.join(output_path, 'loss_history.csv'), index=False)


def training_main(config, model, loss_fn):
    """Train *model* with *loss_fn*, checkpointing every epoch and tracking the best model.

    Expects in *config*: OUTPUT_PATH, MODEL_PATH, CHECKPOINT_PATH,
    BEST_MODEL_PATH, HISTORY_PATH, EPOCH_NUM, CP_OVERWRITE, BATCH_SIZE,
    SPLIT_FILE_IDX, IMAGE_SIZE, LEARNING_RATE, DEVICE, KWARGS (extra
    DataLoader keyword arguments).

    Side effects: saves a per-epoch checkpoint, the best model weights (by
    validation loss), the final full model, a pickled history dict, a loss
    plot PNG, and a loss-history CSV.
    """
    if torch.cuda.is_available():
        print(torch.cuda.get_device_name(0))
        print(torch.cuda.device_count())
        print('CUDA Version:', torch.version.cuda)

    # Paths and hyper-parameters from the configuration.
    output_path = config['OUTPUT_PATH']
    model_path = config['MODEL_PATH']
    checkpoint_model_path = config['CHECKPOINT_PATH']
    best_model_path = config['BEST_MODEL_PATH']
    history_path = config['HISTORY_PATH']
    epoch_num = config['EPOCH_NUM']
    overwrite_checkpoint_flag = config['CP_OVERWRITE']
    batch_size = config['BATCH_SIZE']
    split_datasets_file_idx = config['SPLIT_FILE_IDX']
    device = config['DEVICE']

    # Load the pre-computed train/valid split.
    train_imgs, valid_imgs, train_masks, valid_masks = load_dataset_splits(split_datasets_file_idx)

    transform = transforms.Compose([
        transforms.Resize(config['IMAGE_SIZE']),
        transforms.ToTensor()
    ])

    # Build the train and valid datasets.
    train_set = SegmentationDataset(train_imgs, train_masks, transform)
    valid_set = SegmentationDataset(valid_imgs, valid_masks, transform)
    print(f'Train images: {len(train_set)}\nvalid images: {len(valid_set)}')

    train_loader = DataLoader(train_set, batch_size=batch_size, **config['KWARGS'])
    valid_loader = DataLoader(valid_set, batch_size=batch_size, **config['KWARGS'])

    # Move the model to the target device BEFORE building the optimizer:
    # batches are sent to `device` below, and a model left on the CPU would
    # crash the first forward pass. `.to()` is idempotent if the caller
    # already moved it.
    model = model.to(device)
    optim = Adam(model.parameters(), lr=config['LEARNING_RATE'])

    start_time = time.time()

    # Best validation loss so far; initialised to +inf so the first epoch
    # always produces a "best" model.
    best_valid_loss = float('inf')

    # Resume from an existing checkpoint unless overwriting is requested.
    if os.path.exists(checkpoint_model_path) and overwrite_checkpoint_flag == 0:
        start_epoch, history = load_checkpoint(checkpoint_model_path, model, optim)
    else:
        start_epoch = 0
        history = {'train_loss': [], 'valid_loss': []}

    for epoch_idx in range(start_epoch, epoch_num):
        print(f'EPOCH: {epoch_idx + 1}/{epoch_num}')

        avg_train_loss = _run_epoch(model, train_loader, loss_fn, device,
                                    optim=optim, desc='Training')
        avg_valid_loss = _run_epoch(model, valid_loader, loss_fn, device,
                                    desc='Validating')

        history['train_loss'].append(avg_train_loss)
        history['valid_loss'].append(avg_valid_loss)

        # Track the best model by validation loss.
        if avg_valid_loss < best_valid_loss:
            best_valid_loss = avg_valid_loss
            torch.save(model.state_dict(), best_model_path)
            print(f"New best model saved with valid loss: {best_valid_loss:.4f}")

        # Checkpoint after every epoch so training can be resumed.
        save_checkpoint({
            'epoch': epoch_idx + 1,
            'state_dict': model.state_dict(),
            'optimizer': optim.state_dict(),
            'history': history,
            'train_loss': avg_train_loss,
            'valid_loss': avg_valid_loss,
        }, checkpoint_model_path)

        print(f'Train loss: {avg_train_loss:.4f} valid loss: {avg_valid_loss:.4f}')

    end_time = time.time() - start_time
    print(f'Total time: {end_time:.2f}s')

    # Save the full model object and the pickled history dict.
    torch.save(model, model_path)
    with open(history_path, 'wb') as f:
        pickle.dump(history, f, protocol=pickle.HIGHEST_PROTOCOL)

    _plot_and_export_history(history, output_path)
