import os
import numpy as np
import pandas as pd
import nibabel as nib
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
import timm
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import torch.nn.functional as F 
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import datetime
from hybrid_model import VisionTransformer
from datasets_crc import MedicalDataset
from hybrid_model import CONFIGS as CONFIGS_ViT_seg
from torchvision import transforms
from dataset_SUV_max import MRIDataset

# Training hyperparameters
EPOCHS = 30
LEARNING_RATE = 1e-3
# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def train_and_evaluate(model, train_loader, val_loader):
    """Train `model` and evaluate it on the validation set every epoch.

    Training uses CrossEntropyLoss + AdamW with mixed precision (when CUDA
    is available), gradient clipping at 1.0, and a ReduceLROnPlateau
    scheduler driven by the validation macro-F1. The best model (by val F1)
    is checkpointed to 'best_model_TransUNet.pth' and a timestamped text
    log records every epoch.

    Args:
        model: network called as model(images, rois) returning class logits.
        train_loader: DataLoader yielding (images, rois, labels, pid) batches.
        val_loader: DataLoader with the same batch structure.

    Returns:
        dict with per-epoch lists 'train_loss', 'val_loss' (mean loss per
        batch) and 'val_f1'.
    """
    # Initialize the training log file with a run timestamp.
    start_time = datetime.datetime.now()
    timestamp = start_time.strftime("%Y%m%d_%H%M%S")
    log_filename = f"training_log_{timestamp}.txt"

    # Per-epoch history returned to the caller.
    metrics = {
        'train_loss': [],
        'val_loss': [],
        'val_f1': []
    }

    # Record run configuration up front.
    with open(log_filename, 'w') as f:
        f.write(f"Training Start Time: {start_time}\n")
        f.write(f"Epochs: {EPOCHS}, Initial LR: {LEARNING_RATE}, Device: {device}\n")
        f.write("Hyperparameters:\n")
        f.write("- CrossEntropyLoss\n")
        f.write("- Optimizer: AdamW\n")
        # BUG FIX: the log previously claimed 1e-4 while the optimizer uses 1e-5.
        f.write("- Weight Decay: 1e-5\n")
        f.write("- Gradient Clip: 1.0\n\n")

    criterion = torch.nn.CrossEntropyLoss()

    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)

    # Halve the LR after `patience` epochs without val-F1 improvement.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=0.5, patience=3, verbose=True
    )

    # BUG FIX: the original ran loss.backward() on an autocast (fp16) loss
    # without gradient scaling, which can silently underflow gradients; use
    # GradScaler, disabled (no-op) when running on CPU.
    use_amp = device.type == 'cuda'
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    best_val_f1 = 0.0
    for epoch in range(EPOCHS):
        epoch_start = datetime.datetime.now()
        model.train()
        train_loss = 0.0

        for batch in tqdm(train_loader, desc=f'Epoch {epoch+1}'):
            images, rois = batch[0].to(device), batch[1].to(device)
            labels = batch[2].to(device)
            optimizer.zero_grad()

            with torch.cuda.amp.autocast(enabled=use_amp):
                outputs = model(images, rois)
                loss = criterion(outputs, labels)

            scaler.scale(loss).backward()
            # Unscale before clipping so the 1.0 threshold applies to the
            # true gradient magnitudes.
            scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            scaler.step(optimizer)
            scaler.update()

            train_loss += loss.item()

        # ---- Validation ----
        model.eval()
        val_loss = 0.0
        all_preds, all_labels = [], []

        with torch.no_grad():
            # BUG FIX: the original iterated train_loader here, so every
            # "validation" metric (and model selection) was computed on the
            # training set while val_loss was divided by len(val_loader).
            for batch in tqdm(val_loader, desc=f'Epoch {epoch+1}'):
                images, rois = batch[0].to(device), batch[1].to(device)
                labels = batch[2].to(device)

                outputs = model(images, rois)
                loss = criterion(outputs, labels)

                val_loss += loss.item()
                preds = outputs.argmax(dim=1)

                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        # ---- Epoch metrics ----
        val_acc = accuracy_score(all_labels, all_preds)
        val_f1 = f1_score(all_labels, all_preds, average='macro')
        cm = confusion_matrix(all_labels, all_preds)
        current_lrs = [group['lr'] for group in optimizer.param_groups]
        epoch_duration = datetime.datetime.now() - epoch_start

        avg_train_loss = train_loss / len(train_loader)
        avg_val_loss = val_loss / len(val_loader)

        # Build the log entry for this epoch.
        log_entry = f"\nEpoch {epoch+1}/{EPOCHS} [{epoch_duration}]"
        log_entry += f"\nTrain Loss: {avg_train_loss:.4f}"
        log_entry += f"\nVal Loss: {avg_val_loss:.4f}"
        log_entry += f"\nVal Acc: {val_acc:.4f} | Val F1: {val_f1:.4f}"
        log_entry += f"\nLearning Rates: {[f'{lr:.2e}' for lr in current_lrs]}"
        log_entry += "\nConfusion Matrix:\n" + np.array2string(cm, separator=', ')

        # Print and persist the log entry.
        print(log_entry)
        with open(log_filename, 'a') as f:
            f.write(log_entry + "\n")

        # Store per-batch averages (the original stored raw sums, which was
        # inconsistent with the averaged values written to the log).
        metrics['train_loss'].append(avg_train_loss)
        metrics['val_loss'].append(avg_val_loss)
        metrics['val_f1'].append(val_f1)

        # Step the scheduler on val F1 and checkpoint the best model.
        scheduler.step(val_f1)
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            save_msg = f"\n ********** New Best Model (F1: {val_f1:.4f})********** "
            print(save_msg)
            with open(log_filename, 'a') as f:
                f.write(save_msg + "\n")
            torch.save(model.state_dict(), 'best_model_TransUNet.pth')

    # ---- Final summary ----
    end_time = datetime.datetime.now()
    total_duration = end_time - start_time
    final_log = f"\n\nTraining Complete!\nTotal Duration: {total_duration}\n"
    final_log += f"Best Validation F1: {best_val_f1:.4f}\n"
    final_log += f"Model Saved: best_model_TransUNet.pth"

    print(final_log)
    with open(log_filename, 'a') as f:
        f.write(final_log)
    return metrics

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument('--vit_patches_size', type=int,
                        default=16, help='vit_patches_size, default is 16')
    parser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')
    parser.add_argument('--data_dir', default="./datasets/single_model", help='data_dir of network input')
    parser.add_argument('--vit_name', type=str, default='R50-ViT-B_16', help='select one vit model')
    args = parser.parse_args()

    # ToTensor only converts PIL-Image/ndarray inputs; the final Lambda drops
    # the leading (channel) dimension added by ToTensor — presumably the
    # inputs are single-channel slices, so (1, H, W) -> (H, W). TODO confirm.
    data_transform = transforms.Compose([
                                    transforms.ToTensor(),
                                    transforms.Resize((224, 224)),
                                    transforms.Normalize([0.5], [0.5]),
                                    transforms.Lambda(lambda x: x.squeeze(0)),
                                    ])

    train_dataset = MRIDataset(root_dir='D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/train',
                        transform=data_transform)
    test_dataset = MRIDataset(root_dir='D:/PyChrom/PythonProject/2.medical_image/DeepTrip/datasets/pT/three_label_data/test',
                        transform=data_transform)
    train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)
    # FIX: the evaluation set should not be shuffled; metrics are unaffected
    # but batch order becomes deterministic and reproducible.
    test_loader = DataLoader(test_dataset, batch_size=2, shuffle=False)

    num_classes = 2
    # Look up the ViT configuration for the selected model variant.
    config_vit = CONFIGS_ViT_seg[args.vit_name]
    config_vit.n_classes = num_classes
    if args.vit_name.find('R50') != -1:
        # R50 hybrid backbones need an explicit patch grid derived from the
        # input size and patch size.
        config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size))

    # FIX: use the module-level `device` instead of a hard .cuda() call so
    # the script also runs on CPU-only machines.
    model = VisionTransformer(config_vit, img_size=224, num_classes=config_vit.n_classes).to(device)
    # Initialize from the pretrained ViT weights referenced by the config.
    model.load_from(weights=np.load(config_vit.pretrained_path))

    train_and_evaluate(model, train_loader, test_loader)