import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # pick the GPU — check availability before use
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"  # get more detailed CUDA error messages
os.environ["TORCH_USE_CUDA_DSA"] = "1"    # enable device-side assertions

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader as TorchDataLoader
from transformers import AutoTokenizer
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import json
import os
from typing import Dict, List, Tuple
import warnings
warnings.filterwarnings('ignore')

# Import the model defined in the companion module
from cet_model_implementation import CNNEnhancedTransformer, create_cet_model

class SentimentDataset(Dataset):
    """
    Torch dataset wrapping raw texts and integer labels for sentiment
    classification.

    Tokenization happens lazily, one sample at a time, in ``__getitem__``.
    """

    def __init__(self,
                 texts: List[str],
                 labels: List[int],
                 tokenizer,
                 max_length: int = 512):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        sample_text = str(self.texts[idx])
        sample_label = self.labels[idx]

        # Tokenize, truncating/padding to the fixed maximum length.
        encoded = self.tokenizer(
            sample_text,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors='pt'
        )

        # The tokenizer returns (1, max_length) tensors; flatten to 1-D so
        # the default collate function can stack them into a batch.
        item = {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'labels': torch.tensor(sample_label, dtype=torch.long)
        }
        return item

class DataLoader:
    """
    Dataset loader supporting the IMDB, SST-2 and SST-5 corpora.

    Each ``load_*`` method returns six lists:
    ``(train_texts, train_labels, val_texts, val_labels, test_texts,
    test_labels)``.  If the on-disk files cannot be read, a small synthetic
    sample set is substituted so the pipeline can still run end to end.

    NOTE(review): this class shadows the name ``DataLoader`` usually used for
    ``torch.utils.data.DataLoader`` (imported above as ``TorchDataLoader``);
    consider renaming in a future change.
    """
    def __init__(self, tokenizer_name: str = 'bert-base-uncased'):
        # HuggingFace tokenizer; some models ship without a pad token, so
        # fall back to the EOS token in that case.
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    
    def load_imdb_data(self, data_dir: str) -> Tuple[List[str], List[int], List[str], List[int], List[str], List[int]]:
        """
        Load the IMDB dataset (binary sentiment) from parquet files under
        *data_dir*.  Falls back to synthetic sample data on any error.
        """
        print("Loading IMDB dataset...")
        
        try:
            # Read the parquet-format splits.
            train_df = pd.read_parquet(os.path.join(data_dir, 'train-00000-of-00001.parquet'))
            test_df = pd.read_parquet(os.path.join(data_dir, 'test-00000-of-00001.parquet'))
            
            # Print the actual column names for debugging.
            print(f"IMDB train columns: {train_df.columns.tolist()}")
            print(f"IMDB test columns: {test_df.columns.tolist()}")
            
            # Default column names; adjusted below if they are not present.
            text_col = 'text'
            label_col = 'label'
            
            if text_col not in train_df.columns:
                # Try to locate a plausible text column.
                for col in train_df.columns:
                    if 'text' in col.lower() or 'sentence' in col.lower() or 'review' in col.lower():
                        text_col = col
                        break
            
            if label_col not in train_df.columns:
                # Try to locate a plausible label column.
                for col in train_df.columns:
                    if 'label' in col.lower() or 'sentiment' in col.lower() or 'score' in col.lower():
                        label_col = col
                        break
            
            print(f"Using columns - Text: {text_col}, Label: {label_col}")
            
            train_texts = train_df[text_col].tolist()
            train_labels = train_df[label_col].tolist()
            test_texts = test_df[text_col].tolist()
            test_labels = test_df[label_col].tolist()
            
            # Make sure the labels are integers.
            train_labels = [int(label) for label in train_labels]
            test_labels = [int(label) for label in test_labels]
            
            # Hold out 10% of the training set as a validation split.
            # NOTE(review): this slices the tail without shuffling; if the
            # parquet rows are ordered by label, the validation split will be
            # class-imbalanced — confirm the file's row order.
            train_size = int(0.9 * len(train_texts))
            val_texts = train_texts[train_size:]
            val_labels = train_labels[train_size:]
            train_texts = train_texts[:train_size]
            train_labels = train_labels[:train_size]
            
            print(f"IMDB dataset: {len(train_texts)} training samples, {len(val_texts)} validation samples, {len(test_texts)} test samples")
            
        except Exception as e:
            # Fall back to tiny synthetic data so the pipeline keeps working.
            print(f"Error loading IMDB dataset: {e}")
            print("Warning: Using sample data for IMDB")
            train_texts = [
                "This movie is absolutely fantastic! Great acting and plot.",
                "Terrible movie, waste of time. Poor acting and boring story.",
                "Amazing cinematography and excellent character development.",
                "Worst movie I've ever seen. Completely disappointing.",
            ] * 1000  # repeat to inflate the sample size
            
            train_labels = [1, 0, 1, 0] * 1000
            
            # Carve out a validation split.
            train_size = int(0.9 * len(train_texts))
            val_texts = train_texts[train_size:]
            val_labels = train_labels[train_size:]
            train_texts = train_texts[:train_size]
            train_labels = train_labels[:train_size]
            
            test_texts = ["This is a good movie.", "This is a bad movie."] * 200
            test_labels = [1, 0] * 200
        
        return train_texts, train_labels, val_texts, val_labels, test_texts, test_labels
    
    def load_sst2_data(self, data_dir: str) -> Tuple[List[str], List[int], List[str], List[int], List[str], List[int]]:
        """
        Load the SST-2 dataset (binary sentiment) from parquet files under
        *data_dir*.  Falls back to synthetic sample data on any error.
        Labels are clamped to {0, 1} before returning.
        """
        print("Loading SST-2 dataset...")
        
        try:
            # Read the parquet-format splits.
            train_df = pd.read_parquet(os.path.join(data_dir, 'train-00000-of-00001.parquet'))
            val_df = pd.read_parquet(os.path.join(data_dir, 'validation-00000-of-00001.parquet'))
            test_df = pd.read_parquet(os.path.join(data_dir, 'test-00000-of-00001.parquet'))
            
            # Print the actual column names for debugging.
            print(f"SST-2 train columns: {train_df.columns.tolist()}")
            print(f"SST-2 validation columns: {val_df.columns.tolist()}")
            print(f"SST-2 test columns: {test_df.columns.tolist()}")
            
            # Default column names; adjusted below if they are not present.
            text_col = 'sentence'
            label_col = 'label'
            
            if text_col not in train_df.columns:
                # Try to locate a plausible text column.
                for col in train_df.columns:
                    if 'text' in col.lower() or 'sentence' in col.lower() or 'review' in col.lower():
                        text_col = col
                        break
            
            if label_col not in train_df.columns:
                # Try to locate a plausible label column.
                for col in train_df.columns:
                    if 'label' in col.lower() or 'sentiment' in col.lower() or 'score' in col.lower():
                        label_col = col
                        break
            
            print(f"Using columns - Text: {text_col}, Label: {label_col}")
            
            train_texts = train_df[text_col].tolist()
            train_labels = train_df[label_col].tolist()
            val_texts = val_df[text_col].tolist()
            val_labels = val_df[label_col].tolist()
            test_texts = test_df[text_col].tolist()
            test_labels = test_df[label_col].tolist()
            
            # Make sure the labels are integers.
            train_labels = [int(label) for label in train_labels]
            val_labels = [int(label) for label in val_labels]
            test_labels = [int(label) for label in test_labels]
            
            # If there is no validation split, hold out 10% of training data.
            if len(val_texts) == 0:
                train_size = int(0.9 * len(train_texts))
                val_texts = train_texts[train_size:]
                val_labels = train_labels[train_size:]
                train_texts = train_texts[:train_size]
                train_labels = train_labels[:train_size]
                
            
            print(f"SST-2 dataset: {len(train_texts)} training samples, {len(val_texts)} validation samples, {len(test_texts)} test samples")
            
        except Exception as e:
            # Fall back to tiny synthetic data so the pipeline keeps working.
            print(f"Error loading SST-2 dataset: {e}")
            print("Warning: Using sample data for SST-2")
            train_texts = [
                "The movie is great!",
                "This film is terrible.",
                "Excellent performance by the actors.",
                "Boring and predictable story.",
            ] * 500
            
            train_labels = [1, 0, 1, 0] * 500
            
            # Carve out a validation split.
            train_size = int(0.9 * len(train_texts))
            val_texts = train_texts[train_size:]
            val_labels = train_labels[train_size:]
            train_texts = train_texts[:train_size]
            train_labels = train_labels[:train_size]
            
            test_texts = ["This is a good movie.", "This is a bad movie."] * 100
            test_labels = [1, 0] * 100
        
        # Ensure labels are integers clamped to the [0, 1] range.
        train_labels = [min(max(int(label), 0), 1) for label in train_labels]
        val_labels = [min(max(int(label), 0), 1) for label in val_labels]
        test_labels = [min(max(int(label), 0), 1) for label in test_labels]
        
        # Print the label distribution of each split.
        print(f"SST-2标签分布 - 训练集: {np.bincount(train_labels)}")
        print(f"SST-2标签分布 - 验证集: {np.bincount(val_labels)}")
        print(f"SST-2标签分布 - 测试集: {np.bincount(test_labels)}")
        
        return train_texts, train_labels, val_texts, val_labels, test_texts, test_labels
    
    def load_sst5_data(self, data_dir: str) -> Tuple[List[str], List[int], List[str], List[int], List[str], List[int]]:
        """
        Load the SST-5 dataset (5-class sentiment) from JSONL files under
        *data_dir*.  Falls back to synthetic sample data on any error.
        Labels are clamped to the [0, 4] range before returning.
        """
        print("Loading SST-5 dataset...")
        
        try:
            # Read the JSONL-format splits, one JSON object per line.
            train_data = []
            with open(os.path.join(data_dir, 'train.jsonl'), 'r', encoding='utf-8') as f:
                for line in f:
                    train_data.append(json.loads(line))
            
            dev_data = []
            with open(os.path.join(data_dir, 'dev.jsonl'), 'r', encoding='utf-8') as f:
                for line in f:
                    dev_data.append(json.loads(line))
            
            test_data = []
            with open(os.path.join(data_dir, 'test.jsonl'), 'r', encoding='utf-8') as f:
                for line in f:
                    test_data.append(json.loads(line))
            
            # Print one sample per split to verify the record format.
            print(f"SST-5 train sample: {train_data[0]}")
            print(f"SST-5 dev sample: {dev_data[0]}")
            print(f"SST-5 test sample: {test_data[0]}")
            
            # Extract texts and labels.
            # NOTE(review): assumes each record has 'text' and 'label' keys —
            # confirm against the actual JSONL schema.
            train_texts = [item['text'] for item in train_data]
            train_labels = [item['label'] for item in train_data]
            val_texts = [item['text'] for item in dev_data]
            val_labels = [item['label'] for item in dev_data]
            test_texts = [item['text'] for item in test_data]
            test_labels = [item['label'] for item in test_data]
            
            # If there is no validation split, hold out 10% of training data.
            if len(val_texts) == 0:
                train_size = int(0.9 * len(train_texts))
                val_texts = train_texts[train_size:]
                val_labels = train_labels[train_size:]
                train_texts = train_texts[:train_size]
                train_labels = train_labels[:train_size]
            
            print(f"SST-5 dataset: {len(train_texts)} training samples, {len(val_texts)} validation samples, {len(test_texts)} test samples")
            
        except Exception as e:
            # Fall back to tiny synthetic data so the pipeline keeps working.
            print(f"Error loading SST-5 dataset: {e}")
            print("Warning: Using sample data for SST-5")
            train_texts = [
                "This movie is absolutely amazing!",    # label: 4
                "Pretty good movie overall.",           # label: 3
                "The movie is okay, nothing special.",  # label: 2
                "Not a good movie, disappointed.",      # label: 1
                "Terrible movie, complete waste of time." # label: 0
            ] * 400
            
            train_labels = [4, 3, 2, 1, 0] * 400
            
            # Carve out a validation split.
            train_size = int(0.9 * len(train_texts))
            val_texts = train_texts[train_size:]
            val_labels = train_labels[train_size:]
            train_texts = train_texts[:train_size]
            train_labels = train_labels[:train_size]
            
            test_texts = ["Amazing film!", "Good movie.", "Average film.", "Bad movie.", "Awful film."] * 40
            test_labels = [4, 3, 2, 1, 0] * 40
        
        # Ensure labels are integers clamped to the [0, 4] range.
        train_labels = [min(max(int(label), 0), 4) for label in train_labels]
        val_labels = [min(max(int(label), 0), 4) for label in val_labels]
        test_labels = [min(max(int(label), 0), 4) for label in test_labels]
        
        # Print the label distribution of each split.
        print(f"SST-5标签分布 - 训练集: {np.bincount(train_labels)}")
        print(f"SST-5标签分布 - 验证集: {np.bincount(val_labels)}")
        print(f"SST-5标签分布 - 测试集: {np.bincount(test_labels)}")
        
        return train_texts, train_labels, val_texts, val_labels, test_texts, test_labels

class Trainer:
    """
    Training / evaluation driver for a classification model.

    The wrapped model's forward must accept ``(input_ids, attention_mask)``
    and return a dict containing at least ``'logits'``; :meth:`evaluate`
    additionally reads ``'attention_weights'`` for later visualization.
    """
    def __init__(self,
                 model: nn.Module,
                 device: torch.device,
                 learning_rate: float = 2e-5,
                 weight_decay: float = 0.01,
                 warmup_steps: int = 1000):
        """
        Args:
            model: classification model to train.
            device: device the model and batches are moved to.
            learning_rate: AdamW learning rate.
            weight_decay: AdamW decoupled weight decay.
            warmup_steps: accepted for interface compatibility; currently
                unused (no warmup scheduler is created).
        """
        self.model = model.to(device)
        self.device = device
        
        # Optimizer: AdamW with decoupled weight decay.
        self.optimizer = optim.AdamW(
            model.parameters(),
            lr=learning_rate,
            weight_decay=weight_decay
        )
        
        # Halve the LR when validation accuracy plateaus for `patience`
        # epochs.  The deprecated `verbose=True` argument was dropped for
        # compatibility with recent PyTorch releases (deprecated since
        # torch 2.2 and scheduled for removal).
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            mode='max',
            factor=0.5,
            patience=3
        )
        
        # Standard cross-entropy classification loss.
        self.criterion = nn.CrossEntropyLoss()
        
        # Per-epoch training history, filled in by train().
        self.train_history = {
            'loss': [],
            'accuracy': [],
            'val_loss': [],
            'val_accuracy': []
        }
    
    def train_epoch(self, train_loader: TorchDataLoader) -> Tuple[float, float]:
        """Run one training epoch; returns (average loss, accuracy)."""
        self.model.train()
        total_loss = 0.0
        all_predictions = []
        all_labels = []
        # Running counters for the progress bar: O(1) per batch instead of
        # recomputing accuracy_score over the full, growing epoch history
        # (which made the original loop quadratic in the number of samples).
        num_correct = 0
        num_seen = 0
        
        progress_bar = tqdm(train_loader, desc="Training")
        
        for batch in progress_bar:
            # Move the batch to the target device.
            input_ids = batch['input_ids'].to(self.device)
            attention_mask = batch['attention_mask'].to(self.device)
            labels = batch['labels'].to(self.device)
            
            # Forward pass.
            self.optimizer.zero_grad()
            outputs = self.model(input_ids, attention_mask)
            
            # Compute the loss.
            loss = self.criterion(outputs['logits'], labels)
            
            # Backward pass with gradient clipping for stability.
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
            self.optimizer.step()
            
            # Bookkeeping.
            total_loss += loss.item()
            predictions = torch.argmax(outputs['logits'], dim=-1)
            all_predictions.extend(predictions.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())
            num_correct += (predictions == labels).sum().item()
            num_seen += labels.size(0)
            
            # Update the progress bar with running loss / accuracy.
            progress_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'acc': f'{num_correct / num_seen:.4f}'
            })
        
        avg_loss = total_loss / len(train_loader)
        accuracy = accuracy_score(all_labels, all_predictions)
        
        return avg_loss, accuracy
    
    def evaluate(self, val_loader: TorchDataLoader) -> Dict:
        """
        Evaluate the model on *val_loader*.

        Returns a dict with loss, accuracy, weighted precision/recall/F1,
        raw predictions and labels, and the attention weights of the first
        few batches (for visualization).
        """
        self.model.eval()
        total_loss = 0.0
        all_predictions = []
        all_labels = []
        all_attention_weights = []
        
        with torch.no_grad():
            for batch in tqdm(val_loader, desc="Evaluating"):
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                labels = batch['labels'].to(self.device)
                
                outputs = self.model(input_ids, attention_mask)
                loss = self.criterion(outputs['logits'], labels)
                
                total_loss += loss.item()
                predictions = torch.argmax(outputs['logits'], dim=-1)
                all_predictions.extend(predictions.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())
                
                # Keep attention weights from the first 10 batches only,
                # to bound memory usage.
                if len(all_attention_weights) < 10:
                    all_attention_weights.append(outputs['attention_weights'])
        
        avg_loss = total_loss / len(val_loader)
        accuracy = accuracy_score(all_labels, all_predictions)
        precision, recall, f1, _ = precision_recall_fscore_support(
            all_labels, all_predictions, average='weighted'
        )
        
        return {
            'loss': avg_loss,
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'predictions': all_predictions,
            'labels': all_labels,
            'attention_weights': all_attention_weights
        }
    
    def train(self, 
              train_loader: TorchDataLoader, 
              val_loader: TorchDataLoader, 
              epochs: int = 10,
              save_path: str = 'best_model.pt') -> Dict:
        """
        Full training loop with per-epoch validation, LR scheduling and
        best-model checkpointing.

        Raises:
            ValueError: if ``epochs`` is less than 1 (the previous version
                crashed with an obscure NameError in that case).
        """
        if epochs < 1:
            raise ValueError("epochs must be >= 1")
        
        best_val_accuracy = 0
        
        for epoch in range(epochs):
            print(f"\nEpoch {epoch + 1}/{epochs}")
            print("-" * 50)
            
            # Train for one epoch.
            train_loss, train_acc = self.train_epoch(train_loader)
            
            # Validate.
            val_results = self.evaluate(val_loader)
            val_loss = val_results['loss']
            val_acc = val_results['accuracy']
            
            # Step the plateau scheduler on validation accuracy.
            self.scheduler.step(val_acc)
            
            # Record history.
            self.train_history['loss'].append(train_loss)
            self.train_history['accuracy'].append(train_acc)
            self.train_history['val_loss'].append(val_loss)
            self.train_history['val_accuracy'].append(val_acc)
            
            # Report.
            print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}")
            print(f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")
            
            # Checkpoint the best model so far (by validation accuracy).
            if val_acc > best_val_accuracy:
                best_val_accuracy = val_acc
                torch.save({
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'best_val_accuracy': best_val_accuracy,
                    'epoch': epoch,
                    'train_history': self.train_history
                }, save_path)
                print(f"New best model saved with validation accuracy: {val_acc:.4f}")
        
        return {
            'best_val_accuracy': best_val_accuracy,
            'train_history': self.train_history,
            'final_results': val_results
        }

class ExperimentRunner:
    """
    Experiment runner: manages data preparation, model creation, training,
    evaluation and result plotting for each configured dataset.
    """
    def __init__(self, config: Dict):
        """
        Args:
            config: experiment configuration; recognized keys include
                ``tokenizer_name``, ``max_length``, ``batch_size``,
                ``model_size``, ``learning_rate``, ``weight_decay``,
                ``epochs``.
        """
        self.config = config
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Using device: {self.device}")
        
        # Tokenizer / corpus loader (the project-local DataLoader class,
        # not torch.utils.data.DataLoader).
        self.data_loader = DataLoader(config.get('tokenizer_name', 'bert-base-uncased'))
        
        # Per-dataset results, keyed by dataset name.
        self.results = {}
    
    def prepare_data(self, dataset_name: str, data_dir: str) -> Tuple[TorchDataLoader, TorchDataLoader, TorchDataLoader, int]:
        """
        Load the named dataset and wrap it in train/val/test DataLoaders.

        Returns:
            (train_loader, val_loader, test_loader, num_classes)

        Raises:
            ValueError: for an unsupported dataset name.
        """
        if dataset_name.lower() == 'imdb':
            train_texts, train_labels, val_texts, val_labels, test_texts, test_labels = self.data_loader.load_imdb_data(data_dir)
            num_classes = 2
        elif dataset_name.lower() == 'sst2':
            train_texts, train_labels, val_texts, val_labels, test_texts, test_labels = self.data_loader.load_sst2_data(data_dir)
            num_classes = 2
        elif dataset_name.lower() == 'sst5':
            train_texts, train_labels, val_texts, val_labels, test_texts, test_labels = self.data_loader.load_sst5_data(data_dir)
            num_classes = 5
        else:
            raise ValueError(f"Unsupported dataset: {dataset_name}")
        
        # Wrap each split in a SentimentDataset.
        train_dataset = SentimentDataset(
            train_texts, train_labels, 
            self.data_loader.tokenizer,
            self.config.get('max_length', 512)
        )
        
        val_dataset = SentimentDataset(
            val_texts, val_labels,
            self.data_loader.tokenizer,
            self.config.get('max_length', 512)
        )
        
        test_dataset = SentimentDataset(
            test_texts, test_labels,
            self.data_loader.tokenizer,
            self.config.get('max_length', 512)
        )
        
        # Build the batch loaders; only the training split is shuffled.
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=self.config.get('batch_size', 16),
            shuffle=True,
            num_workers=2
        )
        
        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=self.config.get('batch_size', 16),
            shuffle=False,
            num_workers=2
        )
        
        test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=self.config.get('batch_size', 16),
            shuffle=False,
            num_workers=2
        )
        
        return train_loader, val_loader, test_loader, num_classes
    
    def run_experiment(self, dataset_name: str, data_dir: str) -> Dict:
        """Run a single experiment: prepare data, train, and evaluate."""
        print(f"\n{'='*60}")
        print(f"Running experiment on {dataset_name} dataset")
        print(f"{'='*60}")
        
        # Prepare the data.
        train_loader, val_loader, test_loader, num_classes = self.prepare_data(dataset_name, data_dir)
        
        # Build the model.
        model = create_cet_model(
            model_config=self.config.get('model_size', 'base'),
            num_classes=num_classes,
            vocab_size=self.data_loader.tokenizer.vocab_size
        )
        
        print(f"Model parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad):,}")
        
        # Build the trainer.
        trainer = Trainer(
            model=model,
            device=self.device,
            learning_rate=self.config.get('learning_rate', 2e-5),
            weight_decay=self.config.get('weight_decay', 0.01)
        )
        
        # Train, selecting the best checkpoint on the VALIDATION set.
        # (Bug fix: the test loader was previously passed here, leaking the
        # test set into model selection.)
        results = trainer.train(
            train_loader=train_loader,
            val_loader=val_loader,
            epochs=self.config.get('epochs', 10),
            save_path=f'best_model_{dataset_name.lower()}.pt'
        )
        
        # Final evaluation on the held-out test set.
        test_results = trainer.evaluate(test_loader)
        results['test_results'] = test_results
        
        # Stash the results.
        self.results[dataset_name] = results
        
        return results
    
    def run_all_experiments(self, datasets: List[Tuple[str, str]]) -> Dict:
        """
        Run every configured experiment and persist a JSON summary.

        Args:
            datasets: list of (dataset_name, data_dir) pairs.
        """
        all_results = {}
        
        for dataset_name, data_dir in datasets:
            try:
                results = self.run_experiment(dataset_name, data_dir)
                all_results[dataset_name] = results
            except Exception as e:
                # A failing dataset should not abort the remaining runs.
                print(f"Error running experiment on {dataset_name}: {str(e)}")
                all_results[dataset_name] = {'error': str(e)}
        
        # Persist all results, keeping only JSON-serializable fields.
        with open('experiment_results.json', 'w') as f:
            serializable_results = {}
            for dataset, result in all_results.items():
                if 'error' not in result:
                    serializable_results[dataset] = {
                        'best_val_accuracy': result['best_val_accuracy'],
                        'train_history': result['train_history']
                    }
                else:
                    serializable_results[dataset] = result
            
            json.dump(serializable_results, f, indent=2)
        
        return all_results
    
    def plot_training_curves(self, results: Dict, save_path: str = 'training_curves.png'):
        """Plot loss/accuracy curves, an accuracy bar chart and a summary
        table for all successful experiments, then save to *save_path*."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('Training Curves Comparison', fontsize=16)
        
        datasets = list(results.keys())
        colors = ['blue', 'red', 'green', 'orange', 'purple']
        
        # Bail out early if nothing succeeded.
        successful_experiments = [dataset for dataset in datasets if 'error' not in results[dataset]]
        
        if not successful_experiments:
            print("No successful experiments to plot.")
            plt.close(fig)
            return
        
        for i, dataset in enumerate(datasets):
            if 'error' in results[dataset]:
                continue
                
            history = results[dataset]['train_history']
            color = colors[i % len(colors)]
            
            # Loss curves
            axes[0, 0].plot(history['loss'], label=f'{dataset} Train', color=color, linestyle='-')
            axes[0, 0].plot(history['val_loss'], label=f'{dataset} Val', color=color, linestyle='--')
            
            # Accuracy curves
            axes[0, 1].plot(history['accuracy'], label=f'{dataset} Train', color=color, linestyle='-')
            axes[0, 1].plot(history['val_accuracy'], label=f'{dataset} Val', color=color, linestyle='--')
        
        axes[0, 0].set_title('Loss Curves')
        axes[0, 0].set_xlabel('Epoch')
        axes[0, 0].set_ylabel('Loss')
        axes[0, 0].legend()
        axes[0, 0].grid(True)
        
        axes[0, 1].set_title('Accuracy Curves')
        axes[0, 1].set_xlabel('Epoch')
        axes[0, 1].set_ylabel('Accuracy')
        axes[0, 1].legend()
        axes[0, 1].grid(True)
        
        # Performance comparison bar chart
        dataset_names = []
        accuracies = []
        
        for dataset, result in results.items():
            if 'error' not in result:
                dataset_names.append(dataset)
                accuracies.append(result['best_val_accuracy'])
        
        if dataset_names:  # only plot when at least one experiment succeeded
            # Cycle the palette so more datasets than colors does not raise
            # (the old colors[:len(dataset_names)] slice broke for > 5).
            bar_colors = [colors[i % len(colors)] for i in range(len(dataset_names))]
            axes[1, 0].bar(dataset_names, accuracies, color=bar_colors)
            axes[1, 0].set_title('Best Validation Accuracy Comparison')
            axes[1, 0].set_ylabel('Accuracy')
            axes[1, 0].tick_params(axis='x', rotation=45)
        
        # Results table
        axes[1, 1].axis('tight')
        axes[1, 1].axis('off')
        
        table_data = []
        for dataset, result in results.items():
            if 'error' not in result:
                table_data.append([
                    dataset,
                    f"{result['best_val_accuracy']:.4f}",
                    f"{max(result['train_history']['accuracy']):.4f}"
                ])
        
        if table_data:  # only build the table when there are rows
            table = axes[1, 1].table(
                cellText=table_data,
                colLabels=['Dataset', 'Best Val Acc', 'Best Train Acc'],
                cellLoc='center',
                loc='center'
            )
            table.auto_set_font_size(False)
            table.set_fontsize(10)
            table.scale(1.2, 1.5)
            axes[1, 1].set_title('Results Summary')
        
        plt.tight_layout()
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close(fig)  # close the figure to avoid interactive display

# 主运行脚本
def main():
    """Entry point: configure, run, and summarize all experiments."""
    # Experiment configuration.
    config = {
        'model_size': 'base',           # 'small', 'base', 'large'
        'batch_size': 16,               # adjust to the available GPU memory
        'learning_rate': 2e-5,
        'weight_decay': 0.01,
        'epochs': 10,                   # kept small for demonstration runs
        'max_length': 256,              # maximum sequence length
        'tokenizer_name': "/data/zcwang/model_huggginggace/llm_download/bert-base-uncased",            # or simply 'bert-base-uncased'
    }

    # (dataset name, data directory) pairs — edit paths as needed.
    datasets = [
        # ('IMDB', '/data/zcwang/temp/jsyyx/data/imdb'),
        # ('SST2', '/data/zcwang/temp/jsyyx/data/sst2'),
        ('SST5', '/data/zcwang/temp/jsyyx/data/sst5')
    ]

    runner = ExperimentRunner(config)

    # Run every configured experiment.
    print("Starting experiments...")
    results = runner.run_all_experiments(datasets)

    # Plot curves only if at least one experiment succeeded.
    if any('error' not in result for result in results.values()):
        runner.plot_training_curves(results)

    # Final summary.
    print("\n" + "="*60)
    print("EXPERIMENT RESULTS SUMMARY")
    print("="*60)

    for dataset, result in results.items():
        if 'error' in result:
            print(f"{dataset:10}: Error - {result['error']}")
            continue
        print(f"{dataset:10}: Best Validation Accuracy = {result['best_val_accuracy']:.4f}")
        if 'test_results' in result:
            print(f"{' '*12}Test Accuracy = {result['test_results']['accuracy']:.4f}")
# Script entry point.
if __name__ == "__main__":
    main()
# Example background launch:
# nohup python training_pipeline.py > /data/zcwang/temp/jsyyx/log.txt 2>&1 &