import json
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer
from torch.utils.data import Dataset
import torch
from tqdm import tqdm
import os
import re

# Environment setup
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5,6"  # adjust to your GPU layout

# Global configuration - ensures every adapter is trained with the same
# vocabulary setup (special tokens and, eventually, vocab size).
GLOBAL_VOCAB_CONFIG = {
    "special_tokens": [
        "<|start_header_id|>", "<|end_header_id|>", 
        "<|eot_id|>", "<|user|>", "<|assistant|>"
    ],
    "target_vocab_size": None,  # determined on the first run
    "vocab_config_file": "vocab_config.json"
}

class ChineseEmailDataset(Dataset):
    """Dataset of Chinese e-mail conversations for causal-LM fine-tuning.

    Reads a plain-text file of conversations separated by "<|eot_id|>",
    filters out low-quality samples, and yields fixed-length
    (input_ids, labels) pairs in which only the assistant reply carries
    labels; the prompt and the padding are masked with -100.
    """

    def __init__(self, file_path, tokenizer, block_size=2048):
        """Load, filter and tokenize the training file.

        Args:
            file_path: path to the training text file.
            tokenizer: tokenizer used to encode conversations.
            block_size: fixed sequence length of every example.

        Raises:
            FileNotFoundError: if file_path does not exist.
            ValueError: if fewer than 10 valid examples were produced.
        """
        print(f"正在初始化中文邮件数据集: {file_path}")
        self.examples = []
        self.tokenizer = tokenizer
        self.block_size = block_size

        # Sanity-check the training file before any processing.
        self._validate_training_file(file_path)

        with open(file_path, encoding="utf-8") as f:
            text = f.read()

        # Conversations are delimited by the end-of-turn marker.
        conversations = text.split("<|eot_id|>")
        conversations = [conv.strip() for conv in conversations if conv.strip()]

        print(f"发现 {len(conversations)} 段原始对话")

        valid_count = 0
        skipped_count = 0

        for i, conv in enumerate(tqdm(conversations)):
            if not conv.strip():
                continue

            # Drop low-quality samples (e.g. "I don't know"-style replies).
            if self._is_poor_quality(conv):
                skipped_count += 1
                continue

            try:
                example = self._process_conversation(conv)
                if example:
                    self.examples.append(example)
                    valid_count += 1

            except Exception as e:
                print(f"处理对话 {i} 时出错: {e}")
                continue

        print(f"创建了 {valid_count} 个有效样本，跳过了 {skipped_count} 个低质量样本")
        print(f"最终数据集大小: {len(self.examples)}")

        # Refuse to train on an essentially empty dataset.
        if len(self.examples) < 10:
            raise ValueError(f"训练数据不足：只有 {len(self.examples)} 个样本")

    def _validate_training_file(self, file_path):
        """Report basic quality statistics of the training file.

        Counts assistant turns and "don't know"-style replies; warns (does
        not raise) when more than 30% of replies look uninformative.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"训练文件 {file_path} 不存在")

        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Count assistant turns and known "non-answer" phrases.
        assistant_count = content.count('<|start_header_id|>assistant<|end_header_id|>')
        invalid_patterns = ['我不知道', '无法回答', '不清楚', '不了解']
        invalid_count = sum(content.count(pattern) for pattern in invalid_patterns)

        if assistant_count == 0:
            raise ValueError("训练数据中未找到有效的助手回复")

        invalid_ratio = invalid_count / assistant_count if assistant_count > 0 else 0

        print(f"训练数据质量检查:")
        print(f"  助手回复总数: {assistant_count}")
        print(f"  无效回复数: {invalid_count}")
        print(f"  无效回复比例: {invalid_ratio:.1%}")

        # Warn when too many replies are uninformative.
        if invalid_ratio > 0.3:
            print("警告：检测到较高比例的无效回复！")
            print("这可能会导致模型性能下降。")

    def _is_poor_quality(self, conversation):
        """Return True when the assistant reply is uninformative or too short."""
        assistant_marker = '<|start_header_id|>assistant<|end_header_id|>'
        if assistant_marker in conversation:
            # Extract the assistant reply (text after the first marker).
            parts = conversation.split(assistant_marker)
            if len(parts) > 1:
                response = parts[1].strip()

                # Known low-quality response patterns.
                poor_patterns = [
                    r'^\s*我不知道\s*$',
                    r'^\s*无法回答\s*$',
                    r'^\s*不清楚\s*$',
                    r'我不知道.*我不知道',
                    r'无法.*无法',
                ]

                for pattern in poor_patterns:
                    if re.search(pattern, response):
                        return True

                # Skip replies shorter than 3 characters (ignoring spaces).
                if len(response.replace(' ', '')) < 3:
                    return True

        return False

    def _process_conversation(self, conversation):
        """Convert one conversation into a fixed-length training example.

        Returns:
            A dict with "input_ids" and "labels" tensors of length
            self.block_size, or None when no assistant reply is found.
        """
        assistant_marker = "<|start_header_id|>assistant<|end_header_id|>"

        if assistant_marker not in conversation:
            return None

        # Re-append the end marker stripped by the split in __init__.
        full_text = conversation + "<|eot_id|>"

        # Tokenize WITHOUT padding so the real sequence length is known.
        # (Padding here and then copying the padded ids into the labels
        # would train the model to predict pad tokens.)
        tokenized = self.tokenizer.encode(
            full_text,
            add_special_tokens=True,
            max_length=self.block_size,
            truncation=True
        )
        real_len = len(tokenized)

        # Locate the start of the assistant reply in token space.
        assistant_tokens = self.tokenizer.encode(assistant_marker, add_special_tokens=False)
        assistant_pos = self._find_sublist(tokenized, assistant_tokens)

        if assistant_pos == -1:
            return None

        # Pad manually to block_size; the collator derives the attention
        # mask from pad_token_id, so use it (falling back to EOS).
        pad_id = self.tokenizer.pad_token_id
        if pad_id is None:
            pad_id = self.tokenizer.eos_token_id
        input_ids = tokenized + [pad_id] * (self.block_size - real_len)

        # Labels: -100 everywhere except the real assistant-reply tokens.
        labels = [-100] * self.block_size
        response_start = assistant_pos + len(assistant_tokens)
        if response_start < real_len:
            labels[response_start:real_len] = input_ids[response_start:real_len]

        return {
            "input_ids": torch.tensor(input_ids, dtype=torch.long),
            "labels": torch.tensor(labels, dtype=torch.long)
        }

    def _find_sublist(self, main_list, sublist):
        """Return the first index of sublist inside main_list, or -1."""
        if not sublist:
            return -1
        sub_len = len(sublist)
        for i in range(len(main_list) - sub_len + 1):
            if main_list[i:i+sub_len] == sublist:
                return i
        return -1

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        return self.examples[idx]

# 自定义数据整理器
class DataCollator:
    """Stacks pre-tokenized examples into a batch and builds the attention mask."""

    def __init__(self, tokenizer):
        # Only pad_token_id is needed; the full tokenizer is kept for simplicity.
        self.tokenizer = tokenizer

    def __call__(self, features):
        batch_ids = torch.stack([example["input_ids"] for example in features])
        batch_labels = torch.stack([example["labels"] for example in features])
        # Positions holding the pad token are masked out of attention.
        mask = batch_ids.ne(self.tokenizer.pad_token_id).long()

        return {
            "input_ids": batch_ids,
            "labels": batch_labels,
            "attention_mask": mask
        }

def ensure_consistent_vocabulary(tokenizer, model, config_file="vocab_config.json"):
    """Make the tokenizer and model share one vocabulary configuration.

    On the first call a config file is written recording the vocabulary
    size and special tokens; later calls (e.g. for other adapters) load
    that file and resize tokenizer/model to match, so every adapter
    trained from the same base ends up with an identical vocabulary.

    Args:
        tokenizer: tokenizer to align (may gain special tokens).
        model: causal-LM model whose embeddings may be resized.
        config_file: path of the JSON file holding the shared config.

    Returns:
        The final (shared) vocabulary size.
    """
    from datetime import datetime, timezone  # local import, used only for the config timestamp

    # Reuse a previously saved configuration when one exists.
    if os.path.exists(config_file):
        print(f"加载已存在的词汇表配置: {config_file}")
        with open(config_file, 'r', encoding='utf-8') as f:
            saved_config = json.load(f)

        target_vocab_size = saved_config['vocab_size']
        special_tokens = saved_config['special_tokens']

        print(f"目标词汇表大小: {target_vocab_size}")

        # Make sure the tokenizer has the same special tokens.
        current_vocab = set(tokenizer.get_vocab().keys())
        missing_tokens = [token for token in special_tokens if token not in current_vocab]

        if missing_tokens:
            print(f"添加缺失的特殊tokens: {missing_tokens}")
            tokenizer.add_special_tokens({"additional_special_tokens": missing_tokens})

        # Resize the model embeddings to the saved target size.
        current_model_size = model.get_input_embeddings().weight.size(0)
        if current_model_size != target_vocab_size:
            print(f"调整模型词汇表大小: {current_model_size} -> {target_vocab_size}")
            model.resize_token_embeddings(target_vocab_size)

        # The tokenizer size should also match; warn if it does not.
        if len(tokenizer) != target_vocab_size:
            print(f"警告: tokenizer大小({len(tokenizer)})与目标大小({target_vocab_size})不匹配")
            # More elaborate handling may be needed here, but
            # resize_token_embeddings usually covers it.

        return target_vocab_size

    else:
        print("创建新的词汇表配置...")

        # Ensure a pad token exists (fall back to EOS).
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
            print(f"设置pad_token为: {tokenizer.pad_token}")

        # Add the globally configured special tokens.
        special_tokens = GLOBAL_VOCAB_CONFIG["special_tokens"]
        current_vocab = set(tokenizer.get_vocab().keys())
        new_tokens = [token for token in special_tokens if token not in current_vocab]

        if new_tokens:
            print(f"添加新的特殊tokens: {new_tokens}")
            tokenizer.add_special_tokens({"additional_special_tokens": new_tokens})

        # Grow the model embeddings to match the enlarged tokenizer.
        final_vocab_size = len(tokenizer)
        model.resize_token_embeddings(final_vocab_size)

        print(f"调整后的词汇表大小 - tokenizer: {len(tokenizer)}, model: {model.get_input_embeddings().weight.size(0)}")

        # Persist the vocabulary configuration for subsequent runs.
        # Fix: the original stored torch.cuda.current_device() (a GPU index)
        # under "created_at"; record an actual UTC timestamp instead.
        vocab_config = {
            "vocab_size": final_vocab_size,
            "special_tokens": special_tokens,
            "pad_token": tokenizer.pad_token,
            "eos_token": tokenizer.eos_token,
            "new_tokens_added": new_tokens,
            "created_at": datetime.now(timezone.utc).isoformat()
        }

        with open(config_file, 'w', encoding='utf-8') as f:
            json.dump(vocab_config, f, indent=2, ensure_ascii=False)

        print(f"词汇表配置已保存: {config_file}")
        print(f"最终词汇表大小: {final_vocab_size}")

        return final_vocab_size

def train_model(model_name: str, train_file: str, output_dir: str, model_type: str = "email_summary") -> bool:
    """
    Train one LoRA adapter of the given type on top of the base model.

    Args:
        model_name: path to the base model (passed to from_pretrained).
        train_file: training data file (conversations separated by <|eot_id|>).
        output_dir: directory for checkpoints and the final adapter.
        model_type: one of "email_summary", "bill_parser", "general_chat";
            selects the LoRA hyper-parameters.

    Returns:
        True when training completed and the model was saved; False on
        failure (an emergency checkpoint save is attempted first).
    """
    
    print(f"开始训练 {model_type} 模型")
    print(f"基础模型: {model_name}")
    print(f"训练文件: {train_file}")
    print(f"输出目录: {output_dir}")
    print(f"CUDA可用: {torch.cuda.is_available()}")
    
    if torch.cuda.is_available():
        print(f"GPU数量: {torch.cuda.device_count()}")
        for i in range(torch.cuda.device_count()):
            print(f"  GPU {i}: {torch.cuda.get_device_name(i)}")
    
    # Load the tokenizer.
    print("正在加载分词器...")
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True,
        use_fast=False
    )
    
    print(f"原始tokenizer词汇表大小: {len(tokenizer)}")
    
    # Load the base model; use_cache=False since gradient checkpointing
    # is enabled in the training arguments below.
    print("正在加载模型...")
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        trust_remote_code=True,
        use_cache=False,
        device_map="auto",
        torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16,
        low_cpu_mem_usage=True
    )
    
    print(f"原始模型词汇表大小: {model.get_input_embeddings().weight.size(0)}")
    
    # *** Key fix: force a vocabulary consistent across all adapters ***
    final_vocab_size = ensure_consistent_vocabulary(tokenizer, model)
    
    print(f"统一后的词汇表大小:")
    print(f"  tokenizer: {len(tokenizer)}")
    print(f"  model: {model.get_input_embeddings().weight.size(0)}")
    
    # Verify that tokenizer and model vocabulary sizes now match.
    current_tokenizer_size = len(tokenizer)
    current_model_size = model.get_input_embeddings().weight.size(0)
    
    if current_tokenizer_size != current_model_size:
        print(f"警告: 词汇表大小仍不匹配!")
        print(f"  tokenizer: {current_tokenizer_size}")
        print(f"  model: {current_model_size}")
        # Force both to the larger of the two sizes.
        target_size = max(current_tokenizer_size, current_model_size)
        print(f"强制统一到: {target_size}")
        model.resize_token_embeddings(target_size)
    
    # Final sanity check before training.
    assert len(tokenizer) == model.get_input_embeddings().weight.size(0), \
        f"词汇表大小不匹配: tokenizer({len(tokenizer)}) vs model({model.get_input_embeddings().weight.size(0)})"
    
    print("✓ 词汇表大小验证通过")
    
    # Build the training dataset.
    print("正在创建训练数据集...")
    dataset = ChineseEmailDataset(train_file, tokenizer, block_size=1024)  # smaller block_size for the ~2MB dataset
    
    # Split into train/validation (85/15).
    dataset_size = len(dataset)
    val_size = max(3, int(dataset_size * 0.15))  # at least 3 validation samples
    train_size = dataset_size - val_size
    
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset, [train_size, val_size]
    )
    
    print(f"训练样本: {len(train_dataset)}")
    print(f"验证样本: {len(val_dataset)}")
    
    # Set up LoRA.
    from peft import LoraConfig, get_peft_model, TaskType
    
    # LoRA hyper-parameters depend on the adapter type.
    if model_type == "general_chat":
        lora_r = 16
        lora_alpha = 32
        lora_dropout = 0.05
    else:  # email_summary, bill_parser
        lora_r = 32
        lora_alpha = 64  
        lora_dropout = 0.1
    
    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,
        r=lora_r,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        bias="none",
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj"
        ]
    )
    
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()
    
    # Scale training steps with the dataset size.
    # NOTE(review): datasets with >= 100 samples get only 10 steps —
    # looks like a debug leftover; confirm this is intended.
    if dataset_size < 50:
        max_steps = 100
        eval_steps = 25
        save_steps = 50
    elif dataset_size < 100:
        max_steps = 200
        eval_steps = 40
        save_steps = 80
    else:
        max_steps = 10
        eval_steps = 5
        save_steps = 10
    
    # Training arguments.
    training_args = TrainingArguments(
        output_dir=output_dir,
        
        # === Batch size ===
        per_device_train_batch_size=1,
        per_device_eval_batch_size=1,
        gradient_accumulation_steps=4,  # small accumulation for a small dataset
        
        # === Training steps (tuned for the ~2MB dataset) ===
        max_steps=max_steps,
        
        # === Learning rate ===
        learning_rate=3e-5,  # small learning rate suits the small dataset
        lr_scheduler_type="cosine",
        warmup_ratio=0.15,  # extra warmup steps
        optim="adamw_torch",
        
        # === Logging and evaluation ===
        logging_steps=10,
        eval_steps=eval_steps,
        logging_first_step=True,
        
        # === Saving ===
        save_steps=save_steps,
        save_strategy="steps",
        save_total_limit=3,
        eval_strategy="steps",
        
        # === Model selection ===
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        
        # === Hardware ===
        bf16=torch.cuda.is_bf16_supported(),
        fp16=not torch.cuda.is_bf16_supported(),
        max_grad_norm=1.0,
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={"use_reentrant": False},
        
        # === Memory ===
        dataloader_pin_memory=False,
        dataloader_num_workers=0,  # no worker processes needed for a small dataset
        remove_unused_columns=False,
        
        # === Reporting ===
        report_to="none",
    )
    
    # Build the trainer.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        data_collator=DataCollator(tokenizer),
    )
    
    # Cast normalization layers to float32 for numerical stability.
    # nn.Module.to() converts parameters in place, so the rebinding of
    # the loop variable here is harmless.
    for name, module in trainer.model.named_modules():
        if "norm" in name.lower() or "ln_" in name:
            module = module.to(torch.float32)
    
    print("开始训练...")
    model.print_trainable_parameters()
    
    # Free cached GPU memory before training starts.
    torch.cuda.empty_cache()
    
    try:
        # Run training.
        trainer.train()
        print("训练成功完成！")
        
        # Save the final model and tokenizer.
        final_dir = f"{output_dir}/final_model"
        trainer.model.save_pretrained(final_dir)
        tokenizer.save_pretrained(final_dir)
        
        # *** Important: record vocabulary info next to the adapter ***
        vocab_info = {
            "vocab_size": len(tokenizer),
            "model_vocab_size": model.get_input_embeddings().weight.size(0),
            "special_tokens": GLOBAL_VOCAB_CONFIG["special_tokens"],
            "pad_token": tokenizer.pad_token,
            "eos_token": tokenizer.eos_token,
            "adapter_type": model_type,
            "training_completed": True,
            "final_vocab_size": len(tokenizer)
        }
        
        with open(f"{final_dir}/vocab_info.json", 'w', encoding='utf-8') as f:
            json.dump(vocab_info, f, indent=2, ensure_ascii=False)
        
        print(f"最终模型已保存到: {final_dir}")
        print(f"词汇表大小: {len(tokenizer)}")
        
        # List every saved checkpoint, sorted by step number.
        checkpoints = [d for d in os.listdir(output_dir) if d.startswith('checkpoint-')]
        if checkpoints:
            checkpoints.sort(key=lambda x: int(x.split('-')[1]) if x.split('-')[1].isdigit() else 0)
            print("可用检查点:")
            for cp in checkpoints:
                print(f"  {output_dir}/{cp}")
        
        return True
        
    except Exception as e:
        print(f"训练失败: {e}")
        import traceback
        traceback.print_exc()
        
        # Best-effort: save an emergency checkpoint of the current state.
        try:
            emergency_dir = f"{output_dir}/emergency_checkpoint"
            os.makedirs(emergency_dir, exist_ok=True)
            trainer.model.save_pretrained(emergency_dir)
            tokenizer.save_pretrained(emergency_dir)
            
            # Vocabulary info for the emergency state.
            emergency_vocab_info = {
                "vocab_size": len(tokenizer),
                "model_vocab_size": model.get_input_embeddings().weight.size(0),
                "adapter_type": model_type,
                "training_completed": False,
                "emergency_save": True
            }
            
            with open(f"{emergency_dir}/vocab_info.json", 'w', encoding='utf-8') as f:
                json.dump(emergency_vocab_info, f, indent=2, ensure_ascii=False)
                
            print(f"紧急检查点已保存到: {emergency_dir}")
        except Exception as save_error:
            print(f"保存紧急检查点也失败: {save_error}")
        
        return False

def validate_vocab_consistency():
    """Check that all trained adapters report the same vocabulary size.

    Reads vocab_info.json from each known adapter directory and compares
    the recorded vocabulary sizes.

    Returns:
        True when every adapter with vocab info agrees on one size;
        False when sizes differ or no adapter info could be found.
    """
    print("\n" + "="*50)
    print("验证适配器词汇表一致性")
    print("="*50)
    
    adapter_dirs = [
        "./models/qwen-chinese-peft/qwen-email-summary-adapter/final_model",
        "./models/qwen-chinese-peft/qwen-bill-parser-adapter/final_model", 
        "./models/qwen-chinese-peft/qwen-general-chat-adapter/final_model"
    ]
    
    vocab_sizes = {}
    
    for adapter_dir in adapter_dirs:
        vocab_info_path = os.path.join(adapter_dir, "vocab_info.json")
        # The adapter name is the directory containing final_model.
        adapter_name = os.path.basename(os.path.dirname(adapter_dir))
        if os.path.exists(vocab_info_path):
            with open(vocab_info_path, 'r', encoding='utf-8') as f:
                vocab_info = json.load(f)
            
            vocab_sizes[adapter_name] = vocab_info.get('vocab_size', 'unknown')
            print(f"{adapter_name}: {vocab_info.get('vocab_size', 'unknown')}")
        else:
            print(f"{adapter_name}: vocab_info.json 不存在")
    
    # Edge-case fix: with no vocab info at all the original code printed
    # the "inconsistent" message, which is misleading. Report the real
    # situation instead (return value is False either way).
    if not vocab_sizes:
        print("未找到任何适配器的词汇表信息")
        return False
    
    # Consistency check across whatever adapters were found.
    unique_sizes = set(vocab_sizes.values())
    if len(unique_sizes) == 1:
        print("✓ 所有适配器词汇表大小一致")
    else:
        print("✗ 发现词汇表大小不一致!")
        print("建议重新训练以确保一致性")
    
    return len(unique_sizes) == 1

def main() -> None:
    """Train the three adapters in sequence, then verify vocabulary consistency."""
    
    # Configuration.
    base_model_name = "../Qwen3-8B"  # adjust to your local model path
    
    # One entry per adapter to train.
    training_configs = [
        {
            "train_file": "email_summary_training.txt",
            "output_dir": "./models/qwen-chinese-peft/qwen-email-summary-adapter",
            "model_type": "email_summary",
            "description": "邮件总结模型"
        },
        {
            "train_file": "bill_parser_training.txt", 
            "output_dir": "./models/qwen-chinese-peft/qwen-bill-parser-adapter",
            "model_type": "bill_parser",
            "description": "账单解析模型"
        },
        {
            "train_file": "general_chat_training.txt",
            "output_dir": "./models/qwen-chinese-peft/qwen-general-chat-adapter", 
            "model_type": "general_chat",
            "description": "通用聊天适配器"
        }
    ]
    
    print("=" * 60)
    print("开始训练中文邮件处理适配器（词汇表一致性修复版）")
    print("=" * 60)
    
    # Ensure the base model path exists before doing anything else.
    if not os.path.exists(base_model_name):
        print(f"错误: 基础模型路径 {base_model_name} 不存在")
        print("请修改 base_model_name 为正确的路径")
        return
    
    success_count = 0
    failed_configs = []
    
    for i, config in enumerate(training_configs, 1):
        print(f"\n{'='*20} 训练 {i}/3 {'='*20}")
        print(f"模型类型: {config['description']}")
        print(f"训练文件: {config['train_file']}")
        
        # Skip configs whose training file is missing.
        if not os.path.exists(config['train_file']):
            print(f"警告: 训练文件 {config['train_file']} 不存在，跳过此模型...")
            failed_configs.append(config['description'] + " (文件不存在)")
            continue
        
        # Check the training file size.
        file_size = os.path.getsize(config['train_file']) / (1024 * 1024)  # MB
        print(f"文件大小: {file_size:.2f} MB")
        
        if file_size < 0.1:  # under 100 KB
            print(f"警告: 文件 {config['train_file']} 太小，跳过训练...")
            failed_configs.append(config['description'] + " (文件too small)")
            continue
        
        # Create the output directory.
        os.makedirs(config['output_dir'], exist_ok=True)
        
        # Train this adapter.
        success = train_model(
            model_name=base_model_name,
            train_file=config['train_file'],
            output_dir=config['output_dir'],
            model_type=config['model_type']
        )
        
        if success:
            success_count += 1
            print(f"✓ {config['description']} 训练成功")
        else:
            print(f"✗ {config['description']} 训练失败")
            failed_configs.append(config['description'] + " (训练失败)")
        
        # Release GPU memory between runs.
        torch.cuda.empty_cache()
        print("-" * 50)
    
    print(f"\n{'='*60}")
    print(f"训练完成! 成功训练了 {success_count}/{len(training_configs)} 个适配器")
    
    if failed_configs:
        print(f"失败的配置:")
        for failed in failed_configs:
            print(f"  - {failed}")
    
    # Cross-check vocabulary consistency of all trained adapters.
    if success_count > 0:
        validate_vocab_consistency()
    
    print("=" * 60)
    
    # Print a short usage guide when anything was trained.
    if success_count > 0:
        print("\n" + "=" * 40)
        print("训练完成！使用指南:")
        print("=" * 40)
        print("1. 词汇表配置文件: vocab_config.json")
        print("2. 适配器目录结构:")
        for config in training_configs:
            print(f"   - {config['output_dir']}/final_model/")
        print("3. 每个适配器都包含 vocab_info.json 文件")
        print("4. 加载适配器时请确保使用相同的词汇表配置")
        print("=" * 40)

# Script entry point: train all adapters, then check vocabulary consistency.
if __name__ == "__main__":
    main()