import os
import random
from enum import Enum
from typing import Dict, List, Tuple, Optional, Any, Union

import numpy as np
import packaging.version
import torch
import transformers
from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
from datasets.builder import DatasetGenerationError
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    PreTrainedModel,
    PreTrainedTokenizer,
)

#DEFAULT_CHATML_CHAT_TEMPLATE = "{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% if loop.last and add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}{% endfor %}"
#DEFAULT_ZEPHYR_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n'  + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"


#class QwenSpecialTokens(str, Enum):
#    user = "<|im_start|>user"
#    assistant = "<|im_start|>assistant"
#    system = "<|im_start|>system"
#    eos_token = "<|im_end|>"
#    bos_token = "<|im_start|>"
#    pad_token = "<endoftext>"

#    @classmethod
#    def list(cls):
#        return [c.value for c in cls]


#class ChatmlSpecialTokens(str, Enum):
#    user = "<|im_start|>user"
#    assistant = "<|im_start|>assistant"
#    system = "<|im_start|>system"
#    eos_token = "<|im_end|>"
#    bos_token = "<s>"
#    pad_token = "<pad>"

#    @classmethod
#    def list(cls):
#        return [c.value for c in cls]

def create_datasets(tokenizer, data_args, training_args, apply_chat_template=False):
    """Load a dataset, split it 60/20/20 and preprocess it into a "content" column.

    Sources are tried in order: a local JSON file (when ``dataset_name`` ends in
    ``.json`` or is an existing file), a Hugging Face Hub dataset id (using
    ``data_args.splits``), and finally a ``save_to_disk`` directory with
    train/validation/test subfolders.

    Args:
        tokenizer: tokenizer whose chat template renders conversational data.
        data_args: provides ``dataset_name`` and ``splits`` (comma-separated,
            used only for Hub datasets).
        training_args: kept for interface compatibility; not used here.
        apply_chat_template: when True, "messages" conversations or Alpaca-style
            records are rendered through ``tokenizer.apply_chat_template``.

    Returns:
        (train_data, valid_data, test_data) datasets, each exposing a single
        "content" text column.

    Raises:
        ValueError: when no dataset could be loaded, the train split is missing,
            or the records have neither a "messages" nor an "instruction" column.
    """

    def preprocess(examples):
        # Conversation format: every record already carries a "messages" list.
        if apply_chat_template and "messages" in examples:
            return {
                "content": [
                    tokenizer.apply_chat_template(conversation, tokenize=False)
                    for conversation in examples["messages"]
                ]
            }

        # Alpaca format: synthesize system/user/assistant turns per record.
        if apply_chat_template and "instruction" in examples:
            batch = []
            for i in range(len(examples["instruction"])):
                messages = [{"role": "system", "content": "你是一个有帮助的助手。"}]

                user_content = examples["instruction"][i]
                if "input" in examples and examples["input"][i].strip():
                    user_content += "\n" + examples["input"][i]
                messages.append({"role": "user", "content": user_content})
                messages.append({"role": "assistant", "content": examples["output"][i]})

                batch.append(tokenizer.apply_chat_template(messages, tokenize=False))
            return {"content": batch}

        # Fallback without a chat template: join the raw Alpaca fields.
        # BUGFIX: this path previously returned None (the else-branch was
        # commented out), which makes Dataset.map fail with an obscure error.
        if "instruction" in examples:
            inputs = examples["input"] if "input" in examples else [""] * len(examples["instruction"])
            return {
                "content": [
                    "\n".join(part for part in (ins, inp, out) if part)
                    for ins, inp, out in zip(examples["instruction"], inputs, examples["output"])
                ]
            }

        raise ValueError("Unsupported dataset format: expected a 'messages' or 'instruction' column.")

    raw_datasets = DatasetDict()

    try:
        # Local JSON file?
        if data_args.dataset_name.endswith('.json') or os.path.isfile(data_args.dataset_name):
            print(f"从本地文件加载数据集: {data_args.dataset_name}")
            dataset = load_dataset('json', data_files=data_args.dataset_name)

            # 60/40 split first, then the 40% tail into equal validation/test
            # halves, i.e. an overall 6:2:2 split.
            train_temp = dataset["train"].train_test_split(test_size=0.4, seed=42)
            train_dataset = train_temp["train"]

            val_test = train_temp["test"].train_test_split(test_size=0.5, seed=42)
            val_dataset = val_test["train"]
            test_dataset = val_test["test"]

            raw_datasets["train"] = train_dataset
            raw_datasets["validation"] = val_dataset
            raw_datasets["test"] = test_dataset

            print(f"数据集划分完成，按照6:2:2的比例划分:")
            print(f"训练集: {len(train_dataset)}条样本")
            print(f"验证集: {len(val_dataset)}条样本")
            print(f"测试集: {len(test_dataset)}条样本")

        else:
            # Otherwise treat dataset_name as a Hugging Face Hub dataset id.
            print(f"从 Hugging Face Hub 加载数据集: {data_args.dataset_name}")
            for split in data_args.splits.split(","):
                dataset = load_dataset(data_args.dataset_name, split=split)
                if "train" in split:
                    # Only a train split available: apply the same 6:2:2 split.
                    train_temp = dataset.train_test_split(test_size=0.4, seed=42)
                    train_dataset = train_temp["train"]

                    val_test = train_temp["test"].train_test_split(test_size=0.5, seed=42)
                    val_dataset = val_test["train"]
                    test_dataset = val_test["test"]

                    raw_datasets["train"] = train_dataset
                    raw_datasets["validation"] = val_dataset
                    raw_datasets["test"] = test_dataset
                elif "validation" in split:
                    raw_datasets["validation"] = dataset
                elif "test" in split:
                    raw_datasets["test"] = dataset
    except Exception as e:
        print(f"加载数据集时出错: {e}")
        try:
            # Fall back to a save_to_disk directory layout on local disk.
            print(f"尝试从本地磁盘加载数据集: {data_args.dataset_name}")
            for split in ["train", "validation", "test"]:
                try:
                    dataset = load_from_disk(os.path.join(data_args.dataset_name, split))
                    raw_datasets[split] = dataset
                except Exception:
                    # Best-effort: a missing split is handled below.
                    pass

            # Only a train split on disk: apply the same 6:2:2 split.
            if "train" in raw_datasets and "validation" not in raw_datasets:
                train_temp = raw_datasets["train"].train_test_split(test_size=0.4, seed=42)
                raw_datasets["train"] = train_temp["train"]

                val_test = train_temp["test"].train_test_split(test_size=0.5, seed=42)
                raw_datasets["validation"] = val_test["train"]
                raw_datasets["test"] = val_test["test"]
        except Exception as e2:
            print(f"从本地磁盘加载数据集时出错: {e2}")
            raise ValueError(f"无法加载数据集。原始错误: {e}，本地加载错误: {e2}")

    # A train split is mandatory; derive missing validation/test splits from it.
    if "train" not in raw_datasets:
        raise ValueError("训练数据集未找到")

    if "validation" not in raw_datasets:
        print("警告: 验证集未找到，将从训练集中划分20%作为验证集")
        # 25% of the remaining 80% equals 20% of the original data.
        train_val = raw_datasets["train"].train_test_split(test_size=0.25, seed=42)
        raw_datasets["train"] = train_val["train"]
        raw_datasets["validation"] = train_val["test"]

    if "test" not in raw_datasets:
        print("警告: 测试集未找到，将从训练集中划分25%作为测试集")
        # 25% of what remains of the train split (i.e. 15% of the total when the
        # validation split was just carved out above).
        train_test = raw_datasets["train"].train_test_split(test_size=0.25, seed=42)
        raw_datasets["train"] = train_test["train"]
        raw_datasets["test"] = train_test["test"]

    # Render every split down to the single "content" column.
    for split in raw_datasets.keys():
        raw_datasets[split] = raw_datasets[split].map(
            preprocess,
            batched=True,
            num_proc=4,
            remove_columns=raw_datasets[split].column_names,
            desc=f"对{split}数据集应用预处理",
        )

    train_data = raw_datasets["train"]
    valid_data = raw_datasets["validation"]
    test_data = raw_datasets["test"]

    print(f"训练集大小: {len(train_data)}. 验证集大小: {len(valid_data)}. 测试集大小: {len(test_data)}")
    print(f"训练数据样例: {train_data[0]}")

    return train_data, valid_data, test_data

def create_and_prepare_model(args, data_args, training_args):
    """Create the causal LM (optionally quantized or via Unsloth), tokenizer and LoRA config.

    Args:
        args: model arguments: ``model_name_or_path``, 4/8-bit quantization
            flags, LoRA hyper-parameters, ``chat_template_format``,
            ``use_unsloth``, ``use_flash_attn``.
        data_args: kept for interface compatibility; not used here.
        training_args: supplies ``bf16``, ``gradient_checkpointing``, ``seed``
            and ``max_seq_length`` used during model construction.

    Returns:
        (model, peft_config, tokenizer); ``peft_config`` is None unless
        ``args.use_peft_lora`` is set and Unsloth is not used.

    Raises:
        NotImplementedError: for Unsloth under multi-process distributed
            training, or for the currently disabled "chatml" template format.
    """
    if args.use_unsloth:
        from unsloth import FastLanguageModel
    bnb_config = None
    quant_storage_dtype = None

    # Unsloth patches a single-process model; refuse multi-GPU distributed runs.
    if (
        torch.distributed.is_available()
        and torch.distributed.is_initialized()
        and torch.distributed.get_world_size() > 1
        and args.use_unsloth
    ):
        raise NotImplementedError("Unsloth is not supported in distributed training")

    if args.use_4bit_quantization:
        compute_dtype = getattr(torch, args.bnb_4bit_compute_dtype)
        quant_storage_dtype = getattr(torch, args.bnb_4bit_quant_storage_dtype)

        bnb_config = BitsAndBytesConfig(
            load_in_4bit=args.use_4bit_quantization,
            bnb_4bit_quant_type=args.bnb_4bit_quant_type,
            bnb_4bit_compute_dtype=compute_dtype,
            bnb_4bit_use_double_quant=args.use_nested_quant,
            bnb_4bit_quant_storage=quant_storage_dtype,
        )

        if compute_dtype == torch.float16:
            major, _ = torch.cuda.get_device_capability()
            if major >= 8:
                print("=" * 80)
                print("Your GPU supports bfloat16, you can accelerate training with the argument --bf16")
                print("=" * 80)
    # BUGFIX: this branch used to be nested inside the 4-bit block, so 8-bit
    # quantization could only trigger together with 4-bit (and then overwrote
    # the 4-bit config). 8-bit is the *alternative* to 4-bit, so it belongs here.
    elif args.use_8bit_quantization:
        bnb_config = BitsAndBytesConfig(load_in_8bit=args.use_8bit_quantization)

    # Pick the model dtype. Flash Attention 2 requires fp16/bf16.
    use_flash_attention = False
    if args.use_flash_attn:
        use_flash_attention = True
        if hasattr(training_args, "bf16") and training_args.bf16:
            torch_dtype = torch.bfloat16
        else:
            torch_dtype = torch.float16
    else:
        torch_dtype = (
            quant_storage_dtype if quant_storage_dtype and quant_storage_dtype.is_floating_point else torch.float32
        )

    if args.use_unsloth:
        # Unsloth loads (and patches) the model itself; tokenizer comes later.
        model, _ = FastLanguageModel.from_pretrained(
            model_name=args.model_name_or_path,
            max_seq_length=training_args.max_seq_length,
            dtype=torch_dtype,
            load_in_4bit=args.use_4bit_quantization,
        )
    else:
        attn_implementation = None
        if use_flash_attention:
            attn_implementation = "flash_attention_2"
            # Flash Attention 2 only supports half-precision dtypes.
            if torch_dtype not in [torch.float16, torch.bfloat16]:
                print(f"警告: Flash Attention 2.0 需要 float16 或 bfloat16，但当前使用 {torch_dtype}。已自动切换到 float16。")
                torch_dtype = torch.float16

        model = AutoModelForCausalLM.from_pretrained(
            args.model_name_or_path,
            quantization_config=bnb_config,
            trust_remote_code=True,
            attn_implementation=attn_implementation,
            torch_dtype=torch_dtype,
        )

    # Flash Attention kernels run on CUDA tensors only, so move the model now.
    if use_flash_attention and torch.cuda.is_available():
        print("将模型移至 CUDA 设备以支持 Flash Attention 2.0")
        model = model.to('cuda')

    peft_config = None
    if args.use_peft_lora and not args.use_unsloth:
        peft_config = LoraConfig(
            lora_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            r=args.lora_r,
            bias="none",
            task_type="CAUSAL_LM",
            target_modules=args.lora_target_modules.split(",")
            if args.lora_target_modules != "all-linear"
            else args.lora_target_modules,
        )

    special_tokens = None
    chat_template = None
    if args.chat_template_format == "chatml":
        # BUGFIX: ChatmlSpecialTokens / DEFAULT_CHATML_CHAT_TEMPLATE are
        # commented out at the top of this module, so the previous assignments
        # raised a bare NameError. Fail explicitly until they are restored.
        raise NotImplementedError(
            "chat_template_format='chatml' is disabled: ChatmlSpecialTokens and "
            "DEFAULT_CHATML_CHAT_TEMPLATE are commented out in this module."
        )
    elif args.chat_template_format == "qwen":
        # Qwen tokenizers ship their own chat template; nothing to override.
        pass

    if special_tokens is not None:
        # Retained for when a special-token set is reintroduced above.
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path,
            pad_token=special_tokens.pad_token.value,
            bos_token=special_tokens.bos_token.value,
            eos_token=special_tokens.eos_token.value,
            additional_special_tokens=special_tokens.list(),
            trust_remote_code=True,
        )
        tokenizer.chat_template = chat_template

        # make embedding resizing configurable?
        # Transformers 4.46.0+ defaults uses mean_resizing by default, which fails with QLoRA + FSDP because the
        # embedding could be on meta device, therefore, we set mean_resizing=False in that case (i.e. the status quo
        # ante). See https://github.com/huggingface/accelerate/issues/1620.
        uses_transformers_4_46 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.46.0")
        uses_fsdp = os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
        if (bnb_config is not None) and uses_fsdp and uses_transformers_4_46:
            model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8, mean_resizing=False)
        else:
            model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=8)
    else:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True)
        # No dedicated pad token: reuse EOS so padding is always defined.
        tokenizer.pad_token = tokenizer.eos_token

    if args.use_unsloth:
        # Do model patching and add fast LoRA weights
        model = FastLanguageModel.get_peft_model(
            model,
            lora_alpha=args.lora_alpha,
            lora_dropout=args.lora_dropout,
            r=args.lora_r,
            target_modules=args.lora_target_modules.split(",")
            if args.lora_target_modules != "all-linear"
            else args.lora_target_modules,
            use_gradient_checkpointing=training_args.gradient_checkpointing,
            random_state=training_args.seed,
            max_seq_length=training_args.max_seq_length,
        )

    return model, peft_config, tokenizer

def create_test_dataset(tokenizer, data_args, training_args, apply_chat_template=False, use_test_set=True):
    """Load only the test (or validation) split for model evaluation.

    Mirrors the source handling and the deterministic 6:2:2 split of
    ``create_datasets`` so the evaluation split here matches the one produced
    during training.

    Args:
        tokenizer: tokenizer whose chat template renders conversational data.
        data_args: provides ``dataset_name`` (local JSON file, Hub id or
            ``save_to_disk`` directory).
        training_args: kept for interface compatibility; not used here.
        apply_chat_template: render "messages"/Alpaca records via the
            tokenizer's chat template.
        use_test_set: pick the test split when True, otherwise the validation split.

    Returns:
        The preprocessed evaluation dataset with a single "content" column.

    Raises:
        ValueError: when the requested split cannot be loaded or the records
            have neither a "messages" nor an "instruction" column.
    """

    def preprocess(examples):
        # Conversation format: every record already carries a "messages" list.
        if apply_chat_template and "messages" in examples:
            return {
                "content": [
                    tokenizer.apply_chat_template(conversation, tokenize=False)
                    for conversation in examples["messages"]
                ]
            }

        # Alpaca format: synthesize system/user/assistant turns per record.
        if apply_chat_template and "instruction" in examples:
            batch = []
            for i in range(len(examples["instruction"])):
                messages = [{"role": "system", "content": "你是一个有帮助的助手。"}]

                user_content = examples["instruction"][i]
                if "input" in examples and examples["input"][i].strip():
                    user_content += "\n" + examples["input"][i]
                messages.append({"role": "user", "content": user_content})
                messages.append({"role": "assistant", "content": examples["output"][i]})

                batch.append(tokenizer.apply_chat_template(messages, tokenize=False))
            return {"content": batch}

        # Fallback without a chat template: join the raw Alpaca fields.
        # BUGFIX: this path previously returned None, which makes Dataset.map fail.
        if "instruction" in examples:
            inputs = examples["input"] if "input" in examples else [""] * len(examples["instruction"])
            return {
                "content": [
                    "\n".join(part for part in (ins, inp, out) if part)
                    for ins, inp, out in zip(examples["instruction"], inputs, examples["output"])
                ]
            }

        raise ValueError("Unsupported dataset format: expected a 'messages' or 'instruction' column.")

    raw_datasets = DatasetDict()

    try:
        # Local JSON file?
        if data_args.dataset_name.endswith('.json') or os.path.isfile(data_args.dataset_name):
            print(f"从本地文件加载数据集: {data_args.dataset_name}")
            dataset = load_dataset('json', data_files=data_args.dataset_name)

            # Reproduce the 6:2:2 split used at training time (same seed).
            train_temp = dataset["train"].train_test_split(test_size=0.4, seed=42)

            # The 40% tail becomes equal validation/test halves (20% each).
            val_test = train_temp["test"].train_test_split(test_size=0.5, seed=42)

            if use_test_set:
                raw_datasets["test"] = val_test["test"]
                print(f"从本地文件加载测试集，大小: {len(val_test['test'])}条样本")
            else:
                raw_datasets["validation"] = val_test["train"]
                print(f"从本地文件加载验证集，大小: {len(val_test['train'])}条样本")

        else:
            # Otherwise treat dataset_name as a Hugging Face Hub dataset id.
            print(f"从 Hugging Face Hub 加载数据集: {data_args.dataset_name}")
            split_name = "test" if use_test_set else "validation"

            try:
                # Try loading the requested split directly.
                dataset = load_dataset(data_args.dataset_name, split=split_name)
                raw_datasets[split_name] = dataset
                print(f"成功加载{split_name}分割，大小: {len(dataset)}条样本")
            except Exception:
                # Split not published: derive it from the train split instead.
                print(f"{split_name}分割不存在，尝试从训练集划分")
                dataset = load_dataset(data_args.dataset_name, split="train")

                train_temp = dataset.train_test_split(test_size=0.4, seed=42)
                val_test = train_temp["test"].train_test_split(test_size=0.5, seed=42)

                if use_test_set:
                    raw_datasets["test"] = val_test["test"]
                    print(f"从训练集划分测试集，大小: {len(val_test['test'])}条样本")
                else:
                    raw_datasets["validation"] = val_test["train"]
                    print(f"从训练集划分验证集，大小: {len(val_test['train'])}条样本")

    except Exception as e:
        print(f"加载数据集时出错: {e}")
        try:
            # Fall back to a save_to_disk directory layout on local disk.
            print(f"尝试从本地磁盘加载数据集: {data_args.dataset_name}")
            split_name = "test" if use_test_set else "validation"

            try:
                dataset = load_from_disk(os.path.join(data_args.dataset_name, split_name))
                raw_datasets[split_name] = dataset
                print(f"成功从本地磁盘加载{split_name}分割")
            except Exception:
                print(f"无法从本地磁盘加载{split_name}分割")
                raise

        except Exception as e2:
            print(f"从本地磁盘加载数据集时出错: {e2}")
            raise ValueError(f"无法加载数据集。原始错误: {e}，本地加载错误: {e2}")

    split_name = "test" if use_test_set else "validation"
    if split_name not in raw_datasets:
        raise ValueError(f"{split_name}数据集未找到")

    # Render the split down to the single "content" column.
    raw_datasets[split_name] = raw_datasets[split_name].map(
        preprocess,
        batched=True,
        num_proc=4,
        remove_columns=raw_datasets[split_name].column_names,
        desc=f"对{split_name}数据集应用预处理",
    )

    eval_data = raw_datasets[split_name]
    print(f"评估数据集大小: {len(eval_data)}")
    print(f"评估数据样例: {eval_data[0]}")

    return eval_data

def set_random_seed(seed: int) -> None:
    """Seed every relevant RNG (Python, NumPy, PyTorch CPU and CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Force deterministic cuDNN kernel selection (may cost performance).
    torch.backends.cudnn.deterministic = True

def find_all_linear_modules(model: "PreTrainedModel") -> List[str]:
    """Return the leaf names of all ``nn.Linear`` submodules, deduplicated.

    The leaf name (e.g. ``"q_proj"`` for ``"model.layers.0.self_attn.q_proj"``)
    is what PEFT's ``target_modules`` suffix matching expects. The previous
    implementation collected the *parent* path instead, which names container
    modules that are not Linear and therefore could never be valid LoRA targets.

    Args:
        model: any module tree (annotation quoted to avoid a hard import
            requirement at definition time).

    Returns:
        Leaf names of every Linear layer, in first-seen traversal order.
    """
    linear_modules: List[str] = []

    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            # Keep only the last dotted component: the layer's own name.
            leaf = name.rsplit(".", 1)[-1]
            if leaf not in linear_modules:
                linear_modules.append(leaf)

    return linear_modules

def process_dataset_with_chat_template(
    dataset: Dataset,
    tokenizer: PreTrainedTokenizer,
    max_seq_len: int
) -> Dataset:
    """
    Apply the tokenizer's chat template to every example and tokenize it.

    Each example must provide a "messages" field (a list of role/content
    dicts). The result has flat input_ids / attention_mask / labels columns,
    where labels duplicate input_ids for autoregressive LM training.

    Args:
        dataset: dataset of chat examples with a "messages" column.
        tokenizer: tokenizer providing both the chat template and tokenization.
        max_seq_len: maximum sequence length; longer texts are truncated and
            shorter ones padded to this length.

    Returns:
        The tokenized dataset (original columns removed).
    """
    def tokenize_function(example):
        messages = example["messages"]

        # Render the conversation to plain text. No generation prompt: this is
        # training data that already contains the assistant's reply.
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=False
        )

        # BUGFIX: return_tensors="pt" produced (1, seq_len) tensors here, and
        # Dataset.map stored that spurious batch dimension with every example.
        # Plain Python lists keep each column a flat token sequence.
        tokenized = tokenizer(
            text,
            max_length=max_seq_len,
            truncation=True,
            padding="max_length",
        )

        # Labels mirror input_ids for causal-LM training (copy, not alias).
        tokenized["labels"] = list(tokenized["input_ids"])

        return tokenized

    # One example at a time: tokenize_function handles a single conversation.
    processed_dataset = dataset.map(
        tokenize_function,
        batched=False,
        remove_columns=dataset.column_names
    )

    return processed_dataset

def compute_metrics(eval_preds, tokenizer):
    """
    Compute ROUGE scores (as percentages) plus the mean generation length.

    Args:
        eval_preds: (predictions, labels) arrays of token ids, where -100
            marks positions to ignore.
        tokenizer: tokenizer used to decode ids back into text.

    Returns:
        Dict of rounded ROUGE percentages plus "gen_len", the mean predicted
        word count (not scaled).
    """
    # Lazy import: the third-party `evaluate` package is only needed here.
    from evaluate import load

    rouge = load("rouge")

    preds, labels = eval_preds

    # Replace the -100 ignore-index with the pad token so decoding works.
    preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)

    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    result = rouge.compute(
        predictions=decoded_preds,
        references=decoded_labels,
        use_stemmer=True
    )

    # ROUGE scores are fractions in [0, 1] -> report them as percentages.
    metrics = {k: round(v * 100, 4) for k, v in result.items()}

    # BUGFIX: gen_len is a word count, not a fraction; it was previously added
    # before the scaling step and therefore got multiplied by 100 as well.
    metrics["gen_len"] = round(float(np.mean([len(pred.split()) for pred in decoded_preds])), 4)

    return metrics

def save_model_and_tokenizer(
    model: PreTrainedModel,
    tokenizer: PreTrainedTokenizer,
    output_dir: str
) -> None:
    """
    Persist the model and its tokenizer under ``output_dir``.

    Models exposing ``save_pretrained`` (HF / PEFT) use it; anything else
    falls back to a raw ``state_dict`` dump as ``pytorch_model.bin``.
    """
    os.makedirs(output_dir, exist_ok=True)

    save_pretrained = getattr(model, "save_pretrained", None)
    if save_pretrained is not None:
        save_pretrained(output_dir)
    else:
        # Plain torch module: dump the weights directly.
        torch.save(model.state_dict(), os.path.join(output_dir, "pytorch_model.bin"))

    tokenizer.save_pretrained(output_dir)