import re
from typing import Dict, Optional

from transformers import AutoTokenizer, Trainer, TrainingArguments, AutoConfig

import torch
import torch.nn.functional as F
import os
from datasets import load_dataset, config
import base_model.nlp.base_model.first_model_mla as fmm

torch.backends.cuda.matmul.allow_tf32 = True  # Enable TF32 for faster matmuls on Ampere+ GPUs
torch.set_float32_matmul_precision('medium')  # Trade float32 matmul precision for speed
# Debug aid: with CUDA_LAUNCH_BLOCKING=1, CUDA ops run synchronously so errors
# surface at the failing call instead of asynchronously later.
# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
# Allocator tuning: expandable_segments:True lets CUDA memory segments grow
# instead of pre-allocating fixed-size blocks, which can reduce fragmentation
# when batch/sequence sizes vary.
# os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
# NOTE(review): TORCH_USE_CUDA_DSA enables device-side assertions in PyTorch's
# CUDA kernels (a debugging aid). The original comment described it as "direct
# shared access", which does not match the PyTorch docs — confirm intent.
os.environ['TORCH_USE_CUDA_DSA'] = '1'

# Register the custom model configuration so AutoConfig can resolve it by name.
AutoConfig.register("usherTransformer", fmm.UsherConfig)
# Anomaly detection: slower, but pinpoints the op producing NaN/Inf gradients.
torch.autograd.set_detect_anomaly(True)


def init():
    """Initialize the training environment.

    Selects the compute device, loads the model config and tokenizer from the
    path declared by the project's UsherConfig, builds the model, and restores
    its weights from ``model.pth`` inside that directory.

    Returns:
        tuple: (device, config, tokenizer, model, model_path)

    Raises:
        FileNotFoundError: if the weights file does not exist.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # The project config only supplies the model directory; the authoritative
    # config is then re-loaded from that directory via AutoConfig.
    base_config = fmm.UsherConfig()
    config = AutoConfig.from_pretrained(base_config.path)
    tokenizer = AutoTokenizer.from_pretrained(config.path)
    # Build the model and move it to the selected device before loading weights.
    model = fmm.Transformer(config).to(device)
    # BUG FIX: the original used `config.path + r"\model.pth"`, a hard-coded
    # Windows backslash that produces an invalid path on POSIX systems.
    model_path = os.path.join(config.path, "model.pth")
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file not found at {model_path}")
    # map_location ensures the checkpoint lands on the active device.
    state_dict = torch.load(model_path, map_location=device)
    model.load_state_dict(state_dict)
    return device, config, tokenizer, model, model_path


# Module-level initialization: device, HF config, tokenizer, model, weight path.
# NOTE(review): this rebinding of `config` shadows the `config` imported from
# `datasets` on the import line above — confirm that import is actually needed.
device, config, tokenizer, model, model_path = init()


def load_date(path):
    """Load a JSONL dataset, tokenize chat-formatted prompts, and split 80/20.

    Args:
        path: path to a JSONL file with "input" and "output" fields per record.

    Returns:
        tuple: (train_split, test_split) of the tokenized dataset; each example
        carries "check_labels" — input_ids shifted left by one position.
    """
    def preprocess_function(examples):
        # Pair each user input with its target answer in the chat template.
        # (Renamed loop variables: the originals shadowed the builtins
        # `input` and `output`.)
        prompts = [
            f"<｜begin_of_sentence｜><｜User｜>{question}<｜Assistant｜>{answer}<｜end｜>"
            for question, answer in zip(examples["input"], examples["output"])
        ]

        tokenized = tokenizer(
            prompts,
            max_length=config.max_seq_len,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )

        # Labels are input_ids shifted left by one token; the vacated final
        # position is filled with pad_token_id, which the training loss skips
        # via ignore_index (the original comment incorrectly claimed -100).
        shifted = tokenized["input_ids"][:, 1:].clone().detach()
        tail = torch.full((shifted.shape[0], 1), tokenizer.pad_token_id, dtype=torch.long)
        tokenized["check_labels"] = torch.cat([shifted, tail], dim=1)

        return tokenized

    # Load the raw dataset and apply batched preprocessing.
    dataset = load_dataset('json', data_files=path)
    tokenized_datasets = dataset.map(preprocess_function, batched=True)

    # 80/20 train/validation split.
    split = tokenized_datasets["train"].train_test_split(test_size=0.2)
    return split['train'], split['test']


# Load and split the training data shipped alongside the model directory.
train_data, test_data = load_date(config.path + r"/train.jsonl")


class RLLHTuner(Trainer):
    """Trainer mixing supervised cross-entropy with a REINFORCE-style loss.

    total_loss = sup_loss + rl_weight * pg_loss, where pg_loss weights the
    log-probabilities of greedily decoded tokens by mean-centered rewards.
    """

    def __init__(self, *args, rl_weight=0.1, **kwargs):
        super().__init__(*args, **kwargs)
        self.rl_weight = rl_weight  # weight of the policy-gradient term
        self.selfLog = {}  # extra metrics merged into every Trainer log record
        # Reward components evaluated in order; each contributes
        # scope * reward(...) and may short-circuit via isBreak().
        self.rewards = [fmm.format_reward(5), fmm.length_reward(2), fmm.match_reward(3)]

    def log(self, logs: Dict[str, float], start_time: Optional[float] = None) -> None:
        # Merge our custom metrics (sup_loss / pg_loss / reward) into the
        # standard Trainer log payload.
        super().log(logs | self.selfLog, start_time)

    def reward_function(self, generated_texts: str, reference_texts: str):
        """Accumulate the weighted rewards for one (generated, reference) pair.

        Args:
            generated_texts: decoded model output for one sequence.
            reference_texts: decoded reference (label) text for that sequence.

        Returns:
            float: sum of scope-weighted component rewards; a component can
            stop further evaluation via isBreak().
        """
        reward = 0
        for component in self.rewards:
            reward += component.scope * component.reward(generated_texts, reference_texts)
            if component.isBreak():
                break
        return reward

    def compute_log_probs(self, logits, actions, pad_token_id):
        """Sum each sequence's log-probabilities, excluding padding positions.

        Args:
            logits (torch.Tensor): (batch_size, seq_len, vocab_size). NOTE: the
                caller passes log-softmaxed values, so gathering directly
                yields log-probabilities.
            actions (torch.Tensor): (batch_size, seq_len) chosen token ids.
            pad_token_id (int): token id treated as padding and masked out.

        Returns:
            torch.Tensor: (batch_size,) summed log-probabilities.
        """
        valid = actions != pad_token_id  # mask of non-padding positions
        chosen = torch.gather(logits, 2, actions.unsqueeze(-1)).squeeze(-1)
        # Zero out padding contributions, then reduce over the sequence axis.
        return chosen.masked_fill(~valid, 0.0).sum(dim=1)

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        # Supervised cross-entropy against the shifted labels; padding
        # positions are skipped via ignore_index.
        labels = inputs.pop("check_labels")
        outputs = model(**inputs)
        sup_loss = F.cross_entropy(
            outputs.view(-1, config.vocab_size),  # (batch*seq_len, vocab_size)
            labels.view(-1),
            ignore_index=tokenizer.pad_token_id,
        )

        # BUG FIX: log_softmax must be computed OUTSIDE torch.no_grad().
        # Previously it was computed under no_grad, so pg_loss was a constant
        # w.r.t. the parameters and the RL term never produced gradients.
        log_probs_full = F.log_softmax(outputs, dim=-1)

        # Greedy "sampling" and decoding need no gradient tracking.
        with torch.no_grad():
            sampled_tokens = torch.argmax(log_probs_full, dim=-1)
            generated_texts = tokenizer.batch_decode(sampled_tokens)
            reference_texts = tokenizer.batch_decode(labels)

        # Per-sequence scalar rewards.
        rewards = torch.tensor(
            [self.reward_function(g, r) for g, r in zip(generated_texts, reference_texts)],
            device=device,
        ).float()

        # REINFORCE with a mean-reward baseline (advantage = reward - mean).
        log_probs = self.compute_log_probs(
            log_probs_full, sampled_tokens, pad_token_id=tokenizer.pad_token_id
        )
        mean_reward = rewards.mean()
        pg_loss = -(log_probs * (rewards - mean_reward)).mean()

        # Blend the supervised and policy-gradient objectives.
        total_loss = sup_loss + self.rl_weight * pg_loss

        self.selfLog['sup_loss'] = sup_loss.item()
        self.selfLog['pg_loss'] = pg_loss.item()
        self.selfLog['reward'] = mean_reward.item()

        return (total_loss, outputs) if return_outputs else total_loss


# Create a timestamped log directory before defining training_args.
from datetime import datetime

log_subdir = datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = f"./logs/{log_subdir}"
os.makedirs(log_dir, exist_ok=True)

training_args = TrainingArguments(
    output_dir="./result",  # directory for checkpoints / outputs
    eval_strategy="epoch",  # evaluate at the end of every epoch
    learning_rate=5e-4,  # initial learning rate
    per_device_train_batch_size=config.max_batch_size,  # train batch size per device
    per_device_eval_batch_size=config.max_batch_size,  # eval batch size per device
    num_train_epochs=1,  # total number of training epochs
    weight_decay=0.01,  # weight-decay coefficient
    logging_dir=log_dir,  # log output directory
    logging_steps=10,  # log every 10 steps
    save_total_limit=10,  # keep at most 10 checkpoints
    bf16=True,  # enable bfloat16 mixed precision
)

trainer = RLLHTuner(
    model=model,
    args=training_args,
    train_dataset=train_data,
    eval_dataset=test_data,
    rl_weight=0.15  # weight of the RL (policy-gradient) loss term
)

trainer.train()

tokenizer.save_pretrained(config.path)
# Persist the fine-tuned weights back to the original checkpoint path.
torch.save(model.state_dict(), model_path)

# Smoke test: generate a response for a fixed prompt and print it.
forward = fmm.forward(model, tokenizer, config, device, prompt="423877184")
print(forward)
