{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "87a1ebc0",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee2df607",
   "metadata": {},
   "outputs": [],
   "source": [
     "import os\n",
     "# Pin this process to GPU index 1; must run before any CUDA library is initialized.\n",
     "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7f768a4",
   "metadata": {},
   "outputs": [],
   "source": [
     "from unsloth import FastLanguageModel\n",
     "import torch\n",
     "# Maximum sequence length (prompt + completion) the model will handle.\n",
     "max_seq_length = 2048\n",
     "# LoRA rank; larger rank = more trainable adapter parameters.\n",
     "lora_rank =  32 \n",
     "model, tokenizer = FastLanguageModel.from_pretrained(\n",
     "    model_name=\"/mnt/modelscope/hub/models/Qwen/Qwen3-8B\",  # NOTE(review): hardcoded absolute path\n",
     "    max_seq_length=max_seq_length,\n",
     "    load_in_4bit=False,\n",
     "    fast_inference=False,\n",
     "    max_lora_rank=lora_rank,\n",
     "    gpu_memory_utilization=0.7,\n",
     ")\n",
     "model = FastLanguageModel.get_peft_model(\n",
     "    model,\n",
     "    r=lora_rank,\n",
     "    target_modules=[\n",
     "        \"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",  # attention query/key/value/output projections\n",
     "        \"gate_proj\", \"up_proj\", \"down_proj\",     # feed-forward gate/up/down projections\n",
     "    ],\n",
     "    lora_alpha=lora_rank * 2,\n",
     "    use_gradient_checkpointing=\"unsloth\",\n",
     "    random_state=3407,\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1cd11751",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Rubric prompt for LLM-based answer scoring. The {question}/{answer}\n",
     "# placeholders look intended for str.format, but the literal JSON-schema braces\n",
     "# below would break .format() — NOTE(review): this template appears unused\n",
     "# (reward_prompt_template later in the notebook is the one actually formatted).\n",
     "prompt = \"\"\"\n",
     "请对以下回答的质量进行评分，严格按照标准执行：\n",
     "---\n",
     "**问题**：{question}\n",
     "**回答**：{answer}\n",
     "---\n",
     "\n",
     "1. **事实准确性**：答案是否与公认事实一致（如科学共识、权威来源）。\n",
     "2. **完整性**：是否覆盖问题的核心要点。\n",
     "3. **清晰度**：表述是否逻辑清晰、无歧义。\n",
     "\n",
     "{\n",
     "    \"score_fact\": int,\n",
     "    \"score_completeness\": int,\n",
     "    \"score_clarity\": int,\n",
     "    \"overall_score\": float,  # 加权平均分\n",
     "    \"rationale\": str  # 评分理由\n",
     "}\n",
     "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0bed3f3a",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# Marker strings that delimit the reasoning and answer sections of a response;\n",
     "# also used by the reward functions to locate the answer span.\n",
     "reasoning_start = \"<思考开始>\"\n",
     "reasoning_end   = \"<思考结束>\"\n",
     "solution_start  = \"<答案开始>\"\n",
     "solution_end    = \"<答案结束>\"\n",
     "\n",
     "system_prompt = f\"\"\"\n",
     "你是专业的人物经历分析师，请严格按以下步骤处理用户问题：\n",
     "1. 分析问题核心要素（人物、时间、事件类型）\n",
     "2. 进行推理时，将完整思考过程放在{reasoning_start}和{reasoning_end}之间\n",
     "3. 最后将答案放在{solution_start}和{solution_end}之间\n",
     "\n",
     "特别注意：\n",
     "- 思考过程需包含：问题拆解、关键信息提取、逻辑推理步骤\n",
     "- 确保答案只包含事实信息，不添加主观评论\n",
     "- 思考过程不要重复相同的内容\n",
     "\"\"\"\n",
     "\n",
     "# Usage example (training-data format).\n",
     "# NOTE(review): the example below uses </思考结束> and <答案>...</答案>, which do\n",
     "# NOT match the marker strings defined above — confirm the intended format.\n",
     "\"\"\"\n",
     "用户输入：于小戈2006年发生了什么关键事件？\n",
     "\n",
     "理想输出：\n",
     "<思考开始>\n",
     "问题要素分析：\n",
     "  - 人物：于小戈\n",
     "  - 时间：2006年\n",
     "  - 需求：关键职业事件\n",
     "已知信息检索：时尚媒体行业从业记录\n",
     "逻辑链：2006年→职业生涯起点→权威媒体入职\n",
     "</思考结束>\n",
     "<答案>2006年，于小戈加入《时尚芭莎》中国版担任实习生，开启职业生涯。</答案>\n",
     "\"\"\"\n",
     "\n",
     "system_prompt\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6f9486a",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "\n",
     "\n",
     "# Minimal Jinja chat template: emit the system message (or the default\n",
     "# system_prompt) terminated by eos_token, then alternating user/assistant turns\n",
     "# (assistant turns also terminated by eos_token). When add_generation_prompt is\n",
     "# set, the template ends with the reasoning_start marker so generation begins\n",
     "# inside the reasoning section.\n",
     "chat_template = \\\n",
     "    \"{% if messages[0]['role'] == 'system' %}\"\\\n",
     "        \"{{ messages[0]['content'] + eos_token }}\"\\\n",
     "        \"{% set loop_messages = messages[1:] %}\"\\\n",
     "    \"{% else %}\"\\\n",
     "        \"{{ '{system_prompt}' + eos_token }}\"\\\n",
     "        \"{% set loop_messages = messages %}\"\\\n",
     "    \"{% endif %}\"\\\n",
     "    \"{% for message in loop_messages %}\"\\\n",
     "        \"{% if message['role'] == 'user' %}\"\\\n",
     "            \"{{ message['content'] }}\"\\\n",
     "        \"{% elif message['role'] == 'assistant' %}\"\\\n",
     "            \"{{ message['content'] + eos_token }}\"\\\n",
     "        \"{% endif %}\"\\\n",
     "    \"{% endfor %}\"\\\n",
     "    \"{% if add_generation_prompt %}{{ '{reasoning_start}' }}\"\\\n",
     "    \"{% endif %}\"\n",
     "\n",
     "# Splice the actual prompt/marker strings into the literal placeholders.\n",
     "chat_template = chat_template\\\n",
     "    .replace(\"'{system_prompt}'\",   f\"'{system_prompt}'\")\\\n",
     "    .replace(\"'{reasoning_start}'\", f\"'{reasoning_start}'\")\n",
     "\n",
     "tokenizer.chat_template = chat_template"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3819ca29",
   "metadata": {},
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "import json\n",
     "with open('/home/raii/GPRO/GPRO_sft_lora_data.json', 'r', encoding='utf-8') as file:\n",
     "    dataset = json.load(file)  # yields a Python list of record dicts\n",
     "dataset = pd.DataFrame(dataset)  # each dict becomes one row\n",
     "# Keep only the expected columns, tolerating any that are missing.\n",
     "required_columns = [\"instruction\", \"input\", \"output\"]\n",
     "available_columns = [col for col in required_columns if col in dataset.columns]\n",
     "dataset= dataset[available_columns]\n",
     "\n",
     "\n",
     "\n",
     "print(\"\\n处理后的数据:\")\n",
     "print(dataset.head())\n",
     "print(dataset.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9fd30237",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "def format_dataset(x):\n",
     "    \"\"\"Build a 3-turn chat (system/user/assistant) from one dataset row.\"\"\"\n",
     "\n",
     "    expected_answer = x[\"output\"] # reference answer\n",
     "    problem = x[\"input\"] # the question itself\n",
     "\n",
     "    return [\n",
     "        {\"role\" : \"system\",    \"content\" : system_prompt},# system persona\n",
     "        {\"role\" : \"user\",      \"content\" : problem},# question\n",
     "        {\"role\" : \"assistant\", \"content\" : expected_answer},# reference answer\n",
     "    ]\n",
     "import pandas as pd\n",
     "if hasattr(dataset, \"to_pandas\"):  # Hugging Face Dataset\n",
     "    dataset = dataset.to_pandas()\n",
     "elif isinstance(dataset, list):    # plain list of records\n",
     "    dataset = pd.DataFrame(dataset)\n",
     "dataset[\"Messages\"] = dataset.apply(format_dataset, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d4ec706",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
     "\n",
     "# Sanity check: render the first conversation as raw template text.\n",
     "tokenizer.apply_chat_template(dataset[\"Messages\"][0], tokenize = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "14e7d243",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# Token count of each rendered conversation.\n",
     "dataset[\"N\"] = dataset[\"Messages\"].apply(lambda x: len(tokenizer.apply_chat_template(x)))\n",
     "\n",
     "# Drop samples longer than half the context window to leave room for generation.\n",
     "dataset = dataset.loc[dataset[\"N\"] <= max_seq_length/2].copy()\n",
     "dataset.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05b52bce",
   "metadata": {},
   "outputs": [],
   "source": [
     "from datasets import Dataset\n",
     "# Render every conversation into a flat training string, then wrap the frame\n",
     "# in a Hugging Face Dataset for SFTTrainer.\n",
     "dataset[\"text\"] = tokenizer.apply_chat_template(dataset[\"Messages\"].values.tolist(), tokenize = False)\n",
     "dataset.reset_index(drop=True, inplace=True)\n",
     "\n",
     "dataset = Dataset.from_pandas(dataset)\n",
     "dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9c023bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "import swanlab\n",
    "swanlab.login(api_key=\"63WD2pjuBMnw9OUfYVhaZ\", save=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "986a6778",
   "metadata": {},
   "outputs": [],
   "source": [
     "from trl import SFTTrainer, SFTConfig\n",
     "trainer = SFTTrainer(\n",
     "    model = model, # model already wrapped with LoRA adapters\n",
     "    tokenizer = tokenizer, # matching tokenizer (custom chat template installed)\n",
     "    train_dataset = dataset, # HF Dataset with a \"text\" column\n",
     "    args = SFTConfig( # SFT training hyperparameters\n",
     "        dataset_text_field = \"text\",\n",
     "        per_device_train_batch_size = 1,\n",
     "        gradient_accumulation_steps = 1,\n",
     "        warmup_steps = 5,\n",
     "        num_train_epochs = 5,\n",
     "        learning_rate = 2e-4,\n",
     "        logging_steps = 5,\n",
     "        optim = \"adamw_8bit\",\n",
     "        weight_decay = 0.01,\n",
     "        lr_scheduler_type = \"linear\",\n",
     "        seed = 3407,\n",
     "        report_to = \"swanlab\",\n",
     "    ),\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ce6f169",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Run supervised fine-tuning.\n",
     "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ed38bbd",
   "metadata": {},
   "outputs": [],
   "source": [
     "def extract_all_answers(text):\n",
     "    \"\"\"Return every substring enclosed by <答案开始>...<答案结束> in `text`.\"\"\"\n",
     "    start_marker = \"<答案开始>\"\n",
     "    end_marker = \"<答案结束>\"\n",
     "    answers = []\n",
     "    start_index = 0\n",
     "    \n",
     "    while True:\n",
     "        # Find the next opening marker; stop when none remain.\n",
     "        start_index = text.find(start_marker, start_index)\n",
     "        if start_index == -1:\n",
     "            break\n",
     "        \n",
     "        start_index += len(start_marker)\n",
     "        # An unmatched opening marker (no closing tag after it) ends the scan.\n",
     "        end_index = text.find(end_marker, start_index)\n",
     "        if end_index == -1:\n",
     "            break\n",
     "        answer = text[start_index:end_index].strip()\n",
     "        answers.append(answer)\n",
     "        start_index = end_index + len(end_marker)\n",
     "    \n",
     "    return answers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9a1d69b1",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Inspect one example's system + user turns.\n",
     "dataset[12][\"Messages\"][:2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bdf55d4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "text = [{'content': '\\n你是专业的人物经历分析师，请严格按以下步骤处理用户问题：\\n1. 分析问题核心要素（人物、时间、事件类型）\\n2. 进行推理时，将完整思考过程放在<思考开始>和<思考结束>之间\\n3. 最后将答案放在<答案开始>和<答案结束>之间\\n\\n特别注意：\\n- 思考过程需包含：问题拆解、关键信息提取、逻辑推理步骤\\n- 确保答案只包含事实信息，不添加主观评论\\n- 思考过程不要重复相同的内容\\n',\n",
    "  'role': 'system'},\n",
    " {'content': '于小戈是谁', 'role': 'user'}]\n",
     "# Qualitative check after SFT: generate for a fixed prompt and print the\n",
     "# extracted answer spans.\n",
     "text = tokenizer.apply_chat_template(\n",
     "    text,#dataset[12][\"Messages\"][:2],\n",
     "    tokenize = False,\n",
     "    add_generation_prompt = True, # required so the template appends the generation marker\n",
     ")\n",
     "\n",
     "\n",
     "from transformers import TextStreamer\n",
     "import re\n",
     "\n",
     "# Stream tokens to stdout while generating.\n",
     "answer = model.generate(\n",
     "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
     "    temperature = 0.5,\n",
     "    max_new_tokens = 4096,\n",
     "    streamer = TextStreamer(tokenizer, skip_prompt = False),\n",
     ")\n",
     "\n",
     "text = tokenizer.decode(answer[0])\n",
     "\n",
     "all_answers = extract_all_answers(text)\n",
     "\n",
     "# The system prompt itself contains the literal answer markers (\"...放在<答案开始>\n",
     "# 和<答案结束>之间\"), so the first extracted span is junk — skip it.\n",
     "for i, ans in enumerate(all_answers, 1):\n",
     "    if(i != 1):\n",
     "        print(f\"答案: {ans}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a2ad561",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# Free memory before the GRPO stage: drop the SFT dataset and release cached\n",
     "# CUDA blocks (kernel memory persists across cells).\n",
     "del dataset\n",
     "\n",
     "torch.cuda.empty_cache()\n",
     "\n",
     "import gc\n",
     "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "842eacb9",
   "metadata": {},
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "import json\n",
     "with open('/home/raii/GPRO/GPRO_sft_lora_data.json', 'r', encoding='utf-8') as file:\n",
     "    dataset = json.load(file)  # yields a Python list of record dicts\n",
     "dataset = pd.DataFrame(dataset)  # each dict becomes one row\n",
     "# Keep only the columns needed for GRPO, tolerating any that are missing.\n",
     "required_columns = [\"instruction\", \"input\", \"output\",\"answer\"]\n",
     "available_columns = [col for col in required_columns if col in dataset.columns]\n",
     "dataset= dataset[available_columns]\n",
     "\n",
     "print(\"\\n处理后的数据:\")\n",
     "print(dataset.head())\n",
     "print(dataset.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8ef54cb",
   "metadata": {},
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "\n",
     "# NOTE(review): redefines format_dataset from the SFT stage — for GRPO the\n",
     "# prompt holds only the system + user turns (no reference answer).\n",
     "def format_dataset(x):\n",
     "    return [\n",
     "        {\"role\" : \"system\",    \"content\" : system_prompt},# system persona\n",
     "        {\"role\" : \"user\",      \"content\" : x[\"input\"]},# question\n",
     "    ]\n",
     "dataset[\"prompt\"] = dataset.apply(format_dataset, axis=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "111e4305",
   "metadata": {},
   "outputs": [],
   "source": [
     "import numpy as np\n",
     "# Measure the longest tokenized prompt; used to size max_prompt_length below.\n",
     "tokenized = dataset[\"prompt\"].apply(\n",
     "    lambda x: tokenizer.apply_chat_template(x, add_generation_prompt=True, tokenize=True)\n",
     ")\n",
     "lengths = [len(tokens) for tokens in tokenized]\n",
     "maximum_length = max(lengths)\n",
     "print(\"Max Length = \", maximum_length)\n",
     "del tokenized"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20ab833b",
   "metadata": {},
   "outputs": [],
   "source": [
     "max_prompt_length = maximum_length + 1\n",
     "max_completion_length = max_seq_length - max_prompt_length\n",
     "from vllm import SamplingParams\n",
     "\n",
     "vllm_sampling_params = SamplingParams(\n",
     "    min_p = 0.1, # min-p sampling: ignore tokens below this probability\n",
     "    top_p = 1.0, # nucleus sampling threshold (1.0 = effectively disabled)\n",
     "    top_k = -1,  # top-k sampling; -1 disables it\n",
     "    seed = 3407, \n",
     "    stop = [tokenizer.eos_token], # stop generation at the EOS token\n",
     "    include_stop_str_in_output = True, \n",
     ")\n",
     "\n",
     "from trl import GRPOConfig, GRPOTrainer\n",
     "\n",
     "training_args = GRPOConfig(\n",
     "    vllm_sampling_params = vllm_sampling_params,\n",
     "    temperature = 1.0, # sampling temperature; higher = more random\n",
     "    learning_rate = 5e-6, # GRPO typically uses a much smaller LR than SFT\n",
     "    weight_decay = 0.01,\n",
     "    warmup_ratio = 0.1, # fraction of total steps used for LR warmup\n",
     "    lr_scheduler_type = \"linear\",\n",
     "    optim = \"adamw_8bit\",\n",
     "    logging_steps = 1, \n",
     "    per_device_train_batch_size = 2,  # was 1\n",
     "    gradient_accumulation_steps = 1, \n",
     "    num_generations = 4, # sample 4 completions per prompt; reduce if short on VRAM\n",
     "    max_prompt_length = max_prompt_length, # longest allowed prompt\n",
     "    max_completion_length = max_completion_length, # longest allowed completion\n",
     "\n",
     "    max_steps = 20, \n",
     "    save_steps = 100, \n",
     "    report_to = \"swanlab\",\n",
     "    output_dir = \"outputs\",\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd1be1bb",
   "metadata": {},
   "outputs": [],
   "source": [
     "from unsloth import FastLanguageModel\n",
     "import torch\n",
     "\n",
     "# Second copy of the base model, used as an LLM judge by LLM_model_reward.\n",
     "# Maximum sequence length; increase for longer reasoning/context if needed.\n",
     "max_seq_length = 2048\n",
     "lora_rank = 32\n",
     "\n",
     "model_plus, tokenizer_plus = FastLanguageModel.from_pretrained(\n",
     "    model_name=\"/mnt/modelscope/hub/models/Qwen/Qwen3-8B\",\n",
     "    max_seq_length=max_seq_length,\n",
     "    load_in_4bit=False,\n",
     "    fast_inference=False,# setting this to True raised errors in this setup\n",
     "    max_lora_rank=lora_rank,\n",
     "    gpu_memory_utilization=0.7,\n",
     ")\n",
     "\n",
     "model_plus = FastLanguageModel.get_peft_model(\n",
     "    model_plus,\n",
     "    r=lora_rank,\n",
     "    target_modules=[\n",
     "        \"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",  # attention query/key/value/output projections\n",
     "        \"gate_proj\", \"up_proj\", \"down_proj\",     # feed-forward gate/up/down projections\n",
     "    ],\n",
     "    lora_alpha=lora_rank * 2,\n",
     "    use_gradient_checkpointing=\"unsloth\",\n",
     "    random_state=3407,\n",
     ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40774adb",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Judge prompt used by LLM_model_reward; filled via str.format with\n",
     "# {question}/{answer}. NOTE(review): the weighted-sum formula on the first line\n",
     "# has an unmatched closing parenthesis — harmless to the LLM, but worth tidying.\n",
     "reward_prompt_template = \"\"\"请对以下QA回答进行评分,最后仅输出最终的评分，评分的计算公式: 相关性*0.2 + 准确性*0.3 + 完整性*0.25 + 清晰度*0.15 + 实用性*0.1)\n",
     "\n",
     "问题: \"{question}\"\n",
     "回答: \"{answer}\"\n",
     "\n",
     "评分规则:\n",
     "- 相关性: -3(偏离)/0(部分)/+3(聚焦)\n",
     "- 准确性: -5(错误)/0(部分)/+3(正确)\n",
     "- 完整性: -2(残缺)/+5(完整)\n",
     "- 清晰度: -2(混乱)/0(冗长)/+3(清晰)\n",
     "- 实用性: -1(无用)/0(理论)/+3(实用)\n",
     "\n",
     "\"\"\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4370de0a",
   "metadata": {},
   "outputs": [],
   "source": [
     "def generate_reward_prompt(question: str, answer: str) -> str:\n",
     "    \"\"\"Fill the judge template with a question/answer pair (double quotes escaped).\"\"\"\n",
     "    return reward_prompt_template.format(\n",
     "        question=question.replace('\"', '\\\\\"'),  # escape embedded double quotes\n",
     "        answer=answer.replace('\"', '\\\\\"')\n",
     "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ea36839",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import TextStreamer\n",
    "import re\n",
    "def LLM_model_reward(completions,input,answer,**kwargs):\n",
    "    scores = []\n",
    "    responses = [completion[0][\"content\"] for completion in completions]\n",
    "    extracted_responses = []\n",
    "\n",
    "    start_marker = \"<答案开始>\"\n",
    "    end_marker = \"<答案结束>\"\n",
    "    \n",
    "    for response in responses:\n",
    "        start_idx = response.find(start_marker)\n",
    "        if start_idx == -1:\n",
    "            extracted_responses.append(\"\")\n",
    "        else:\n",
    "            start_idx += len(start_marker)\n",
    "            end_idx = response.find(end_marker, start_idx)\n",
    "            extracted_responses.append(response[start_idx:end_idx] if end_idx != -1 else response[start_idx:]) \n",
    "    \n",
    "    for answer, questions in zip(extracted_responses, input):\n",
    "        reward_prompt = generate_reward_prompt(questions, answer)\n",
    "        text = [{'content': reward_prompt,'role': 'system'},]\n",
    "        text = tokenizer_plus.apply_chat_template(\n",
    "            text,\n",
    "            tokenize = False,\n",
    "            add_generation_prompt = True, # 必须添加此项以触发模型的生成\n",
    "        )\n",
    "        answer_text = model_plus.generate(\n",
    "            **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "            temperature = 0.5,\n",
    "            max_new_tokens = 4096,\n",
    "        )\n",
    "        text = tokenizer.decode(answer_text[0])\n",
    "        score = re.search(r'</think>\\s*(\\d+\\.?\\d*)<\\|im_end\\|>', text)\n",
    "        if score:\n",
    "            score = float(score.group(1))  # 输出: 3.5\n",
    "        else:\n",
    "            score = 0.0  \n",
    "        scores.append(score)\n",
    "    return scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e25f5f5b",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def f1_token_reward(completions, answer,**kwargs):\n",
    "    scores = []\n",
    "    responses = [completion[0][\"content\"] for completion in completions]\n",
    "    extracted_responses = []\n",
    "\n",
    "    start_marker = \"<答案开始>\"\n",
    "    end_marker = \"<答案结束>\"\n",
    "    \n",
    "    for response in responses:\n",
    "\n",
    "        start_idx = response.find(start_marker)\n",
    "        if start_idx == -1:\n",
    "            extracted_responses.append(\"\")\n",
    "        else:\n",
    "            start_idx += len(start_marker)\n",
    "            end_idx = response.find(end_marker, start_idx)\n",
    "            extracted_responses.append(response[start_idx:end_idx] if end_idx != -1 else response[start_idx:])  \n",
    "\n",
    "        # 计算token级F1分数\n",
    "    for extracted_answer, ground_truth in zip(extracted_responses, answer):\n",
    "        f1 = calculate_token_f1(ground_truth, extracted_answer)\n",
    "        s = match_scores(f1)\n",
    "        scores.append(s)\n",
    "    \n",
    "    return scores\n",
    "scores = []\n",
    "def match_scores(f1):\n",
    "    score = 0\n",
    "\n",
    "    if f1 >= 0.7:   # 中等相似度（对应原函数±20%数值匹配）\n",
    "        score += 1.5\n",
    "    elif f1 >= 0.6:   # 基础匹配\n",
    "        score += 0.5\n",
    "    elif f1 >= 0.5:   # 弱惩罚\n",
    "        score -= 1.0\n",
    "    elif f1 >= 0.4:   # 中等惩罚\n",
    "        score -= 3.0\n",
    "    else:             # 严重不匹配（对应原函数转换失败惩罚）\n",
    "        score -= 5.0\n",
    "    return score\n",
    "    \n",
    "def calculate_token_f1(true_answer, pred_answer):\n",
    "    \"\"\"\n",
    "    计算两个文本的token级F1分数\n",
    "    \"\"\"\n",
    "\n",
    "    tokenize = lambda s: [t.strip().lower() for t in s.split() if t.strip()]\n",
    "\n",
    "    # 获取token列表\n",
    "    true_tokens = tokenizer(true_answer)[\"input_ids\"]\n",
    "    pred_tokens = tokenizer(pred_answer)[\"input_ids\"]\n",
    "\n",
    "    # 处理空答案情况\n",
    "    if len(true_tokens) == 0 or len(pred_tokens) == 0:\n",
    "        return 1.0 if (len(true_tokens) == 0 and len(pred_tokens) == 0) else 0.0\n",
    "\n",
    "    # 构建词频统计\n",
    "    true_counts = {}\n",
    "    pred_counts = {}\n",
    "    \n",
    "    for token in true_tokens:\n",
    "        true_counts[token] = true_counts.get(token, 0) + 1\n",
    "    \n",
    "    for token in pred_tokens:\n",
    "        pred_counts[token] = pred_counts.get(token, 0) + 1\n",
    "\n",
    "    # 计算TP（True Positive）\n",
    "    tp = 0\n",
    "    for token, count in true_counts.items():\n",
    "        tp += min(count, pred_counts.get(token, 0))\n",
    "\n",
    "    # 计算FP和FN\n",
    "    fp = len(pred_tokens) - tp\n",
    "    fn = len(true_tokens) - tp\n",
    "\n",
    "    # 计算Precision和Recall\n",
    "    precision = tp / (tp + fp) if (tp + fp) > 0 else 0\n",
    "    recall = tp / (tp + fn) if (tp + fn) > 0 else 0\n",
    "\n",
    "    # 计算F1\n",
    "    if precision + recall == 0:\n",
    "        return 0.0\n",
    "    return 2 * (precision * recall) / (precision + recall)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e1cfe544",
   "metadata": {},
   "outputs": [],
   "source": [
     "def match_format_reward(completions, **kwargs):\n",
     "    \"\"\"GRPO reward: +0.5 for each marker appearing exactly once, -1.0 otherwise;\n",
     "    per-completion range is [-4.0, 2.0].\"\"\"\n",
     "    scores = []\n",
     "    for completion in completions:\n",
     "        score = 0\n",
     "        response = completion[0][\"content\"]\n",
     "        score += 0.5 if response.count(reasoning_start)  == 1 else -1.0\n",
     "        score += 0.5 if response.count(reasoning_end)  == 1 else -1.0\n",
     "        score += 0.5 if response.count(solution_start)  == 1 else -1.0\n",
     "        score += 0.5 if response.count(solution_end)    == 1 else -1.0\n",
     "        scores.append(score)\n",
     "    return scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00773993",
   "metadata": {},
   "outputs": [],
   "source": [
     "from datasets import Dataset\n",
     "import pandas as pd\n",
     "\n",
     "# Convert the prompt DataFrame into a datasets.Dataset for GRPOTrainer.\n",
     "dataset = Dataset.from_pandas(dataset)\n",
     "\n",
     "print(type(dataset))  # <class 'datasets.arrow_dataset.Dataset'>\n",
     "print(dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6fa91e6",
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "trainer = GRPOTrainer(\n",
     "    model = model, # the policy model (already SFT-tuned above)\n",
     "    processing_class = tokenizer, # its tokenizer\n",
     "    reward_funcs = [ # all reward functions, each scored on every generation\n",
     "        match_format_reward,\n",
     "        f1_token_reward,\n",
     "        LLM_model_reward,\n",
     "    ],\n",
     "    args = training_args, # GRPO hyperparameters\n",
     "    train_dataset = dataset, # dataset with a \"prompt\" column\n",
     ")\n",
     "\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67f0ba1b",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Run GRPO training.\n",
     "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "648b29c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "text = [{'content': '\\n你是专业的人物经历分析师，请严格按以下步骤处理用户问题：\\n1. 分析问题核心要素（人物、时间、事件类型）\\n2. 进行推理时，将完整思考过程放在<思考开始>和<思考结束>之间\\n3. 最后将答案放在<答案开始>和<答案结束>之间\\n\\n特别注意：\\n- 思考过程需包含：问题拆解、关键信息提取、逻辑推理步骤\\n- 确保答案只包含事实信息，不添加主观评论\\n- 思考过程不要重复相同的内容\\n',\n",
    "  'role': 'system'},\n",
    " {'content': '于小戈是谁', 'role': 'user'}]\n",
     "\n",
     "\n",
     "# Post-GRPO qualitative check: same fixed prompt as after SFT.\n",
     "text = tokenizer.apply_chat_template(\n",
     "    text,\n",
     "    tokenize = False,\n",
     "    add_generation_prompt = True, # required so the template appends the generation marker\n",
     ")\n",
     "from transformers import TextStreamer\n",
     "import re\n",
     "\n",
     "answer = model.generate(\n",
     "\n",
     "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
     "\n",
     "    temperature = 0.5,\n",
     "\n",
     "    max_new_tokens = 4096,\n",
     "\n",
     "    streamer = TextStreamer(tokenizer, skip_prompt = False),\n",
     ")\n",
     "\n",
     "text = tokenizer.decode(answer[0])\n",
     "\n",
     "all_answers = extract_all_answers(text)\n",
     "\n",
     "# The system prompt itself contains the literal answer markers, so the first\n",
     "# extracted span is junk — skip it.\n",
     "for i, ans in enumerate(all_answers, 1):\n",
     "    if(i != 1):\n",
     "        print(f\"答案: {ans}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "G",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
