{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step1 导入相关包\n",
    "import os\n",
    "import json\n",
    "import numpy as np\n",
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "# from transformers import BertTokenizer, BertForTokenClassification, AdamW\n",
    "from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer, DataCollatorForTokenClassification\n",
    "from peft import get_peft_model, LoraConfig, TaskType, PeftModel\n",
    "from seqeval.metrics import classification_report, f1_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global registry of entity types; populated as NERDataset parses the training file\n",
    "categories = set()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step2: custom Dataset subclass for the character-level BIO data.\n",
    "# Input file format: one 'char tag' pair per line, blank line between sentences.\n",
    "class NERDataset(Dataset):\n",
    "  def __init__(self, data_file):\n",
    "    self.data = self.load_data(data_file)\n",
    "\n",
    "  def load_data(self, data_file):\n",
    "    \"\"\"Parse a BIO-tagged file into {idx: {sentence, labels, char_labels}}.\"\"\"\n",
    "    Data = {}\n",
    "    with open(data_file, 'rt', encoding='utf-8') as f:\n",
    "      for block in f.read().split('\\n\\n'):\n",
    "        if not block:\n",
    "          # BUGFIX: skip empty blocks (e.g. a trailing newline) instead of\n",
    "          # `break`, which silently dropped every sentence after a blank block.\n",
    "          continue\n",
    "        sentence, labels = '', []\n",
    "        for item in block.split('\\n'):\n",
    "          parts = item.split(' ')\n",
    "          if len(parts) != 2:\n",
    "            continue  # defensively skip malformed lines instead of crashing\n",
    "          char, tag = parts\n",
    "          pos0 = len(sentence)  # character position of `char` within `sentence`\n",
    "          sentence += char\n",
    "          if tag.startswith('B'):\n",
    "            labels.append([pos0, pos0, char, tag[2:]])  # strip the 'B-' prefix\n",
    "            categories.add(tag[2:])\n",
    "          elif tag.startswith('I') and labels:\n",
    "            # Extend the current entity; the `and labels` guard prevents an\n",
    "            # IndexError on a malformed I- tag with no preceding B- tag.\n",
    "            labels[-1][1] = pos0\n",
    "            labels[-1][2] += char\n",
    "\n",
    "        # Pre-generate character-level BIO labels for the collate function.\n",
    "        char_labels = ['O'] * len(sentence)\n",
    "        for start, end, text, label_type in labels:\n",
    "          for pos in range(start, end + 1):\n",
    "            char_labels[pos] = f'B-{label_type}' if pos == start else f'I-{label_type}'\n",
    "\n",
    "        # Key by current size so keys stay contiguous (0..len-1) even when a\n",
    "        # block is skipped — __getitem__ indexes by position.\n",
    "        Data[len(Data)] = {\n",
    "          'sentence': sentence,\n",
    "          'labels': labels,            # original entity spans [start, end, text, type]\n",
    "          'char_labels': char_labels   # pre-generated per-character BIO tags\n",
    "        }\n",
    "    print(f'数据集中包含的实体类型有：{categories}')\n",
    "    return Data\n",
    "\n",
    "  def __len__(self):\n",
    "    return len(self.data)\n",
    "\n",
    "  def __getitem__(self, idx):\n",
    "    return self.data[idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step3: load the training split (path is relative to the notebook's working directory)\n",
    "train_data = NERDataset('./dataset/medical.train')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "111\n",
      "数据集中包含的实体类型有：{'西医治疗', '西医诊断', '中医治疗', '中药', '其他治疗', '方剂', '中医诊断', '中医治则', '临床表现', '中医证候'}\n"
     ]
    }
   ],
   "source": [
    "print(f'数据集中包含的实体类型有：{categories}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'sentence': '现头昏口苦', 'labels': [[3, 4, '口苦', '临床表现']]}\n",
      "第2条训练样本: {'sentence': '目的观察复方丁香开胃贴外敷神阙穴治疗慢性心功能不全伴功能性消化不良的临床疗效', 'labels': [[4, 10, '复方丁香开胃贴', '中医治疗'], [20, 32, '心功能不全伴功能性消化不良', '西医诊断']]}\n"
     ]
    }
   ],
   "source": [
    "# 打印出一个训练样本：\n",
    "print(train_data[0])\n",
    "\n",
    "# print(f\"第2条训练样本: {train_data[1]}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "len: 21\n",
      "{0: 'O', 1: 'B-中医治则', 2: 'I-中医治则', 3: 'B-中医治疗', 4: 'I-中医治疗', 5: 'B-中医证候', 6: 'I-中医证候', 7: 'B-中医诊断', 8: 'I-中医诊断', 9: 'B-中药', 10: 'I-中药', 11: 'B-临床表现', 12: 'I-临床表现', 13: 'B-其他治疗', 14: 'I-其他治疗', 15: 'B-方剂', 16: 'I-方剂', 17: 'B-西医治疗', 18: 'I-西医治疗', 19: 'B-西医诊断', 20: 'I-西医诊断'}\n",
      "{'O': 0, 'B-中医治则': 1, 'I-中医治则': 2, 'B-中医治疗': 3, 'I-中医治疗': 4, 'B-中医证候': 5, 'I-中医证候': 6, 'B-中医诊断': 7, 'I-中医诊断': 8, 'B-中药': 9, 'I-中药': 10, 'B-临床表现': 11, 'I-临床表现': 12, 'B-其他治疗': 13, 'I-其他治疗': 14, 'B-方剂': 15, 'I-方剂': 16, 'B-西医治疗': 17, 'I-西医治疗': 18, 'B-西医诊断': 19, 'I-西医诊断': 20}\n"
     ]
    }
   ],
   "source": [
    "# Build the label mapping tables — the core configuration of this NER task.\n",
    "# Label 0 is reserved for 'O'; every entity type gets a B-/I- pair, assigned\n",
    "# in sorted order so the mapping is deterministic across runs.\n",
    "id2label = {0: 'O'}\n",
    "for entity_type in sorted(categories):\n",
    "    for prefix in ('B', 'I'):\n",
    "        id2label[len(id2label)] = f'{prefix}-{entity_type}'\n",
    "label2id = {name: idx for idx, name in id2label.items()}\n",
    "\n",
    "print(f'len: {len(id2label)}')\n",
    "print(id2label)\n",
    "print(label2id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21\n"
     ]
    }
   ],
   "source": [
    "num_labels=len(list(label2id))\n",
    "print(num_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "21"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(label2id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of Qwen2ForTokenClassification were not initialized from the model checkpoint at D:\\ghh\\llm_project\\fine-tuning\\LLM-Finetune\\qwen and are newly initialized: ['score.bias', 'score.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "# step4: load the tokenizer and the pretrained backbone\n",
    "\n",
    "# model_name = 'qwen/Qwen2.5-7B'\n",
    "model_name = r'/root/autodl-tmp/Qwen/Qwen2.5-7B-Instruct'\n",
    "# model_name = r'D:\\ghh\\llm_project\\fine-tuning\\LLM-Finetune\\qwen'\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "\n",
    "# The token-classification head (`score`) is newly initialized on top of the\n",
    "# base model — hence the \"should probably TRAIN this model\" warning below.\n",
    "model = AutoModelForTokenClassification.from_pretrained(\n",
    "  model_name, \n",
    "  num_labels=len(label2id),\n",
    "  id2label=id2label,\n",
    "  label2id=label2id\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step5: collate function — tokenize a batch of sentences and align the\n",
    "# pre-generated character-level labels with the produced tokens.\n",
    "def collote_fn(batch_samples):\n",
    "    batch_sentence = [sample['sentence'] for sample in batch_samples]\n",
    "    batch_char_labels = [sample['char_labels'] for sample in batch_samples]  # pre-generated labels\n",
    "\n",
    "    # BUGFIX: with `is_split_into_words=True` a *batch* must be a list of word\n",
    "    # lists (List[List[str]]), not a list of raw strings. Splitting each\n",
    "    # sentence into characters makes `word_ids()` map every token back to a\n",
    "    # character position — exactly what `char_labels` is indexed by.\n",
    "    batch_inputs = tokenizer(\n",
    "        [list(sentence) for sentence in batch_sentence],\n",
    "        padding=True,\n",
    "        truncation=True,\n",
    "        max_length=512,\n",
    "        return_tensors=\"pt\",\n",
    "        is_split_into_words=True\n",
    "    )\n",
    "\n",
    "    aligned_labels = []\n",
    "    for i, char_labels in enumerate(batch_char_labels):\n",
    "        word_ids = batch_inputs.word_ids(batch_index=i)\n",
    "        token_labels = []\n",
    "\n",
    "        for word_idx in word_ids:\n",
    "            if word_idx is None:\n",
    "                token_labels.append(-100)  # special/padding tokens are ignored by the loss\n",
    "            else:\n",
    "                token_labels.append(label2id[char_labels[word_idx]])\n",
    "\n",
    "        aligned_labels.append(token_labels)\n",
    "\n",
    "    batch_inputs['labels'] = torch.tensor(aligned_labels, dtype=torch.long)\n",
    "\n",
    "    return {\n",
    "        'input_ids': batch_inputs['input_ids'],\n",
    "        'attention_mask': batch_inputs['attention_mask'],\n",
    "        'labels': batch_inputs['labels']\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataloader = DataLoader(train_data, batch_size=4, shuffle=True, collate_fn=collote_fn)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch one batch to sanity-check the collate output\n",
    "batch = next(iter(train_dataloader))\n",
    "# BUGFIX: `batch` is a dict; tuple-unpacking a dict yields its KEY STRINGS,\n",
    "# so `input_ids[0]` would have printed a character of 'input_ids', not a\n",
    "# tensor row. Index the dict explicitly instead.\n",
    "input_ids = batch['input_ids']\n",
    "attention_mask = batch['attention_mask']\n",
    "labels = batch['labels']\n",
    "\n",
    "# Inspect the first sample of the batch\n",
    "print(\"=== 第一个样本详情 ===\", batch)\n",
    "print(f\"input_ids: {input_ids[0]}\")\n",
    "print(f\"attention_mask: {attention_mask[0]}\")\n",
    "print(f\"labels: {labels[0]}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step6: LoRA configuration\n",
    "lora_config = LoraConfig(\n",
    "  task_type=TaskType.TOKEN_CLS,  # important: TOKEN_CLS (per-token labels), not SEQ_CLS\n",
    "  r=8,\n",
    "  lora_alpha=16,\n",
    "  target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
    "  lora_dropout=0.1\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step7 使用peft进行lora微调\n",
    "peft_model = get_peft_model(model, lora_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step8: training hyper-parameters\n",
    "training_args = TrainingArguments(\n",
    "  output_dir=\"./qwen-ner\",\n",
    "  overwrite_output_dir=True,\n",
    "  num_train_epochs=3,\n",
    "  per_device_train_batch_size=4,\n",
    "  per_device_eval_batch_size=4,\n",
    "  # NOTE(review): step-based evaluation is enabled here, but the Trainer cell\n",
    "  # below is constructed without an eval_dataset — evaluation would fail at\n",
    "  # step 100; confirm an eval set is supplied or switch this to \"no\".\n",
    "  evaluation_strategy=\"steps\",\n",
    "  save_strategy=\"steps\",\n",
    "  eval_steps=100, # evaluate every 100 steps to limit evaluation overhead\n",
    "  save_steps=100, # checkpoint every 100 steps to limit disk churn\n",
    "  logging_steps=20, # log every 20 steps to limit log volume\n",
    "  learning_rate=1e-4\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "健壮版本生成的实体类别映射:\n",
      "  中医治则: ['B-中医治则', 'I-中医治则']\n",
      "  中医治疗: ['B-中医治疗', 'I-中医治疗']\n",
      "  中医证候: ['B-中医证候', 'I-中医证候']\n",
      "  中医诊断: ['B-中医诊断', 'I-中医诊断']\n",
      "  中药: ['B-中药', 'I-中药']\n",
      "  临床表现: ['B-临床表现', 'I-临床表现']\n",
      "  其他治疗: ['B-其他治疗', 'I-其他治疗']\n",
      "  方剂: ['B-方剂', 'I-方剂']\n",
      "  西医治疗: ['B-西医治疗', 'I-西医治疗']\n",
      "  西医诊断: ['B-西医诊断', 'I-西医诊断']\n"
     ]
    }
   ],
   "source": [
    "# Define the entity-category mapping (merging B-/I- labels into entity types).\n",
    "id2label = {0: 'O', 1: 'B-中医治则', 2: 'I-中医治则', 3: 'B-中医治疗', 4: 'I-中医治疗', 5: 'B-中医证候', 6: 'I-中医证候', 7: 'B-中医诊断', 8: 'I-中医诊断', 9: 'B-中药', 10: 'I-中药', 11: 'B-临床表现', 12: 'I-临床表现', 13: 'B-其他治疗', 14: 'I-其他治疗', 15: 'B-方剂', 16: 'I-方剂', 17: 'B-西医治疗', 18: 'I-西医治疗', 19: 'B-西医诊断', 20: 'I-西医诊断'}\n",
    "def create_entity_categories_robust(id2label):\n",
    "    \"\"\"Map every entity type to the list of its existing B-/I- label names.\"\"\"\n",
    "    known_labels = set(id2label.values())\n",
    "    entity_categories = {}\n",
    "\n",
    "    for label_name in id2label.values():\n",
    "        # Each B- label opens a category named after the bare entity type.\n",
    "        if not label_name.startswith('B-'):\n",
    "            continue\n",
    "        entity_type = label_name[2:]\n",
    "        members = [label_name]\n",
    "\n",
    "        # Attach the matching I- label only when it actually exists.\n",
    "        inside_label = f'I-{entity_type}'\n",
    "        if inside_label in known_labels:\n",
    "            members.append(inside_label)\n",
    "\n",
    "        entity_categories[entity_type] = members\n",
    "\n",
    "    return entity_categories"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step9: evaluation function with custom per-entity metrics\n",
    "def compute_metrics(eval_preds):\n",
    "    \"\"\"Compute seqeval-based F1 scores adapted to this notebook's label scheme.\"\"\"\n",
    "    logits, labels = eval_preds\n",
    "    predictions = np.argmax(logits, axis=-1)\n",
    "    \n",
    "    # Drop the labels of padding / special tokens (marked -100 by the collator).\n",
    "    true_labels = []\n",
    "    true_predictions = []\n",
    "    \n",
    "    for i in range(len(labels)):\n",
    "        preds = []\n",
    "        trues = []\n",
    "        for j in range(len(labels[i])):\n",
    "            if labels[i][j] != -100:  # ignored positions\n",
    "                preds.append(id2label[predictions[i][j]])\n",
    "                trues.append(id2label[labels[i][j]])\n",
    "        \n",
    "        true_predictions.append(preds)\n",
    "        true_labels.append(trues)\n",
    "    \n",
    "    # Entity-level precision/recall/F1 per label from seqeval.\n",
    "    report = classification_report(true_labels, true_predictions, output_dict=True, zero_division=0)\n",
    "    \n",
    "    # Dynamically build the entity-type -> [B-, I-] label mapping.\n",
    "    entity_categories = create_entity_categories_robust(id2label)\n",
    "    \n",
    "    # Extract one F1 score per entity category.\n",
    "    f1_scores = {}\n",
    "    for category, labels_in_category in entity_categories.items():\n",
    "        category_f1_scores = []\n",
    "        \n",
    "        for label in labels_in_category:\n",
    "            if label in report:\n",
    "                category_f1_scores.append(report[label]['f1-score'])\n",
    "        \n",
    "        if category_f1_scores:\n",
    "            # Average the scores of the labels belonging to this category.\n",
    "            f1_scores[category] = np.mean(category_f1_scores)\n",
    "        else:\n",
    "            f1_scores[category] = 0.0\n",
    "    \n",
    "    # Macro F1 from the report; micro F1 from seqeval's f1_score.\n",
    "    macro_f1 = report['macro avg']['f1-score']\n",
    "    micro_f1 = f1_score(true_labels, true_predictions, zero_division=0)\n",
    "    \n",
    "    result = {\n",
    "        'macro_f1': macro_f1,\n",
    "        'micro_f1': micro_f1,\n",
    "        **f1_scores\n",
    "    }\n",
    "    \n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据收集器\n",
    "data_collator = DataCollatorForTokenClassification(\n",
    "  tokenizer=tokenizer,\n",
    "  padding=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step10: build the Trainer\n",
    "# BUGFIX: `train_dataset` must be a Dataset, not a DataLoader — the Trainer\n",
    "# builds its own DataLoader internally. Our samples are raw dicts with a\n",
    "# 'sentence' key, so the custom `collote_fn` must serve as the data collator;\n",
    "# DataCollatorForTokenClassification expects already-tokenized features.\n",
    "trainer = Trainer(\n",
    "  model=peft_model,\n",
    "  args=training_args,\n",
    "  train_dataset=train_data,\n",
    "  data_collator=collote_fn,\n",
    "  compute_metrics=compute_metrics\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step11 训练\n",
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step11: helper to pretty-print the F1 metrics after training\n",
    "def print_detailed_f1_scores(metrics):\n",
    "  \"\"\"Print a per-entity F1 report, sorted from best to worst score.\"\"\"\n",
    "  print(\"\\n\" + \"=\"*60)\n",
    "  print(\"各实体类别F1分数详细报告\")\n",
    "  print(\"=\"*60)\n",
    "  \n",
    "  # Anything that is not a global/runtime metric is a per-entity F1 score.\n",
    "  non_entity_keys = {'eval_loss', 'eval_macro_f1', 'eval_micro_f1', 'eval_runtime',\n",
    "                     'eval_samples_per_second', 'eval_steps_per_second', 'epoch'}\n",
    "  entity_scores = {key: value for key, value in metrics.items()\n",
    "                   if key not in non_entity_keys}\n",
    "  \n",
    "  # Highest F1 first.\n",
    "  for entity, f1 in sorted(entity_scores.items(), key=lambda kv: kv[1], reverse=True):\n",
    "    print(f\"{entity:>10}: {f1:.4f}\")\n",
    "  \n",
    "  print(\"-\"*60)\n",
    "  if 'eval_macro_f1' in metrics:\n",
    "    print(f\"{'宏平均F1':>10}: {metrics['eval_macro_f1']:.4f}\")\n",
    "  if 'eval_micro_f1' in metrics:\n",
    "    print(f\"{'微平均F1':>10}: {metrics['eval_micro_f1']:.4f}\")\n",
    "  print(\"=\"*60)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# After training: evaluate and print the detailed per-entity report.\n",
    "final_metrics = trainer.evaluate()\n",
    "# BUGFIX: `extract_f1_scores_from_metrics` was never defined anywhere in this\n",
    "# notebook (NameError). `print_detailed_f1_scores` already filters the metric\n",
    "# dict itself, so pass it directly; still compute `f1_scores` (entity-only\n",
    "# scores) here because the plotting cell below consumes it.\n",
    "excluded = {'eval_loss', 'eval_macro_f1', 'eval_micro_f1', 'eval_runtime',\n",
    "            'eval_samples_per_second', 'eval_steps_per_second', 'epoch'}\n",
    "f1_scores = {k: v for k, v in final_metrics.items() if k not in excluded}\n",
    "print_detailed_f1_scores(final_metrics)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Basic bar chart (matplotlib)\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Use a CJK-capable font so the Chinese labels render correctly\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']  # fonts that can display Chinese labels\n",
    "plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts\n",
    "def plot_basic_bar(f1_scores):\n",
    "  \"\"\"Draw a bar chart of per-entity F1 scores.\"\"\"\n",
    "  categories = list(f1_scores.keys())\n",
    "  scores = list(f1_scores.values())\n",
    "  \n",
    "  plt.figure(figsize=(12, 8))\n",
    "  bars = plt.bar(categories, scores, color='skyblue', edgecolor='navy', alpha=0.7)\n",
    "  \n",
    "  # Annotate each bar with its numeric value\n",
    "  for bar, score in zip(bars, scores):\n",
    "      plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.005, \n",
    "              f'{score:.3f}', ha='center', va='bottom', fontsize=10)\n",
    "  \n",
    "  plt.title('中医药命名实体识别 - 各实体类别F1分数', fontsize=16, fontweight='bold', pad=20)\n",
    "  plt.xlabel('实体类别', fontsize=12)\n",
    "  plt.ylabel('F1分数', fontsize=12)\n",
    "  plt.xticks(rotation=45, ha='right')\n",
    "  plt.ylim(0.5, 0.9)  # fixed y-range to emphasize differences; NOTE(review): bars outside [0.5, 0.9] are clipped\n",
    "  plt.grid(axis='y', alpha=0.3, linestyle='--')\n",
    "  plt.tight_layout()\n",
    "  plt.show()\n",
    "\n",
    "plot_basic_bar(f1_scores)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step12 保存模型\n",
    "peft_model.save_pretrained(\"./qwen-ner-model\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step13: load the fine-tuned adapter and run inference\n",
    "from transformers import pipeline\n",
    "# NOTE(review): this applies the saved LoRA adapter on top of the already-loaded\n",
    "# base model; confirm the `pipeline` resolves the PEFT wrapper and device\n",
    "# placement correctly before relying on these predictions.\n",
    "ner_model = PeftModel.from_pretrained(model, \"./qwen-ner-model\")\n",
    "nlp_ner = pipeline(\"ner\", model=ner_model, tokenizer=tokenizer, aggregation_strategy=\"simple\")\n",
    "# Smoke-test inference on one example\n",
    "example_text = \"患者，男，45岁，主诉头痛两天。诊断为偏头痛，给予中医治疗，使用针灸和中药方剂。\"\n",
    "ner_results = nlp_ner(example_text)\n",
    "print(ner_results)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (tuning_env)",
   "language": "python",
   "name": "tuning_env"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18 | packaged by Anaconda, Inc. | (main, Jun  5 2025, 13:08:55) [MSC v.1929 64 bit (AMD64)]"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "5b5cfca19401015264b4741f08a5ab630bb44e69b46d09de08cfb7d2d4656fc3"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
