{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 下载数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Enable the instance's network proxy (AutoDL \"network_turbo\") by\n",
    "# importing its environment variables into this kernel, then log in\n",
    "# to the Hugging Face Hub so the datasets can be downloaded.\n",
    "import os\n",
    "import subprocess\n",
    "\n",
    "proxy_dump = subprocess.run(\n",
    "    'bash -c \"source /etc/network_turbo && env | grep proxy\"',\n",
    "    shell=True, capture_output=True, text=True,\n",
    ").stdout\n",
    "for proxy_line in proxy_dump.splitlines():\n",
    "    if '=' in proxy_line:\n",
    "        key, _, val = proxy_line.partition('=')\n",
    "        os.environ[key] = val\n",
    "\n",
    "from huggingface_hub import notebook_login\n",
    "\n",
    "notebook_login()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "\n",
    "# Reasoning corpus: medical SFT data whose assistant turns embed\n",
    "# <think>/<response> tags. Non-reasoning corpus: plain medical\n",
    "# instruction dialogues. Both are cached locally under ./data/.\n",
    "ds_reason = load_dataset('Ronndy/medical_o1_sft_Chinese', cache_dir='./data/reason')\n",
    "ds_no_reason = load_dataset('BAAI/IndustryInstruction_Health-Medicine', cache_dir='./data/no_reason')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 处理数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1. 推理数据集处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import random\n",
    "\n",
    "\n",
    "def extract_tagged(text, tag):\n",
    "    \"\"\"Return the content between <tag> and </tag> in text, or '' if\n",
    "    either tag is missing.\n",
    "\n",
    "    Fixes the original extraction, which added len(open_tag) to\n",
    "    find()'s result *before* comparing against -1, so a missing opening\n",
    "    tag was never detected and a garbage slice was returned instead.\n",
    "    \"\"\"\n",
    "    open_tag, close_tag = f'<{tag}>', f'</{tag}>'\n",
    "    start = text.find(open_tag)\n",
    "    end = text.find(close_tag)\n",
    "    if start == -1 or end == -1:\n",
    "        return ''\n",
    "    return text[start + len(open_tag):end].strip()\n",
    "\n",
    "\n",
    "# Randomly sample 4500 training examples (seed fixed for reproducibility).\n",
    "random.seed(42)\n",
    "selected_indices_reason = random.sample(range(len(ds_reason['train'])), 4500)\n",
    "selected_samples_reason = ds_reason['train'].select(selected_indices_reason)\n",
    "\n",
    "# Extract question, chain-of-thought (CoT) and answer from each sample.\n",
    "reason_data = []\n",
    "for sample in selected_samples_reason:\n",
    "    messages = sample['messages']\n",
    "\n",
    "    # The question is the last user message.\n",
    "    question_reason = next(\n",
    "        (msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), '')\n",
    "\n",
    "    # The first assistant message carries the <think> CoT and the\n",
    "    # <response> answer.\n",
    "    cot_reason = ''\n",
    "    answer_reason = ''\n",
    "    for msg in messages:\n",
    "        if msg['role'] == 'assistant':\n",
    "            cot_reason = extract_tagged(msg['content'], 'think')\n",
    "            answer_reason = extract_tagged(msg['content'], 'response')\n",
    "            break\n",
    "\n",
    "    reason_data.append({\n",
    "        'question_reason': question_reason,\n",
    "        'cot_reason': cot_reason,\n",
    "        'answer_reason': answer_reason\n",
    "    })\n",
    "\n",
    "df_reason = pd.DataFrame(reason_data)\n",
    "\n",
    "print(f\"推理数据集样本数: {len(df_reason)}\")\n",
    "print(df_reason.head(3))\n",
    "\n",
    "# UTF-8 with BOM so the CSV opens correctly in Excel on Windows.\n",
    "df_reason.to_csv('medical_reason_data.csv', index=False, encoding='utf-8-sig')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2. 非推理数据集处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset, Dataset\n",
    "from unsloth.chat_templates import standardize_sharegpt\n",
    "import json\n",
    "import random\n",
    "\n",
    "\n",
    "# Draw 1500 non-reasoning dialogues at random (seed fixed for reproducibility).\n",
    "random.seed(42)\n",
    "chosen_no_reason = random.sample(range(len(ds_no_reason['train'])), 1500)\n",
    "subset_no_reason = ds_no_reason['train'].select(chosen_no_reason)\n",
    "\n",
    "# Keep only the conversation turns and wrap them back into a Dataset so\n",
    "# unsloth can normalise the ShareGPT role/field names.\n",
    "records_no_reason = [{\"conversations\": row[\"conversations\"]} for row in subset_no_reason]\n",
    "standardized_no_reason = standardize_sharegpt(Dataset.from_list(records_no_reason))\n",
    "\n",
    "# Dump the normalised dialogues to JSON for the later merge step.\n",
    "standardized_list_no_reason = standardized_no_reason.to_list()\n",
    "output_path_no_reason = \"standardized_no_reason_data.json\"\n",
    "with open(output_path_no_reason, 'w', encoding='utf-8') as fh_no_reason:\n",
    "    json.dump(standardized_list_no_reason, fh_no_reason, ensure_ascii=False, indent=2)\n",
    "\n",
    "print(f\"非推理数据处理完成，已保存到 {output_path_no_reason}\")\n",
    "print(f\"总样本数: {len(standardized_list_no_reason)}\")\n",
    "print(\"示例第一条非推理数据:\")\n",
    "print(json.dumps(standardized_list_no_reason[0], ensure_ascii=False, indent=2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 合并数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import random\n",
    "\n",
    "import pandas as pd\n",
    "\n",
    "# 1. Load the reasoning data. keep_default_na=False makes empty CSV\n",
    "#    cells come back as '' instead of float NaN — a NaN here would\n",
    "#    flow into json.dump below and emit a bare NaN token, producing\n",
    "#    invalid JSON output.\n",
    "df_reason = pd.read_csv('medical_reason_data.csv', keep_default_na=False)\n",
    "reason_data = [{\n",
    "    'question': row['question_reason'],\n",
    "    'cot': row['cot_reason'],\n",
    "    'answer': row['answer_reason'],\n",
    "    'type': 'reason',\n",
    "    'is_multi_turn': False\n",
    "} for _, row in df_reason.iterrows()]\n",
    "\n",
    "# 2. Load the non-reasoning dialogues, tolerating both the ShareGPT\n",
    "#    ('from'/'value') and the OpenAI-style ('role'/'content') formats.\n",
    "with open('standardized_no_reason_data.json', 'r', encoding='utf-8') as f:\n",
    "    no_reason_data = []\n",
    "    for dialog in json.load(f):\n",
    "        if 'conversations' in dialog:\n",
    "            convs = dialog['conversations']\n",
    "            if len(convs) > 0 and isinstance(convs[0], dict):\n",
    "                if 'from' in convs[0]:  # ShareGPT format\n",
    "                    no_reason_data.append({\n",
    "                        'conversations': convs,\n",
    "                        'type': 'no_reason',\n",
    "                        'is_multi_turn': True\n",
    "                    })\n",
    "                elif 'role' in convs[0]:  # OpenAI-style format\n",
    "                    no_reason_data.append({\n",
    "                        'conversations': [{'from': msg['role'], 'value': msg['content']} for msg in convs],\n",
    "                        'type': 'no_reason',\n",
    "                        'is_multi_turn': True\n",
    "                    })\n",
    "\n",
    "# 3. Merge and shuffle (order depends on the module-level RNG state\n",
    "#    left by the earlier sampling cells).\n",
    "combined = reason_data + no_reason_data\n",
    "random.shuffle(combined)\n",
    "\n",
    "# 4. Flatten multi-turn dialogues into per-turn records; turns of the\n",
    "#    same dialogue share a dialog_id so they can be regrouped later.\n",
    "final_data = []\n",
    "dialog_id = 0\n",
    "\n",
    "for item in combined:\n",
    "    if not item['is_multi_turn']:\n",
    "        final_data.append({\n",
    "            'question': item['question'],\n",
    "            'cot': item['cot'],\n",
    "            'answer': item['answer'],\n",
    "            'type': item['type'],\n",
    "            'dialog_id': None\n",
    "        })\n",
    "    else:\n",
    "        conversations = item['conversations']\n",
    "        human_msgs = []\n",
    "        gpt_msgs = []\n",
    "\n",
    "        for msg in conversations:\n",
    "            # Tolerate both field naming schemes.\n",
    "            speaker = msg.get('from') or msg.get('role')\n",
    "            content = msg.get('value') or msg.get('content')\n",
    "\n",
    "            if speaker and content:\n",
    "                if speaker.lower() in ['human', 'user']:\n",
    "                    human_msgs.append(content.replace('问：', '').strip())\n",
    "                elif speaker.lower() in ['gpt', 'assistant']:\n",
    "                    gpt_msgs.append(content.replace('答：', '').strip())\n",
    "\n",
    "        # Pair questions with answers positionally; drop unmatched tails.\n",
    "        min_len = min(len(human_msgs), len(gpt_msgs))\n",
    "        for i in range(min_len):\n",
    "            final_data.append({\n",
    "                'question': human_msgs[i],\n",
    "                'cot': None,\n",
    "                'answer': gpt_msgs[i],\n",
    "                'type': item['type'],\n",
    "                'dialog_id': dialog_id\n",
    "            })\n",
    "        dialog_id += 1\n",
    "\n",
    "# 5. Save the merged training set.\n",
    "output_path = 'combined_medical_data.json'\n",
    "with open(output_path, 'w', encoding='utf-8') as f:\n",
    "    json.dump(final_data, f, ensure_ascii=False, indent=2)\n",
    "\n",
    "print(f\"数据处理完成，已保存到 {output_path}\")\n",
    "print(f\"总数据量: {len(final_data)}\")\n",
    "print(f\"其中推理数据: {len(reason_data)}\")\n",
    "print(f\"非推理对话组数: {dialog_id}\")\n",
    "print(\"\\n示例数据:\")\n",
    "print(json.dumps(final_data[:3], ensure_ascii=False, indent=2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 测试集处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import random\n",
    "\n",
    "# Fixed seed so the test-split sampling is reproducible.\n",
    "random.seed(42)\n",
    "\n",
    "\n",
    "def extract_section(text, tag):\n",
    "    \"\"\"Return the content between <tag> and </tag>, or '' if either\n",
    "    tag is missing.\n",
    "\n",
    "    Fixes the original extraction, which added the tag length to\n",
    "    find()'s result before comparing against -1, so a missing opening\n",
    "    tag was never detected and a garbage slice was returned.\n",
    "    \"\"\"\n",
    "    open_tag, close_tag = f'<{tag}>', f'</{tag}>'\n",
    "    start = text.find(open_tag)\n",
    "    end = text.find(close_tag)\n",
    "    if start == -1 or end == -1:\n",
    "        return ''\n",
    "    return text[start + len(open_tag):end].strip()\n",
    "\n",
    "\n",
    "def process_reason_test(ds_reason_test, sample_size=450):\n",
    "    \"\"\"Sample the reasoning test split and flatten it into\n",
    "    question/cot/answer records.\"\"\"\n",
    "    total_samples = len(ds_reason_test)\n",
    "    selected_indices = random.sample(range(total_samples), min(sample_size, total_samples))\n",
    "    selected_samples = [ds_reason_test[i] for i in selected_indices]\n",
    "\n",
    "    reason_test_data = []\n",
    "    for sample in selected_samples:\n",
    "        messages = sample['messages']\n",
    "\n",
    "        # Last user message is the question.\n",
    "        question = next((msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), \"\")\n",
    "\n",
    "        # First assistant message carries the <think> CoT and the\n",
    "        # <response> answer.\n",
    "        assistant_msg = next((msg['content'] for msg in messages if msg['role'] == 'assistant'), \"\")\n",
    "        cot = extract_section(assistant_msg, 'think')\n",
    "        answer = extract_section(assistant_msg, 'response')\n",
    "\n",
    "        reason_test_data.append({\n",
    "            'question': question,\n",
    "            'cot': cot,\n",
    "            'answer': answer,\n",
    "            'type': 'reason',\n",
    "            'dialog_id': None\n",
    "        })\n",
    "    return reason_test_data\n",
    "\n",
    "\n",
    "def process_no_reason_test(ds_no_reason_test, sample_size=50):\n",
    "    \"\"\"Sample the non-reasoning test split and flatten each dialogue\n",
    "    into positional question/answer pairs.\"\"\"\n",
    "    total_samples = len(ds_no_reason_test)\n",
    "    selected_indices = random.sample(range(total_samples), min(sample_size, total_samples))\n",
    "    selected_samples = [ds_no_reason_test[i] for i in selected_indices]\n",
    "\n",
    "    no_reason_test_data = []\n",
    "    dialog_id = 0\n",
    "\n",
    "    for sample in selected_samples:\n",
    "        conversations = sample['conversations']\n",
    "        human_msgs = []\n",
    "        gpt_msgs = []\n",
    "\n",
    "        for msg in conversations:\n",
    "            # Tolerate both ShareGPT ('from'/'value') and OpenAI-style\n",
    "            # ('role'/'content') field names.\n",
    "            speaker = msg.get('from') or msg.get('role')\n",
    "            content = msg.get('value') or msg.get('content')\n",
    "\n",
    "            if speaker and content:\n",
    "                if speaker.lower() in ['human', 'user']:\n",
    "                    human_msgs.append(content.replace('问：', '').strip())\n",
    "                elif speaker.lower() in ['gpt', 'assistant']:\n",
    "                    gpt_msgs.append(content.replace('答：', '').strip())\n",
    "\n",
    "        # Pair questions with answers positionally; drop unmatched tails.\n",
    "        min_len = min(len(human_msgs), len(gpt_msgs))\n",
    "        for i in range(min_len):\n",
    "            no_reason_test_data.append({\n",
    "                'question': human_msgs[i],\n",
    "                'cot': None,\n",
    "                'answer': gpt_msgs[i],\n",
    "                'type': 'no_reason',\n",
    "                'dialog_id': f\"test_{dialog_id}\"\n",
    "            })\n",
    "        dialog_id += 1\n",
    "\n",
    "    return no_reason_test_data\n",
    "\n",
    "\n",
    "# Build the test set: 450 reasoning samples + 50 non-reasoning dialogues.\n",
    "reason_test_processed = process_reason_test(ds_reason['test'], sample_size=450)\n",
    "no_reason_test_processed = process_no_reason_test(ds_no_reason['test'], sample_size=50)\n",
    "\n",
    "# Merge and shuffle the test records.\n",
    "combined_test = reason_test_processed + no_reason_test_processed\n",
    "random.shuffle(combined_test)\n",
    "\n",
    "# Save the test set.\n",
    "test_output_path = 'combined_medical_test.json'\n",
    "with open(test_output_path, 'w', encoding='utf-8') as f:\n",
    "    json.dump(combined_test, f, ensure_ascii=False, indent=2)\n",
    "\n",
    "print(f\"测试集处理完成，已保存到 {test_output_path}\")\n",
    "print(f\"测试集总量: {len(combined_test)}\")\n",
    "print(f\"其中推理数据: {len(reason_test_processed)} (抽样450条)\")\n",
    "print(f\"非推理数据: {len(no_reason_test_processed)} (从50组对话拆分得到)\")\n",
    "print(\"\\n测试集示例数据:\")\n",
    "print(json.dumps(combined_test[0], ensure_ascii=False, indent=2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 微调"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "deepspeed --include 'localhost:0,1,2' train.py"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
