{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "87a1ebc0",
   "metadata": {},
   "source": [
    "# LoRA"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7f768a4",
   "metadata": {},
   "outputs": [],
   "source": [
    "from unsloth import FastLanguageModel\n",
    "import torch\n",
    "\n",
    "\n",
    "max_seq_length = 2048\n",
    "\n",
    "\n",
    "lora_rank =  32 \n",
    "\n",
    "\n",
    "model, tokenizer = FastLanguageModel.from_pretrained(\n",
    "    model_name=\"/mnt/modelscope/hub/models/Qwen/Qwen3-8B\",\n",
    "    max_seq_length=max_seq_length,\n",
    "    load_in_4bit=False,\n",
    "    fast_inference=False,\n",
    "    max_lora_rank=lora_rank,\n",
    "    gpu_memory_utilization=0.7,\n",
    ")\n",
    "\n",
    "\n",
    "model = FastLanguageModel.get_peft_model(\n",
    "    model,\n",
    "    r=lora_rank,\n",
    "    target_modules=[\n",
    "        \"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",  \n",
    "        \"gate_proj\", \"up_proj\", \"down_proj\",     \n",
    "    ],\n",
    "    lora_alpha=lora_rank * 2,\n",
    "    use_gradient_checkpointing=\"unsloth\",\n",
    "    random_state=3407,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0bed3f3a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 优化后的特殊标记定义 - 使用中文增强模型理解\n",
    "reasoning_start = \"<思考开始>\"\n",
    "reasoning_end   = \"<思考结束>\"\n",
    "solution_start  = \"<答案开始>\"\n",
    "solution_end    = \"<答案结束>\"\n",
    "\n",
    "system_prompt = f\"\"\"\n",
    "你是专业的人物经历分析师，请严格按以下步骤处理用户问题：\n",
    "1. 分析问题核心要素（人物、时间、事件类型）\n",
    "2. 进行推理时，将完整思考过程放在{reasoning_start}和{reasoning_end}之间\n",
    "3. 最后将简洁答案放在{solution_start}和{solution_end}之间\n",
    "\n",
    "特别注意：\n",
    "- 思考过程需包含：问题拆解、关键信息提取、逻辑推理步骤\n",
    "- 确保答案只包含事实信息，不添加主观评论\n",
    "- 思考过程不要重复相同的内容\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "system_prompt\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6f9486a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建一个自定义的聊天模板（Chat Template）。\n",
    "\n",
    "chat_template = \\\n",
    "    \"{% if messages[0]['role'] == 'system' %}\"\\\n",
    "        \"{{ messages[0]['content'] + eos_token }}\"\\\n",
    "        \"{% set loop_messages = messages[1:] %}\"\\\n",
    "    \"{% else %}\"\\\n",
    "        \"{{ '{system_prompt}' + eos_token }}\"\\\n",
    "        \"{% set loop_messages = messages %}\"\\\n",
    "    \"{% endif %}\"\\\n",
    "    \"{% for message in loop_messages %}\"\\\n",
    "        \"{% if message['role'] == 'user' %}\"\\\n",
    "            \"{{ message['content'] }}\"\\\n",
    "        \"{% elif message['role'] == 'assistant' %}\"\\\n",
    "            \"{{ message['content'] + eos_token }}\"\\\n",
    "        \"{% endif %}\"\\\n",
    "    \"{% endfor %}\"\\\n",
    "    \"{% if add_generation_prompt %}{{ '{reasoning_start}' }}\"\\\n",
    "    \"{% endif %}\"\n",
    "\n",
    "\n",
    "chat_template = chat_template\\\n",
    "    .replace(\"'{system_prompt}'\",   f\"'{system_prompt}'\")\\\n",
    "    .replace(\"'{reasoning_start}'\", f\"'{reasoning_start}'\")\n",
    "\n",
    "tokenizer.chat_template = chat_template"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3819ca29",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import json\n",
    "\n",
    "\n",
    "with open('/home/raii/GPRO/thinking_sft_lora_data.json', 'r', encoding='utf-8') as file:\n",
    "    dataset = json.load(file)  \n",
    "\n",
    "dataset = pd.DataFrame(dataset)  \n",
    "\n",
    "required_columns = [\"instruction\", \"input\", \"output\"]\n",
    "available_columns = [col for col in required_columns if col in dataset.columns]\n",
    "dataset= dataset[available_columns]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9fd30237",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义一个函数，用于将数据集中的每一行格式化为我们需要的对话格式。\n",
    "def format_dataset(x):\n",
    "    expected_answer = x[\"output\"]\n",
    "    problem = x[\"input\"] \n",
    "\n",
    "    return [\n",
    "        {\"role\" : \"system\",    \"content\" : system_prompt},#系统设定\n",
    "        {\"role\" : \"user\",      \"content\" : problem},#提问\n",
    "        {\"role\" : \"assistant\", \"content\" : expected_answer},#系统回答\n",
    "    ]\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "if hasattr(dataset, \"to_pandas\"): \n",
    "    dataset = dataset.to_pandas()\n",
    "elif isinstance(dataset, list):    \n",
    "    dataset = pd.DataFrame(dataset)\n",
    "\n",
    "dataset[\"Messages\"] = dataset.apply(format_dataset, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "14e7d243",
   "metadata": {},
   "outputs": [],
   "source": [
    "dataset[\"N\"] = dataset[\"Messages\"].apply(lambda x: len(tokenizer.apply_chat_template(x)))\n",
    "dataset = dataset.loc[dataset[\"N\"] <= max_seq_length/2].copy()\n",
    "\n",
    "dataset.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "05b52bce",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import Dataset\n",
    "\n",
    "dataset[\"text\"] = tokenizer.apply_chat_template(dataset[\"Messages\"].values.tolist(), tokenize = False)\n",
    "dataset.reset_index(drop=True, inplace=True)\n",
    "dataset = Dataset.from_pandas(dataset)\n",
    "\n",
    "dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9c023bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 配置 SwanLab：API Key 从环境变量读取，避免在代码中硬编码泄露\n",
    "import os\n",
    "import swanlab\n",
    "swanlab.login(api_key=os.environ[\"SWANLAB_API_KEY\"], save=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "986a6778",
   "metadata": {},
   "outputs": [],
   "source": [
    "from trl import SFTTrainer, SFTConfig\n",
    "\n",
    "# 实例化SFTTrainer，用于进行有监督微调。\n",
    "trainer = SFTTrainer(\n",
    "    model = model, \n",
    "    tokenizer = tokenizer, \n",
    "    train_dataset = dataset, \n",
    "    args = SFTConfig( \n",
    "        dataset_text_field = \"text\",\n",
    "        per_device_train_batch_size = 1,\n",
    "        gradient_accumulation_steps = 1,\n",
    "        warmup_steps = 5,\n",
    "        num_train_epochs = 5,\n",
    "        learning_rate = 2e-4,\n",
    "        logging_steps = 5,\n",
    "        optim = \"adamw_8bit\",\n",
    "        weight_decay = 0.01,\n",
    "        lr_scheduler_type = \"linear\",\n",
    "        seed = 3407,\n",
    "        report_to = \"swanlab\",\n",
    "    ),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ce6f169",
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7ed38bbd",
   "metadata": {},
   "outputs": [],
   "source": [
    "def extract_all_answers(text):\n",
    "    start_marker = \"<答案开始>\"\n",
    "    end_marker = \"<答案结束>\"\n",
    "    answers = []\n",
    "    start_index = 0\n",
    "    \n",
    "    while True:\n",
    "        # 查找开始标记\n",
    "        start_index = text.find(start_marker, start_index)\n",
    "        if start_index == -1:\n",
    "            break\n",
    "        \n",
    "        start_index += len(start_marker)\n",
    "        # 查找结束标记\n",
    "        end_index = text.find(end_marker, start_index)\n",
    "        if end_index == -1:\n",
    "            break\n",
    "        \n",
    "        # 提取答案\n",
    "        answer = text[start_index:end_index].strip()\n",
    "        answers.append(answer)\n",
    "        \n",
    "        # 移动到结束标记之后继续搜索\n",
    "        start_index = end_index + len(end_marker)\n",
    "    \n",
    "    return answers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bdf55d4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# SFT训练后进行推理测试\n",
    "\n",
    "text = tokenizer.apply_chat_template(\n",
    "    dataset[12][\"Messages\"][:2],\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # 必须添加此项以触发模型的生成\n",
    ")\n",
    "dataset[12][\"Messages\"][:2]\n",
    "\n",
    "from transformers import TextStreamer\n",
    "import re\n",
    "# 使用model.generate进行推理\n",
    "answer = model.generate(\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    temperature = 0.5,\n",
    "    max_new_tokens = 4096,\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = False),\n",
    ")\n",
    "\n",
    "text = tokenizer.decode(answer[0])\n",
    "# 使用示例\n",
    "all_answers = extract_all_answers(text)\n",
    "\n",
    "for i, ans in enumerate(all_answers, 1):\n",
    "    if(i != 1):\n",
    "        print(f\"答案: {ans}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a2ad561",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "del dataset\n",
    "torch.cuda.empty_cache()\n",
    "import gc\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f4d9aaf7",
   "metadata": {},
   "source": [
    "# GRPO"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "842eacb9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import json\n",
    "\n",
    "# 加载JSON文件\n",
    "with open('/home/raii/GPRO/thinking_sft_lora_data.json', 'r', encoding='utf-8') as file:\n",
    "    dataset = json.load(file) \n",
    "\n",
    "dataset = pd.DataFrame(dataset) \n",
    "\n",
    "required_columns = [\"instruction\", \"input\", \"output\",\"answer\"]\n",
    "available_columns = [col for col in required_columns if col in dataset.columns]\n",
    "dataset= dataset[available_columns]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c8ef54cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def format_dataset(x):\n",
    "    return [\n",
    "        {\"role\" : \"system\",    \"content\" : system_prompt},#系统设定\n",
    "        {\"role\" : \"user\",      \"content\" : x[\"input\"]},#提问\n",
    "    ]\n",
    "dataset[\"prompt\"] = dataset.apply(format_dataset, axis=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "111e4305",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "tokenized = dataset[\"prompt\"].apply(\n",
    "    lambda x: tokenizer.apply_chat_template(x, add_generation_prompt=True, tokenize=True)\n",
    ")\n",
    "lengths = [len(tokens) for tokens in tokenized]\n",
    "maximum_length = max(lengths)\n",
    "print(\"Max Length = \", maximum_length)\n",
    "del tokenized"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20ab833b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置GRPO训练的参数\n",
    "\n",
    "# prompt的最大长度，留一个token的余量\n",
    "max_prompt_length = maximum_length + 1\n",
    "# completion（模型生成部分）的最大长度\n",
    "max_completion_length = max_seq_length - max_prompt_length\n",
    "\n",
    "from vllm import SamplingParams\n",
    "# 配置vLLM的采样参数，这些参数在GRPO训练中用于从模型生成多个不同的回答。\n",
    "vllm_sampling_params = SamplingParams(\n",
    "    min_p = 0.1, # 最小概率采样（Min-P），忽略概率低于此值的token\n",
    "    top_p = 1.0, # Top-P（核）采样，从累积概率超过p的最小token集合中采样\n",
    "    top_k = -1,  # Top-K采样，-1表示不启用\n",
    "    seed = 3407, # 采样种子，保证可复现性\n",
    "    stop = [tokenizer.eos_token], # 遇到EOS令牌时停止生成\n",
    "    include_stop_str_in_output = True, # 在输出中包含停止符\n",
    ")\n",
    "\n",
    "from trl import GRPOConfig, GRPOTrainer\n",
    "# 配置GRPO训练参数\n",
    "training_args = GRPOConfig(\n",
    "    vllm_sampling_params = vllm_sampling_params, # 传入vLLM采样参数\n",
    "    temperature = 1.0, # 生成时的温度，值越高，随机性越强\n",
    "    learning_rate = 5e-6, # GRPO的学习率通常比SFT小\n",
    "    weight_decay = 0.01,\n",
    "    warmup_ratio = 0.1, # 预热步数占总步数的比例\n",
    "    lr_scheduler_type = \"linear\",\n",
    "    optim = \"adamw_8bit\",\n",
    "    logging_steps = 1, # 每一步都记录日志，便于观察\n",
    "    per_device_train_batch_size = 2, # GRPO中，这个值通常会被num_generations覆盖\n",
    "    gradient_accumulation_steps = 1, # 梯度累积，可以增加到4以获得更平滑的训练\n",
    "    num_generations = 4, # 每个prompt生成4个不同的回答进行评估，如果显存不足可以减小此值\n",
    "    max_prompt_length = max_prompt_length, # prompt最大长度\n",
    "    max_completion_length = max_completion_length, # 生成内容最大长度\n",
    "    # num_train_epochs = 1, # 训练总轮数\n",
    "    max_steps = 20, # 无论数据量多少，训练在20次参数更新后结束。\n",
    "    save_steps = 100, # 每100步保存一次模型\n",
    "    report_to = \"swanlab\", # 日志报告平台\n",
    "    output_dir = \"outputs\", # 模型和日志输出目录\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e25f5f5b",
   "metadata": {},
   "outputs": [],
   "source": [
    "#token_F1奖励函数\n",
    "def f1_token_reward(completions, answer,**kwargs):\n",
    "    scores = []\n",
    "    responses = [completion[0][\"content\"] for completion in completions]\n",
    "    extracted_responses = []\n",
    "    # 定义关键标记\n",
    "    start_marker = \"<答案开始>\"\n",
    "    end_marker = \"<答案结束>\"\n",
    "    \n",
    "    for response in responses:\n",
    "        # 从回复中提取答案文本\n",
    "        start_idx = response.find(start_marker)\n",
    "        if start_idx == -1:\n",
    "            extracted_responses.append(\"\")\n",
    "        else:\n",
    "            start_idx += len(start_marker)\n",
    "            end_idx = response.find(end_marker, start_idx)\n",
    "            extracted_responses.append(response[start_idx:end_idx] if end_idx != -1 else response[start_idx:])  \n",
    "        \n",
    "        # 计算token级F1分数\n",
    "    for extracted_answer, ground_truth in zip(extracted_responses, answer):\n",
    "        f1 = calculate_token_f1(ground_truth, extracted_answer)\n",
    "        s = match_scores(f1)\n",
    "        scores.append(s)\n",
    "    \n",
    "    return scores\n",
    "scores = []\n",
    "def match_scores(f1):\n",
    "    score = 0\n",
    "\n",
    "    if f1 >= 0.8:   # 高相似度（对应原函数±10%数值匹配）\n",
    "        score += 2.0\n",
    "    elif f1 >= 0.7:   # 中等相似度（对应原函数±20%数值匹配）\n",
    "        score += 1.5\n",
    "    elif f1 >= 0.6:   # 基础匹配\n",
    "        score += 0.5\n",
    "    elif f1 >= 0.5:   # 弱惩罚\n",
    "        score -= 1.0\n",
    "    elif f1 >= 0.4:   # 中等惩罚\n",
    "        score -= 3.0\n",
    "    else:             # 严重不匹配（对应原函数转换失败惩罚）\n",
    "        score -= 5.0\n",
    "    return score\n",
    "    \n",
    "def calculate_token_f1(true_answer, pred_answer):\n",
    "    \"\"\"\n",
    "    计算两个文本的token级F1分数\n",
    "    \"\"\"\n",
    "    # 注意：此 lambda 实际未被使用，真正的分词由下方的 tokenizer 完成\n",
    "    tokenize = lambda s: [t.strip().lower() for t in s.split() if t.strip()]\n",
    "    \n",
    "    # 获取token列表\n",
    "    true_tokens = tokenizer(true_answer)[\"input_ids\"]\n",
    "    pred_tokens = tokenizer(pred_answer)[\"input_ids\"]\n",
    "    \n",
    "    # 处理空答案情况\n",
    "    if len(true_tokens) == 0 or len(pred_tokens) == 0:\n",
    "        return 1.0 if (len(true_tokens) == 0 and len(pred_tokens) == 0) else 0.0\n",
    "    \n",
    "    # 构建词频统计\n",
    "    true_counts = {}\n",
    "    pred_counts = {}\n",
    "    \n",
    "    for token in true_tokens:\n",
    "        true_counts[token] = true_counts.get(token, 0) + 1\n",
    "    \n",
    "    for token in pred_tokens:\n",
    "        pred_counts[token] = pred_counts.get(token, 0) + 1\n",
    "    \n",
    "    # 计算TP（True Positive）\n",
    "    tp = 0\n",
    "    for token, count in true_counts.items():\n",
    "        tp += min(count, pred_counts.get(token, 0))\n",
    "    \n",
    "    # 计算FP和FN\n",
    "    fp = len(pred_tokens) - tp\n",
    "    fn = len(true_tokens) - tp\n",
    "    \n",
    "    # 计算Precision和Recall\n",
    "    precision = tp / (tp + fp) if (tp + fp) > 0 else 0\n",
    "    recall = tp / (tp + fn) if (tp + fn) > 0 else 0\n",
    "    \n",
    "    # 计算F1\n",
    "    if precision + recall == 0:\n",
    "        return 0.0\n",
    "    return 2 * (precision * recall) / (precision + recall)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e1cfe544",
   "metadata": {},
   "outputs": [],
   "source": [
    "def match_format_reward(completions, **kwargs):\n",
    "    scores = []\n",
    "    for completion in completions:\n",
    "        score = 0\n",
    "        response = completion[0][\"content\"]\n",
    "        score += 0.5 if response.count(reasoning_start)  == 1 else -1.0\n",
    "        score += 0.5 if response.count(reasoning_end)  == 1 else -1.0\n",
    "        score += 0.5 if response.count(solution_start)  == 1 else -1.0\n",
    "        score += 0.5 if response.count(solution_end)    == 1 else -1.0\n",
    "        scores.append(score)\n",
    "    return scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00773993",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import Dataset\n",
    "import pandas as pd\n",
    "\n",
    "# 转换为 datasets.Dataset\n",
    "dataset = Dataset.from_pandas(dataset)\n",
    "\n",
    "print(type(dataset))  # <class 'datasets.arrow_dataset.Dataset'>\n",
    "print(dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6fa91e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 实例化GRPOTrainer\n",
    "trainer = GRPOTrainer(\n",
    "    model = model, \n",
    "    processing_class = tokenizer, \n",
    "    reward_funcs = [ # 传入我们定义的所有奖励函数\n",
    "        match_format_reward,\n",
    "        f1_token_reward,\n",
    "    ],\n",
    "    args = training_args, \n",
    "    train_dataset = dataset, \n",
    ")\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67f0ba1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "648b29c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# text = [{'content': '\\n你是专业的人物经历分析师，请严格按以下步骤处理用户问题：\\n1. 分析问题核心要素（人物、时间、事件类型）\\n2. 进行推理时，将完整思考过程放在<思考开始>和<思考结束>之间\\n3. 最后将简洁答案放在<答案开始>和<答案结束>之间\\n\\n特别注意：\\n- 思考过程需包含：问题拆解、关键信息提取、逻辑推理步骤\\n- 确保答案只包含事实信息，不添加主观评论\\n- 思考过程不要重复相同的内容\\n',\n",
    "#   'role': 'system'},\n",
    "#  {'content': '于小戈在创业过程中推出了哪些应用和小程序？请分别说出名称和上线时间。', 'role': 'user'}]\n",
    "text =  [{'content': '于小戈是谁', 'role': 'user'}]\n",
    "#于小戈的互联网产品有哪些？请说明它们的名称和上线时间。\n",
    "text = tokenizer.apply_chat_template(\n",
    "    text,\n",
    "    tokenize = False,\n",
    "    add_generation_prompt = True, # 必须添加此项以触发模型的生成\n",
    ")\n",
    "from transformers import TextStreamer\n",
    "import re\n",
    "# 使用model.generate进行推理\n",
    "answer = model.generate(\n",
    "    # 将文本输入转换为PyTorch张量，并移动到CUDA设备上。\n",
    "    **tokenizer(text, return_tensors = \"pt\").to(\"cuda\"),\n",
    "    # temperature=0 表示使用贪心解码，选择概率最高的token，生成结果更具确定性。\n",
    "    temperature = 0.5,\n",
    "    # 设置生成的最大新token数量。\n",
    "    max_new_tokens = 4096,\n",
    "    #使用TextStreamer可以流式输出结果\n",
    "    streamer = TextStreamer(tokenizer, skip_prompt = False),\n",
    ")\n",
    "\n",
    "text = tokenizer.decode(answer[0])\n",
    "# 使用示例\n",
    "all_answers = extract_all_answers(text)\n",
    "\n",
    "for i, ans in enumerate(all_answers, 1):\n",
    "    if(i != 1):\n",
    "        print(f\"答案: {ans}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "G",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
