import subprocess
import os

# Best-effort proxy bootstrap: source /etc/network_turbo in a subshell and
# copy every exported *proxy* variable into this process's environment.
# If the file is missing, the command produces no output and nothing happens.
result = subprocess.run(
    'bash -c "source /etc/network_turbo && env | grep proxy"',
    shell=True,
    capture_output=True,
    text=True,
)
for line in result.stdout.splitlines():
    name, sep, value = line.partition('=')
    if sep:  # skip any line that does not look like NAME=VALUE
        os.environ[name] = value

from unsloth import FastLanguageModel,is_bfloat16_supported
from datasets import load_dataset
from trl import SFTTrainer
from transformers import TrainingArguments
import torch

# Load the base model in 4-bit, with bf16 compute when the GPU supports it
# (falls back to fp16 otherwise).
max_seq_length = 512
compute_dtype = torch.bfloat16 if is_bfloat16_supported() else torch.float16
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="/root/lanyun-tmp/models/DeepSeek-R1-Distill-Qwen-1.5B",
    dtype=compute_dtype,
    load_in_4bit=True,
    max_seq_length=max_seq_length,
)

# SFT prompt template. The three "{}" slots are filled positionally with
# (question, chain-of-thought, final answer) by formatting_prompts_func.
# NOTE: this Chinese text is part of the training data itself — do not
# translate or reformat it.
prompt_style = """以下是描述任务的说明，并附有提供进一步背景的输入。
请写一个恰当的回应，完成该任务。
在回答之前，请仔细思考问题，并创建一个逐步的思维链，以确保逻辑清晰且准确。

### 任务描述：
您是一位在临床推理、诊断和治疗计划方面具有高级知识的医学专家。
请回答以下医学问题。

### 问题：
{}

### 回答：
<think>
{}
</think>
{}"""

# Every sample must end with EOS so the fine-tuned model learns to stop.
EOS_TOKEN = tokenizer.eos_token


def formatting_prompts_func(examples):
    """Render a batch of dataset rows into the SFT prompt template.

    Args:
        examples: batched columns with keys "Question", "Complex_CoT" and
            "Response" (parallel lists of equal length).

    Returns:
        dict with one "text" column: the filled-in ``prompt_style`` per row,
        terminated with the tokenizer's EOS token.
    """
    questions = examples["Question"]
    cots = examples["Complex_CoT"]
    responses = examples["Response"]
    # Loop variables renamed: the original shadowed the builtins
    # `input` and `output`.
    texts = [
        prompt_style.format(question, cot, response) + EOS_TOKEN
        for question, cot, response in zip(questions, cots, responses)
    ]
    return {
        "text": texts,
    }

# Take the first 2000 Chinese-config samples and render each into the
# "text" column expected by the trainer.
dataset = load_dataset(
    "/root/lanyun-tmp/datasets/medical-o1-reasoning-SFT",
    "zh",
    split="train[0:2000]",
).map(formatting_prompts_func, batched=True)

# Attach LoRA adapters to every attention and MLP projection.
lora_target_modules = [
    "q_proj",
    "k_proj",
    "v_proj",
    "o_proj",
    "gate_proj",
    "up_proj",
    "down_proj",
]
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    lora_alpha=32,
    lora_dropout=0,
    bias="none",
    target_modules=lora_target_modules,
    use_gradient_checkpointing="unsloth",  # True or "unsloth" (dynamic checkpointing)
    random_state=3407,       # fixed seed for reproducible adapter init
    use_rslora=False,        # rank-stabilized LoRA disabled
    loftq_config=None,       # no LoftQ quantization config
)

# Training hyper-parameters. Checkpoints are written once per epoch
# (save_strategy="epoch"); the original `save_steps=100` was dead config —
# it only applies under save_strategy="steps" — so it has been removed.
train_args = TrainingArguments(
    output_dir="/root/lanyun-tmp/models/output_2",
    overwrite_output_dir=True,
    num_train_epochs=40,
    per_device_train_batch_size=10,
    save_total_limit=20,          # keep at most the 20 newest checkpoints
    prediction_loss_only=True,
    save_strategy="epoch",
    # Mixed precision: prefer bf16 where the GPU supports it, else fp16.
    fp16=not torch.cuda.is_bf16_supported(),
    bf16=torch.cuda.is_bf16_supported(),
    gradient_accumulation_steps=16,  # effective batch size = 10 * 16 = 160
    seed=3407,
)

# Build the SFT trainer over the formatted "text" column.
# NOTE(review): passing dataset_text_field / max_seq_length directly to
# SFTTrainer matches older trl releases; newer trl expects them inside an
# SFTConfig — confirm against the installed trl version.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
    dataset_text_field="text",
    args=train_args,
    max_seq_length=max_seq_length,
    dataset_num_proc=8  # parallel workers for dataset tokenization
)
print("开始训练微调模型")
# train() returns a TrainOutput with loss/step metrics; it is captured but
# not used afterwards.
trainer_state = trainer.train()
print("模型训练完成")
# Merge the LoRA adapters back into the base weights and save as 16-bit.
model.save_pretrained_merged("/root/lanyun-tmp/models/new_model_2",tokenizer,save_method='merged_16bit')
print("模型保存完成")
