# -*- coding: utf-8 -*-
import torch
import json
import os
from pathlib import Path
from tqdm import tqdm  # progress-bar display
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig
)
from accelerate import Accelerator
from transformers import Adafactor

# ==============================
# Configuration (tune to available hardware)
# ==============================
CONFIG = {
    "student_model": "Qwen/Qwen2.5-Math-0.5B",  # student checkpoint id (NOTE(review): load_model hard-codes a local path instead — confirm which is intended)
    "teacher_data": "./teacher_data.jsonl",  # JSONL file of {"question", "answer"} samples
    "max_length": 128,  # tokenizer padding/truncation length
    "gradient_accumulation": 8,  # gradient accumulation steps
    "learning_rate": 1e-6,  # Adafactor fixed learning rate
    "num_epochs": 1,  # number of training epochs
    "freeze_ratio": 0.95,  # fraction of decoder layers frozen in load_model
    "quant_4bit": True,  # 4-bit quantization flag (NOTE(review): has no effect — load_model loads float32 on CPU)
    "output_dir": "./distilled_model"  # where the distilled model and tokenizer are saved
}

# ==============================
# 1. Data loading (memory-optimized)
# ==============================
class StreamDataset(Dataset):
    """Line-indexed JSONL dataset that reads one sample from disk on demand.

    Each line of the file is one JSON object (here: a teacher
    question/answer pair). The byte offset of every line is recorded once
    at construction time, so ``__getitem__`` is a single seek + readline
    instead of re-scanning the file from the top (the original was O(n)
    per sample, O(n^2) per epoch under a shuffled DataLoader).
    """

    def __init__(self, file_path):
        self.file_path = Path(file_path)
        # One pass over the file: byte offset of the start of each line.
        self._offsets = self._index_lines()
        # Kept for backward compatibility with the original attribute.
        self.total_samples = len(self._offsets)

    def _index_lines(self):
        """Scan the file once and return the byte offset of every line."""
        offsets = []
        # Binary mode so offsets are raw byte positions (text-mode tell()
        # values are opaque cookies and seek() on them is the only safe use).
        with open(self.file_path, "rb") as f:
            pos = 0
            for line in iter(f.readline, b""):
                offsets.append(pos)
                pos = f.tell()
        return offsets

    def __len__(self):
        return self.total_samples

    def __getitem__(self, idx):
        try:
            offset = self._offsets[idx]
        except IndexError:
            # Preserve the original contract: out-of-range raises IndexError.
            raise IndexError(idx) from None
        with open(self.file_path, "rb") as f:
            f.seek(offset)
            # json.loads accepts bytes and decodes UTF-8 itself.
            return json.loads(f.readline())

# ==============================
# 2. Model initialization (memory-optimized)
# ==============================
def load_model():
    """Load the student model on CPU in float32 and freeze most layers.

    Only the top ``1 - CONFIG["freeze_ratio"]`` fraction of decoder layers
    (plus embeddings and the LM head, which are never frozen here) receives
    gradients, keeping optimizer and gradient memory small.

    Returns:
        The partially frozen ``AutoModelForCausalLM`` instance.
    """
    # Bug fix: the original built a BitsAndBytesConfig from
    # CONFIG["quant_4bit"] but never passed it to from_pretrained (which
    # explicitly sets load_in_4bit=False), so the flag was a silent no-op.
    # The dead construction is removed; if 4-bit loading is ever wanted,
    # pass quantization_config= here (requires a CUDA device —
    # bitsandbytes does not quantize on CPU).

    # NOTE(review): this hard-coded path ignores CONFIG["student_model"]
    # ("Qwen/Qwen2.5-Math-0.5B") and the directory name says 1.5B —
    # confirm which checkpoint is actually intended.
    local_model_path = "./models/Qwen2.5-Math-1.5B"

    model = AutoModelForCausalLM.from_pretrained(
        local_model_path,
        load_in_8bit=False,
        load_in_4bit=False,
        device_map="cpu",
        torch_dtype=torch.float32,
        offload_folder="offload"
    )

    # Freeze the bottom freeze_ratio fraction of transformer layers.
    total_layers = len(model.model.layers)
    freeze_layers = int(total_layers * CONFIG["freeze_ratio"])
    for param in model.model.layers[:freeze_layers].parameters():
        param.requires_grad = False

    return model

# ==============================
# 3. Training loop (with progress display and model saving)
# ==============================
def main():
    """Run the distillation training loop and save the resulting model.

    Streams teacher (question, answer) pairs from disk, fine-tunes the
    partially frozen student on CPU with gradient accumulation, and writes
    the final model and tokenizer to ``CONFIG["output_dir"]``.
    """
    # Bug fix: the original never passed CONFIG["gradient_accumulation"]
    # to Accelerator, so accelerator.accumulate() silently used 1 step.
    accelerator = Accelerator(
        cpu=True,
        gradient_accumulation_steps=CONFIG["gradient_accumulation"],
    )

    # Load components.
    model = load_model()
    tokenizer = AutoTokenizer.from_pretrained("./models/Qwen2.5-Math-1.5B")
    # padding="max_length" below requires a pad token; some causal-LM
    # tokenizers ship without one, so fall back to EOS.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    dataset = StreamDataset(CONFIG["teacher_data"])
    train_loader = DataLoader(dataset, batch_size=1, shuffle=True)

    # Adafactor with a fixed learning rate (relative_step=False), over
    # trainable parameters only (most layers were frozen in load_model).
    optimizer = Adafactor(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=CONFIG["learning_rate"],
        scale_parameter=False,
        relative_step=False
    )

    # Hand everything to accelerate for device placement and accumulation.
    model, optimizer, train_loader = accelerator.prepare(
        model, optimizer, train_loader
    )

    # Training loop with a progress bar.
    model.train()
    for epoch in range(CONFIG["num_epochs"]):
        progress_bar = tqdm(
            train_loader,
            desc=f"Epoch {epoch+1}/{CONFIG['num_epochs']}",
            dynamic_ncols=True
        )

        total_loss = 0
        for step, batch in enumerate(progress_bar):
            with accelerator.accumulate(model):
                # Tokenize prompt and target to a fixed length.
                inputs = tokenizer(
                    batch["question"],
                    max_length=CONFIG["max_length"],
                    padding="max_length",
                    truncation=True,
                    return_tensors="pt"
                )
                labels = tokenizer(
                    batch["answer"],
                    max_length=CONFIG["max_length"],
                    padding="max_length",
                    truncation=True,
                    return_tensors="pt"
                )["input_ids"]
                # Bug fix: mask padded positions with -100 so they are
                # ignored by the model's cross-entropy loss; the original
                # computed loss on pad tokens too.
                labels = labels.masked_fill(
                    labels == tokenizer.pad_token_id, -100
                )
                # NOTE(review): labels are tokenized independently of the
                # question, so targets are not positionally aligned with
                # input_ids — confirm this seq2seq-style objective is
                # really what is intended for a causal LM.

                # Forward pass under bf16 autocast on CPU.
                with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
                    outputs = model(**inputs, labels=labels)
                    loss = outputs.loss

                # Backward; accelerate defers the optimizer step inside
                # accumulation windows and flags sync boundaries for us.
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                optimizer.zero_grad()

                # Progress reporting.
                total_loss += loss.item()
                progress_bar.set_postfix({
                    "loss": f"{loss.item():.4f}",
                    "avg_loss": f"{total_loss/(step+1):.4f}"
                })

            # Demo mode: stop early after ~100 steps.
            if step >= 100:
                break

    # Save the complete artifact (sharded weights + config + tokenizer).
    output_dir = CONFIG["output_dir"]
    os.makedirs(output_dir, exist_ok=True)

    accelerator.wait_for_everyone()
    unwrapped_model = accelerator.unwrap_model(model)
    unwrapped_model.save_pretrained(
        output_dir,
        is_main_process=accelerator.is_main_process,
        save_function=accelerator.save,
        max_shard_size="200MB"
    )

    tokenizer.save_pretrained(output_dir)

    print(f"\n✅ 模型蒸馏完成，已保存至：{output_dir}")

# Entry point: run the full distillation pass when executed as a script.
if __name__ == "__main__":
    main()