import asyncio
import uuid
import time
from fastapi import FastAPI, BackgroundTasks, HTTPException, status
from pydantic import BaseModel
from typing import List, Dict, Optional
import aiojobs
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from datasets import Dataset
import json
import os
from datetime import datetime
import logging

# Logging configuration: write to both a rotating-less file and stdout.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("finetune_service.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("finetune_service")

app = FastAPI(title="大模型微调服务", description="支持高并发的模型微调HTTP接口")

# In-memory task registry keyed by task_id.
# NOTE(review): this dict lives in a single process; running uvicorn with
# multiple workers gives every worker its own independent copy — confirm the
# deployment uses one worker, or move this to a shared store (Redis/DB).
task_store: Dict[str, Dict] = {}
# Directory where submitted fine-tuning datasets are persisted as JSON.
DATA_DIR = "finetune_data"
os.makedirs(DATA_DIR, exist_ok=True)

# Task lifecycle states (plain string constants, stored verbatim in task_store).
class TaskStatus:
    """String constants describing the lifecycle of a fine-tuning task."""
    PENDING = "pending"        # accepted, waiting for the scheduler to pick it up
    PROCESSING = "processing"  # training is in progress
    COMPLETED = "completed"    # finished successfully; result recorded
    FAILED = "failed"          # aborted with an error; error message recorded

# Request payload for POST /finetune.
class FinetuneRequest(BaseModel):
    """Parameters and training data for one fine-tuning job."""
    model_name: str  # model id or local path passed to from_pretrained
    data: List[Dict[str, str]]  # training samples, e.g. [{"prompt": "...", "response": "..."}]
    epochs: int = 3
    batch_size: int = 2
    learning_rate: float = 2e-5
    output_dir: str = "finetuned_models"  # base dir; the task id is appended per job
    metadata: Optional[Dict] = None  # caller-supplied tags, stored verbatim with the task

class TaskStatusRequest(BaseModel):
    """Body model for a status lookup.

    NOTE(review): not referenced by any endpoint visible in this file — the
    status route takes task_id as a path parameter instead. Confirm whether
    this is dead code before removing.
    """
    task_id: str

# Initialize the job scheduler with a concurrency cap.
@app.on_event("startup")
async def startup_event():
    # limit=5 bounds how many fine-tuning jobs run concurrently.
    # NOTE(review): aiojobs.create_scheduler was removed in aiojobs 1.0
    # (replaced by aiojobs.Scheduler(limit=...)), and FastAPI deprecates
    # @app.on_event in favor of lifespan handlers — confirm pinned versions
    # before upgrading either dependency.
    app.state.scheduler = await aiojobs.create_scheduler(limit=5)  # cap concurrent fine-tuning jobs
    logger.info("微调服务启动，任务调度器已初始化")

@app.on_event("shutdown")
async def shutdown_event():
    """Close the aiojobs scheduler so in-flight jobs are wound down on shutdown."""
    await app.state.scheduler.close()
    logger.info("微调服务关闭，任务调度器已停止")

def save_finetune_data(task_id: str, data: List[Dict[str, str]]) -> str:
    """Persist the fine-tuning samples for *task_id* under DATA_DIR.

    The samples are written as pretty-printed UTF-8 JSON so the file is
    human-inspectable.

    Returns:
        The path of the JSON file that was written.
    """
    target = os.path.join(DATA_DIR, f"{task_id}_data.json")
    with open(target, "w", encoding="utf-8") as fh:
        json.dump(data, fh, ensure_ascii=False, indent=2)
    return target

# Execute one fine-tuning job (spawned via the aiojobs scheduler).
async def run_finetuning(task_id: str, request_data: FinetuneRequest):
    """Run a fine-tuning job end to end and record its outcome in task_store.

    All heavy, blocking work (dataset loading, model loading, tokenization
    and training) is pushed onto worker threads via asyncio.to_thread so the
    event loop stays responsive for HTTP requests.

    Args:
        task_id: Identifier under which progress is tracked in task_store.
        request_data: Validated fine-tuning parameters and training samples.

    Side effects:
        Writes the training data to disk, saves the fine-tuned model under
        ``request_data.output_dir/<task_id>/final``, and mutates task_store.
    """
    model = None
    tokenizer = None
    try:
        # Mark the task as running.
        task_store[task_id]["status"] = TaskStatus.PROCESSING
        task_store[task_id]["start_time"] = datetime.now().isoformat()
        logger.info(f"开始处理微调任务: {task_id}, 模型: {request_data.model_name}")

        # 1. Persist the raw training samples.
        data_path = save_finetune_data(task_id, request_data.data)
        logger.info(f"微调数据已保存: {data_path}")

        # 2. Load the dataset (file I/O + parsing happen off the event loop).
        def load_dataset():
            with open(data_path, "r", encoding="utf-8") as f:
                data = json.load(f)

            # Format each sample into the prompt/response template expected downstream.
            formatted_data = []
            for item in data:
                formatted_text = f"### 问题: {item['prompt']}\n### 回答: {item['response']}"
                formatted_data.append({"text": formatted_text})

            return Dataset.from_list(formatted_data)

        dataset = await asyncio.to_thread(load_dataset)
        logger.info(f"数据集加载完成，样本数量: {len(dataset)}")

        # 3. Load the model and tokenizer off the event loop.
        def load_model_tokenizer():
            tokenizer = AutoTokenizer.from_pretrained(request_data.model_name)
            if tokenizer.pad_token is None:
                # Causal LMs often ship without a pad token; reuse EOS for padding.
                tokenizer.pad_token = tokenizer.eos_token

            model = AutoModelForCausalLM.from_pretrained(
                request_data.model_name,
                torch_dtype=torch.float16,
                device_map="auto"
            )
            return model, tokenizer

        model, tokenizer = await asyncio.to_thread(load_model_tokenizer)
        logger.info(f"模型和分词器加载完成: {request_data.model_name}")

        # 4. Tokenize the dataset.
        # BUGFIX: dataset.map is CPU-bound and previously ran directly on the
        # event loop, blocking all HTTP handling for the duration; run it in
        # a worker thread like the other heavy steps.
        def tokenize_dataset():
            def preprocess_function(examples):
                return tokenizer(examples["text"], truncation=True, max_length=512, padding="max_length")

            return dataset.map(
                preprocess_function,
                batched=True,
                remove_columns=["text"]
            )

        tokenized_dataset = await asyncio.to_thread(tokenize_dataset)
        logger.info("数据集预处理完成")

        # 5. Training arguments; every task trains into its own directory.
        output_dir = os.path.join(request_data.output_dir, task_id)
        os.makedirs(output_dir, exist_ok=True)

        training_args = TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=request_data.epochs,
            per_device_train_batch_size=request_data.batch_size,
            learning_rate=request_data.learning_rate,
            logging_dir=f"{output_dir}/logs",
            logging_steps=10,
            save_strategy="epoch",
            fp16=True,  # mixed-precision training to speed things up
            report_to="none"
        )

        # 6. Train and save the final model (blocking; runs in a worker thread).
        def train_model():
            trainer = Trainer(
                model=model,
                args=training_args,
                train_dataset=tokenized_dataset
            )
            trainer.train()
            trainer.save_model(os.path.join(output_dir, "final"))
            tokenizer.save_pretrained(os.path.join(output_dir, "final"))
            return output_dir

        result_dir = await asyncio.to_thread(train_model)
        logger.info(f"微调完成，模型保存路径: {result_dir}")

        # 7. Record success.
        task_store[task_id]["status"] = TaskStatus.COMPLETED
        task_store[task_id]["end_time"] = datetime.now().isoformat()
        task_store[task_id]["result"] = {
            "model_path": result_dir,
            "message": "微调成功完成"
        }

    except Exception as e:
        error_msg = f"微调任务失败: {str(e)}"
        # BUGFIX: logger.exception keeps the traceback; logger.error dropped it.
        logger.exception(error_msg)
        task_store[task_id]["status"] = TaskStatus.FAILED
        task_store[task_id]["end_time"] = datetime.now().isoformat()
        task_store[task_id]["error"] = error_msg
    finally:
        # Release model AND tokenizer references (the original leaked the
        # tokenizer), then return cached GPU memory to the driver if CUDA
        # is present; empty_cache is guarded so CPU-only hosts don't trip.
        if model is not None:
            del model
        if tokenizer is not None:
            del tokenizer
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

# Endpoint: submit a fine-tuning task.
@app.post("/finetune", summary="提交模型微调任务", status_code=status.HTTP_202_ACCEPTED)
async def submit_finetune(request: FinetuneRequest):
    """Register a fine-tuning job and hand it to the aiojobs scheduler.

    Returns 202 immediately; the job runs asynchronously and progress can be
    polled via GET /finetune/status/{task_id}.

    Fix: the ``background_tasks: BackgroundTasks`` parameter was dead code —
    scheduling goes through the aiojobs scheduler, not Starlette background
    tasks — so the unused dependency is removed. The HTTP interface is
    unchanged.
    """
    task_id = str(uuid.uuid4())

    # Record the task before spawning it, so an immediate status poll
    # already finds the entry in PENDING state.
    task_store[task_id] = {
        "task_id": task_id,
        "status": TaskStatus.PENDING,
        "created_time": datetime.now().isoformat(),
        "model_name": request.model_name,
        "parameters": {
            "epochs": request.epochs,
            "batch_size": request.batch_size,
            "learning_rate": request.learning_rate,
            "output_dir": request.output_dir
        },
        "metadata": request.metadata or {}
    }

    # The scheduler enforces the concurrency limit configured at startup.
    await app.state.scheduler.spawn(run_finetuning(task_id, request))
    logger.info(f"微调任务已提交: {task_id}")

    return {
        "status": "accepted",
        "task_id": task_id,
        "message": "微调任务已加入队列，将在资源可用时开始处理",
        "created_time": task_store[task_id]["created_time"]
    }

# Endpoint: query the state of a single fine-tuning task.
@app.get("/finetune/status/{task_id}", summary="查询微调任务状态")
async def get_finetune_status(task_id: str):
    """Return the stored record for one fine-tuning task, or 404 if unknown."""
    record = task_store.get(task_id)
    if record is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"任务ID不存在: {task_id}"
        )
    return record

# Endpoint: list all known fine-tuning tasks.
@app.get("/finetune/tasks", summary="获取所有微调任务列表")
async def list_all_tasks(status: Optional[str] = None):
    """List every known task, optionally filtered by lifecycle status.

    A falsy ``status`` (absent or empty string) disables filtering, matching
    the original truthiness check.

    NOTE(review): the query parameter ``status`` shadows the imported
    ``fastapi.status`` module inside this function; harmless here because the
    module is not used in this body, but worth renaming if that ever changes.
    """
    # Snapshot and filter in one pass.
    tasks = [
        task for task in task_store.values()
        if not status or task["status"] == status
    ]
    return {
        "total": len(tasks),
        "tasks": tasks
    }

# Script entry point: run the API with uvicorn.
if __name__ == "__main__":
    import uvicorn

    # BUGFIX: task state lives in an in-process dict (task_store) and the
    # aiojobs scheduler is per-process. With workers=4 each worker process
    # had its own independent task_store, so a status query routed to a
    # different worker than the one that accepted the job returned 404.
    # Run a single worker: request concurrency is still provided by the
    # asyncio event loop, and training concurrency by the aiojobs scheduler
    # (limit=5). Scale out only after moving task state to a shared store
    # (e.g. Redis or a database).
    uvicorn.run(
        "finetune_service:app",
        host="0.0.0.0",
        port=8000,
        workers=1,
        reload=False
    )
