import torch 
from typing import Optional 
from pydantic import BaseModel 
from fastapi import FastAPI, HTTPException, Header 
import uvicorn 
from transformers import (
    AutoModelForCausalLM, AutoTokenizer, AutoConfig, GenerationConfig, TextIteratorStreamer 
) 
from fastapi.responses import StreamingResponse 
import asyncio 
import os

def load_hf_tokenizer(model_name_or_path, fast_tokenizer=True):
    """Load a HF tokenizer configured for left-padded causal-LM generation.

    Args:
        model_name_or_path: HF hub id or local checkpoint path.
        fast_tokenizer: prefer the fast (Rust-backed) tokenizer implementation.

    Returns:
        The configured ``AutoTokenizer`` instance.
    """
    # BUG FIX: AutoTokenizer.from_pretrained takes `use_fast`, not
    # `fast_tokenizer`; the old keyword was silently ignored.
    tokenizer = AutoTokenizer.from_pretrained(
        model_name_or_path, use_fast=fast_tokenizer, trust_remote_code=True)
    # Causal LMs often ship without a pad token; reuse EOS for pad (and BOS
    # when missing) so batched generation works.
    tokenizer.pad_token = tokenizer.eos_token
    if tokenizer.bos_token is None:
        tokenizer.bos_token = tokenizer.eos_token
    # Left padding/truncation keeps the end of the prompt adjacent to the
    # generated continuation.
    tokenizer.padding_side = 'left'
    tokenizer.truncation_side = "left"
    return tokenizer

def create_hf_model(model_name_or_path):
    """Instantiate a causal-LM from a hub id or local checkpoint path.

    ``trust_remote_code=True`` permits custom model code bundled with the
    checkpoint to execute during loading.
    """
    cfg = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
    return AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        config=cfg,
        trust_remote_code=True,
    )

class GenRequest(BaseModel):
    """Request body shared by /generate and /generate_stream."""
    prompt: str  # raw user prompt; handlers wrap it in [INST]...[/INST]
    max_input_length: Optional[int] = 2048  # prompt tokens kept after left truncation
    max_new_tokens: Optional[int] = 512  # generation budget (new tokens)
    temperature: Optional[float] = 0.2  # sampling temperature
    top_p: Optional[float] = 0.95  # nucleus-sampling cutoff
    do_sample: Optional[bool] = True  # False selects greedy decoding

# FastAPI app plus process-wide model state; populated once by startup_event().
app = FastAPI(title="MyModel API")
model = None
tokenizer = None
device = None

# Optional bearer token; when unset, the endpoints are unauthenticated.
API_TOKEN = os.environ.get("API_TOKEN", None)

@app.on_event("startup")
def startup_event():
    """Load tokenizer and model once per process and place the model on GPU/CPU."""
    global model, tokenizer, device, gen_base_cfg
    base_model = os.environ.get("BASE_MODEL", "/data/yangbo/model/codellama_merge")
    print("Loading model:", base_model)

    # Select the physical GPU via env var; defaults to card 0.
    # NOTE(review): CUDA_VISIBLE_DEVICES is set after `import torch`; this
    # only works because CUDA initializes lazily — confirm nothing touches
    # CUDA before startup.
    gpu_id = os.environ.get("GPU_ID", "0")
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    tokenizer = load_hf_tokenizer(base_model, fast_tokenizer=True)
    model = create_hf_model(base_model)
    # After masking, the chosen card is always visible as cuda:0.
    # (Fixed: was an f-string with no placeholder.)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    try:
        # Best effort: fp16 halves memory; some CPU-only setups refuse it.
        model = model.half()
    except Exception:
        pass
    model.to(device)
    model.eval()
    # Default sampling parameters. Currently unused: both endpoints build a
    # per-request GenerationConfig instead — kept for backward compatibility.
    gen_base_cfg = {
        "do_sample": True,
        "top_p": 0.95,
        "temperature": 0.2,
        "num_return_sequences": 1
    }
    print(f"Model ready. Device: {device}, Using GPU ID: {gpu_id}")

def to_device(batch, device):
    """Move every tensor-like value in *batch* to *device*.

    Non-movable values (ints, strings, ...) are passed through unchanged.

    Args:
        batch: mapping of names to tensors or plain Python values.
        device: target ``torch.device``.

    Returns:
        A new dict with tensors on *device* and other values untouched.
    """
    # Only call .to() on objects that support it. The previous bare
    # `except:` also swallowed KeyboardInterrupt and real device errors.
    return {
        k: v.to(device) if hasattr(v, "to") else v
        for k, v in batch.items()
    }

@app.post("/generate")
def generate(req: GenRequest, authorization: Optional[str] = Header(None)):
    """Blocking generation endpoint; returns the full completion as JSON.

    Raises:
        HTTPException: 401 when API_TOKEN is set and the Bearer header is
            missing or does not match.
    """
    # `authorization != ...` is also True when the header is absent (None).
    if API_TOKEN and authorization != f"Bearer {API_TOKEN}":
        raise HTTPException(status_code=401, detail="Unauthorized")

    # Llama-style instruction wrapping; must match the model's fine-tuning.
    prompts = "[INST]" + req.prompt + "[/INST]"
    inputs = tokenizer(
        prompts,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=req.max_input_length
    )
    inputs = to_device(inputs, device)

    gen_cfg = GenerationConfig(
        temperature=req.temperature,
        top_p=req.top_p,
        do_sample=req.do_sample,
        num_return_sequences=1
    )
    with torch.no_grad():
        outs = model.generate(
            **inputs,
            generation_config=gen_cfg,
            max_new_tokens=req.max_new_tokens,
            pad_token_id=tokenizer.eos_token_id
        )
    # BUG FIX: decode only the newly generated tokens. The old code decoded
    # the whole sequence and str.replace()d the prompt text, which breaks
    # whenever detokenization does not round-trip the prompt exactly.
    prompt_len = inputs["input_ids"].shape[1]
    text = tokenizer.decode(outs[0][prompt_len:], skip_special_tokens=True)
    return {"text": text}

@app.post("/generate_stream")
def generate_stream(req: GenRequest, authorization: Optional[str] = Header(None)):
    """Streaming generation endpoint; yields plain-text chunks as produced.

    Raises:
        HTTPException: 401 when API_TOKEN is set and the Bearer header is
            missing or does not match.
    """
    # `authorization != ...` is also True when the header is absent (None).
    if API_TOKEN and authorization != f"Bearer {API_TOKEN}":
        raise HTTPException(status_code=401, detail="Unauthorized")

    prompt_text = "[INST]" + req.prompt + "[/INST]"
    inputs = tokenizer(
        prompt_text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=req.max_input_length
    )
    inputs = to_device(inputs, device)

    gen_cfg = GenerationConfig(
        temperature=req.temperature,
        top_p=req.top_p,
        do_sample=req.do_sample,
        num_return_sequences=1
    )

    # BUG FIX: skip_prompt=True so the stream carries only the completion,
    # consistent with /generate (which strips the prompt from its response).
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=10.0)

    # Run generation in a background daemon thread; the streamer yields text
    # pieces as tokens arrive. BUG FIX: pass the GenerationConfig built above
    # — previously it was constructed but never used, and the sampling args
    # were duplicated as raw kwargs instead.
    import threading
    threading.Thread(
        target=model.generate,
        kwargs={
            **inputs,
            "generation_config": gen_cfg,
            "max_new_tokens": req.max_new_tokens,
            "streamer": streamer,
            "pad_token_id": tokenizer.eos_token_id
        },
        daemon=True
    ).start()

    # The streamer is itself an iterator of decoded text fragments.
    return StreamingResponse(streamer, media_type="text/plain")

if __name__ == "__main__":
    # Single worker: the model lives in this process's memory and cannot be
    # shared across uvicorn workers. (Original note: run on one GPU card.)
    uvicorn.run("generation_app_stream:app", host="0.0.0.0", port=8010, workers=1)