import uvicorn
from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch
from PyCmpltrtok.common import sep


def load_model(model_path):
    """Load a HuggingFace extractive-QA checkpoint and place it on a device.

    Args:
        model_path: Path to a pretrained checkpoint directory (tokenizer
            and model weights are both loaded from it).

    Returns:
        Tuple ``(model, tokenizer, dev)`` where ``model`` has already been
        moved to ``dev`` and switched to eval mode.
    """
    sep()
    sep('正在加载模型……')
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    print(tokenizer)
    model = AutoModelForQuestionAnswering.from_pretrained(model_path)
    print(model)
    sep('Put model onto dev')
    # Fall back to CPU so the server can still start on machines without CUDA;
    # the original hard-coded 'cuda:0' and crashed when no GPU was present.
    dev = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(dev)
    sep('done')
    model = model.to(dev)
    # Inference-only server: disable dropout / switch norm layers to eval behavior.
    model.eval()

    return model, tokenizer, dev
    
    
if '__main__' == __name__:
    model_path = '/home/yunpeng/checkpoints/cmrc2018-zh-roberta-wwm-ext-large/2024_11_29_18_50_41_501385_temp0/checkpoint-2536'  # New PC    
    root_path = ''

    app = FastAPI(
        title="My API",
        version='0.1.0',
        root_path=root_path,
    )

    @app.post("/qa")
    async def stream_chat(request: Request):
        """Extractive QA endpoint.

        Expects a JSON body ``{"q": <question>, "context": <passage>}`` and
        returns ``{"a": <answer span>, "html": <context with the span
        highlighted>}``. Raises ``KeyError`` (-> HTTP 500) if either key
        is missing from the request body.
        """
        # Parse the incoming JSON request body.
        req_json = await request.json()
        q = req_json['q']
        context = req_json['context']
        print('q:', q)
        print('context:', context)

        inputs = tokenizer(
            q,
            context,
            max_length=512,
            truncation="only_second",  # truncate only the context, never the question
            padding=False,
            return_tensors='pt',
        )
        inputs = {k: v.to(dev) for k, v in inputs.items()}
        # Inference only: without no_grad() every request builds an autograd
        # graph, wasting GPU memory for no benefit.
        with torch.no_grad():
            outputs = model(**inputs)
        # Greedy span selection: independent argmax over start/end logits.
        # NOTE(review): if end < start the answer is empty and prefix/suffix
        # overlap in the HTML — consider constraining to valid spans.
        start = outputs['start_logits'].argmax(-1)[0]
        end = outputs['end_logits'].argmax(-1)[0]
        # Keep special tokens so the decoded prefix/suffix reproduce the full
        # model input around the highlighted answer.
        skip_special_tokens = False
        a = tokenizer.decode(inputs['input_ids'][0][start:end+1], skip_special_tokens=skip_special_tokens)
        prefix = tokenizer.decode(inputs['input_ids'][0][:start], skip_special_tokens=skip_special_tokens)
        suffix = tokenizer.decode(inputs['input_ids'][0][end+1:], skip_special_tokens=skip_special_tokens)
        html = f'{prefix}<span style="background-color: green;">{a}</span>{suffix}'

        return {
            'a': a,
            'html': html,
        }

    # Load the model before serving so the first request isn't blocked by it.
    model, tokenizer, dev = load_model(model_path)
    uvicorn.run(app, host='0.0.0.0', port=7777, root_path=root_path)