"""
NOTE: This API server is used only for demonstrating usage of AsyncEngine and simple performance benchmarks.
It is not intended for production use. For production use, we recommend using our OpenAI compatible server.
We are also not going to accept PRs modifying this file, please change `vllm/entrypoints/openai/api_server.py` instead.
"""

import argparse
import asyncio

import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse, Response, StreamingResponse

from bigdl.llm.transformers import AutoModelForCausalLM as TAutoModelForCausalLM
from modelscope.hub.snapshot_download import snapshot_download
from transformers import AutoModelForCausalLM  # debug
from transformers import AutoTokenizer

from config import Config



config = Config()
TIMEOUT_KEEP_ALIVE = 10  # seconds.
app = FastAPI()
snapshot_download(model_id=config.model_name, cache_dir=config.cache_dir)
model = TAutoModelForCausalLM.from_pretrained(config.model_path, load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained(config.model_path)
device="cpu"


async def generate(text):

    model_inputs = tokenizer([text], return_tensors="pt").to(device)

    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512
    )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response



@app.post("/")
@app.get("/")
async def read_root(request: Request):
    if request.method == 'POST':
            request_dict = await request.json()
            prompt = request_dict.pop('prompt')
    else:
        prompt = request.query_params.get('prompt')

    result = await generate(prompt)
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default=None)
    parser.add_argument("--port", type=int, default=8000)
    args = parser.parse_args()
    uvicorn.run(app=app,
                host=args.host,
                port=args.port,
                log_level="debug",
                timeout_keep_alive=TIMEOUT_KEEP_ALIVE)
