import argparse
import flask
import requests
from flask import request
import os
import torch
import json
import time # time.strftime("%Y-%m-%d", time.localtime())
from transformers import AutoTokenizer, AutoModelForCausalLM
import sched
from args import *
# Registry of models currently managed by this server; mutated by the
# /control/* endpoints below.
model_map = {
    # "model_name" : (model, tokenizer, last_use_time)
}

# Flask application serving the /control/* endpoints defined below.
app = flask.Flask(__name__)
# Tokenizer cache used by the chat endpoint for prompt templating, kept
# separate from model_map so templates work before a model is loaded.
tokenizer_map = {
    # "model_name" :  tokenizer
}
@app.route("/control/start", methods=["POST"])
def model_start():
    """HTTP endpoint: load *model* from disk (if not cached) and place it on *device*.

    Expected POST body: {"model": "qwen1.5-7b-chat", "device": "cpu|cuda:0", "keep": 15}
    NOTE(review): the documented "keep" field is accepted but never read here.
    Returns a JSON string: status 0 on success, -1 with the error text otherwise.
    """
    try:
        payload = request.json
        name = payload["model"]
        mdl, tok, _ = model_map.get(name, (None, None, -1))
        if mdl is None:
            # Not cached yet: load weights and tokenizer from the model directory.
            path = os.path.join(MODEL_PATH, name)
            mdl = AutoModelForCausalLM.from_pretrained(path, trust_remote_code=True)
            tok = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
            model_map[name] = (mdl, tok, time.time())
        if "deepseek" in name:
            # presumably forces greedy decoding for deepseek models — TODO confirm intent
            mdl.generation_config.do_sample = False
        target = payload.get("device", "cpu")
        model_map[name] = (mdl.to(torch.device(target)), tok, time.time())
        torch.cuda.empty_cache()
        return json.dumps({"status": 0, "message": f"load model {name} on {target} success！"}, ensure_ascii=False, indent=4)

    except Exception as e:
        # Boundary handler: surface any failure to the HTTP caller as status -1.
        return json.dumps({"status": -1, "message": f"{e}"}, ensure_ascii=False, indent=4)



def start(model_name: str):
    """(Re)load *model_name* and move it onto its configured device.

    Internal helper used by the call/chat endpoints to restart a model after a
    generation failure.  The target device comes from the ``cuda_cast`` mapping
    (imported from args); models with no entry now fall back to ``"cpu"``.

    Returns a JSON status string (status 0 on success, -1 on failure).
    """
    try:
        model, tokenizer, _last_use_time = model_map.get(model_name, (None, None, -1))
        if model is None:
            model_path = os.path.join(MODEL_PATH, model_name)
            model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
            tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)
            # Cache immediately so a later device-placement failure does not lose the load.
            model_map[model_name] = (model, tokenizer, time.time())
        if "deepseek" in model_name:
            # presumably forces greedy decoding for deepseek models — TODO confirm intent
            model.generation_config.do_sample = False
        # Fix: cuda_cast.get(model_name) could return None for unmapped models,
        # making torch.device(None) raise and the restart silently fail; default
        # to CPU so the model at least becomes usable again.
        device = cuda_cast.get(model_name, "cpu")
        model_map[model_name] = (model.to(torch.device(device)), tokenizer, time.time())
        torch.cuda.empty_cache()
        return json.dumps({"status": 0, "message": f"load model {model_name} on {device} success！"}, ensure_ascii=False,
                          indent=4)

    except Exception as e:
        return json.dumps({"status": -1, "message": f"{e}"}, ensure_ascii=False, indent=4)

@app.route("/control/stop", methods=["POST"])
def model_stop():
    """HTTP endpoint: offload a loaded model back to CPU to free GPU memory.

    POST body: {"model": "qwen1.5-7b-chat"}
    Returns a JSON string: status 0 on success, -1 with the error text otherwise.
    """
    try:
        model_name = request.json["model"]
        model, tokenizer, last_use_time = model_map.get(model_name, (None, None, -1))
        if model is None:
            # Fix: previously model.to(...) raised AttributeError on an unloaded
            # model, returning a cryptic "'NoneType' has no attribute 'to'" message.
            return json.dumps({"status": -1, "message": f"model {model_name} is not loaded!"}, ensure_ascii=False, indent=4)
        # Preserve last_use_time: offloading is not a "use" of the model.
        model_map[model_name] = (model.to(torch.device("cpu")), tokenizer, last_use_time)
        torch.cuda.empty_cache()
        return json.dumps({"status":0, "message":f"offload model {model_name} success！"}, ensure_ascii=False, indent=4)

    except Exception as e:
        return json.dumps({"status":-1, "message":f"{e}"}, ensure_ascii=False, indent=4)

@app.route("/control/status", methods=["POST", "GET"])
def model_status():
    """HTTP endpoint: report free memory per GPU (GiB) and where each model sits.

    Returns a JSON string with "available_memory" (list, one entry per GPU) and
    "model_allocated" (model name -> device string) under the "data" key.
    """
    try:
        torch.cuda.empty_cache()
        status = {"available_memory": [], "model_allocated": {}}  # TODO: also report parameter counts
        num_gpus = torch.cuda.device_count()
        for gpu_id in range(num_gpus):
            with torch.cuda.device(gpu_id):
                device = torch.cuda.current_device()
                gpu_properties = torch.cuda.get_device_properties(device)
                total_memory = gpu_properties.total_memory / (1024 ** 3)      # GiB
                allocated_memory = torch.cuda.memory_allocated() / (1024 ** 3)
                status["available_memory"].append(total_memory - allocated_memory)
        for model_name, (model, _tokenizer, _last_use) in model_map.items():
            status["model_allocated"][model_name] = str(model.device)
        # Fix: the original dict literal spelled "status" twice, so the duplicate
        # key silently replaced status=0 with the payload and broke clients that
        # check status == 0; use "data" like the other endpoints.
        return json.dumps({"status": 0, "message": "success!", "data": status}, ensure_ascii=False, indent=4)

    except Exception as e:
        return json.dumps({"status": -1, "message": f"{e}"}, ensure_ascii=False, indent=4)
    
@app.route("/control/call", methods=["POST"])
def model_call():
    """HTTP endpoint: raw text completion with the requested model.

    POST body: {"model": "qwen1.5-7b-chat", "text": "...", "max_new_tokens": 512}
    On any generation failure (most commonly: model not yet loaded, so the
    tokenizer/model fetched from model_map is None) the model is (re)started via
    start() and an error JSON is returned; the caller is expected to retry.
    """
    try:
        model_name = request.json["model"]
    except (TypeError, KeyError):
        # Fix: was a bare `except:`, which also swallowed unrelated errors
        # (including SystemExit/KeyboardInterrupt). TypeError covers a missing
        # JSON body, KeyError a body without "model".
        return json.dumps({"status": -1, "message": "parameter model miss!"}, ensure_ascii=False, indent=4)
    try:
        text, max_new_tokens = request.json["text"], request.json["max_new_tokens"]
        model, tokenizer, _last_use_time = model_map.get(model_name, (None, None, -1))
        model_inputs = tokenizer([text], return_tensors="pt").input_ids.to(model.device)
        generated_ids = model.generate(model_inputs, max_new_tokens=max_new_tokens, temperature=0.7)[0]
        # Decode only the freshly generated tokens (skip the prompt prefix).
        response_text = tokenizer.decode(generated_ids[len(model_inputs[0]):])
        response_text = response_text.replace("<|im_end|>", "")
        return json.dumps({"status": 0, "message": "success!", "data": response_text}, ensure_ascii=False, indent=4)
    except Exception as e:
        # Best effort: kick off a (re)start and tell the client to wait and retry.
        start(model_name)
        return json.dumps({"status": -1, "message": f"first init of model or some problems with model {model_name}\n  restarting model(waiting for 1 min)!\nerror detail\n{e}"}, ensure_ascii=False, indent=4)


@app.route("/control/chat", methods=["POST"])
def model_chat():
    """HTTP endpoint: chat completion — wraps *text* in the model family's chat
    template (see construct_template) before generating.

    POST body: {"model": "qwen1.5-7b-chat", "text": "...", "max_new_tokens": 512}
    On any generation failure the model is (re)started via start() and an error
    JSON is returned; the caller is expected to retry.
    """
    try:
        model_name = request.json["model"]
    except (TypeError, KeyError):
        # Fix: was a bare `except:`, which also swallowed unrelated errors.
        # TypeError covers a missing JSON body, KeyError a body without "model".
        return json.dumps({"status": -1, "message": "parameter model miss!"}, ensure_ascii=False, indent=4)
    try:
        text, max_new_tokens = request.json["text"], request.json["max_new_tokens"]
        messages = construct_template(model_name, text)
        # The template tokenizer is cached separately in tokenizer_map so chat
        # formatting works even before the model itself has been loaded.
        # Renamed from `tokenizer` to avoid being shadowed by the generation
        # tokenizer fetched from model_map below (behavior unchanged).
        template_tokenizer = tokenizer_map.get(model_name, None)
        if template_tokenizer is None:
            template_tokenizer = AutoTokenizer.from_pretrained(f"{MODEL_PATH}/{model_name}", trust_remote_code=True, use_fast=False)
            tokenizer_map[model_name] = template_tokenizer
        text = template_tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

        model, tokenizer, _last_use_time = model_map.get(model_name, (None, None, -1))
        model_inputs = tokenizer([text], return_tensors="pt").input_ids.to(model.device)
        generated_ids = model.generate(model_inputs, max_new_tokens=max_new_tokens, temperature=0.7)[0]
        # Decode only the freshly generated tokens (skip the prompt prefix).
        response_text = tokenizer.decode(generated_ids[len(model_inputs[0]):])
        response_text = response_text.replace("<|im_end|>", "")
        return json.dumps({"status": 0, "message": "success!", "data": response_text}, ensure_ascii=False, indent=4)
    except Exception as e:
        # Best effort: kick off a (re)start and tell the client to wait and retry.
        start(model_name)
        return json.dumps({"status": -1, "message": f"first init of model or some problems with model {model_name}\n  restarting model(waiting for 1 min)!\nerror detail\n{e}"}, ensure_ascii=False, indent=4)



def construct_template(model_name, content):
    """Build the chat-message list using the role names each model family expects.

    The first family tag that appears as a substring of *model_name* wins
    (checked in a fixed order); unknown models get a single plain user message.
    """
    helper_prompt = "你是一个有用的助手。"
    # Ordered (tag, messages) table — order matters for substring matching.
    family_table = (
        ("chatglm3", [
            {"role": "<|system|>", "content": helper_prompt},
            {"role": "<|user|>", "content": f"{content}"},
        ]),
        ("qwen1.5", [
            {"role": "system", "content": helper_prompt},
            {"role": "user", "content": f"{content}"},
        ]),
        ("baichuan2", [
            {"role": "<reserved_106>", "content": f"{content}"},
        ]),
        ("internlm2", [
            {"role": "system", "content": helper_prompt},
            {"role": "user", "content": f"{content}"},
        ]),
        ("deepseek", [
            {"role": "User", "content": f"{content}"},
        ]),
        ("yi", [
            {"role": "user", "content": f"{content}"},
        ]),
        ("mistral", [
            {"role": "[INST]", "content": helper_prompt},
            {"role": "[INST]", "content": f"{content}"},
        ]),
        ("llama2", [
            {"role": "[INST] <<SYS>>", "content": helper_prompt},
            {"role": "[INST]", "content": f"{content}"},
        ]),
        ("llama3", [
            {"role": "system", "content": helper_prompt},
            {"role": "user", "content": f"{content}"},
        ]),
        ("gemma", [
            {"role": "user", "content": f"{content}"},
        ]),
    )
    for tag, messages in family_table:
        if tag in model_name:
            return messages
    # Default: plain single-turn user message.
    return [{"role": "user", "content": f"{content}"}]


if __name__ == "__main__":
    # CLI entry point — only the listening port is configurable.
    cli = argparse.ArgumentParser()
    cli.add_argument("--port", type=int, default=8898)
    options = cli.parse_args()

    # Bind on all interfaces so remote clients can reach the control endpoints.
    app.run(host="0.0.0.0", port=options.port)

