import datetime
import json
import os
import re

import torch
import uvicorn
from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModel

# Restrict the process to GPU 0. Must be set before CUDA is first initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# DEVICE = "cuda"
# DEVICE_ID = "0"
# CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE


# def torch_gc():
#     if torch.cuda.is_available():
#         with torch.cuda.device(CUDA_DEVICE):
#             torch.cuda.empty_cache()
#             torch.cuda.ipc_collect()

def torch_gc():
    """Release cached CUDA memory on every visible GPU.

    No-op on CPU-only machines. Called after each request so the long-lived
    server process does not accumulate allocator cache across requests.
    """
    if not torch.cuda.is_available():
        return
    for gpu_index in range(torch.cuda.device_count()):
        with torch.cuda.device(gpu_index):
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()


app = FastAPI()


# Extraction patterns, compiled once at import time instead of per request.
_NAME_PATTERN = re.compile(r'(?<=姓名.)[\u4e00-\u9fa5]{1,3}')  # 1-3 Han chars after "姓名" + 1 char
_PHONE_PATTERN = re.compile(r'1\d{10}')  # 11-digit Chinese mobile number starting with 1

# One-shot example dialogue prepended to every prompt to steer the model
# toward the desired "name + phone" output format.
_EXAMPLE_DIALOG = "agent:你好，先生，请问您贵姓？user:我姓陈，陈旧的陈。agent:您的联系电话是多少？user:18918589021。"


def _extract_name_and_phone(text):
    """Best-effort extraction of (name, phone) from the model's response text.

    Either element is None when the corresponding pattern does not match;
    only the first phone number found is returned.
    """
    name_match = _NAME_PATTERN.search(text)
    name = name_match.group() if name_match else None
    phone_matches = _PHONE_PATTERN.findall(text)
    phone = phone_matches[0] if phone_matches else None
    return name, phone


@app.post("/")
async def create_item(request: Request):
    """Extract the caller's name and phone number from a call transcript.

    Expects a JSON body with a "prompt" key (the transcript); optional
    "max_length" and "top_p" tune generation. Returns a dict with the raw
    model response, the parsed name/number, an HTTP-style status and a
    timestamp.
    """
    global model, tokenizer
    # request.json() already yields a parsed dict; the original
    # dumps()/loads() round-trip was redundant and is removed.
    json_post = await request.json()
    prompt = json_post.get('prompt')
    # Strip characters that would confuse the one-shot prompt template.
    prompt = prompt.replace("\t", "").replace("\n", "").replace("\\", "")
    prompt = "示例:\n"+_EXAMPLE_DIALOG+"\n输出为\n"+ "用户姓名：陈先生,手机号：18918589021。\n"+"根据以上示例抽取下面通话内容中用户姓名和手机号\n" +prompt
    history = []
    max_length = json_post.get('max_length')
    top_p = json_post.get('top_p')
    # Temperature is pinned low on purpose: extraction should be (near-)
    # deterministic, so any client-supplied value is ignored.
    temperature = 0.01
    response, history = model.chat(tokenizer,
                                   prompt,
                                   history=history,
                                   max_length=max_length if max_length else 2048,
                                   top_p=top_p if top_p else 0.7,
                                   temperature=temperature)
    print(response)
    # Normalize the response before pattern matching.
    response = response.replace("\"", "").replace("\n", "").replace("\\", "")
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d %H:%M:%S")
    name, phone = _extract_name_and_phone(response)
    answer = {
        "response": response,
        "name": name,
        "number": phone,
        "status": 200,
        "time": time
    }
    # Balanced quoting (the original log line had a stray '",' fragment).
    log = "[" + time + '] prompt:"' + prompt + '", response:"' + repr(response) + '"'
    print(log)
    torch_gc()
    return answer


if __name__ == '__main__':
    # tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
    # model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda()
    # Load the ChatGLM3-6B tokenizer and model from a local checkpoint and
    # move the model onto the (single visible) GPU.
    tokenizer = AutoTokenizer.from_pretrained("/yfzx/models/ZhipuAI/chatglm3-6b", trust_remote_code=True)
    model = AutoModel.from_pretrained("/yfzx/models/ZhipuAI/chatglm3-6b", trust_remote_code=True).cuda()
    # Multi-GPU support: use the three lines below instead of the two above,
    # changing num_gpus to your actual number of GPUs.
    # model_path = "THUDM/chatglm2-6b"
    # tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    # model = load_model_on_gpus(model_path, num_gpus=2)
    model.eval()  # inference mode: disables dropout etc.
    # Single worker on purpose: the model lives in this process's memory, so
    # multiple workers would each load their own copy.
    uvicorn.run(app, host='0.0.0.0', port=7662, workers=1)
