from fastapi import FastAPI
from pydantic import BaseModel
from openai import OpenAI
import os
import ast
import server_config
import json

app = FastAPI(title="NLU Processing Service")

# Configuration (in production these should come from env vars or a config file).
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE", "http://0.0.0.0:10085/v1")
MODEL_DOMAIN = os.getenv("MODEL_DOMAIN", "lora_domain")        # domain/intent classification model
MODEL_SEMANTIC = os.getenv("MODEL_SEMANTIC", "lora_semantic")  # slot-filling (semantic) model
MODEL_REWRITE = os.getenv("MODEL_REWRITE", "nlu_model")        # rewrite / segmentation model

# System prompt configuration (prompts live in server_config).
system_content_rewrite = server_config.SYSTEM_CONTENT_REWRITE
system_message = [{"role": "system", "content": system_content_rewrite}]

system_content_segment = server_config.SYSTEM_CONTENT_SEGMENT
system_content_classify = server_config.SYSTEM_CONTENT_CLASSIFY
system_content_intent = server_config.SYSTEM_CONTENT_INTENT
system_content_semantic = server_config.SYSTEM_CONTENT_SEMANTIC
system_content_semantic_v1 = server_config.SYSTEM_CONTENT_SEMANTIC_v1

# Lookup tables: raw model label -> domain, domain -> intent list, and
# (domain/intent) -> slot templates. Defined in server_config.
domain_en_dict = server_config.domain_en_dict
intent_dict = server_config.intent_dict
semantic_dict = server_config.semantic_dict
semantic_dict_v1 = server_config.semantic_dict_v1

# Initialize the OpenAI-compatible client pointed at the vLLM server.
client = OpenAI(
    api_key="EMPTY",  # vLLM does not require a real API key
    base_url=OPENAI_API_BASE
)

# Segmentation models
class segRequest(BaseModel):
    """Request body for POST /segment: the raw text to split."""
    query: str

class segResponse(BaseModel):
    """Response body for POST /segment: the list of segments."""
    result: list[str]

# Rewrite models
class rewriteRequest(BaseModel):
    """Request body for POST /rewrite."""
    # Each history entry is a turn: [user_text] or [user_text, assistant_text].
    queryHistory: list
    query: str

class rewriteResponse(BaseModel):
    """Response body for POST /rewrite: the rewritten query."""
    result: str

# Classification models
class classifyRequest(BaseModel):
    """Request body for POST /classify: the text to classify."""
    query: str

class classifyResponse(BaseModel):
    """Response body for POST /classify: {"text", "domain", "intent"}."""
    result: dict

# Semantic (slot-filling) models
class semanticRequest(BaseModel):
    """Request body for POST /semantic.

    Expects data to contain at least "text", "domain" and "intent" keys.
    """
    data: dict

class semanticResponse(BaseModel):
    """Response body for POST /semantic: {"text", "domain", "intent", "slot"}."""
    result: dict


class nluRequest(BaseModel):
    query: str

class nluResponse(BaseModel):
    result: dict

@app.post("/segment", response_model=segResponse)
async def segment(request: segRequest):
    """Split the input text into segments.

    The segmentation model returns segments joined by '|'; the response is
    the list of those segments, whitespace-stripped.
    """
    query = request.query
    chat_response = client.chat.completions.create(
        model=MODEL_REWRITE,  # was hard-coded "nlu_model"; constant has the same default
        messages=[
            {"role": "system", "content": system_content_segment},
            {"role": "user", "content": query},
        ],
        max_tokens=128,
        extra_body={
            # Disable the model's thinking/chain-of-thought output.
            "chat_template_kwargs": {"enable_thinking": False},
        },
    )
    res = chat_response.choices[0].message.content
    # Model output is '|'-separated; strip stray whitespace from each segment.
    result = [part.strip() for part in res.split("|")]
    return segResponse(
        result=result
    )

@app.post("/rewrite", response_model=rewriteResponse)
async def rewrite(request: rewriteRequest):
    """Rewrite the current query using the conversation history as context.

    Each queryHistory entry is [user_text] or [user_text, assistant_text];
    entries of any other length are silently ignored (pre-existing behavior).
    """
    query = request.query
    messages_now = [{"role": "user", "content": query}]

    messages_history = []
    for message in request.queryHistory:
        if len(message) == 1:
            messages_history.append({"role": "user", "content": message[0]})
        elif len(message) == 2:  # was two separate ifs; elif makes the exclusivity explicit
            messages_history.append({"role": "user", "content": message[0]})
            messages_history.append({"role": "assistant", "content": message[1]})

    # system prompt + prior turns + current turn
    messages = system_message + messages_history + messages_now
    response = client.chat.completions.create(
        model=MODEL_REWRITE,  # was hard-coded "nlu_model"; constant has the same default
        messages=messages,
        temperature=0.2,
        max_tokens=128,
        extra_body={"chat_template_kwargs": {"enable_thinking": False}}
    )

    query_rewritten = response.choices[0].message.content

    return rewriteResponse(
        result=query_rewritten
    )

@app.post("/classify", response_model=classifyResponse)
async def classify(request: classifyRequest):
    """Classify the query's domain and, for known domains, its intent.

    Returns {"text", "domain", "intent"}; "intent" stays "" when the
    predicted domain has no intent list configured.
    """
    query = request.query
    chat_response = client.chat.completions.create(
        model=MODEL_DOMAIN,
        messages=[
            {"role": "system", "content": system_content_classify},
            {"role": "user", "content": query},
        ],
        max_tokens=128,
        temperature=0.2,
        extra_body={
            "chat_template_kwargs": {"enable_thinking": False},
        },
    )
    res = chat_response.choices[0].message.content
    # Map the raw model label to the canonical domain; unknown labels -> "other".
    domain = domain_en_dict.get(res, "other")

    result = {"text": query, "domain": domain, "intent": ""}

    if domain in intent_dict:
        # BUG FIX: the original wrote {"text":{query}, "domain":{domain}, ...},
        # wrapping the values in SET literals so the prompt rendered {'...'}.
        # Pass plain strings instead.
        # NOTE(review): if the intent model was fine-tuned on the old rendering,
        # confirm the expected prompt format before deploying.
        intent_content = {"text": query, "domain": domain, "intent": intent_dict[domain]}
        response = client.chat.completions.create(
            model=MODEL_DOMAIN,
            messages=[
                {"role": "system", "content": system_content_intent},
                # The user message only carries the structured payload; the task
                # description lives in the system prompt.
                {"role": "user", "content": f"{intent_content}"}
            ],
            temperature=0.3,
            max_tokens=16,
            extra_body={"chat_template_kwargs": {"enable_thinking": False}}
        )
        result['intent'] = response.choices[0].message.content

    return classifyResponse(
        result=result
    )

# @app.post("/semantic", response_model=semanticResponse)
# async def semantic(request: semanticRequest):
#     semantic_content = request.data
#     text = semantic_content['text']
#     domain = semantic_content['domain']
#     intent = semantic_content['intent']
#     semantic_content['slot'] = {}

#     semantic_content = {"text": text, "domain": domain, "intent": intent, "slot": {}}
#     result = semantic_content
#     if domain in semantic_dict and intent in semantic_dict[domain]:
#         semantic_slot = semantic_dict[domain][intent]
#         semantic_content["slot"] = semantic_slot

#         semantic_response = client.chat.completions.create(
#             model="lora_semantic",
#             messages=[
#                 {"role": "system", "content": system_content_semantic},
#                 {"role": "user", "content": f"{semantic_content}"},
#             ],
#             temperature=0.3,
#             max_tokens=64,
#             extra_body={"chat_template_kwargs": {"enable_thinking": False}}
#         )
#         # semantic_content=semantic_response.choices[0].message.content
#         result=ast.literal_eval(semantic_response.choices[0].message.content)
#     return semanticResponse(
#         result=result
#     )

@app.post("/semantic", response_model=semanticResponse)
async def semantic(request: semanticRequest):
    """Extract slot values for a (domain, intent) pair from the text.

    Expects request.data to contain "text", "domain" and "intent".
    Returns {"text", "domain", "intent", "slot"}; "slot" stays {} when the
    domain-intent pair has no slot template configured.
    """
    data = request.data
    text = data['text']
    domain = data['domain']
    intent = data['intent']
    # NOTE: the original also mutated the incoming dict (data['slot'] = {});
    # that mutation was never read, so it is dropped here.

    result = {"text": text, "domain": domain, "intent": intent, "slot": {}}
    domain_intent = f"{domain}-{intent}"

    if domain_intent in semantic_dict_v1:
        prompt = f"domain:{domain}\nintent:{intent}\ntext:{text}\n{semantic_dict_v1[domain_intent]}"

        semantic_response = client.chat.completions.create(
            model=MODEL_SEMANTIC,  # was hard-coded "lora_semantic"; constant has the same default
            messages=[
                {"role": "system", "content": system_content_semantic_v1},
                {"role": "user", "content": prompt},
            ],
            temperature=0.3,
            max_tokens=64,
            extra_body={"chat_template_kwargs": {"enable_thinking": False}}
        )
        slot = {}
        for line in semantic_response.choices[0].message.content.strip().split('\n'):
            # Split on the first ':' only so slot values may contain colons.
            # BUG FIX: split(':', 1) raised ValueError (HTTP 500) on any model
            # line without a colon; partition lets us skip malformed lines.
            key, sep, value = line.partition(':')
            if sep:
                slot[key.strip()] = value.strip()
        result["slot"] = slot
    return semanticResponse(
        result=result
    )


@app.post("/nlu", response_model=nluResponse)
async def nlu(request: nluRequest):
    """Full NLU pipeline: domain classification -> intent -> slot filling.

    Returns {"text", "domain", "intent", "slot"}.
    """
    query = request.query

    # --- 1. Domain classification ---
    chat_response = client.chat.completions.create(
        model=MODEL_DOMAIN,
        messages=[
            {"role": "system", "content": system_content_classify},
            {"role": "user", "content": query},
        ],
        max_tokens=16,
        temperature=0.2,
        extra_body={
            "chat_template_kwargs": {"enable_thinking": False},
        },
    )
    res = chat_response.choices[0].message.content
    domain = domain_en_dict.get(res, "other")

    # Pre-existing behavior: the raw classifier label doubles as the intent
    # when the domain has no dedicated intent list.
    intent = res

    # --- 2. Intent recognition (only for domains with a configured intent list) ---
    if domain in intent_dict:
        # BUG FIX: the original wrote {"text":{query}, "domain":{domain}, ...},
        # wrapping the values in SET literals so the prompt rendered {'...'}.
        # Pass plain strings instead.
        # NOTE(review): if the intent model was fine-tuned on the old rendering,
        # confirm the expected prompt format before deploying.
        intent_content = {"text": query, "domain": domain, "intent": intent_dict[domain]}
        response = client.chat.completions.create(
            model=MODEL_DOMAIN,  # was hard-coded "lora_domain"; constant has the same default
            messages=[
                {"role": "system", "content": system_content_intent},
                {"role": "user", "content": f"{intent_content}"}
            ],
            temperature=0.3,
            max_tokens=16,
            extra_body={"chat_template_kwargs": {"enable_thinking": False}}
        )
        intent = response.choices[0].message.content

    # --- 3. Slot filling ---
    result = {"text": query, "domain": domain, "intent": intent, "slot": {}}
    domain_intent = f"{domain}-{intent}"

    if domain_intent in semantic_dict_v1:
        prompt = f"domain:{domain}\nintent:{intent}\ntext:{query}\n{semantic_dict_v1[domain_intent]}"

        semantic_response = client.chat.completions.create(
            model=MODEL_SEMANTIC,  # was hard-coded "lora_semantic"; constant has the same default
            messages=[
                {"role": "system", "content": system_content_semantic_v1},
                {"role": "user", "content": prompt},
            ],
            temperature=0.3,
            max_tokens=64,
            extra_body={"chat_template_kwargs": {"enable_thinking": False}}
        )
        slot = {}
        for line in semantic_response.choices[0].message.content.strip().split('\n'):
            # Split on the first ':' only so slot values may contain colons.
            # BUG FIX: split(':', 1) raised ValueError (HTTP 500) on any model
            # line without a colon; partition lets us skip malformed lines.
            key, sep, value = line.partition(':')
            if sep:
                slot[key.strip()] = value.strip()
        result["slot"] = slot

    return nluResponse(
        result=result
    )

# Service catalog returned by GET /services and GET /.
services_info = {
    "segment": {
        "description": "分句模型",
        "endpoints": {
            "POST /segment": "对文本进行分句，返回分句结果"
        }
    },
    "rewrite": {
        "description": "改写模型",
        "endpoints": {
            "POST /rewrite": "对文本进行上下文改写，返回改写结果",
        }
    },
    "classify": {
        "description": "分类模型",
        "endpoints": {
            "POST /classify": "对文本进行分类，返回分类结果"
        }
    },
    # BUG FIX: key and endpoint path were misspelled "semantci";
    # the actual route registered above is POST /semantic.
    "semantic": {
        "description": "语义模型",
        "endpoints": {
            "POST /semantic": "对文本进行语义提取，返回语义结果"
        }
    }
}

# Service discovery endpoint
@app.get("/services", summary="获取所有可用模型服务", tags=["服务查询"])
async def get_services():
    """Return the full catalog of model services and its size."""
    catalog = services_info
    return {
        "message": "可用模型列表",
        "services": catalog,
        "total_services": len(catalog),
    }

# Root endpoint
@app.get("/", summary="模型服务信息", tags=["概览"])
async def root():
    """Service overview: version, available service names, and doc links."""
    service_names = [name for name in services_info]
    return {
        "message": "欢迎模型服务",
        "version": "1.0.0",
        "available_services": service_names,
        "documentation": "/docs",
        "services_endpoint": "/services",
    }
if __name__ == "__main__":
    import uvicorn
    # Bind on all interfaces; serve on port 10086 (the model backend
    # defaults to port 10085, see OPENAI_API_BASE above).
    uvicorn.run(app, host="0.0.0.0", port=10086)