import ast
import json
from collections import Counter
from concurrent.futures import ProcessPoolExecutor

from langchain_ollama.llms import OllamaLLM
from langgraph.graph import StateGraph, START, END
from tqdm import tqdm
from typing_extensions import TypedDict, Any, List, Dict, Optional

from prompt import recognizer_prompt, length_checker_prompt, summary_prompt, length_incorrect_prompt
from utils import processed_data, remove_thinking_tag, process_text_fn


class State(TypedDict):
    """Shared graph state passed between the agent nodes."""
    # Final parsed answer produced by summary_agent, e.g. {"phone": "..."};
    # absent/None until the graph reaches the summary node.
    final_result : Optional[Dict[str, Any]]
    # Running message log; each entry is {"role": ..., "content": ...} where
    # role is one of "user", "recognize_agent", "length_check_agent".
    conversation_history: List[Dict[str, Any]]


def read_data(file_path):
    """Read a JSONL file and return its records as a list of dicts.

    Args:
        file_path: Path to a file containing one JSON object per line.

    Returns:
        A list of parsed objects, one per non-empty line.

    Raises:
        json.JSONDecodeError: If a line is not valid JSON.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        # json.loads replaces the original eval(): eval executes arbitrary
        # code from the input file, which is unsafe and slower. Blank lines
        # (e.g. a trailing newline) are skipped instead of raising.
        return [json.loads(line) for line in file if line.strip()]


def recognize_agent(state:State):
    """Ask the recognizer LLM for the next answer and append it to the history.

    Prior turns are replayed to the model: user input and length-check
    feedback both become "user" turns, earlier recognizer replies become
    "assistant" turns; any other role is skipped.

    Returns:
        A partial state update with the extended conversation history.
    """
    model = OllamaLLM(
        model="qwen3:1.7b",
        base_url="http://localhost:11434",
        temperature=0.8,
    )

    history = state.get("conversation_history", [])
    # Map internal roles onto the chat roles the LLM expects.
    role_map = {
        "user": "user",
        "length_check_agent": "user",
        "recognize_agent": "assistant",
    }
    messages = [{"role": "system", "content": recognizer_prompt.content}]
    for entry in history:
        chat_role = role_map.get(entry["role"])
        if chat_role is not None:
            messages.append({"role": chat_role, "content": entry["content"]})

    reply = model.invoke(messages)
    history.append({"role": "recognize_agent", "content": reply})
    return {"conversation_history": history}

def length_check_agent(state:State):
    """Validate that the recognizer's latest answer holds an 11-digit phone.

    The last conversation message is handed to a checker LLM whose reply is
    parsed as a dict; the "phone" value's length is then verified. A YES or
    NO verdict message is appended to the conversation history.

    Returns:
        A partial state update with the extended conversation history.
    """
    llm = OllamaLLM(
        model="qwen3:1.7b",
        base_url="http://localhost:11434",
        temperature=0.7,
    )
    chat_history = state.get("conversation_history", [])
    message_list = [{"role": "system", "content": length_checker_prompt.content},
                    {"role": "user", "content": chat_history[-1]["content"]}]
    try:
        response = llm.invoke(message_list)
        # ast.literal_eval replaces eval(): the model output is untrusted
        # text and must never be executed as code.
        parsed = ast.literal_eval(remove_thinking_tag(response))
        # Pulling "phone" inside the try fixes a latent KeyError: the model
        # could return a valid dict without a "phone" key, which previously
        # escaped the except clause.
        phone = parsed["phone"]
    except Exception:
        # Best-effort fallback: a 1-char placeholder that fails the length
        # check below, so the graph loops back to the recognizer.
        phone = "1"
    if len(phone) != 11:
        content = f"""
             NO。当前识别的电话号码长度为{len(phone)},不正确，不符合11位，请继续识别。
             {length_incorrect_prompt}
        """
    else:
        content = f"""
            "is_valid":"YES",
            "phone": '{phone}'
        """
    chat_history.append({"role": "length_check_agent", "content": content})
    return {
        "conversation_history": chat_history
    }

def route_decide(state:State):
    """Pick the next graph node after a length check.

    Returns "finish" when the checker approved the number ("YES" in its
    verdict) or the dialogue already holds 10+ messages; otherwise routes
    back to "recognize_agent" for another attempt.
    """
    history = state.get("conversation_history", [])
    latest = history[-1]

    # Anything other than a checker verdict always goes back to recognition.
    if "length_check_agent" not in latest.get("role", ""):
        return "recognize_agent"

    approved = "YES" in latest.get("content", "")
    exhausted = len(history) >= 10
    return "finish" if approved or exhausted else "recognize_agent"


def summary_agent(state:State):
    """Produce the final result from the last length-check verdict.

    If the checker's verdict contains "NO" or "UNKNOWN", the phone is
    reported as "无" (none) without calling the LLM. Otherwise the summary
    LLM extracts the phone dict from the last checker message.

    Returns:
        A partial state update: {"final_result": {"phone": ...}}.
    """
    chat_history = state.get("conversation_history", [])
    last_message = chat_history[-1]

    # Failure path first: no LLM call needed (the original constructed the
    # model and filtered the history even when this branch returned early).
    if "UNKNOWN" in last_message.get("content", "") or "NO" in last_message.get("content", ""):
        return {
            "final_result": {"phone": "无"}
        }

    llm = OllamaLLM(
        model="qwen3:1.7b",
        base_url="http://localhost:11434",
        temperature=0.5,
    )
    # Renamed from recognize_agent_messages: this filters length_check_agent
    # turns, not recognizer turns.
    check_messages = [m for m in chat_history if m.get("role", "") == "length_check_agent"]
    last_check_message = check_messages[-1]
    try:
        # NOTE(review): the other agents pass prompt.content as the system
        # message; summary_prompt is passed as-is here — confirm its type.
        response = llm.invoke([{"role": "system", "content": summary_prompt},
                               {"role": "user", "content": last_check_message["content"]}])
        # ast.literal_eval replaces eval(): model output is untrusted text.
        response = ast.literal_eval(remove_thinking_tag(response))
    except Exception:
        response = {"phone": "无"}
    return {
        "final_result": response
    }


def build_graph():
    """Assemble the recognize -> length-check -> (loop | summary) state graph.

    Returns:
        An uncompiled StateGraph; callers compile it before invoking.
    """
    builder = StateGraph(State)

    # Register all agent nodes.
    for node_name, node_fn in (
        ("recognize_agent", recognize_agent),
        ("length_check_agent", length_check_agent),
        ("summary_agent", summary_agent),
    ):
        builder.add_node(node_name, node_fn)

    # Wire the flow: recognition feeds the checker, which either loops back
    # or hands off to the summary node.
    builder.add_edge(START, "recognize_agent")
    builder.add_edge("recognize_agent", "length_check_agent")
    builder.add_conditional_edges(
        "length_check_agent",
        route_decide,
        {"finish": "summary_agent", "recognize_agent": "recognize_agent"},
    )
    builder.add_edge("summary_agent", END)

    return builder


def fetch_one(data):
    """Run one full graph pass over a record and return the extracted phone.

    Args:
        data: A record dict with a "conversation" field.

    Returns:
        The phone string from the graph's final_result.
    """
    # Renamed local (was `processed_data`) so it no longer shadows the
    # module-level import of the same name.
    cleaned = process_text_fn(data["conversation"])
    app = build_graph().compile()
    final_state = app.invoke(
        {"conversation_history": [{"role": "user", "content": cleaned}]}
    )
    return final_state['final_result']['phone']

def main():
    """Extract a phone number for every record in b.jsonl.

    Each record is processed 5 times in parallel worker processes and the
    majority-vote phone number is appended to result.jsonl. Failures on a
    record are logged and skipped (best-effort batch processing).
    """
    lines = read_data('b.jsonl')
    # Context managers guarantee the output file and the pool are released
    # even on an unexpected error (the original leaked the file handle on an
    # exception), and the 5-process pool is now created once instead of
    # being rebuilt and torn down for every record.
    with open("result.jsonl", "a+") as out, \
            ProcessPoolExecutor(max_workers=5) as executor:
        for line in tqdm(lines):
            try:
                futures = [executor.submit(fetch_one, line) for _ in range(5)]
                results = [future.result() for future in futures]
                # Majority vote across the 5 runs; most_common() performs the
                # same stable sort-by-count-descending as the original
                # hand-rolled sorted(Counter(...).items(), ...).
                number = Counter(results).most_common()[0][0]
                # NOTE(review): writes a Python-dict repr, not JSON, to keep
                # the original output format — confirm downstream readers
                # before switching to json.dumps.
                out.write(str({"call_id": line["call_id"], "phone": number}) + "\n")
            except Exception as e:
                # Best-effort: report the failing record and continue.
                print(line, e)
if __name__ == '__main__':
    # main() returns None, so the original print(main()) always printed
    # a spurious "None"; call it directly instead.
    main()