import sys
from yachalk import chalk
sys.path.append("..")
import json
import client as client
import concurrent.futures
from typing import Optional, List, Dict

# Global timeout and chunking parameters.
DEFAULT_TIMEOUT = 60    # per-chunk timeout, in seconds
CHUNK_SIZE    = 2000   # maximum number of characters sent per call
OVERLAP       = 200    # characters shared between consecutive chunks

def _call_with_timeout(func, timeout: float, *args, **kwargs):
    """
    在单独线程中执行 func，超时后取消并抛出 TimeoutError。
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(func, *args, **kwargs)
        try:
            return future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            future.cancel()
            raise

def extractConcepts(
    prompt: str,
    metadata: Optional[Dict] = None,
    model: str = "mistral-openorca:latest",
    timeout: float = DEFAULT_TIMEOUT
) -> Optional[List[Dict]]:
    """
    Extract key concepts from *prompt* via chunked calls to client.generate.

    The prompt is split into overlapping chunks of at most CHUNK_SIZE
    characters (OVERLAP characters shared between neighbours).  Each chunk is
    sent to the model with a hard per-chunk timeout; on timeout or any
    generation/parsing failure the loop stops immediately and the concepts
    accumulated so far are returned.

    Args:
        prompt:   full input text to analyse.
        metadata: extra key/value pairs merged into every extracted concept
                  dict; defaults to no extra keys.
        model:    model name passed to client.generate (None falls back to
                  the default, mirroring graphPrompt).
        timeout:  per-chunk time limit in seconds.

    Returns:
        A list of concept dicts — possibly empty if the first chunk fails.
    """
    # None-default instead of a mutable `{}` default, which would be a single
    # dict shared across all calls.
    if metadata is None:
        metadata = {}
    if model is None:
        model = "mistral-openorca:latest"

    # 1. System prompt describing the extraction task and output format.
    SYS_PROMPT = (
        "Your task is extract the key concepts (and non personal entities) mentioned in the given context. "
        "Extract only the most important and atomistic concepts, if  needed break the concepts down to the simpler concepts."
        "Categorize the concepts in one of the following categories: "
        "[event, concept, place, object, document, organisation, condition, misc]\n"
        "Format your output as a list of json with the following format:\n"
        "[\n"
        "   {\n"
        '       "entity": The Concept,\n'
        '       "importance": The concontextual importance of the concept on a scale of 1 to 5 (5 being the highest),\n'
        '       "category": The Type of Concept,\n'
        "   }, \n"
        "{ }, \n"
        "]\n"
    )

    # 2. Split the long prompt into overlapping chunks.
    total_len = len(prompt)
    starts = list(range(0, total_len, CHUNK_SIZE - OVERLAP))
    chunks = [prompt[i : min(i + CHUNK_SIZE, total_len)] for i in starts]

    all_concepts: List[Dict] = []
    for idx, chunk in enumerate(chunks, 1):
        print(f"[extractConcepts] Processing chunk {idx}/{len(chunks)}…")
        try:
            # 3. One generation call per chunk, with an enforced timeout.
            response, _ = _call_with_timeout(
                client.generate,
                timeout,
                model_name=model,
                system=SYS_PROMPT,
                prompt=chunk
            )
            items = json.loads(response)
            # Merge caller-supplied metadata into each concept dict.
            items = [dict(item, **metadata) for item in items]
        except concurrent.futures.TimeoutError:
            print(f"[extractConcepts] Chunk {idx} 超时 {timeout}s，停止并返回已累积部分")
            break
        except Exception as e:
            print(f"[extractConcepts] Chunk {idx} 解析或生成失败：{e}，停止并返回已累积部分")
            break

        # 4. Accumulate this chunk's results.
        all_concepts.extend(items)

    return all_concepts


def graphPrompt(
    input: str,
    metadata: Optional[Dict] = None,
    model: str = "mistral-openorca:latest",
    timeout: float = DEFAULT_TIMEOUT
) -> Optional[List[Dict]]:
    """
    Extract a term/relationship ontology graph from *input* in one timed call
    to client.generate.

    Args:
        input:    context chunk to extract the graph from.  (Name shadows the
                  builtin, but it is part of the public signature and kept.)
        metadata: extra key/value pairs merged into every edge dict; defaults
                  to no extra keys.
        model:    model name; None falls back to the default model.
        timeout:  overall time limit in seconds for the single call.

    Returns:
        List of edge dicts on success, [] on timeout, None on any other
        generation or JSON-parsing error.
    """
    # None-default instead of a mutable `{}` default, which would be a single
    # dict shared across all calls.
    if metadata is None:
        metadata = {}
    if model is None:
        model = "mistral-openorca:latest"

    SYS_PROMPT = (
        "You are a network graph maker who extracts terms and their relations from a given context. "
        "You are provided with a context chunk (delimited by ```) Your task is to extract the ontology "
        "of terms mentioned in the given context. These terms should represent the key concepts as per the context. \n"
        "Thought 1: While traversing through each sentence, Think about the key terms mentioned in it.\n"
        "\tTerms may include object, entity, location, organization, person, \n"
        "\tcondition, acronym, documents, service, concept, etc.\n"
        "\tTerms should be as atomistic as possible\n\n"
        "Thought 2: Think about how these terms can have one on one relation with other terms.\n"
        "\tTerms that are mentioned in the same sentence or the same paragraph are typically related to each other.\n"
        "\tTerms can be related to many other terms\n\n"
        "Thought 3: Find out the relation between each such related pair of terms. \n\n"
        "Format your output as a list of json. Each element of the list contains a pair of terms "
        "and the relation between them, like the following: \n"
        "[\n"
        "   {\n"
        '       "node_1": "A concept from extracted ontology",\n'
        '       "node_2": "A related concept from extracted ontology",\n'
        '       "edge": "relationship between the two concepts, node_1 and node_2 in one or two sentences"\n'
        "   }, {...}\n"
        "]"
    )
    USER_PROMPT = f"context: ```{input}``` \n\n output: "

    try:
        # Wrap the generation call with a hard timeout.
        response, _ = _call_with_timeout(
            client.generate,
            timeout,
            model_name=model,
            system=SYS_PROMPT,
            prompt=USER_PROMPT
        )
        result = json.loads(response)
        # Merge caller-supplied metadata into each edge dict.
        return [dict(item, **metadata) for item in result]

    except concurrent.futures.TimeoutError:
        print(f"[graphPrompt] ⚠️ 超时 {timeout}s，返回空列表")
        return []

    except Exception as e:
        print("\n\n[graphPrompt] ERROR ### Here is the buggy response or exception:", e)
        return None
