import os

import api_caller
import file_utils
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from concurrent.futures import TimeoutError as FutureTimeoutError


def call_api(api_name, prompt, raw_text, entities_name):
    """Dispatch a single LLM request to the named API backend.

    The full prompt is `prompt + raw_text` followed by the list of
    candidate entities, and all backticks are stripped from the reply
    (the models tend to wrap output in Markdown code fences).

    Args:
        api_name: One of "Deepseek", "Zhipu", or "Doubao".
        prompt: Instruction text prepended to the raw input.
        raw_text: The source document to extract from.
        entities_name: Candidate entity names appended to the prompt.

    Returns:
        The model's response text with backticks removed.

    Raises:
        ValueError: If `api_name` is not a supported backend.
            (Previously an unknown name silently returned None, which
            crashed callers later with an unrelated error.)
    """
    dispatch = {
        "Deepseek": api_caller.call_deepseek_api,
        "Zhipu": api_caller.call_zhipu_api,
        "Doubao": api_caller.call_doubao_api,
    }
    if api_name not in dispatch:
        raise ValueError(f"Unsupported API backend: {api_name!r}")
    full_prompt = prompt + raw_text + "\n可选的实体有：" + str(entities_name)
    return dispatch[api_name](full_prompt).replace("`", "")

def entity_extract(input_file="math.txt", entity_extraction_output_file="entity_category.txt", timeout=1000):
    """Extract (entity, category) pairs from a text file via repeated LLM calls.

    Fires 10 batches of 200 concurrent "Doubao" calls, accumulates every
    response, then parses each "name,category" line and filters the result.

    Args:
        input_file: Path of the raw text to extract entities from.
        entity_extraction_output_file: Path where "name,category" lines are saved.
        timeout: Seconds to wait for each batch of API calls to complete.

    Returns:
        List of [name, category] pairs that passed deduplication and validation.
    """
    prompt = file_utils.load_text_file("entity_extraction_prompt.txt")
    raw_text = file_utils.load_text_file(input_file)

    # Accumulate responses across ALL batches.  (Bug fix: this list used to be
    # recreated inside the batch loop, so only the final batch's 200 responses
    # survived and the other 1800 calls were discarded.)
    response_parts = []

    def run_api_call(api_name, index, prompt, raw_text):
        """Run a single API call and capture the response."""
        try:
            response = call_api(api_name, prompt, raw_text, [])
            # list.append is atomic under the GIL, so no lock is needed here.
            response_parts.append(response)
        except Exception as e:
            print(f"Error calling {api_name} API #{index}: {e}")

    for _ in range(10):
        # 200 identical "Doubao" calls per batch; index is only for error logs.
        api_calls = [("Doubao", j, prompt, raw_text) for j in range(200)]

        # Fan the batch out over a thread pool (I/O-bound work).
        with ThreadPoolExecutor(max_workers=40) as executor:
            futures = {executor.submit(run_api_call, *call) for call in api_calls}

            # Wait for the futures to complete, with a timeout.
            try:
                for future in as_completed(futures, timeout=timeout):
                    future.result()  # Raise any exceptions that occurred during execution
            except FutureTimeoutError:
                print("Timeout occurred while waiting for API calls to complete.")

    # Combine the responses from every batch.
    response = "\n".join(response_parts)

    # Each non-empty line is "name,category" (extra commas yield extra fields).
    entities = [line.split(",") for line in response.split("\n") if line != ""]

    # Deduplicate entities by name, keeping the first occurrence.
    seen_names = set()
    deduped_entity_list = []
    for entity in entities:
        if entity[0] not in seen_names:
            seen_names.add(entity[0])
            deduped_entity_list.append(entity)
    entities = deduped_entity_list

    # Names of 8 or more characters are considered invalid.
    entities = [entity for entity in entities if len(entity[0]) < 8]

    # Keep only entities with a recognized category (and at least two fields).
    valid_categories = {"基础概念", "几何学", "解析几何", "概率统计", "逻辑证明", "高等数学", "函数方程", "代数"}
    entities = [entity for entity in entities
                if len(entity) >= 2 and entity[1] in valid_categories]

    # Save the entity extraction results as "name,category" lines.
    result = "".join(entity[0] + "," + entity[1] + "\n" for entity in entities)
    file_utils.save_text_file(entity_extraction_output_file, result)

    return entities


def relation_extract(input_file="math.txt", output_file="relation.txt", entities=None, timeout=1000):
    """Extract relations between known entities via repeated LLM calls.

    Fires 10 batches of 200 concurrent "Doubao" calls, then keeps only
    "head,predicate,tail" lines whose head and tail are known entities,
    annotating each with the head's and tail's categories.

    Args:
        input_file: Path of the raw text to extract relations from.
        output_file: Path where "head,predicate,tail,head_cat,tail_cat"
            lines are saved.
        entities: List of [name, category] pairs from `entity_extract`.
            Defaults to an empty list.  (Bug fix: was a mutable default
            argument shared across calls.)
        timeout: Seconds to wait for each batch of API calls to complete.

    Returns:
        List of [head, predicate, tail, head_category, tail_category] lists.
    """
    if entities is None:
        entities = []

    prompt = file_utils.load_text_file("relation_extraction_prompt.txt")
    raw_text = file_utils.load_text_file(input_file)

    # Name -> category lookup; also gives O(1) membership tests below.
    category_by_name = {entity[0]: entity[1] for entity in entities}
    entities_name = list(category_by_name)

    # Responses accumulate here across all batches.
    response_parts = []

    def run_api_call(api_name, index, prompt, raw_text, entities_name):
        """Run a single relation-extraction API call and capture the response."""
        try:
            response = call_api(api_name, prompt, raw_text, entities_name)
            response_parts.append(response)
        except Exception as e:
            print(f"Error calling {api_name} API #{index}: {e}")

    for _ in range(10):
        api_calls = [("Doubao", j, prompt, raw_text, entities_name) for j in range(200)]

        # Fan the batch out over a thread pool (I/O-bound work).
        with ThreadPoolExecutor(max_workers=50) as executor:
            futures = {executor.submit(run_api_call, *call) for call in api_calls}

            # Wait for the futures to complete, with a timeout.
            try:
                for future in as_completed(futures, timeout=timeout):
                    future.result()  # Raise any exceptions that occurred during execution
            except FutureTimeoutError:
                print("Timeout occurred while waiting for API calls to complete.")

    # Combine the responses once, after all batches finish.
    response = "\n".join(response_parts)

    # Build relation tuples: [head, predicate, tail, head_cat, tail_cat].
    relation_list = []
    for line in response.split("\n"):
        relation = line.split(",")
        if len(relation) < 3:
            continue
        if relation[0] in category_by_name and relation[2] in category_by_name:
            # Append categories in a fixed head-then-tail order.  (Bug fix:
            # the order previously depended on the entities list order, so
            # relation[3] could be the tail's category instead of the head's.)
            relation.append(category_by_name[relation[0]])
            relation.append(category_by_name[relation[2]])
            relation_list.append(relation)

    # Predicates of 7 or more characters are considered invalid.
    relation_list = [relation for relation in relation_list if len(relation[1]) < 7]

    # Deduplicate on the (head, tail) entity pair, keeping the first occurrence.
    seen_pairs = set()
    deduped_relation_list = []
    for relation in relation_list:
        pair = (relation[0], relation[2])
        if pair not in seen_pairs:
            seen_pairs.add(pair)
            deduped_relation_list.append(relation)
    relation_list = deduped_relation_list

    # Save the relation extraction results.
    result = "".join(
        relation[0] + "," + relation[1] + "," + relation[2] + "," + relation[3] + "," + relation[4] + "\n"
        for relation in relation_list
    )
    file_utils.save_text_file(output_file, result)

    return relation_list


def llm_extraction():
    """Run the full pipeline: extract entities, then relations, from ../raw_data/math.txt.

    Returns:
        Tuple of (entities, relations) as produced by `entity_extract` and
        `relation_extract`.
    """
    raw_data_root = f"../raw_data/"
    input_path = os.path.join(raw_data_root, "math.txt")
    entity_output_path = os.path.join(raw_data_root, "entity_category.txt")
    relation_output_path = os.path.join(raw_data_root, "relation.txt")

    entities = entity_extract(
        input_file=input_path,
        entity_extraction_output_file=entity_output_path,
    )
    relations = relation_extract(
        input_file=input_path,
        output_file=relation_output_path,
        entities=entities,
    )

    return entities, relations


def main():
    """Script entry point: run the LLM extraction pipeline."""
    llm_extraction()


if __name__ == "__main__":
    main()