# -*- coding: utf-8 -*-
from globalvariables import *
import re
from outline import Outline, Section, is_valid_section_number
import asyncio
import json
import logging
import ast
from outline import *
import random
import aiohttp
from json_repair import repair_json
import numpy as np

log = logging.getLogger(__name__)


def is_all_jings(s):
    """Return True when *s* consists entirely of one or more '#' characters."""
    return re.match(r'^#+$', s) is not None


def is_valid_section_title(s):
    """A valid section title contains either an ASCII or a full-width colon."""
    return any(colon in s for colon in (":", "："))


def get_parent_id(section_number):
    """Derive the parent of a dotted section number.

    "1.2.3" -> "1.2"; a top-level number like "1" parents to "root".
    """
    parent_parts = section_number.split(".")[:-1]
    return ".".join(parent_parts) if parent_parts else "root"


def parseText2Outline(title="", outline_text=""):
    """Parse a markdown-style numbered outline text into an Outline tree.

    Each outline line is expected to look like ``- 1.2 Some title``.  A root
    node holding *title* is created first, then every parsed section is
    attached under its parent (derived from the dotted section number).
    Sections whose parent has not been added yet are silently dropped.
    Finally, top-level "conclusion"/"reference" style sections are removed.

    Args:
        title: Title stored on the synthetic root node.
        outline_text: Raw outline text, one section per line.

    Returns:
        Outline: the populated outline tree.
    """
    line_pattern = re.compile(r'^\s*- (\d+(?:\.\d+)*) (.*)$')

    def _extract_section_info(text):
        # Match "- <dotted number> <title>"; non-section lines yield None.
        match = line_pattern.match(text)
        if match:
            return {"sec_number": match.group(1), "title": match.group(2).strip()}
        return None

    outline = Outline()
    outline.add_node(Section(section_identifier="root", title=title))

    for line in outline_text.split("\n"):
        section_info = _extract_section_info(line.strip())
        if not section_info:
            continue
        sec_number = section_info["sec_number"]
        section = Section(section_identifier=sec_number, title=section_info["title"])
        # Module-level helper: "1.2" -> "1", top-level numbers -> "root".
        parent_id = get_parent_id(sec_number)
        # Node identifiers join levels with "_"; attach only when the parent exists.
        parent_identifier = "_".join(parent_id.split("."))
        if outline.get_node(parent_identifier):
            outline.add_node(section, parent_identifier)

    # Remove top-level conclusion / reference sections.
    for child in outline.get_children():
        child_title = child.data["title"]
        if (child_title in ("conclusion", "Conclusion", "结论", "总结")
                or "reference" in child_title.lower()
                or "参考文献" in child_title):
            outline.remove_sub_outline(child.identifier)
    return outline


def extract_descriptions(tree):
    """Collect every non-empty node description from *tree*.

    Walks all nodes; for each node whose ``data`` dict carries a non-empty
    ``"description"``, a single-entry ``{title: description}`` dict is added.

    Args:
        tree: an outline/treelib-style tree whose nodes expose ``data`` dicts.

    Returns:
        list[dict]: one single-entry dict per described node, keyed by the
        node's stringified title.
    """
    outline_list = []
    for node in tree.all_nodes():
        data = node.data
        if "description" in data and data["description"]:
            # Key the entry directly by this node's title; the original code
            # performed a redundant get_node/rsearch lookup for the same node.
            outline_list.append({str(data["title"]): data["description"]})
    return outline_list


def try_parse_ast_to_json(function_string: str) -> tuple[str, dict]:
    """Parse a function-call string via the AST and extract keyword arguments.

    Example input:
        "tool_call(first_int={'title': 'First Int', 'type': 'integer'})"

    Returns:
        A pair of (human-readable dump of the call, dict mapping keyword
        names to literal-evaluated values where possible).
    """
    parsed = ast.parse(str(function_string).strip())
    dump_text = ""
    extracted: dict = {}

    for candidate in ast.walk(parsed):
        if not isinstance(candidate, ast.Call):
            continue

        # Resolve a display name for the callable.
        func = candidate.func
        if isinstance(func, ast.Name):
            call_name = func.id
        elif isinstance(func, ast.Attribute):
            call_name = func.attr
        else:
            call_name = "UnknownFunction"

        kwargs_map = {kw.arg: kw.value for kw in candidate.keywords}
        dump_text += f"Function Name: {call_name}\r\n"

        for arg, value in kwargs_map.items():
            dump_text += f"Argument Name: {arg}\n"
            dump_text += f"Argument Value: {ast.dump(value)}\n"
            try:
                extracted[arg] = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                # Not a literal: keep the raw AST dump instead.
                extracted[arg] = ast.dump(value)

    return dump_text, extracted


def try_parse_json_object(input: str) -> tuple[str, dict]:
    """Best-effort parsing of an LLM response into a JSON object.

    Strategy, in order: plain ``json.loads``; string cleanup (doubled
    braces, quoted arrays, escapes, markdown fences) followed by another
    ``json.loads``; third-party ``repair_json``; and finally AST-based
    keyword extraction via ``try_parse_ast_to_json``.

    NOTE(review): the parameter name ``input`` shadows the builtin.

    Returns:
        A pair (possibly-cleaned JSON string, parsed dict — {} on failure).
    """
    # Sometimes, the LLM returns a json string with some extra description, this function will clean it up.

    result = None
    try:
        # Try parse first
        result = json.loads(input)
    except json.JSONDecodeError:
        log.info("Warning: Error decoding faulty json, attempting repair")

    # NOTE: a falsy parse result ({}, [], 0, "") also falls through to the
    # cleanup/repair path below, not just a failed parse.
    if result:
        return input, result

    # Clean up json string.
    # NOTE(review): the '"\\\\" -> " "' replacement runs before the
    # '"\\\\n" -> " "' one, so the literal-backslash-n branch can never match.
    input = (
        input.replace("{{", "{")
            .replace("}}", "}")
            .replace('"[{', "[{")
            .replace('}]"', "}]")
            .replace("\\", " ")
            .replace("\\n", " ")
            .replace("\n", " ")
            .replace("\r", "")
            .strip()
    )

    # Remove JSON Markdown Frame
    if input.startswith("```json"):
        input = input[len("```json"):]
    if input.startswith("```"):
        input = input[len("```"):]
    if input.endswith("```"):
        input = input[: len(input) - len("```")]

    try:
        result = json.loads(input)
    except json.JSONDecodeError:
        # Fixup potentially malformed json string using json_repair.
        json_info = str(repair_json(json_str=input, return_objects=False))

        # Generate JSON-string output using best-attempt prompting & parsing techniques.
        try:

            # If repair_json shrank the text, assume it dropped content and
            # fall back to AST-based keyword extraction on the original string.
            if len(json_info) < len(input):
                json_info, result = try_parse_ast_to_json(input)
            else:
                result = json.loads(json_info)

        except json.JSONDecodeError:
            log.exception("error loading json, json=%s", input)
            return json_info, {}
        else:
            if not isinstance(result, dict):
                log.exception("not expected dict type. type=%s:", type(result))
                return json_info, {}
            return json_info, result
    else:
        return input, result


def claim_format_transform(chunk_claims, claims_in_papers, outline):
    """Regroup extracted claim chunks under the outline's leaf sections.

    A chunk is assigned to a leaf when the leaf's dotted section number
    (e.g. " 1.2 ", underscores replaced by dots) appears inside the chunk's
    concatenated claim text.

    Args:
        chunk_claims: list of {chunk_id: [claim strings]} batches, where
            chunk ids look like "<paper_id>_<chunk_index>".
        claims_in_papers: list of {"paper_id": ..., "chunk_claims": [...]}.
        outline: outline tree providing ``leaves()``.

    Returns:
        dict mapping each leaf identifier to a list of
        {"chunk_id": ..., "claims_chunk": ...} dicts.
    """
    leaf_nodes = outline.leaves()

    # Map each leaf id to the ids of claim chunks mentioning its section number.
    chunk_ids_by_leaf = {leaf.identifier: [] for leaf in leaf_nodes}
    for batch in chunk_claims:
        for chunk_id, claims in batch.items():
            joined_claims = ''.join(claims)
            for leaf in leaf_nodes:
                if f" {leaf.identifier.replace('_', '.')} " in joined_claims:
                    chunk_ids_by_leaf[leaf.identifier].append(chunk_id)

    claims_in_leaves = {}
    for leaf in leaf_nodes:
        entries = []
        for chunk_id in chunk_ids_by_leaf[leaf.identifier]:
            paper_id, chunk_index = chunk_id.split('_')
            text = ''
            # Locate the claim text for this chunk inside its source paper.
            for paper in claims_in_papers:
                if paper['paper_id'] == paper_id:
                    text = '\n'.join(paper['chunk_claims'][int(chunk_index)])
                    break
            entries.append({'chunk_id': chunk_id, 'claims_chunk': text})
        claims_in_leaves[leaf.identifier] = entries
    return claims_in_leaves


def ext_name_year(original_filename):
    """Extract a venue name and year from a metadata filename.

    Conference files look like ``Conf_Paper_Meta_Data_<Name>_<year>`` and
    yield (name, year-string); journal files look like
    ``Journal_Paper_Meta_Data_<Name_with_underscores>_with...`` and yield
    (name-with-spaces, None).  Unrecognized filenames yield (None, None).
    """
    pattern = r'Conf_Paper_Meta_Data_([A-Za-z]+)_?(\d{4})|Journal_Paper_Meta_Data_([A-Za-z_]+)_with'
    found = re.search(pattern, original_filename)
    if not found:
        return None, None
    if found.group(1):
        # Conference branch: venue name plus four-digit year.
        return found.group(1), found.group(2)
    # Journal branch: underscores become spaces; journals carry no year.
    return found.group(3).replace('_', ' '), None


def Increasetitle2outline(outline, Title_abstract_conclusions_dict):
    """Write title/abstract onto the outline root and append a Conclusions node.

    Mutates *outline* in place: the root node's data gets the title and
    abstract, and a new "Conclusions" section is added as the root's child.

    Args:
        outline: outline tree to mutate.
        Title_abstract_conclusions_dict: dict with 'title', 'abstract' and
            'conclusions' keys.
    """
    root = outline.get_node(outline.root)
    root.data['title'] = Title_abstract_conclusions_dict['title']
    root.data['content'] = Title_abstract_conclusions_dict['abstract']

    # Build the conclusions section under the next available child identifier.
    new_identifier = outline.get_last_child_identifier()
    conclusions = Section(
        section_identifier=new_identifier,
        title="Conclusions",
        content=Title_abstract_conclusions_dict['conclusions'],
    )
    outline.add_node(conclusions, outline.root)



def extract_and_split_keywords(string_list):
    """Tokenize keyword strings into filtered word lists.

    Strings containing Chinese characters are skipped entirely.  For the
    rest: parenthesized text is removed, hyphens become spaces, tokens are
    singularized (trailing 's' stripped), and tokens that are too short
    (<3 chars) or in the stop-word set are discarded.

    Returns:
        list[list[str]]: one token list per surviving input string.
    """
    # Stop words; plural forms are compared after the trailing 's' is stripped.
    stopwords = {
        "in", "of", "on", "at", "to", "with", "and", "or", "for", "a", "the",
        "an", "via", "Method", "Algorithm", "Technology", "Technologie", "Function",
        "Approach", "Approache", "Proces", "Architecture", "Task", "Framework", "Augmenting",
        "Capabilitie", "Capability", "Capacity", "Power", "Performance", "Scale",
        "Improving", "Improve", "Ability", "Abilitie", "Efficiency", "Enhancing",
        "Boosting", "Enhance", "Boost", "Techniques", "Technique", "Paradigm",
        "Approach", "Mitigating", "Mitigate", "Alleviating", "Alleviate", "System",
        "Reducing", "Reduce", "Phenomena", "Addressing", "Addres", "Curb", "Curbing",
    }
    results = []
    for raw in string_list:
        # Skip anything containing CJK characters.
        if re.search(r'[\u4e00-\u9fff]', raw):
            continue

        # Drop parenthesized text, then turn hyphens into spaces.
        cleaned = re.sub(r'\(.*?\)', '', raw)
        cleaned = re.sub(r'-', ' ', cleaned)

        tokens = re.findall(r'\b[a-zA-Z]+(?:-[a-zA-Z]+)*\b', cleaned)
        # Naive singularization: strip one trailing 's'.
        singular = (re.sub(r's$', '', token) for token in tokens)
        kept = [word for word in singular if len(word) > 2 and word not in stopwords]
        results.append(kept)
    return results


def divide_data(dictionary, m):
    """Split a dict of category -> items into *m* balanced random parts.

    Each category's items are distributed as evenly as possible across the
    m parts (the first ``len(items) % m`` parts receive one extra item),
    with the per-category order randomized.

    Args:
        dictionary: mapping from category key to a list of items.
        m: number of parts to produce (must be >= 1).

    Returns:
        list[dict]: m dicts holding the distributed items; a category key
        appears in a part only if that part received at least one item.
    """
    result = [dict() for _ in range(m)]

    for key, items in dictionary.items():
        # Shuffle a copy so the caller's lists are not mutated in place
        # (the original implementation shuffled the input lists directly).
        shuffled = list(items)
        random.shuffle(shuffled)

        # Even split: the first `extra` parts get one additional item.
        base, extra = divmod(len(shuffled), m)
        start = 0
        for i in range(m):
            count = base + (1 if i < extra else 0)
            if count:
                result[i].setdefault(key, []).extend(shuffled[start:start + count])
            start += count

    return result



async def get_embedding(input: list[str], semaphore, model: str = 'embedding-3', dimensions=2048,
                        token="efedb300c8468ae1315a5474228100f6.cZbs8qwDylqU2GzD"):
    """Fetch embeddings for a batch of strings from the BigModel API.

    NOTE(review): the default ``token`` is a credential hardcoded in source;
    it should be moved to configuration/environment.

    Args:
        input: batch of texts to embed.
        semaphore: asyncio semaphore limiting concurrent requests.
        model: embedding model name to call.
        dimensions: requested embedding dimensionality.
        token: API key sent as a Bearer token.

    Returns:
        list of embedding vectors on success, otherwise None (errors are
        printed, not raised — deliberate best-effort behavior).
    """
    url = 'https://open.bigmodel.cn/api/paas/v4/embeddings'
    headers = {
        "Authorization": "Bearer " + token,  # API Key
        "Content-Type": "application/json"
    }
    payload = {
        "model": model,
        "input": input,
        "dimensions": dimensions,
    }
    async with semaphore:
        # 90-second overall timeout per request.
        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=90)) as session:
            try:
                async with session.post(url, json=payload, headers=headers) as response:
                    if response.status != 200:
                        print(f"Error: {response.status}, {await response.text()}")
                        return None
                    res = await response.json()
                    data = res["data"]
                    return [entry["embedding"] for entry in data]
            except Exception as e:
                print(f"KKError during API call: {e}")
                return None


async def get_embeddings_for_list(input_list: list[str], semaphore, token='efedb300c8468ae1315a5474228100f6.cZbs8qwDylqU2GzD'):
    """Embed a list of strings by fanning out batched API calls.

    The input is chunked into batches of 64, all batches are embedded
    concurrently via :func:`get_embedding`, and any failed batch is
    replaced by zero vectors so the output length matches the input length.

    NOTE(review): default ``token`` is a hardcoded credential — move to config.

    Returns:
        list of 2048-dim embedding vectors (zeros for failed batches).
    """
    batch_size = 64
    offsets = range(0, len(input_list), batch_size)
    tasks = [
        asyncio.create_task(
            get_embedding(input=input_list[off:off + batch_size], token=token, semaphore=semaphore)
        )
        for off in offsets
    ]
    # Run all batches concurrently.
    batch_results = await asyncio.gather(*tasks)

    embeddings = []
    for off, batch_result in zip(offsets, batch_results):
        if batch_result is None:
            # Placeholder zero vectors for a failed batch.
            failed_len = min(batch_size, len(input_list) - off)
            embeddings.extend([[0] * 2048] * failed_len)
        else:
            embeddings.extend(batch_result)
    return embeddings


def cosine_similarity(vector1, vector2):
    """Return the cosine similarity between two vectors.

    Note: yields nan/inf when either vector has zero norm (no guard,
    matching the original behavior).
    """
    numerator = np.dot(vector1, vector2)
    denominator = np.linalg.norm(vector1) * np.linalg.norm(vector2)
    return numerator / denominator

def extract_sup_numbers(text):
    """Extract citation numbers from ``<sup>1, 2</sup>`` style markers.

    Finds every ``<sup>...</sup>`` span whose content is digits, commas and
    whitespace, and returns all contained integers in order of appearance.
    """
    sup_pattern = r'<sup>([\d,\s]+)</sup>'
    numbers = []
    for content in re.findall(sup_pattern, text):
        # Split on commas and keep only clean numeric tokens.
        for token in content.split(','):
            candidate = token.strip()
            if candidate.isdigit():
                numbers.append(int(candidate))
    return numbers