import os
import logging
import pandas as pd
from typing import Dict, List
from pathlib import Path
from graph_analyzer.graph import GraphNode


# A single node of the character trie.
class TrieNode:
    """One character position in the trie; terminal nodes carry a payload."""

    def __init__(self):
        # Maps a character to the child TrieNode reached by consuming it.
        self.children = {}
        # True when some inserted key ends exactly at this node.
        self.is_end_of_key = False
        # Payload stored by insert() for a complete key; None otherwise.
        self.value = None

# Character trie used to locate known keys as substrings of operator names.
class Trie:
    """Prefix tree mapping string keys to arbitrary payload values."""

    def __init__(self):
        self.root = TrieNode()

    def insert(self, key, value):
        """Store *value* under *key*, creating intermediate nodes on demand."""
        current = self.root
        for ch in key:
            current = current.children.setdefault(ch, TrieNode())
        current.is_end_of_key = True
        current.value = value

    def search_in_string(self, string):
        """Return the payloads of every inserted key that occurs in *string*.

        Each starting offset is tried in order; matches from one offset are
        reported shortest-first, so results follow (offset, length) order.
        A key occurring at several offsets is reported once per occurrence.
        """
        hits = []
        for start in range(len(string)):
            current = self.root
            # Walk character by character until the trie has no matching child.
            for ch in string[start:]:
                nxt = current.children.get(ch)
                if nxt is None:
                    break
                current = nxt
                if current.is_end_of_key:
                    hits.append(current.value)
        return hits

# # Legacy matcher kept for reference (superseded by match_codes / match_names)
# def match_values(trie, name):
#     # Find all keys that occur as substrings of name
#     matched_values = trie.search_in_string(name)
#     return '\n\n'.join(matched_values)

def match_codes(trie, name):
    """Join the code-stack lines of every trie key found inside *name*.

    Each matched node contributes its ``code_info`` lines joined by newlines;
    the per-node blocks are then joined by newlines as well.
    """
    hits = trie.search_in_string(name)
    return '\n'.join('\n'.join(hit.code_info) for hit in hits)


def match_names(trie, name):
    """Join the scope names of every trie key found inside *name*."""
    return '\n'.join(hit.scope for hit in trie.search_in_string(name))


def complex_map(df, match_dict):
    """Annotate *df* in place with 'Code Stack' and 'Scope Name' columns.

    Args:
        df: DataFrame containing an 'Op Name' column.
        match_dict: normalized scope name -> graph node mapping.

    Returns:
        The same DataFrame with the two derived columns added.
    """
    # Build the trie once so every row lookup reuses it.
    trie = Trie()
    for scope_key, node in match_dict.items():
        trie.insert(scope_key, node)

    df['Code Stack'] = df['Op Name'].map(lambda op: match_codes(trie, op))
    df['Scope Name'] = df['Op Name'].map(lambda op: match_names(trie, op))
    return df


def find_npy_files(directory):
    """Collect .npy files under *directory*.

    Accepts either a directory to search recursively or a direct path to a
    single .npy file. Also accepts any os.PathLike, not just str (the original
    called str.endswith on the argument, which broke for pathlib.Path inputs).

    Returns:
        A list of pathlib.Path objects: the resolved path for the single-file
        case, or the paths produced by Path.rglob otherwise.
    """
    # Normalize str / os.PathLike to a plain string path.
    directory = os.fspath(directory)
    # A direct path to an existing .npy file is returned as-is (resolved).
    if directory.endswith('.npy') and os.path.isfile(directory):
        return [Path(directory).resolve()]
    return list(Path(directory).rglob('*.npy'))


def write_to_csv(param: Dict, output_dir: str, append: bool):
    """Persist binding results to <output_dir>/mapping.csv.

    Args:
        param: maps a file path to a (code stacks, scope name) pair.
        output_dir: directory for mapping.csv; created if missing.
        append: when True and the file exists, append non-empty rows without
            a header; otherwise (over)write the file with a header.
    """
    os.makedirs(output_dir, exist_ok=True)
    target = os.path.join(output_dir, "mapping.csv")
    rows = [(path, code, scope) for path, (code, scope) in param.items()]
    frame = pd.DataFrame(rows, columns=['File Path', 'Code Stacks', 'Scope Name'])
    if append and os.path.exists(target):
        # Drop rows where both result columns are empty before appending.
        keep = (frame['Code Stacks'] != '') | (frame['Scope Name'] != '')
        frame[keep].to_csv(target, mode='a', header=False, index=False)
    else:
        # Fresh write (or the file does not exist yet): include the header.
        frame.to_csv(target, mode='w', header=True, index=False)


def find_statistic_files(directory):
    """Recursively find every statistic.csv file below *directory*.

    Bug fix: the previous implementation called glob.glob without ever
    importing glob (NameError at runtime), and even with the import the
    '**' pattern would not recurse because recursive=True was not passed.
    Path.rglob (pathlib is already imported at module level) does the
    recursive walk correctly, including files directly in *directory*.

    Returns:
        A list of path strings, matching the List[str] expectation of
        bind_for_statistic.
    """
    return [str(path) for path in Path(directory).rglob("statistic.csv")]


def bind_for_statistic(statistic_files: List[str], match_dict: Dict):
    """Rewrite each statistic CSV in place with code-stack / scope columns.

    Args:
        statistic_files: paths of statistic.csv files to process.
        match_dict: normalized scope name -> graph node mapping.
    """
    for statistic_file in statistic_files:
        frame = complex_map(pd.read_csv(statistic_file), match_dict)
        logging.info(f"Processing {statistic_file} completed, code stack saved in {statistic_file}")
        frame.to_csv(statistic_file, index=False)


def bind_code_info_for_data(input_dir: str, nodes: Dict[str, GraphNode]) -> Dict[str, tuple]:
    """Bind source-code stack and scope info to dump files under *input_dir*.

    For each .npy dump file, the op scope encoded in its file name is matched
    against the graph nodes' normalized scope names. When no .npy files exist,
    statistic.csv files are annotated in place instead.

    Args:
        input_dir: directory containing .npy dumps or statistic.csv files.
        nodes: graph-node mapping; subgraph nodes are ignored.

    Returns:
        Maps each .npy real path to a (code stacks, scope names) pair; empty
        when the statistic-file path was taken.

    Fixes vs. the original:
      * the trie was rebuilt from match_dict for every .npy file — it is
        invariant, so it is now built once before the loop;
      * a missing mapping.csv row led to os.path.splitext(None) (TypeError) —
        such files are now skipped with a warning;
      * file names without the expected '<type>.<scope>...' dot structure
        raised IndexError — now skipped with a warning;
      * print() replaced with the module's logging, and the "find.csv" text
        corrected to mapping.csv (the file actually read).
    """
    match_dict = {}
    for node in nodes.values():
        # Skip subgraph container nodes.
        if node.is_subgraph:
            continue
        # Normalize the scope: '/' separators appear as '_' in op names.
        scope_name = node.scope.replace("/", "_")
        match_dict[scope_name] = node
    npy_files = find_npy_files(input_dir)

    bind_result = {}
    if not npy_files:
        # No raw dumps: annotate the statistic CSVs in place instead.
        statistic_files = find_statistic_files(input_dir)
        bind_for_statistic(statistic_files, match_dict)
        return bind_result

    # Build the trie once; it only depends on match_dict, not on the file.
    trie = Trie()
    for key, value in match_dict.items():
        trie.insert(key, value)

    for npy_file in npy_files:
        directory, file_name = os.path.split(npy_file)
        name_without_ext = os.path.splitext(file_name)[0]
        if name_without_ext.isdigit():
            # Purely numeric dump names need mapping.csv to recover the op name.
            csv_file_path = os.path.join(directory, 'mapping.csv')
            df = pd.read_csv(csv_file_path, header=None)
            # Column 0 is assumed to hold the dump file name — TODO confirm.
            matching_row = df[df[0] == file_name]
            if matching_row.empty:
                # Cannot recover the op name; skip instead of crashing on
                # os.path.splitext(None) as the previous version did.
                logging.warning("No entry found for %s in mapping.csv.", file_name)
                continue
            corresponding_name = matching_row[1].values[0]
            logging.info("The corresponding name in column B is: %s", corresponding_name)
            name_without_ext = os.path.splitext(corresponding_name)[0]
        npy_path = os.path.realpath(npy_file)
        name_parts = name_without_ext.split(".")
        if len(name_parts) < 2:
            # Name does not follow the '<type>.<scope>...' convention; skip.
            logging.warning("Unexpected dump file name format: %s", file_name)
            continue
        node_scope = name_parts[1]
        bind_result[npy_path] = (match_codes(trie, node_scope),
                                 match_names(trie, node_scope))
    return bind_result
