import os
import re
import json
import base64
import pandas as pd
from urllib.parse import quote

from META import KB_PATH
from file_encryptor import encryptor
from utlis import SPLIT_CHAR, EMPTY_DIR_PLACEHOLDER


def get_dir_tree(record_paths, dir_path='', max_depth=None):
    """Build a nested dict describing the directory tree of *record_paths*.

    Args:
        record_paths: iterable of record paths whose components are joined
            by ``SPLIT_CHAR``; a path whose last component equals
            ``EMPTY_DIR_PLACEHOLDER`` marks an empty directory.
        dir_path: optional sub-directory (``SPLIT_CHAR``-joined) to return
            the sub-tree for; the empty string returns the whole tree.
        max_depth: when given, nodes deeper than this are pruned and only
            their metadata counters are kept.

    Returns:
        Nested dict keyed by directory/file name; every node also carries
        the metadata keys ``__file_count__``, ``__dir_count__`` and
        ``__dir_exist__``.

    Raises:
        KeyError: if ``dir_path`` is not present in the generated tree.
    """
    # Keys that carry per-node metadata rather than child directories.
    meta_keys = ('__file_count__', '__dir_count__', '__dir_exist__')

    def generate_directory_tree(file_list):
        # Build the raw tree: one nested dict per directory component.
        tree = {'__file_count__': 0}
        for file_path in file_list:
            parts = file_path.split(SPLIT_CHAR)
            current_level = tree
            for part in parts[:-1]:  # every component but the last is a directory
                if part not in current_level:
                    current_level[part] = {'__file_count__': 0}
                current_level = current_level[part]
            # The last component is a file unless it is the empty-dir marker.
            if parts[-1] != EMPTY_DIR_PLACEHOLDER:
                current_level['__file_count__'] = current_level.get('__file_count__', 0) + 1
        return tree

    def add_dir_count(node, path=''):
        # Annotate node with its direct sub-directory count and whether the
        # corresponding directory exists on disk under KB_PATH.
        dir_count = 0
        for key, value in node.items():
            if isinstance(value, dict) and key not in meta_keys:
                dir_count += 1
                add_dir_count(value, os.path.join(path, key))
        node['__dir_count__'] = dir_count
        # NOTE(review): tree paths are SPLIT_CHAR-joined but checked here with
        # the OS path separator — assumes the two are compatible; confirm.
        node['__dir_exist__'] = os.path.isdir(KB_PATH + os.path.sep + path)

    def prune_dir_tree(node, max_depth, current_depth=0):
        if current_depth >= max_depth:
            # At the depth limit drop the children but keep ALL metadata
            # keys.  (Previously '__dir_exist__' was deleted here as well,
            # which was inconsistent with add_dir_count setting it on every
            # node.)
            for key in list(node.keys()):
                if key not in meta_keys:
                    del node[key]
            return
        for value in node.values():
            if isinstance(value, dict):
                prune_dir_tree(value, max_depth, current_depth + 1)

    if dir_path != '':
        query_path = dir_path + SPLIT_CHAR
        file_list = [path for path in record_paths if path.startswith(query_path)]
        dir_tree = generate_directory_tree(file_list)
        # Descend to the node of the requested sub-directory.
        for part in dir_path.split(SPLIT_CHAR):
            dir_tree = dir_tree[part]
    else:
        dir_tree = generate_directory_tree(list(record_paths) if record_paths else [])

    # NOTE(review): for a sub-tree the disk-existence walk still starts at
    # path='' (relative to KB_PATH itself), not at dir_path — this mirrors
    # the original behavior; confirm it is intended.
    add_dir_count(dir_tree)
    if max_depth is not None:
        prune_dir_tree(dir_tree, max_depth)
    return dir_tree


def _load_json_record(real_file_dir, filename):
    """Load a link->resource-name record file (JSON), honoring encryption.

    Returns an empty dict when the record file does not exist.
    """
    record_path = os.path.join(real_file_dir, filename)
    if not os.path.isfile(record_path):
        return {}
    if encryptor.encrypt:
        return encryptor.load_from_file(record_path)
    with open(record_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def parse_kb_ptxt(real_file_dir, file_dir, content, linkage, img_base64=False, tbl_html=False, img_bin=False, tbl_pd=False):
    """Parse one KB_PTXT row into (know_id, content string, resource map).

    Image/table placeholders (``IMAGE_*`` / ``TABLE_*``) named in *linkage*
    are resolved against the record files under *real_file_dir*; unresolved
    placeholders are stripped from the content.

    Args:
        real_file_dir: on-disk directory containing record files and assets.
        file_dir: logical directory used to build resource URLs.
        content: JSON string; the first value's first element maps a know id
            to a list of single-value content rows.
        linkage: newline-separated list of placeholder names.
        img_base64: inline images into the content as base64 ``<img>`` tags.
        tbl_html: return table HTML via the resource map instead of inlining.
        img_bin: return raw image bytes via the resource map.
        tbl_pd: return tables as pandas DataFrames via the resource map.

    Returns:
        Tuple of (know_id, stripped content string, resource dict keyed by
        placeholder name).
    """
    content = json.loads(content)
    content = next(iter(content.values()))[0]
    know_id, content_index_list = next(iter(content.items()))
    contents = [next(iter(row.values())) for row in content_index_list]

    resource = {}
    linkages = linkage.split('\n')
    # '__HHF__' is the stored newline sentinel; restore real newlines.
    content_str = ''.join(contents).replace('__HHF__', '\n')

    img_record = _load_json_record(real_file_dir, 'image_record.json')
    tbl_record = _load_json_record(real_file_dir, 'table_record.json')

    for link in linkages:
        # NOLINK
        if link.startswith('IMAGE_'):
            if link in img_record:
                img_name = img_record[link]
                if img_bin or img_base64:
                    # Read the image bytes (possibly through the encryptor).
                    img_path = os.path.join(real_file_dir, img_name)
                    if encryptor.encrypt:
                        img_data = encryptor.load_from_file(img_path)
                    else:
                        with open(img_path, 'rb') as fd:
                            img_data = fd.read()
                    if img_base64:
                        # Inline the image directly into the content.
                        img_data = base64.b64encode(img_data).decode('utf-8')
                        content_str = content_str.replace(link, f'<img src="data:image/png;base64,{img_data}" alt="Example Image">')
                    resource[link] = img_data
                else:
                    # Expose a URL instead of the image payload.
                    encoded_url = quote(file_dir + SPLIT_CHAR + img_name, safe=":/")
                    resource[link] = encoded_url
            else:
                # Unknown placeholder: strip it from the content.
                content_str = content_str.replace(link, '')
        elif link.startswith('TABLE_'):
            if link in tbl_record:
                tbl_name = tbl_record[link]
                tbl_path = os.path.join(real_file_dir, tbl_name)
                if encryptor.encrypt:
                    tbl_df = encryptor.load_from_file(tbl_path)
                else:
                    tbl_df = pd.read_csv(tbl_path, encoding='utf-8', index_col=False)
                # Blank out pandas' auto-generated 'Unnamed: N' column labels.
                tbl_df.columns = ['' if 'Unnamed:' in str(col) else col for col in tbl_df.columns]
                if tbl_pd:
                    tbl_df = tbl_df.fillna('')
                    resource[link] = tbl_df
                else:
                    html = tbl_df.to_html(escape=True, index=False, na_rep='', justify='center').replace('\n', '').replace('\\n', '')
                    if tbl_html:
                        resource[link] = html
                    else:
                        content_str = content_str.replace(link, html)
            else:
                content_str = content_str.replace(link, '')
    return know_id, content_str.strip(), resource


def get_file_tree(real_file_dir, file_dir, query_path, img_base64=False, tbl_html=False, img_bin=False, tbl_pd=False):
    """Assemble the knowledge-base content tree from KB_PTXT.csv.

    Each row's '-->'-separated path becomes a chain of nested dict nodes;
    the leaf node receives the parsed know id, content string and resource
    map under '__know_id__' / '__content__' / '__resource__'.

    Returns:
        Tuple of (tree dict, know id of the row whose path equals
        *query_path* — '' when no row matches).
    """
    ptxt_path = os.path.join(real_file_dir, 'KB_PTXT.csv')
    if encryptor.encrypt:
        ptxt_df = encryptor.load_from_file(ptxt_path)
    else:
        ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)

    tree = {}
    query_know_id = ''
    for _, row in ptxt_df.iterrows():
        path = row['path']

        # Walk (creating as needed) one nested dict per path component.
        node = tree
        for segment in path.split("-->"):
            node = node.setdefault(segment, {})

        know_id, content_str, resource = parse_kb_ptxt(
            real_file_dir, file_dir, row['content'], row['linkage'],
            img_base64, tbl_html, img_bin, tbl_pd)
        if path == query_path:
            query_know_id = know_id

        node['__know_id__'] = know_id
        node['__content__'] = content_str
        node['__resource__'] = resource
    return tree, query_know_id



if __name__ == "__main__":
    # Ad-hoc smoke test against the demo knowledge base.
    with open('KB_TEMPS_DEMO/KB_path_dic.json', mode='r', encoding='utf-8') as f:
        record_path_ref = json.load(f)
    record_paths = record_path_ref.keys()

    dir_tree = get_dir_tree(record_paths, "124、低压配电设计规范.docx", 1)
    print(json.dumps(dir_tree, indent=4, ensure_ascii=False))

    dir_path = '124、低压配电设计规范.docx'
    # Depth (in SPLIT_CHAR components) of entries directly inside dir_path.
    dir_count = dir_path.count(SPLIT_CHAR) + (1 if dir_path != '' else 0)
    print(dir_count)

    # File names that sit immediately under dir_path.
    files = [
        parts[-1]
        for parts in (p.split(SPLIT_CHAR) for p in record_paths if p.startswith(dir_path))
        if len(parts) - 1 == dir_count
    ]
    print(files)
