import os
import json
import numpy as np
import pandas as pd
import time
import re
from sqlite_pool import SqliteDB
from datetime import datetime
from loguru import logger

from table_parser import parse_headers, parse_tb_contents, postprocess_tb
from utlis import intersect_lst, path_handle, extract_nested_dic_vals, flatten_list, tokenize2stw_remove, gen_str_codes, SPLIT_CHAR, parse_fragment_path
from file_encryptor import encryptor
import threading
g_lock = threading.Lock()



def gen_bfs_tree(start_path, skip_lst=['Supplementary Files', 'desktop.ini']):
    """Build a nested-dict tree mirroring the directory structure under start_path.

    Only directories are recorded (compare form_path_tree, which also records
    file names as leaf keys). Any path containing a component listed in
    skip_lst is skipped entirely.

    Args:
        start_path: root directory to walk.
        skip_lst: path components whose presence excludes the whole path.

    Returns:
        dict: nested dictionaries mirroring the directory hierarchy; the first
        component returned by path_handle is dropped from every path.
    """
    bfs_tree = {}

    for root, dirs, files in os.walk(start_path):
        paths = path_handle(root, 'split')

        # skip any directory whose path contains a blacklisted component
        if len(intersect_lst(skip_lst, paths)) > 0:
            continue

        # descend/create the nested dict, ignoring the first path component
        sub_dict = bfs_tree
        for folder in paths[1:]:
            sub_dict = sub_dict.setdefault(folder, {})
        # NOTE: the original computed os.path.join(*paths) and compared it to
        # start_path as the last statement of the loop body; that had no
        # effect (a trailing `continue`), so it was removed.
    return bfs_tree


def bfs_keys(tree, tokenizer, model, vectorize_texts):
    """Recursively embed every directory key of a nested dict tree in place.

    Each dict-valued entry (a directory) is replaced by
    {"embedding": <vector>, "sub_tree": <recursed value>} keyed by the
    (possibly normalized) key returned by vectorize_texts. Non-dict leaf
    values are left untouched.

    Args:
        tree: nested dict produced by gen_bfs_tree/form_path_tree.
        tokenizer, model: embedding backend passed through to vectorize_texts.
        vectorize_texts: callable (text, tokenizer, model) -> (text, vector).

    Returns:
        The same tree object, rewritten in place.
    """
    # Iterate over a snapshot: vectorize_texts may return a normalized key,
    # and inserting a new key while iterating tree.items() directly raises
    # "dictionary changed size during iteration". The original also left the
    # stale entry behind when the key changed; it is removed here.
    for key, value in list(tree.items()):
        if isinstance(value, dict):
            value = bfs_keys(value, tokenizer, model, vectorize_texts)  # recurse into sub-directories
            new_key, embedding = vectorize_texts(key, tokenizer, model)
            if new_key != key:
                del tree[key]
            tree[new_key] = {"embedding": embedding, "sub_tree": value}
    return tree


def draw_path(tree, path, file_suffixes, paths):
    """Collect the full path tuple of every file in a nested tree whose name
    ends with one of file_suffixes.

    A non-empty dict value marks a sub-directory (descended recursively);
    everything else is treated as a leaf file name. Matching tuples are
    appended to `paths` in place, and the same list is returned.
    """
    suffixes = tuple(file_suffixes)
    for name, node in tree.items():
        current = path + (name, )
        if isinstance(node, dict) and node:
            # non-empty dict => sub-directory: descend
            draw_path(node, current, suffixes, paths)
        elif name.endswith(suffixes):
            paths.append(current)
    return paths


def form_path_tree(start_path, skip_lst=['Supplementary Files', 'desktop.ini']):
    """Build a nested dict mirroring the directory tree under start_path,
    with files included as empty-dict leaf keys.

    Paths containing any component from skip_lst are ignored wholesale, and
    files sitting directly in start_path itself are not recorded.
    """
    know_tree = {}
    for root, _dirs, files in os.walk(start_path):
        components = path_handle(root, 'split')

        # skip blacklisted directories wholesale
        if len(intersect_lst(skip_lst, components)) > 0:
            continue

        # descend/create the nested dict, dropping the first path component
        node = know_tree
        for part in components[1:]:
            node = node.setdefault(part, {})

        # files directly under the root are not recorded
        if os.path.join(*components) == start_path:
            continue

        for fname in files:
            node.setdefault(fname, {})
    return know_tree


def replace_items(text, row, img_record, tb_record, temp_path, KB_PATH):
    """Expand TABLE_/IMAGE_ placeholders in one content fragment into five
    parallel result lists.

    `text` is split on placeholder tokens (TABLE_..._TABLE / IMAGE_..._IMAGE).
    Each table placeholder expands to one text entry per parsed table row;
    each image placeholder expands to the image's file path; plain parts are
    kept as plain text, unless the row's linkage marks it as a summary row,
    in which case the part is tagged as a SUMMARY entry for its parent path.

    Args:
        text: content fragment that may contain placeholder tokens.
        row: pandas row providing 'know_id', 'keywords', 'summary',
            'linkage' and 'path' fields.
        img_record: placeholder -> '-->'-joined image path mapping.
        tb_record: placeholder -> '-->'-joined table csv path mapping.
        temp_path: scratch directory handed to parse_headers.
        KB_PATH: knowledge-base root used to resolve table csv paths.

    Returns:
        (texts, type_labels, keywords, summaries, know_ids) -- five parallel
        lists with empty-text entries filtered out.
    """
    pattern = re.compile(r'(TABLE_.*?_TABLE|IMAGE_.*?_IMAGE)')
    matches = pattern.findall(text)
    
    res_texts = []
    res_labels = []
    res_keywords = []
    res_summaries = []
    res_knowids= []
    
    # re.split with a capturing group keeps the placeholder tokens themselves
    # in the result, interleaved with the surrounding plain-text parts
    split_texts = re.split(pattern, text)
    type_lables = []
    replaced_texts = []
    replaced_keywords = []
    replaced_summaries = []
    replaced_ids = []
    
    for part in split_texts:
        if part in matches and 'TABLE_' in part:
            # resolve the placeholder to its stored csv and re-parse the table
            # into one string per row; each row inherits the source row's
            # know_id/keywords/summary
            tb_path = tb_record[part].replace('-->', os.path.sep)
            tb_path = os.path.join(KB_PATH, tb_path)
            if encryptor.encrypt:
                tb_df = encryptor.load_from_file(tb_path)
            else:
                tb_df = pd.read_csv(tb_path, encoding='utf-8')
            # skip tables that are empty or entirely NaN
            if len(tb_df) == 0 or tb_df.empty or tb_df.isna().all().all():
                continue
            tb_df = postprocess_tb(tb_df)
            df_temp = parse_headers(tb_df=tb_df, temp_path=temp_path, mode='parse')
            _, _, tb_strs = parse_tb_contents(df_temp, mode='parse', return_lst=True)
            type_lables.extend([part]*len(tb_strs))
            replaced_ids.extend([row['know_id']]*len(tb_strs))
            replaced_texts.extend(tb_strs)
            replaced_keywords.extend([row['keywords']]*len(tb_strs))
            replaced_summaries.extend([row['summary']]*len(tb_strs))
        
        elif part in matches and 'IMAGE_' in part: # by default, each image only corresponds to one row
            # store the last component of the recorded image path as the text
            img_path = img_record[part].split('-->')[-1]
            type_lables.append(part)
            replaced_ids.append(row['know_id'])
            replaced_keywords.append(row['keywords'])
            replaced_summaries.append(row['summary'])
            replaced_texts.append(img_path)
        
        elif '__SUMMARY__' in row['linkage']: # by default, each summary only corresponds to one row
            # NOTE(review): this branch re-triggers for every plain part of a
            # summary row, not once per row -- confirm summary rows only ever
            # contain a single text part.
            parent_path = row['path'].replace('__摘要总结__', '').replace('__包括__', '').split('-->')[-1]
            type_lables.append('SUMMARY_' + parent_path + '_SUMMARY')
            replaced_ids.append(row['know_id'])
            replaced_texts.append(parent_path + '包括' + text)
            replaced_keywords.append(row['keywords'])
            replaced_summaries.append(row['summary'])
        
        else:
            # ordinary plain-text part
            type_lables.append('PTXT')
            replaced_ids.append(row['know_id'])
            replaced_texts.append(part)
            replaced_keywords.append(row['keywords'])
            replaced_summaries.append(row['summary'])
        
    # drop empty text entries while keeping the five lists aligned
    for i, rt in enumerate(replaced_texts):
        if not rt=='':
            res_texts.append(rt)
            res_labels.append(type_lables[i])
            res_knowids.append(replaced_ids[i])
            res_keywords.append(replaced_keywords[i])
            res_summaries.append(replaced_summaries[i])
    return res_texts, res_labels, res_keywords, res_summaries, res_knowids
    

def process_pathvec(system_path, path_item, vec_func, added_path_vecs, record_path_ref, tokenizer, model, stopwords=[]):
    """Embed one '-->'-joined path string and register it in the path reference.

    Args:
        system_path: user-prefixed path stored as the reference target.
        path_item: the raw '-->'-joined path to embed.
        vec_func: callable (text, tokenizer, model) -> (text, vector).
        added_path_vecs: accumulator -- either a plain list of vectors or a
            2-D np.ndarray (both call sites exist in this module).
        record_path_ref: dict mapping path_item -> {'system_path', 'tokens'}.
        tokenizer, model: embedding backend passed through to vec_func.
        stopwords: tokens removed when building the readable token string.

    Returns:
        (added_path_vecs, record_path_ref) with the new vector appended and
        the path registered (existing entries are left untouched).
    """
    path_parts = path_item.split('-->')
    path_desc = tokenize2stw_remove(path_parts, stopwords)
    _, path_vec = vec_func(path_item, tokenizer, model)

    # The accumulator may be a list (append) or an ndarray (vstack). The
    # original distinguished the two with a bare try/except around .append,
    # which also swallowed unrelated errors; dispatch on the type instead.
    if isinstance(added_path_vecs, np.ndarray):
        added_path_vecs = np.vstack((added_path_vecs, path_vec))
    else:
        added_path_vecs.append(path_vec)

    if path_item not in record_path_ref:
        record_path_ref[path_item] = {'system_path': system_path, 'tokens': '->'.join(path_desc)}
    return added_path_vecs, record_path_ref
    

def vec_individual_contents(contents_, vec_func, tokenizer, model, cut_len=1024, dic_keys=[]):
    """Embed a list of content strings, with a truncate-and-retry fallback.

    Cleans placeholder markers ('__HHF__', 'Unnamed'), optionally prefixes
    each content with the newline-joined dic_keys, then embeds everything in
    one batched call. If the batch fails (e.g. inputs too long), each content
    is truncated to cut_len and embedded individually; items that still fail
    are logged and skipped.

    Args:
        contents_: a string or list of strings to embed.
        vec_func: callable (text(s), tokenizer, model) -> (text(s), vectors).
        cut_len: truncation length used in the per-item fallback.
        dic_keys: optional keys injected as a prefix into every content.

    Returns:
        np.ndarray of embeddings; (0, 1024) when nothing could be embedded.
    """
    if not isinstance(contents_, list):
        contents_ = [contents_]

    contents_ = [c.replace('__HHF__', '').replace('Unnamed', '').strip() for c in contents_]
    if len(dic_keys) > 0 and len(contents_) > 0:
        key_inject = '\n'.join(dic_keys)
        contents_ = [key_inject + ' ' + c for c in contents_]

    content_vecs = np.empty((0, 1024), dtype=np.float32)
    try:
        # fast path: embed the whole batch at once
        _, content_vecs = vec_func(contents_, tokenizer, model)
    except Exception:
        # fallback: truncate and embed one by one. The original stacked the
        # previous iteration's vector when an item failed (and raised
        # NameError if the very first item failed); skip failures instead.
        contents_ = [content[:cut_len] for content in contents_]
        for content in contents_:
            try:
                _, vec_ = vec_func(content, tokenizer, model)
            except Exception as e:
                print('\t vectorizing fails because {}'.format(e))
                logger.exception('fail! e: {}', e)
                continue
            content_vecs = np.vstack((content_vecs, vec_))
    return content_vecs
    

def get_dir_path(fragment_path, KB_PATH):
    """Find the deepest prefix of fragment_path that exists as a real
    directory under KB_PATH.

    Walks the SPLIT_CHAR-separated components one by one, stopping at the
    first component that is not an existing directory.

    Returns:
        (absolute path of the deepest existing directory,
         existing prefix joined by SPLIT_CHAR,
         remaining non-directory suffix joined by SPLIT_CHAR)
    """
    parts = fragment_path.split(SPLIT_CHAR)
    real_file_dir = ''
    depth = 0
    for part in parts:
        candidate = os.path.join(real_file_dir, part)
        if not os.path.isdir(os.path.join(KB_PATH, candidate)):
            break
        real_file_dir = candidate
        depth += 1

    file_dir = f'{SPLIT_CHAR}'.join(parts[:depth])
    sub_path = f'{SPLIT_CHAR}'.join(parts[depth:])
    return os.path.join(KB_PATH, real_file_dir), file_dir, sub_path


def check_progress(add_filename, add_dir, process):
    """Record import progress for (add_filename, add_dir) in sqlite.

    When no import_progress row exists for the file, the import job is
    considered cancelled: returns (False, empty df, empty vector arrays,
    empty path ref) so the caller can abort. Otherwise the progress value is
    updated and (True, None, None, None, None) is returned. With a falsy
    add_filename nothing is checked and the success tuple is returned.
    """
    if not add_filename:
        return True, None, None, None, None

    query = 'select 1 from import_progress where file_name=? and dir_path=? limit 1'
    row_exists = SqliteDB().selectone(query, (add_filename, add_dir))
    if not row_exists:
        # progress row vanished -> hand back empty accumulators so the
        # caller can return early without adding anything
        empty_cols = ['content', 'path', 'type', 'length',
                      'keywords', 'summary', 'know_id', 'tokens']
        empty_df = pd.DataFrame({col: [] for col in empty_cols})
        empty_vecs = np.empty((0, 1024), dtype=np.float32)
        empty_path_vecs = np.empty((0, 1024), dtype=np.float32)
        return False, empty_df, empty_vecs, empty_path_vecs, {}

    update_sql = 'update import_progress set progress=?, end_time=? where file_name=? and dir_path=?'
    SqliteDB().update(update_sql, (process, datetime.now(), add_filename, add_dir))
    return True, None, None, None, None


def vectorize_contents(current_all_contents_df, diff_paths, user, tokenizer, model, vec_func, img_record, tb_record, stopwords, USER_SETTINGS, cut_len=2046, merge_global=False, add_dir='', add_filename=''):
    """Vectorize newly added knowledge pieces and collect them for appending.

    Two passes over `diff_paths`: (1) textual document paths, whose stored
    JSON content dicts are flattened, placeholder-expanded via replace_items
    and embedded piece by piece; (2) standalone image paths ('images-->...'),
    embedded from the image file name. The trailing loop over tb_record only
    builds a system_path and adds nothing -- standalone tables are
    effectively not vectorized here (looks unfinished; confirm intent).

    Args:
        current_all_contents_df: all current KB rows ('path', 'content',
            'know_id', 'keywords', 'summary', ...).
        diff_paths: paths (without the user prefix) that need vectorizing.
        user: user id prefixed onto every stored system path.
        tokenizer, model, vec_func: embedding backend; vec_func returns
            (text, vector).
        img_record / tb_record: placeholder -> '-->'-joined path mappings.
        stopwords: removed when building readable token strings.
        USER_SETTINGS: needs 'TEMP_RES_PATH' and 'KB_PATH'.
        cut_len: truncation length for the per-piece embedding fallback.
        merge_global: embed the concatenation of all pieces as one entry
            instead of per-piece vectors.
        add_dir, add_filename: identify the import job for progress tracking;
            when the job's progress row disappears, processing aborts early.

    Returns:
        (added_df, added_vectors, added_path_vecs, added_record_path_ref);
        empty structures when check_progress reports a cancelled import.
    """
    added_contents = []
    added_vectors = []
    added_paths = [] # there can be duplicated paths
    added_path_vecs = []
    
    added_types = []
    added_lengths = []
    added_record_path_ref = {}
    added_tokens = []
    added_keywords= []
    added_summaries = []
    added_knowids = []
    
    print('\t\tcurrently add {} knowledge pieces...'.format(len(diff_paths)))
    diff_txt_paths = [path for path in diff_paths if not path.startswith('images-->') and not path.startswith('tables-->')]
    diff_txt_df = current_all_contents_df[current_all_contents_df['path'].isin(diff_txt_paths)]
    print('\t\tcurrently add {} knowledge pieces from textual docs...'.format(len(diff_txt_df)))

    total_num = len(diff_txt_df)
    # 1. check and update contents in documents and individual contents (e.g., KB_PTXT.csv)
    for r, row in diff_txt_df.iterrows():
        # 1.1 handle paths:
        path_item = row['path']
        system_path = user + '-->' + path_item
        added_path_vecs, added_record_path_ref = process_pathvec(system_path, path_item, vec_func, added_path_vecs, added_record_path_ref, tokenizer, model, stopwords)
        # NOTE(review): r is the DataFrame index *label* of the filtered
        # frame, not a 0..n-1 counter, so (r+1)/total_num can exceed 1 --
        # confirm upstream frames carry a clean RangeIndex.
        process = np.round((r+1)/total_num, 8)
        print('\tknowledge pool construction rate {}, current path: {}'.format(process, path_item))
        exist_file, none_added_df, none_added_vectors, none_added_path_vecs, none_added_record_path_ref = check_progress(add_filename, add_dir, process)
        if not exist_file:
            # the import-progress row vanished: abort and hand back empties
            return none_added_df, none_added_vectors, none_added_path_vecs, none_added_record_path_ref

        # 1.2 handle contents
        kg_contents_ = row['content']        
        record_dic = json.loads(kg_contents_)
        contents_ = extract_nested_dic_vals(record_dic)
        # avoid the impact of place-holders on embedding  DOUBLE-CHECK, other place-holders?
        contents_ = [c.strip() for c in contents_ if not re.sub('__HHF__', '', c).strip()=='']
        # type, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(user, USER_SETTINGS['KB_PATH'], path_item)
        # contents_.insert(0, sub_path)
        try:
            dic_keys = [e for e in list(record_dic.keys()) if e!='UNK']
        except Exception as e:
            # NOTE(review): on failure, dic_keys from the previous loop
            # iteration is silently reused (NameError on the first row)
            print(e, '\n', record_dic)
        
        replaced_contents = []
        replaced_labels = []
        replaced_keywords =[]
        replaced_summaries = []
        replaced_knowids = []
        # expand TABLE_/IMAGE_ placeholders into concrete text entries
        for content in contents_:
            temp_contents, temp_labels, temp_keywords, temp_summaries, temp_knowids = replace_items(content, row, img_record, tb_record, USER_SETTINGS['TEMP_RES_PATH'], USER_SETTINGS['KB_PATH'])
            replaced_contents.append(temp_contents)
            replaced_labels.append(temp_labels)
            replaced_knowids.append(temp_knowids)
            replaced_keywords.append(temp_keywords)
            replaced_summaries.append(temp_summaries)
            
        labels_ = flatten_list(replaced_labels)
        contents_ = flatten_list(replaced_contents)
        know_ids_ = flatten_list(replaced_knowids)
        keywords_ = flatten_list(replaced_keywords)
        summaries_ = flatten_list(replaced_summaries)
            
        content_vecs = vec_individual_contents(contents_, vec_func, tokenizer, model, cut_len, dic_keys=dic_keys)
        content_lens = [len(c) for c in contents_]
        content_tokens = tokenize2stw_remove(contents_, stopwords)
        
        if merge_global:
            # collapse all pieces of this document into a single entry
            global_content_vec = vec_individual_contents('\n'.join(contents_), vec_func, tokenizer, model, cut_len)
            # content_vecs = np.squeeze(np.array([np.sum([global_content_vec, v.reshape(1, -1)], axis=0) for v in content_vecs ]), axis=1)
            content_vecs = global_content_vec
            contents_ = ['\n'.join(contents_)]
            labels_ = ['\n'.join(labels_)]
            # NOTE(review): this stores the joined *text*, not its length, and
            # know_ids_/keywords_/summaries_ are not collapsed to length 1 --
            # the DataFrame construction below would likely fail in this mode;
            # confirm merge_global is actually exercised.
            content_lens = ['\n'.join(contents_)]
        
        added_paths.extend([system_path]*len(contents_))
        added_contents.extend(contents_)
        added_vectors.extend(content_vecs)
        
        added_types.extend(labels_)
        added_knowids.extend(know_ids_)
        added_lengths.extend(content_lens)
        added_tokens.extend(content_tokens)
        added_keywords.extend(keywords_)
        added_summaries.extend(summaries_)
    
    # all accumulators must stay index-aligned before building the frame
    assert (len(added_paths)==len(added_types) and len(added_types)==len(added_lengths) and len(added_lengths)==len(added_contents) and len(added_contents)==len(added_vectors) and len(added_vectors)==len(added_tokens))
    added_df = pd.DataFrame({'content':added_contents, 
                             'path':added_paths, 
                             'type':added_types, 
                             'length':added_lengths, 
                             'keywords':added_keywords,
                             'summary':added_summaries,
                             'know_id':added_knowids, 
                             'tokens':added_tokens})

    # normalize accumulators to 2-D arrays (empty -> (0, 1024))
    if len(added_vectors) == 0:
        added_vectors = np.empty((0, 1024), dtype=np.float32)
    else:
        added_vectors = np.array(added_vectors)
    if len(added_path_vecs) == 0:
        added_path_vecs = np.empty((0, 1024), dtype=np.float32)
    else:
        added_path_vecs = np.array(added_path_vecs)

    # 5. add individual images
    diff_img_paths = [path for path in diff_paths if path.startswith('images-->')]
    img_num = 1
    img_total_num = len(diff_img_paths)
    print('\tcurrently add {} individual image pieces...'.format(len(diff_img_paths)))
    for img_label, img_path in img_record.items():
        system_path = user + '-->images-->' + img_path
        diff_img_path = 'images-->' + img_path

        if diff_img_path in diff_img_paths:
            process = np.round(img_num/img_total_num, 8)
            print('\tknowledge pool construction rate {}, current path: {}'.format(process, diff_img_path))
            exist_file, none_added_df, none_added_vectors, none_added_path_vecs, none_added_record_path_ref = check_progress(add_filename, add_dir, process)
            if not exist_file:
                return none_added_df, none_added_vectors, none_added_path_vecs, none_added_record_path_ref
            
            img_num += 1    
            # embed the bare file name (without extension) as the content
            img_content = os.path.splitext(img_path.split('-->')[-1])[0]
            added_path_vecs, added_record_path_ref= process_pathvec(system_path, img_path, vec_func, added_path_vecs, added_record_path_ref, tokenizer, model, stopwords)
            content_vecs = vec_individual_contents([img_content], vec_func, tokenizer, model)
            added_vectors = np.vstack((added_vectors, content_vecs))
            content_tokens = tokenize2stw_remove([img_content], stopwords)
            # ***CURRENTLY, USING CONTENT AS KEYWRODS AND SUMMARY***
            added_df.loc[len(added_df)] = [img_content, system_path, img_label, len(img_content), img_content, img_content, gen_str_codes(img_label), '->'.join(content_tokens)]
            
    # 6. add individual tables
    diff_tb_paths = [path for path in diff_paths if path.startswith('tables-->')]
    print('\tcurrently add {} individual table pieces...'.format(len(diff_tb_paths)))
    for tb_label, tb_path in tb_record.items():
        # NOTE(review): loop body only builds system_path -- standalone
        # tables are never embedded or added; looks unfinished.
        system_path = user + '-->tables-->' + tb_path       
    return added_df, added_vectors, added_path_vecs, added_record_path_ref


def get_injection_paths(root_path, USER_SETTINGS, know_df_cols):
    """Scan root_path for per-directory knowledge artifacts and merge them.

    Walks the tree collecting three kinds of artifacts:
      * 'KB*...csv' content frames, whose 'path' column is rewritten to a
        '-->'-joined path relative to the KB root;
      * 'image_record.json' and 'table_record.json' placeholder mappings,
        whose values are prefixed with their '-->'-joined directory.

    Args:
        root_path: directory tree to scan.
        USER_SETTINGS: needs 'ROOT_LEN' (number of leading path components
            to strip from absolute file paths).
        know_df_cols: columns to keep from each KB csv.

    Returns:
        (merged contents DataFrame, image record dict, table record dict).
    """
    def _load_json_record(file_path, root_len):
        # load an image/table record json (possibly encrypted) and prefix
        # each value with its '-->'-joined directory relative to the KB root
        if encryptor.encrypt:
            record = encryptor.load_from_file(file_path)
        else:
            with open(file_path, 'r', encoding='utf-8') as f:
                record = json.load(f)
        dir_term = os.path.dirname(file_path).split(os.sep)[root_len:]
        return {link: '-->'.join(dir_term + [val]) for link, val in record.items()}

    img_record = {}
    tb_record = {}
    current_all_contents_df = pd.DataFrame(columns=know_df_cols)
    root_len = USER_SETTINGS['ROOT_LEN']

    for root, _, files in os.walk(root_path):
        for file in files:
            file_path = os.path.join(root, file)
            if 'KB' in file and file.endswith('.csv'):
                if encryptor.encrypt:
                    file_df = encryptor.load_from_file(file_path)[know_df_cols]
                else:
                    file_df = pd.read_csv(file_path, encoding='utf-8', index_col=False)[know_df_cols]
                # rewrite 'path' to a '-->'-joined path relative to the KB root
                dir_term = os.path.dirname(file_path).split(os.sep)[root_len:]
                file_df.loc[:, 'path'] = ['-->'.join(dir_term + [str(cp)]) for cp in file_df['path'].values]
                current_all_contents_df = pd.concat([current_all_contents_df, file_df], ignore_index=True)
            elif file == 'image_record.json':
                img_record.update(_load_json_record(file_path, root_len))
            elif file == 'table_record.json':
                tb_record.update(_load_json_record(file_path, root_len))
    return current_all_contents_df, img_record, tb_record

# Called after each new fragment is added to the knowledge base.
def encode_user_kb(KB_PATH, USER_SETTINGS, vectorize_texts, tokenizer, model, know_df_cols, all_df_cols, stopwords=[], add_dir='', add_filename='', remove_dirs={}):
    """Synchronize the user's vector knowledge base with what is on disk.

    Diffs the paths currently present under KB_PATH against the persisted
    path reference, removes vanished entries, vectorizes new ones, then
    persists vectors / contents / path reference under the module lock.

    Args:
        KB_PATH: root of the user's knowledge directory.
        USER_SETTINGS: settings/paths dict; 'EMBEDDING_LEN' is updated in
            place when anything changed.
        vectorize_texts: embedding function (text, tokenizer, model) -> (text, vec).
        know_df_cols / all_df_cols: column sets for per-dir and global frames.
        stopwords: passed through to tokenization.
        add_dir / add_filename: restrict the sync to one import job (add mode).
        remove_dirs: restrict removal to paths containing these dirs.

    Returns:
        (USER_SETTINGS, all_path_vec, record_path_ref, all_contents_df,
         all_vec, tb_record, img_record).
    """
    # the user id is the suffix of the KB path ('..._<user>')
    user = USER_SETTINGS['KB_PATH'].split('_')[-1]
    start = time.time()
    
    current_all_contents_df, img_record, tb_record = get_injection_paths(KB_PATH, USER_SETTINGS, know_df_cols) # current_all_content_df: read KB_.csv in individual dirs
    current_txt_paths = [user + '-->' + p for p in list(current_all_contents_df['path'].values)]
    current_img_paths = [user + '-->images-->' + p for p in list(img_record.values())]
    current_tb_paths = [user + '-->tables-->' + p for p in list(tb_record.values())]
    # tables are currently excluded from the diff (see commented term)
    current_paths = current_txt_paths + current_img_paths #+ current_tb_paths

    with g_lock:
        record_paths, record_path_ref, all_vec, all_path_vec, all_contents_df = load_current_kb(USER_SETTINGS, all_df_cols)
    
    # 1. generate content_df from processed documents
    # diff = on disk but not recorded (to add); remove = recorded but gone
    # (the leading user component is stripped from every path)
    all_diff_paths = ['-->'.join(p.split('-->')[1:]) for p in intersect_lst(current_paths, record_paths, mode='diff')]
    all_remove_paths = ['-->'.join(p.split('-->')[1:]) for p in intersect_lst(record_paths, current_paths, mode='diff')]

    if add_dir:
        # add-only mode: restrict to the directory being imported
        diff_paths = [path for path in all_diff_paths if add_dir in path]
        remove_paths = []
    elif remove_dirs:
        # remove-only mode: restrict to the directories being deleted
        diff_paths = []
        remove_paths = [path for path in all_remove_paths if any(remove_dir in path for remove_dir in remove_dirs)]
    else:
        diff_paths = all_diff_paths
        remove_paths = all_remove_paths
        
    if len(remove_paths)>0:
        # generate vectors based on contents in full paths
        print('\tcurrently remove {} knowledge pieces...'.format(len(remove_paths)))
        print(remove_paths)
        # if USER_SETTINGS['REGENERATE'] and torch.cuda.is_available():
        all_contents_df, all_vec, all_path_vec, record_path_ref = remove_from_kb(remove_paths, 
                                                                                record_path_ref,
                                                                                USER_SETTINGS,
                                                                                all_vec,
                                                                                all_path_vec,
                                                                                all_contents_df)
    if len(diff_paths)>0:
        # generate vectors based on contents in full paths
        added_df, added_vectors, added_path_vecs, added_record_path_ref = vectorize_contents(
                                                                                            current_all_contents_df, 
                                                                                            diff_paths,
                                                                                            user,
                                                                                            tokenizer, 
                                                                                            model, 
                                                                                            vectorize_texts,
                                                                                            img_record, 
                                                                                            tb_record, 
                                                                                            stopwords, 
                                                                                            USER_SETTINGS,
                                                                                            add_dir=add_dir,
                                                                                            add_filename=add_filename)
                                                                                                   
    if len(remove_paths) > 0 or len(diff_paths) > 0: 
        # path vectors and reference entries must stay index-aligned
        assert len(record_path_ref)==len(all_path_vec)
        USER_SETTINGS['EMBEDDING_LEN'] = all_vec.shape[-1]
        with g_lock:
            if len(diff_paths)>0:
                # NOTE(review): reloading from disk here discards the
                # in-memory removal results computed above when remove_paths
                # and diff_paths are both non-empty -- confirm the two modes
                # are mutually exclusive in practice.
                record_paths, record_path_ref, all_vec, all_path_vec, all_contents_df = load_current_kb(USER_SETTINGS, all_df_cols)
                all_vec = np.vstack((all_vec, added_vectors))
                all_path_vec = np.vstack((all_path_vec, added_path_vecs))
                all_contents_df = pd.concat([all_contents_df, added_df], ignore_index=True)  
                record_path_ref.update(added_record_path_ref)
        
            # persist vectors, contents and the path reference
            np.save(USER_SETTINGS['KB_VEC_PATH'], all_vec)
            np.save(USER_SETTINGS['KB_PATH_VEC_PATH'], all_path_vec)
            if encryptor.encrypt:
                encryptor.save_to_file(all_contents_df, USER_SETTINGS['KB_CONTENT_PATH'])
            else:
                all_contents_df.to_csv(USER_SETTINGS['KB_CONTENT_PATH'], encoding='utf-8', index=False)
            
            if encryptor.encrypt:
                encryptor.save_to_file(record_path_ref, USER_SETTINGS['KB_PATH_JSON'])
            else:
                with open(USER_SETTINGS['KB_PATH_JSON'], mode='w', encoding='utf-8') as f:
                    json.dump(record_path_ref, f ,ensure_ascii=False, indent=4)
    
    print('\t知识库内容向量化完成，花费时间{}分钟'.format(np.round((time.time()-start)/60, 3)))
    return USER_SETTINGS, all_path_vec, record_path_ref, all_contents_df, all_vec, tb_record, img_record


def load_current_kb(USER_SETTINGS, all_df_cols):
    """Load the persisted KB artifacts (vectors, contents, path reference).

    Missing or unreadable artifacts are treated as a fresh start: empty
    arrays / frames / dicts are returned instead of raising.

    Args:
        USER_SETTINGS: needs 'KB_VEC_PATH', 'KB_PATH_VEC_PATH',
            'KB_CONTENT_PATH' and 'KB_PATH_JSON'.
        all_df_cols: columns for the empty fallback contents frame.

    Returns:
        (record_paths, record_path_ref, all_vec, all_path_vec,
         all_contents_df).
    """
    try:
        all_vec = np.load(USER_SETTINGS['KB_VEC_PATH'])
        all_path_vec = np.load(USER_SETTINGS['KB_PATH_VEC_PATH'])
        if encryptor.encrypt:
            all_contents_df = encryptor.load_from_file(USER_SETTINGS['KB_CONTENT_PATH'])
        else:
            all_contents_df = pd.read_csv(USER_SETTINGS['KB_CONTENT_PATH'], encoding='utf-8')
        print('\tlaunch existing KB succeed...\t')
    # was a bare except (also swallowed KeyboardInterrupt/SystemExit)
    except Exception as e:
        print('\tno existing KB loaded ({}), starting empty'.format(e))
        all_vec = np.empty((0, 1024), dtype=np.float32)
        all_path_vec = np.empty((0, 1024), dtype=np.float32)
        all_contents_df = pd.DataFrame(columns=all_df_cols)

    try:
        if encryptor.encrypt:
            record_path_ref = encryptor.load_from_file(USER_SETTINGS['KB_PATH_JSON'])
        else:
            with open(USER_SETTINGS['KB_PATH_JSON'], mode='r', encoding='utf-8') as f:
                record_path_ref = json.load(f)
        record_paths = [t['system_path'] for t in record_path_ref.values()]
    except Exception as e:
        print('\tno KB path reference loaded ({}), starting empty'.format(e))
        record_path_ref = {}
        record_paths = []
    return record_paths, record_path_ref, all_vec, all_path_vec, all_contents_df
    

def remove_from_kb(remove_paths, record_path_ref, USER_SETTINGS, all_vec, all_path_vec, all_contents_df):
    """Remove knowledge entries (path vectors, content rows and their
    vectors) for the given paths.

    Args:
        remove_paths: '-->'-joined paths (without the user prefix) to delete.
        record_path_ref: dict mapping path -> {'system_path', 'tokens'};
            its insertion order is assumed aligned with all_path_vec rows.
        USER_SETTINGS: needs 'USER_ID' to rebuild the stored system paths.
        all_vec / all_path_vec: content / path embedding matrices.
        all_contents_df: content frame, row-aligned with all_vec.

    Returns:
        (all_contents_df, all_vec, all_path_vec, record_path_ref) with the
        matching entries removed.

    Raises:
        ValueError: if contents and vectors end up misaligned.
    """
    def get_key_index(d, key):
        # 0-based position of `key` in dict insertion order, or -1 if absent
        try:
            return list(d.keys()).index(key)
        except ValueError:
            return -1

    total_ = len(remove_paths)
    process_num = 1
    start = time.time()
    path_ids = list()
    found_keys = list()
    system_paths = list()
    for path_item in remove_paths:
        system_paths.append(USER_SETTINGS['USER_ID'] + SPLIT_CHAR + path_item)
        # path reference keys are stored without the 'images-->' prefix
        key = path_item.removeprefix('images-->')
        index = get_key_index(record_path_ref, key)
        if index == -1:
            continue
        path_ids.append(index)
        found_keys.append(key)

    # 1. drop path vectors and their reference entries
    if len(path_ids) > 0:
        keep_mask = ~np.isin(np.arange(len(all_path_vec)), path_ids)
        all_path_vec = all_path_vec[keep_mask]
        # only delete keys that were actually present; the original deleted
        # every remove_path unconditionally and raised KeyError for paths
        # missing from record_path_ref
        for key in found_keys:
            del record_path_ref[key]

    # 2. drop content rows and their vectors (kept index-aligned)
    content_ids = all_contents_df.index[all_contents_df['path'].isin(system_paths)].tolist()
    if len(content_ids) > 0:
        all_contents_df = all_contents_df.drop(content_ids).reset_index(drop=True)
        keep_mask = ~np.isin(np.arange(len(all_vec)), content_ids)
        all_vec = all_vec[keep_mask]
        if all_contents_df.shape[0] != len(all_vec):
            raise ValueError('all_contents_df id is inconsistent with all_vec id')
        process_num += 1

    print('\t删除知识库内容完成，花费时间{}分钟'.format(np.round((time.time()-start)/60, 3)))
    return all_contents_df, all_vec, all_path_vec, record_path_ref


def requirements_(store_path):
    """Write a requirements-style snapshot of the current environment.

    The output starts with a '# Python <version>' comment, followed by one
    'name==version' line per installed distribution, sorted alphabetically.

    Args:
        store_path: path of the text file to (over)write.
    """
    import sys
    # importlib.metadata replaces the deprecated pkg_resources API
    from importlib import metadata

    python_version = sys.version.split()[0]
    installed_packages = sorted(
        "{}=={}".format(dist.metadata['Name'], dist.version)
        for dist in metadata.distributions()
    )
    meta_lines = [f"# Python {python_version}"] + installed_packages
    with open(store_path, "w", encoding="utf-8") as f:
        f.write("\n".join(meta_lines))


if __name__ == "__main__":
    print()
        
        
        
        
        
        
        
        
        



