import os
import shutil
import pandas as pd
import re
import json
import numpy as np
import torch as T
import io
import base64
from docx import Document

from txt2doc import generate_doc_from_txt, add_paras, process_lines_for_doc
from LLM import add_files_to_tree, bfs_filtering, bfs_reverse, find_closest, find_by_content_voting, merge_cloest_soft, rerank_

from knowledge_generator import vectorize_know, process_full_contents, inject_knowledge
from txt_parser import parse_text_knowlege
from pdf_parser import parse_pdf, convert_pdf2dics
from doc_parser import parse_docx, convert_doc2dics
from table_parser import parse_xlsx4inject, parse_headers, parse_tb_contents
from image_parser import parse_image, IMAGE_PATTERN
from md_parser import parse_md

from query_enhancer import label_queries
from encoder_finetuner import gen_train_data_from_interactions
from kb_encoder import encode_user_kb
from txt_parser import extact_local_keywords2summary
from txt2doc import rewrite_

from utlis import clean_file, intersect_lst, remove_duplicates_orderkept, create_reply, process_dup_paths_df, gen_str_codes, flatten_dic_dfs, restore_graph_by_paths
from utlis import use_llm_api, get_node_level, flatten_dict, get_bottom_level_titles, merge_df, expand_summary_paths, process_texts4displaty, SPLIT_CHAR
from utlis import know_df_cols, all_df_cols, post_request, parse_fragment_path

from META import KB_PATH, USER_SETTINGS, model_config, llm_apis, llm_histories, model, tokenizer, local_llm, local_llm_tz, vectorize_texts, stopwords
from META import opt_memory, fine_tuner, sequence_reasoner, marker_reasoner, rerank_m3
import META
from loguru import logger
from file_encryptor import encryptor
from sqlite_pool import SqliteDB
from datetime import datetime
import pypandoc
from sentence_transformers import util



def extract_and_concatenate_rows(df, column, values_list, keep_order=True):
    """Split ``df`` into rows whose ``column`` value is in ``values_list`` and the rest.

    :param df: source DataFrame
    :param column: column name to match against
    :param values_list: values to extract; when ``keep_order`` is True the
        matched rows are sorted to follow this list's order
    :param keep_order: sort matched rows by ``values_list`` order (True) or
        keep their original ``df`` order (False)
    :returns: (matched_df, non_matched_df, final_df) where final_df is the
        matched rows followed by the non-matched rows with a fresh index
    """
    mask = df[column].isin(values_list)
    # .copy() so the optional sort below never writes into a view of df
    # (the previous _tmp-column assignment triggered SettingWithCopyWarning)
    matched_df = df[mask].copy()
    non_matched_df = df[~mask]

    if keep_order:
        # rank each value by its position in values_list; stable sort keeps
        # the original relative order of duplicate values
        rank = {v: i for i, v in enumerate(values_list)}
        matched_df = matched_df.sort_values(column, key=lambda s: s.map(rank), kind='stable')

    final_df = pd.concat([matched_df, non_matched_df], ignore_index=True)
    return matched_df, non_matched_df, final_df


def get_max_indices(lst, n):
    """Return the index of the first element of ``lst`` that is not among the
    first ``n`` distinct values encountered, or ``None`` if no such element
    exists (fewer than ``n`` distinct values, or every element is allowed).
    """
    allowed = set()
    # collect the first n distinct values in order of appearance
    for value in lst:
        if value not in allowed:
            allowed.add(value)
            if len(allowed) == n:
                break

    # first position holding a value outside that allowed set
    for idx, value in enumerate(lst):
        if value not in allowed:
            return idx
    return None


def similarity_query(texts, text_vectors, q_vec, topk, msg, paths, rerank_num=15):
    """Rank candidate texts by cosine similarity to the query vector, then
    rerank the leading candidates with the m3 reranker.

    :param texts: candidate content strings, aligned with text_vectors/paths
    :param text_vectors: embedding matrix for texts (accepted by util.cos_sim;
        assumed to live on CPU -- .numpy() is called without .cpu())
    :param q_vec: query embedding
    :param topk: number of distinct paths ultimately wanted
    :param msg: raw query string handed to the reranker
    :param paths: knowledge path per candidate, aligned with texts
    :param rerank_num: number of distinct paths whose candidates go through
        the (slower) rerank_m3 pass
    :returns: (contents, paths) -- the reranked head followed, when
        topk > rerank_num, by the remaining similarity-ordered tail
    """
    semantic_scores = util.cos_sim(q_vec, text_vectors)
    # flatten the score matrix into a plain list of floats
    semantic_scores = list(semantic_scores.detach().numpy().reshape(-1))

    # candidate indices sorted by descending similarity
    ids = list(np.argsort(semantic_scores)[::-1])
    contents = [texts[i] for i in ids]
    paths = [paths[i] for i in ids]

    # index of the first candidate beyond the first rerank_num distinct paths;
    # NOTE(review): get_max_indices returns None when fewer distinct paths
    # exist -- paths[:None] then takes everything, which appears intended
    # but confirm.
    rerank_max_i = get_max_indices(paths, rerank_num)
    rerank_paths = paths[:rerank_max_i]
    rerank_contents = contents[:rerank_max_i]
    rerank_paths, rerank_contents, rerank_scores = rerank_m3(msg, rerank_paths, rerank_contents)

    if topk > rerank_num:
        # append the next similarity-ordered candidates after the reranked head
        topk_max_i = get_max_indices(paths, topk)
        rerank_paths += paths[rerank_max_i:topk_max_i]
        rerank_contents += contents[rerank_max_i:topk_max_i]
    return rerank_contents, rerank_paths


# image suffixes used to split META stores into image / non-image subsets
_IMG_SUFFIXES = ('.png', '.jpg', '.jpeg')


def _collect_meta_subset(keep_images):
    """Build filtered views of the META path/content stores.

    :param keep_images: True keeps only image entries (by file suffix),
        False keeps only non-image entries
    :returns: the seven structures the unfiltered (data_type==1) branch of
        checkerboard_find reads straight from META: (full_path_ref,
        dfs_path_descs, dfs_path_tokens, dfs_path_vectors, all_contents_df,
        all_contents_tokens, all_vec)
    """
    full_path_ref = {}
    dfs_path_descs, dfs_path_tokens, path_vec_rows = [], [], []
    for i, path in enumerate(META.full_path_ref):
        if path.endswith(_IMG_SUFFIXES) == keep_images:
            full_path_ref[path] = META.full_path_ref[path]
            dfs_path_descs.append(META.full_paths[i])
            dfs_path_tokens.append(META.full_path_tokens[i])
            path_vec_rows.append(META.full_path_vectors[i])
    # single vstack instead of re-stacking inside the loop (was O(n^2));
    # the float32 seed keeps the original dtype-promotion behaviour
    dfs_path_vectors = np.vstack([np.empty((0, 1024), dtype=np.float32)] + path_vec_rows)

    content_rows, all_contents_tokens, content_vec_rows = [], [], []
    for i, row in META.all_contents_df.iterrows():
        if row['path'].endswith(_IMG_SUFFIXES) == keep_images:
            content_rows.append(row)
            all_contents_tokens.append(META.all_contents_tokens[i])
            content_vec_rows.append(META.all_vec[i])
    all_contents_df = pd.DataFrame(content_rows)
    all_vec = np.vstack([np.empty((0, 1024), dtype=np.float32)] + content_vec_rows)

    return (full_path_ref, dfs_path_descs, dfs_path_tokens, dfs_path_vectors,
            all_contents_df, all_contents_tokens, all_vec)


def checkerboard_find(user_message, topk=None, soft_merge=True, rerank=None, hybrid=USER_SETTINGS['HYBRID_SEARCH'], data_type=1):
    '''
        :function find the most relevant knowledge file, based on <both file path name and file contents within>
                    we will combine the two methods through a voting mechanism
        :parameters
            user_message: the input query
            soft_merge: combine the results using a soft or hard manner
            data_type: 1 searches everything, 2 only non-image entries,
                       3 only image entries
        :returns return a dictionary that includes the following:
            reply: refined message of the returned knowledge files (for front-end printing)
            sim_contents: a list of relevant knowledge files
            intentions: the original user query
            q_vector: the vector of the user query
    '''

    if topk is None:
        topk = USER_SETTINGS['TOP_K']

    bak_user_message = user_message
    # strip ASCII and full-width punctuation before vectorizing
    punc_pattern = r'[\.,?!;:"\'\[\]\(\)\{\}，？！；：“”‘’【】（）]'
    user_message = re.sub(punc_pattern, '', user_message)
    user_message, q_vector = vectorize_texts(user_message, tokenizer, model)

    if data_type == 1:
        dfs_path_descs = META.full_paths            # list
        dfs_path_tokens = META.full_path_tokens     # list
        dfs_path_vectors = META.full_path_vectors   # numpy
        full_path_ref = META.full_path_ref          # dict
        all_contents_df = META.all_contents_df      # pandas
        all_contents_tokens = META.all_contents_tokens  # list
        all_vec = META.all_vec                      # numpy
    elif data_type == 2:
        (full_path_ref, dfs_path_descs, dfs_path_tokens, dfs_path_vectors,
         all_contents_df, all_contents_tokens, all_vec) = _collect_meta_subset(keep_images=False)
    elif data_type == 3:
        (full_path_ref, dfs_path_descs, dfs_path_tokens, dfs_path_vectors,
         all_contents_df, all_contents_tokens, all_vec) = _collect_meta_subset(keep_images=True)

    # 1. find by path searching
    sim_paths, _, similarities, _ = find_closest(dfs_path_descs, dfs_path_vectors, q_vector, topk, USER_SETTINGS, hybrid=hybrid, msg=user_message, stopwords=stopwords, token_corpus=dfs_path_tokens, rerank=rerank)
    sim_paths = [full_path_ref[p]['system_path'] for p in sim_paths]

    # 2. find by content voting
    voted_paths = find_by_content_voting(user_message, q_vector, all_contents_df, all_vec, topk, USER_SETTINGS, max_candidates=200, mode='normal', hybrid=hybrid, stopwords=stopwords, token_corpus=all_contents_tokens, rerank=rerank)

    # 3. merge results by re-weighting
    if soft_merge:
        merged_paths = merge_cloest_soft(sim_paths, voted_paths, lst_weights=[1,3], nonexist_panelty=True)
    else:
        merged_paths = intersect_lst(sim_paths, voted_paths)

    # 4. rerank to improve the results, taking additional seconds
    if rerank:
        rerank_res = rerank_(merged_paths, msg=user_message, topk=topk, USER_SETTINGS=USER_SETTINGS)
        merged_paths = rerank_res.values.reshape(-1).tolist()
    else:
        merged_paths = merged_paths[ :topk]

    detail = []
    data_types = []
    filter_merged_paths = []
    for sim_content_path in merged_paths:
        contents = all_contents_df[all_contents_df['path'] == sim_content_path]['content'].tolist()
        if len(contents) == 0:
            # path survived merging but has no content rows -- skip it
            logger.warning('sim_content:{}', sim_content_path)
            continue
        all_content = ''.join(contents).replace('__HHF__', '\n')
        detail.append(all_content)

        # classify the hit: 3 = image, 4 = summary node, 2 = regular content
        if '-->images-->' in sim_content_path:
            entry_type = 3
        elif '__摘要总结__' in sim_content_path and sim_content_path.endswith('__包括__'):
            entry_type = 4
        else:
            entry_type = 2
        data_types.append(entry_type)
        filter_merged_paths.append(sim_content_path)
        if len(filter_merged_paths) == topk:
            break

    reply = create_reply(filter_merged_paths, user_message)
    response_data = {
        'reply':reply,
        'sim_contents':filter_merged_paths,
        'merged_paths':filter_merged_paths,
        'intentions':bak_user_message,
        'q_vector':q_vector.tolist(), # convert numpy to list for json serializing
        'inject_fill_signal':False,
        'detail':detail,
        'data_types':data_types
    }
    return response_data


def matching_df(sim_contents, all_contents_df, user_intention, USER_SETTINGS, split_char='-->', summary_term='__摘要总结__', save_session=False):
    """Join matched knowledge paths back onto the content table, expanding
    summary nodes into their full sub-trees.

    :param sim_contents: ordered list of matched knowledge paths
    :param all_contents_df: full content table ('path', 'content', 'type', ...)
    :param user_intention: user query, written into the 'intention' column
    :param USER_SETTINGS: settings dict; MATCH_DF is the session file path
    :param split_char: separator used inside knowledge paths
    :param summary_term: marker identifying summary nodes in a path
    :param save_session: when True, append matches to MATCH_DF (encrypted
        pickle or CSV depending on the encryptor)
    :returns: (match_dfs, summary_texts_joined)
    """
    paths_df = pd.DataFrame({'path': sim_contents})
    # left-merge keeps sim_contents order; paths absent from all_contents_df
    # come back with NaN in every other column
    ini_filtered_df = paths_df.merge(all_contents_df, on='path', how='left')
    # Restore column order here only if 'path' must be the first column
    # (it currently already is, so nothing to do).

    match_dfs = pd.DataFrame(columns=list(all_contents_df.columns))
    summary_texts = []
    parent_nodes = set()
    # NOTE(review): a NaN 'type' (unmatched path) would make the containment
    # test below raise TypeError -- presumably sim_contents always originate
    # from all_contents_df; confirm.
    for _, row in ini_filtered_df.iterrows():
        if 'SUMMARY_' in row['type'] and '_SUMMARY' in row['type']:
            # summary rows encode their parent as SUMMARY_<node>_SUMMARY;
            # expand each parent node only once
            parent_node = re.findall(r'SUMMARY_(.*?)_SUMMARY', row['type'])[0]
            if parent_node in parent_nodes:
                continue
            parent_nodes.add(parent_node)
            summary_paths, summary_df = expand_summary_paths(all_contents_df, parent_node, split_char, summary_term)
            if not summary_df['path'].isin(match_dfs['path']).any():
                match_dfs = pd.concat([match_dfs, summary_df], ignore_index=True)
            summary_graph, summary_texts = restore_graph_by_paths(summary_paths, split_char, summary_term) # ****if we have multiple summary term, we only record the last one, which can be incorrect
        else:
            match_dfs = pd.concat([match_dfs, pd.DataFrame([row])], ignore_index=True)

    if ini_filtered_df.empty:
        # nothing matched at all: emit placeholder plain-text rows
        match_dfs = pd.DataFrame([], columns=['path', 'content', 'type', 'length', 'keywords', 'summary', 'tokens', 'know_id', 'intention'])
        match_dfs['path'] = sim_contents
        match_dfs['content'] = ''
        match_dfs['type'] = 'PTXT'
        match_dfs['length'] = 0
        match_dfs['know_id'] = gen_str_codes('')
        match_dfs['tokens'] = ''
        match_dfs['keywords'] = ''
        match_dfs['summary'] = ''
        match_dfs['intention'] = ''
    elif match_dfs.empty:
        # matches existed but were all filtered out: fall back to the raw
        # merge result tagged as plain text
        match_dfs = ini_filtered_df
        match_dfs['type'] = 'PTXT'

    match_dfs['intention'] = user_intention

    if save_session:
        if encryptor.encrypt:
            # append to the encrypted session store
            if os.path.exists(USER_SETTINGS['MATCH_DF']):
                old_match_dfs = encryptor.load_from_file(USER_SETTINGS['MATCH_DF'])
                all_match_dfs = pd.concat([old_match_dfs, match_dfs], ignore_index=True)
            else:
                all_match_dfs = match_dfs
            encryptor.save_to_file(all_match_dfs, USER_SETTINGS['MATCH_DF'])
        else:
            # plain CSV append; header only when the file does not exist yet
            if not os.path.isfile(USER_SETTINGS['MATCH_DF']):
                match_dfs.to_csv(USER_SETTINGS['MATCH_DF'], mode='a', index=False, encoding='utf-8', header=True)
            else:
                match_dfs.to_csv(USER_SETTINGS['MATCH_DF'], mode='a', index=False, encoding='utf-8', header=False)
    return match_dfs, '\n'.join(summary_texts)


def process_display(know_df, img_record, tb_record, KB_PATH, show_image=True):
    """Render matched knowledge rows into a display string.

    :param know_df: DataFrame with 'content' and 'type' columns; 'type' is
        'PTXT' for plain text, or contains 'TABLE_'/'IMAGE_' record keys
    :param img_record: maps IMAGE_* type keys to image paths ('-->' separated)
    :param tb_record: maps TABLE_* type keys to table csv paths
    :param KB_PATH: knowledge-base root directory
    :param show_image: when False, image rows are skipped
    :returns: concatenated reply string (text, table HTML, inline images)
    """
    reply = ''
    for _, row in know_df.iterrows():
        content = row['content']
        row_type = row['type']

        if row_type == 'PTXT':
            # __HHF__ is the stored line-break placeholder
            reply += content.replace('__HHF__', '\n')

        elif 'TABLE_' in row_type:
            tb_path = os.path.join(KB_PATH, tb_record[row_type].replace('-->', os.path.sep))
            if encryptor.encrypt:
                tb_df = encryptor.load_from_file(tb_path)
            else:
                tb_df = pd.read_csv(tb_path, encoding='utf-8', index_col=False)

            # blank out pandas' auto-generated 'Unnamed:' column headers
            tb_df.columns = ['' if 'Unnamed:' in str(col) else col for col in tb_df.columns]
            html = tb_df.to_html(escape=True, index=False, na_rep='', justify='center').replace('\n', '').replace('\\n', '')
            # BUGFIX: the table HTML was built but never appended to the reply
            reply += html

        elif 'IMAGE_' in row_type and show_image:
            img_path = os.path.join(KB_PATH, img_record[row_type].replace('-->', os.path.sep))
            if encryptor.encrypt:
                file_data = encryptor.load_from_file(img_path)
            else:
                with open(img_path, 'rb') as fd:
                    file_data = fd.read()
            # inline the image as a base64 data URI for the front-end
            base64_data = base64.b64encode(file_data).decode('utf-8')
            reply += f'<img src="data:image/png;base64,{base64_data}" alt="Example Image">'
    return reply


def checkerboard_integrate_contents(user_intention, sim_contents, save_session=True, show_image=True):
    """Join the matched knowledge paths with the META content stores, merge
    them into a single table and render it for display.

    :returns: (rendered_contents, summary_texts, merged_df)
    """
    matched, summaries = matching_df(
        sim_contents,
        META.all_contents_df,
        user_intention,
        USER_SETTINGS,
        save_session=save_session,
    )
    combined = merge_df(matched)
    rendered = process_display(combined, META.img_record, META.tb_record, META.KB_PATH, show_image)
    return rendered, summaries, combined


def checkerboard_answer(user_intention, sim_contents, gen_doc=False, act_marker=None, llm_histories=llm_histories, api_name='qwen_api', isModel=False, llm_input_limit=3000, save_session=False, show_image=True, add_paras=None):
    """Dispatch a knowledge-grounded action on the matched contents.

    :param act_marker: '输出' (generate doc / return contents), '提问' (ask),
        '重写' (rewrite), '填空' (table filling)
    :param gen_doc: with act_marker '输出', write a .docx instead of replying
    :param add_paras: extra rewrite parameters; defaults to {} (was a shared
        mutable default -- fixed)
    :returns: {'reply': ...} (plus 'doc_file' for doc generation), or None
        for an unrecognised marker
    """
    if add_paras is None:  # BUGFIX: mutable default argument
        add_paras = {}
    rewrite_fields = ['words', 'topic', 'avoid_topics', 'type', 'style', 'pages']
    if USER_SETTINGS['USE_LOCAL_LLM']: # this function supports local_llm
        api_name = 'local_api'

    ref_contents, summary_texts, merged_df = checkerboard_integrate_contents(user_intention, sim_contents, save_session, show_image)
    res4answer = (summary_texts + '\n' + ref_contents).strip()

    if gen_doc and act_marker == '输出':
        # delete stale outputs to avoid caching and duplicated downloads
        clean_file(USER_SETTINGS['OUT_PATH'], mode='clean')
        clean_file(USER_SETTINGS['DOC_PATH'], mode='remove')
        generate_doc_from_txt(META.KB_PATH,
                              merged_df,
                              USER_SETTINGS['DOC_PATH'],
                              USER_SETTINGS['OUT_PATH'],
                              META.tb_record,
                              META.img_record,
                              None,  # font_lst
                              None,  # tree
                              llm_apis[api_name],
                              local_llm,
                              USER_SETTINGS['LOCAL_LLM_NAME'],
                              local_llm_tz,
                              llm_histories=llm_histories,
                              model_config=model_config,
                              rewrite_threshold=USER_SETTINGS['REWRITE_THRESHOLD'])
        return {'reply':res4answer, 'doc_file':'Res_doc.docx'}

    if act_marker == '提问':
        # If a summary exists and the full context exceeds the limit,
        # feed only the summary to the LLM.
        if len(summary_texts) > 0 and len(res4answer) > llm_input_limit:
            res4answer = summary_texts
        res_content = checkerboard_simple_ask(user_intention, res4answer, api_name=api_name, isModel=isModel)
        return {'reply':res_content}

    elif act_marker == '重写':
        res_content = rewrite_(res4answer, llm_apis[api_name], USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, llm_histories, model_config, task='rewrite-paras', add_paras=add_paras, rewrite_fields=rewrite_fields)
        return {'reply':res_content}

    elif act_marker == '填空':
        res_content = checkerboard_filling_tb(user_intention, res4answer, api_name=api_name)
        try:
            res_content = json.dumps(res_content, ensure_ascii=False)
        except (TypeError, ValueError):  # narrowed from a bare except
            res_content = str(res_content)
        return {'reply':res_content}

    elif act_marker == '输出':
        # BUGFIX: this branch previously returned the undefined name
        # 'res_content' (NameError); without doc generation the integrated
        # contents are returned directly, mirroring the gen_doc branch.
        return {'reply':res4answer}


def answer_stream(user_intention, sim_contents, act_marker=None, llm_histories=llm_histories, api_name='qwen_stream_api', isModel=False, llm_input_limit=3000, save_session=False, show_image=True, add_paras=None):
    """Streaming counterpart of checkerboard_answer for the ask ('提问') and
    rewrite ('重写') markers; yields server-sent-event chunks.

    :param add_paras: extra rewrite parameters; defaults to {} (was a shared
        mutable default -- fixed)
    Yields chunks from the streaming API, or a single 'data: ...' frame when
    the backend returns a plain string.
    """
    if add_paras is None:  # BUGFIX: mutable default argument
        add_paras = {}
    rewrite_fields = ['words', 'topic', 'type', 'style', 'pages']
    if USER_SETTINGS['USE_LOCAL_LLM']:
        api_name = 'local_stream_api'

    ref_contents, summary_texts, _ = checkerboard_integrate_contents(user_intention, sim_contents, save_session, show_image)
    res4answer = (summary_texts + '\n' + ref_contents).strip()

    if act_marker == '提问':
        # if the combined context is too long and a summary exists,
        # fall back to the summary only
        if len(res4answer) > llm_input_limit and len(summary_texts) > 0:
            res4answer = summary_texts

        res = checkerboard_simple_ask(user_intention, res4answer, api_name=api_name, isModel=isModel)
        if isinstance(res, str):
            # non-streaming backends return a plain string: wrap it as one SSE frame
            res = f'data: {res}\n\n'
        yield from res

    elif act_marker == '重写':
        res = rewrite_(res4answer, llm_apis[api_name], USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, llm_histories, model_config, task='rewrite-paras', add_paras=add_paras, rewrite_fields=rewrite_fields)
        if isinstance(res, str):
            res = f'data: {res}\n\n'
        yield from res


def answer_stream_doc(user_intention, res4answer, act_marker=None, llm_histories=llm_histories, api_name='qwen_stream_api'):
    """Stream an LLM response over already-assembled document text.

    :param act_marker: '提问' ask, '重写' rewrite, '缩写' abridge, '扩写' extend;
        any other marker yields nothing
    Yields chunks from the streaming API, or a single 'data: ...' frame when
    the backend returns a plain string.
    """
    if USER_SETTINGS['USE_LOCAL_LLM']:
        api_name = 'local_stream_api'

    if act_marker == '提问':
        res = checkerboard_simple_ask(user_intention, res4answer, api_name=api_name, isModel=False)
    else:
        # the three rewrite-style markers differ only in the task name;
        # consolidated from a duplicated if/elif chain (behaviour unchanged)
        rewrite_tasks = {'重写': 'rewrite-paras', '缩写': 'abridge-paras', '扩写': 'extension-paras'}
        task = rewrite_tasks.get(act_marker)
        if task is None:
            return  # unrecognised marker: empty generator
        res = rewrite_(res4answer, llm_apis[api_name], USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, llm_histories, model_config, task=task)

    if isinstance(res, str):
        # non-streaming backends return a plain string: wrap it as one SSE frame
        res = f'data: {res}\n\n'
    yield from res


def checkerboard_simple_ask(intention, texts, llm_histories=llm_histories, api_name=None, isModel=True, if_judge=False):
    """Answer a user question grounded in knowledge-base texts via the LLM API.

    :param intention: the user query
    :param texts: reference knowledge passed to the LLM as grounding context
    :param llm_histories: conversation histories forwarded to use_llm_api
    :param api_name: key into llm_apis selecting the backend (required)
    :param isModel: when True, return the model's own answer even if the LLM
        judged the kb texts irrelevant; when False, return a fixed
        "no relevant material" notice instead
    :param if_judge: forwarded to the 'talk-kb' task to toggle relevance judging
    :returns: the answer string, or the raw response when it is not the
        expected {'judge': ..., 'answer': ...} mapping (e.g. a stream or
        plain string)
    """
    response, llm_histories = use_llm_api(llm_apis[api_name],
                                            histories=llm_histories,
                                            paras={ 'task':'talk-kb',
                                                    'if_judge':if_judge,
                                                    'query':intention,
                                                    'texts':texts,
                                                    'add_req': '尽可能简明扼要',
                                                    'local_model_name':USER_SETTINGS['LOCAL_LLM_NAME'],
                                                    'out_limit':USER_SETTINGS['LLM_OUT_LIMIT'],
                                                    'model':'qwen-plus',
                                                    'local_model':local_llm,
                                                    'local_tz': local_llm_tz,
                                                    'use_his':True},
                                            config=model_config)
    try:
        # '是' means the LLM judged the kb texts relevant to the query;
        # isModel lets the model's own answer through regardless
        if response['judge'] == '是' or isModel:
            answer = response['answer']
        else:
            answer = '知识库中暂无相关资料，我们会持续完善。'
    except (TypeError, KeyError):
        # narrowed from a bare except: non-mapping responses (streams,
        # plain strings) have no 'judge'/'answer' keys -- return verbatim
        answer = response
    return answer


def checkerboard_filling_tb(keyword, tb_texts, llm_histories=llm_histories, api_name='qwen_api', model='qwen-plus'):
    """Ask the LLM to fill table cells given a keyword and the table texts.

    :param keyword: the fill target / query
    :param tb_texts: serialized table contents handed to the LLM
    :param api_name: key into llm_apis selecting the backend
    :param model: remote model name for the 'filling-tb' task
    :returns: response['answer'] when present, otherwise the raw response
    """
    response, llm_histories = use_llm_api(llm_apis[api_name],
                                            histories=llm_histories,
                                            paras={ 'task':'filling-tb',
                                                    'query':keyword,
                                                    'texts':tb_texts,
                                                    'local_model_name':USER_SETTINGS['LOCAL_LLM_NAME'],
                                                    'model':model,
                                                    'local_model':local_llm,
                                                    'local_tz': local_llm_tz,
                                                    'use_his':False },
                                            config=model_config)
    try:
        answer = response['answer']
    except (TypeError, KeyError):
        # narrowed from a bare except: non-mapping responses pass through
        answer = response
    return answer


def checkerboard_qlabel(query, qlabel, api_name='gpt_api'):
    """Optionally label the user query and attach predefined responses.

    :param query: the raw user query
    :param qlabel: when falsy, skip labeling entirely
    :param api_name: NOTE(review): currently unused -- label_queries is always
        called with api_name=None; confirm whether that is intentional.
    :returns: list of {'query': ..., 'predefined_res': ...} dicts; fuzzy
        queries carry a warning as their predefined response
    """
    if not qlabel:
        return [{'query': query, 'predefined_res': ''}]

    fuzzy_warning = '系统侦测到您的提问可能过于模糊，可能影响回答质量，请考虑是否优化提问。'
    queries = [query]  # UNDER DEVELOPMENT: divide a query into multiple ones if possible
    temp_labels = label_queries(queries, model_config, USER_SETTINGS, llm_histories, api_name=None)

    return [
        {'query': lq[0],
         'predefined_res': fuzzy_warning if lq[1] == 'fuzzy' else ''}
        for lq in temp_labels
    ]
     
    
def checkerboard_learn(reply, user_intention, sim_contents, user_selected_ids, current_markers):
    """Collect interaction feedback as training data and, once every
    N_TRIGGER accumulated samples (with BN_RL enabled), fine-tune the global
    encoder and train the local reasoner."""
    train_data = gen_train_data_from_interactions(
        USER_SETTINGS,
        user_intention,
        sim_contents,
        user_selected_ids,
        all_contents_df=META.all_contents_df,
        mode='both',
        add_neg=10,
    )

    n_samples = len(train_data)
    trigger = USER_SETTINGS['N_TRIGGER']
    should_train = (USER_SETTINGS['BN_RL'] == True
                    and n_samples >= trigger
                    and n_samples % trigger == 0)
    if not should_train:
        return

    # fine-tune the global encoder on the accumulated interactions
    fine_tuner.model_setting()
    fine_tuner.model_finetuning()
    fine_tuner.model_fusing()

    # train the local reasoner from the same feedback signal
    opt_memory.eval(len(reply),
                    user_intention,
                    sim_contents,
                    user_selected_ids,
                    current_markers)

    
def checkerboard_reason(intention, sim_contents, merged_paths, mode='llm', api_name='gpt_api', llm_histories=llm_histories, seq_reasoner=sequence_reasoner, mark_reasoner=marker_reasoner):
    """Pick the knowledge piece(s) that best answer the query, either by
    asking an LLM ('llm' mode) or with locally trained reasoners ('rl' mode,
    which falls back to 'llm' on failure).

    :returns: (matched_terms, matched_ids_string, marker_ids); (None, None,
        None) when no reasoner is loaded or on unexpected failure
    """
    if seq_reasoner==None or mark_reasoner==None:
        print('No reasoner loaded')
        return None, None, None

    # path_contents = [ '-->'.join(path.split(os.sep)[USER_SETTINGS['ROOT_LEN'] : ]) for path in merged_paths ]
    # only the last path component (file/node name) is shown to the reasoner
    path_contents = [ path.split(os.sep)[-1] for path in merged_paths ]

    # numbered candidate list, one per line, using fullwidth brackets 【i 】
    content_lst_text = ''
    for i, pc in enumerate(path_contents):
        content_lst_text =  content_lst_text + f"\u3010{i+1} \u3011 " + pc + '\n'

    try:
        if mode=='llm' and not api_name==None:
            print('\tusing llm to reason...')
            # 1. selected id reasoner
            answer, llm_histories = use_llm_api(llm_apis[api_name],
                                                  histories=llm_histories,
                                                  paras={'task':'reason' , 'texts':content_lst_text.strip(), 'query':intention}, 
                                                  config=model_config)

            # normalize the LLM's 1-based 'match' answer to -1 (no match)
            # or a valid 0-based index
            if not answer['match']:
                matched_id = -1
            else:
                matched_id = int(answer['match']) - 1 # the list index begins at 0
                matched_id = max(matched_id, -1)
                if matched_id > len(merged_paths) - 1:
                    matched_id = -1
            # NOTE(review): matched_id == -1 ("no match") still indexes the
            # LAST element here -- confirm callers treat the returned id '0'
            # as no-match and ignore matched_term in that case.
            matched_term = merged_paths[matched_id]

            # 2. Under development, act marker reasoner (fixed placeholder)
            marker_ids = [0,1,0]

            return matched_term, str(matched_id+1), marker_ids

        elif mode=='rl':
            try:
                print('\tusing local model to reason...')
                seq_reasoner.load_checkpoint()
                seq_reasoner.eval()

                _, intention_embed = vectorize_texts(intention, tokenizer, model)
                _, content_embeds = vectorize_texts(path_contents, tokenizer, model)

                # reshape to (1, TOP_K, dim) and tile the query embedding
                # over every candidate before concatenating along features
                content_embeds = content_embeds[np.newaxis, :]
                intention_embed = intention_embed.reshape(1, 1, intention_embed.shape[0])

                intention_embed = np.repeat(intention_embed, USER_SETTINGS['TOP_K'], axis=1)
                embeddings = np.concatenate((content_embeds, intention_embed), axis=2) # combine query and existing knowledge
                # reason knowledge pieces by local model
                seq_embeddings = T.tensor(embeddings).to(seq_reasoner.device)
                seq_pred_arr = seq_reasoner(seq_embeddings)
                seq_pred_ids = [int(i) for i in (seq_pred_arr>=0.5).float().cpu().numpy().flatten()]
                # reason action markers by local model
                mak_embeddings = T.tensor(embeddings).to(mark_reasoner.device)
                mak_pred_arr = mark_reasoner(mak_embeddings)
                mak_pred_ids = [int(i) for i in (mak_pred_arr>=0.5).float().cpu().numpy().flatten()]

                # 1-based ids of candidates predicted as selected
                seq_return_ids = [str(i+1) for i, val in enumerate(seq_pred_ids) if val == 1]
                seq_return_terms = [path_contents[i] for i, val in enumerate(seq_pred_ids) if val == 1]

                return seq_return_terms, ','.join(seq_return_ids), mak_pred_ids
            except Exception as e:
                # fall back to the LLM path when the local model is unavailable
                print('local model loading failed... \n', e)
                return checkerboard_reason(intention, sim_contents, merged_paths, mode='llm')

        else:
            pass
    except Exception as e:
        logger.exception('checkerboard_reason fail! e:{}', e)
        return None, None, None
    
    
def checkerboard_autofill(tree, template_path, filled_contents, filled_ids, filled_markers, llm_histories=llm_histories, api_name='qwen_api', path_val_pairs=[], model_config=model_config):
    """Fill a document (.doc*) or spreadsheet (.xls*) template from the
    session's matched knowledge.

    :param tree: template structure -- heading tree for docs, nested dict for
        spreadsheets
    :param template_path: template file path; its extension selects the branch
    :param filled_contents: per-bottom-title contents, consumed in order
        ('NULL' marks a missing match)
    :param filled_ids, filled_markers: NOTE(review): currently unused here --
        confirm whether callers still need to pass them
    :param path_val_pairs: (path, value) pairs written into the dict tree in
        the spreadsheet branch; NOTE(review): mutable default list (not
        mutated here, but consider a None default)
    """
    clean_file(USER_SETTINGS['OUT_PATH'], mode='clean') # delete the file to avoid caching and duplicated downloads
    clean_file(USER_SETTINGS['REPORT_PATH'], mode='remove')
    # USER_SETTINGS['REPORT_PATH'] = os.path.join(TEMP_RES_PATH, 'Report_doc.docx')
    bottom_level_titles = get_bottom_level_titles(tree)

    if '.doc' in template_path:
        # 1. create the file
        # doc_template = Document(template_path)
        doc_res = Document()

        # 2. write the titles and contents
        if encryptor.encrypt:
            match_dfs = encryptor.load_from_file(USER_SETTINGS['MATCH_DF'])
        else:
            match_dfs = pd.read_csv(USER_SETTINGS['MATCH_DF'], encoding='utf-8')
        match_groups = match_dfs.groupby('intention', sort=False)

        # first flattened entry is the root title, which is skipped
        all_titles = flatten_dic_dfs(tree)[1: ]
        for title in all_titles:
            level = get_node_level(tree, title)
            doc_res = add_paras(doc_res, [title], level)

            if title in bottom_level_titles: # improve code logic here, the filled_contents seem not USEFUL
                if len(filled_contents) > 0:
                    content = filled_contents.pop(0)
                else:
                    content = 'NULL'

                if content=='NULL':
                    # bold, highlighted placeholder for missing knowledge
                    doc_res = add_paras(doc_res, ['(未找到知识，请补充)'], -1, [{'bold':True, 'hc':True}])
                elif title in match_groups.groups.keys():
                    subgroup = match_groups.get_group(title)
                    current_df = merge_df(subgroup)
                    doc_res = process_lines_for_doc(current_df, 
                                                    doc_res, 
                                                    KB_PATH,
                                                    META.tb_record, 
                                                    META.img_record, 
                                                    None, # font_dic
                                                    None, # tree
                                                    llm_apis[api_name],
                                                    local_llm,
                                                    USER_SETTINGS['LOCAL_LLM_NAME'],
                                                    local_llm_tz,
                                                    llm_histories=llm_histories,
                                                    model_config=model_config,
                                                    rewrite_threshold=99999999) #USER_SETTINGS['REWRITE_THRESHOLD']        

        if encryptor.encrypt:
            # serialize the docx into memory so the encryptor can wrap it
            binary_stream = io.BytesIO()
            doc_res.save(binary_stream)
            binary_stream.seek(0)
            binary_data = binary_stream.getvalue()
            encryptor.save_to_file(binary_data, USER_SETTINGS['REPORT_PATH'])
        else:
            doc_res.save(USER_SETTINGS['REPORT_PATH'])

    elif '.xls' in template_path:
        for path, value in path_val_pairs:
            keys = path.split('-->')
            current = tree
            for key in keys[:-1]:  # Traverse to the second last key
                current = current[key]
            last_key = keys[-1]
            # values may arrive as a JSON object (use its values) or a scalar
            try: 
                vals = list(json.loads(value).values())
            except:
                vals = [value]
            current[last_key] = vals  # Set the value at the last key

        flat_dic = flatten_dict(tree)
        filled_df = pd.DataFrame(flat_dic)
        filled_df.columns = pd.MultiIndex.from_tuples(filled_df.columns)
        # NOTE(review): DataFrame.to_excel dropped the 'encoding' argument in
        # pandas 2.0 -- this line fails on modern pandas; confirm the pinned
        # pandas version.
        filled_df.to_excel(USER_SETTINGS['TB_PATH'], encoding='utf-8')
    else:
        pass
    # clean_file(USER_SETTINGS['MATCH_DF'], mode='clean')


def ocr_pdf(pdf_path, kb_dir, save, api_name):
    """OCR a PDF via the remote OCR service, materialize the recognized
    markdown plus its extracted images next to the PDF, convert the markdown
    to .docx and parse that document into knowledge dictionaries.

    :param pdf_path: path of the source PDF file.
    :param kb_dir: knowledge-base directory forwarded to the docx parser.
    :param save: forwarded to convert_doc2dics (whether to persist results).
    :param api_name: key into llm_apis selecting the LLM backend.
    :return: (json_know, failed_paths) as produced by convert_doc2dics.
    :raises ConnectionError: when the OCR service does not answer HTTP 200.
    """
    ocr_dir, _ = os.path.splitext(pdf_path)
    ocr_timeout = USER_SETTINGS['OCR_TIMEOUT']
    with open(pdf_path, 'rb') as r_fd:
        files = {"file": r_fd}  # 'file' is the field name the server expects
        msg, status_code = post_request('http://218.17.187.47:35010/pdf', files=files, timeout=(60, ocr_timeout))
    if status_code != 200:
        raise ConnectionError(msg)

    # Normalize every image link to the bare '![](name)' form by stripping alt text.
    pattern = r'!\[.*?\]'
    full_text = re.sub(pattern, '![]', msg['full_text'])
    images = msg['images']

    # Fix: the per-PDF output directory may not exist yet; the original
    # crashed on the first image write for a freshly OCR'd file.
    os.makedirs(ocr_dir, exist_ok=True)
    for name, base64_str in images.items():
        image_path = os.path.join(ocr_dir, name).replace(os.path.sep, '/')
        full_text = full_text.replace(f'![]({name})', f'![]({image_path})')
        image_data = base64.b64decode(base64_str)
        with open(image_path, "wb") as file:
            file.write(image_data)

    md_path = os.path.join(ocr_dir, os.path.basename(ocr_dir) + '.md')
    with open(md_path, 'w', encoding='utf-8') as w_fd:
        w_fd.write(full_text)

    # Fix: swap only the extension. str.replace('.md', '.docx') would also
    # corrupt any '.md' substring occurring earlier in the path.
    docx_path = os.path.splitext(md_path)[0] + '.docx'
    pypandoc.convert_file(md_path, 'docx', outputfile=docx_path)

    start_line = ''
    end_line = ''
    special_doc_type = None  # e.g. 'standard' for special document layouts
    parsed_structure = parse_docx(docx_path, kb_dir, special_doc_type=special_doc_type, start_text=start_line.strip(), end_text=end_line.strip())
    json_know, failed_paths = convert_doc2dics(parsed_structure, save, kb_dir, USER_SETTINGS['PARSING_ISSUES'], model_config, llm_apis[api_name], llm_histories, USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, USER_SETTINGS['LOCAL_SUMMARY'], know_df_cols)
    return json_know, failed_paths


def checkerboard_inject_parse(filename=None, file_full_path=None, llm_histories=llm_histories, api_name='qwen_api', save=True, **kwargs):
    """Ingest one user file into the knowledge base.

    Copies the file into the supplementary folder, records import progress in
    sqlite, dispatches to a format-specific parser based on the file suffix
    (.txt / .pdf / .doc / .xls / .png|.jpg|.jpeg / .md) and finally refreshes
    the in-memory KB caches via update_kb.

    :param filename: bare file name (web uploads, where no full path exists).
    :param file_full_path: absolute path when scanning a local file.
    :param llm_histories: conversation histories forwarded to LLM calls.
    :param api_name: llm_apis key; forced to 'local_api' when USE_LOCAL_LLM.
    :param save: forwarded to the parsers (whether to persist parse results).
    :param kwargs: must carry 'kb_dir' (SPLIT_CHAR-separated target directory);
        may carry 'file_obj' (web upload stream), 'know_title',
        'start_symbol' and 'end_symbol'.
    :return: (json_know, kb_dir) for the parsed file.

    NOTE(review): when both filename and file_full_path are None, or the
    suffix matches no branch, json_know / kb_dir are never assigned and the
    final return raises UnboundLocalError — confirm callers always pass a
    supported file.
    """
    if USER_SETTINGS['USE_LOCAL_LLM']: # this function supports local_llm
        api_name = 'local_api'

    if not filename==None or not file_full_path==None:
        # 1. handle raw file path, for web-usage, we can not have the full path, we can only define that the file exists in a folder
        if not file_full_path==None:
            filename = file_full_path.split(os.sep)[-1]
        raw_file_path = os.path.join(USER_SETTINGS['SUPP_FILE_PATH'], filename)

        if file_full_path is not None and os.path.exists(file_full_path):
            print("\treceiving file objects from local scanning..")
            with open(file_full_path, 'rb') as file_obj:
                with open(raw_file_path, 'wb') as des_f:
                    shutil.copyfileobj(file_obj, des_f)
        else:
            print("\treceiving file objects from the web...")
            file_obj = kwargs['file_obj']
            file_obj.save(raw_file_path)
            file_obj.seek(0)

        # 2. we can specify the path where the parsed knowledge is saved
        dir_terms = kwargs['kb_dir'].split(SPLIT_CHAR)

        # Images do not get their own sub-directory named after the file.
        if '.png' not in raw_file_path and '.jpg' not in raw_file_path and '.jpeg' not in raw_file_path:
            dir_terms.append(filename)
        dir_terms.insert(0, USER_SETTINGS['KB_PATH'])
        kb_dir = os.path.join(*dir_terms)
        add_dir = f'{SPLIT_CHAR}'.join(dir_terms[1:])

        # for some formats (e.g., images), create individual directory
        individal_dir_pattern = re.compile(IMAGE_PATTERN, re.IGNORECASE)
        if not individal_dir_pattern.match(raw_file_path):
            os.makedirs(kb_dir, exist_ok=True)
        else:
            os.makedirs('INDIVIDUAL RESOURCE', exist_ok=True) # ****UNDER DEVELOPMENT**** can be refined based on specific formats

        # Record (or reset) the import-progress row for this file at 0%.
        values = (filename, add_dir, 0, '', datetime.now(), datetime.now())
        sql = 'insert into import_progress(file_name, dir_path, progress, error, start_time, end_time) values(?, ?, ?, ?, ?, ?) on conflict(file_name, dir_path) do update set start_time=excluded.start_time, end_time=excluded.end_time, progress=excluded.progress'
        SqliteDB().insert(sql, values)

        # 3. start parsing based on input file formats
        if '.txt' in raw_file_path:
            know_title = kwargs['know_title'] if kwargs['know_title'] else os.path.splitext(filename)[0]
            json_know, failed_paths = parse_text_knowlege(source_txt_path=raw_file_path, know_title=know_title, call_llm=llm_apis[api_name], llm_histories=llm_histories)

        elif '.pdf' in raw_file_path:
            json_know, failed_paths = ocr_pdf(raw_file_path, kb_dir, save, api_name)
            # try:
            #     start_page = kwargs['start_symbol']
            #     end_page = int(kwargs['end_symbol'])
            #     if start_page=='':
            #         start_page = 0
            #     if end_page=='':
            #         end_page = None
            # except:
            #     start_page = 0
            #     end_page = None

            # parsed_structure = parse_pdf(raw_file_path, kb_dir, start_page, end_page)
            # json_know, failed_paths = convert_pdf2dics(parsed_structure, save, kb_dir, model_config, llm_apis[api_name], llm_histories, USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, know_df_cols, USER_SETTINGS['LOCAL_SUMMARY'])

        elif '.doc' in raw_file_path:
            # Optional start/end markers that bound the parsed region.
            try:
                start_line = kwargs['start_symbol']
                end_line = kwargs['end_symbol']
            except:
                start_line = ''
                end_line = ''
            special_doc_type = None # None #'standard'

            parsed_structure = parse_docx(raw_file_path, kb_dir, special_doc_type=special_doc_type, start_text=start_line.strip(), end_text=end_line.strip())
            json_know, failed_paths = convert_doc2dics(parsed_structure, save, kb_dir, USER_SETTINGS['PARSING_ISSUES'], model_config, llm_apis[api_name], llm_histories, USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, USER_SETTINGS['LOCAL_SUMMARY'], know_df_cols)

        elif '.xls' in raw_file_path:
            json_know, failed_paths = parse_xlsx4inject(raw_file_path, USER_SETTINGS['TEMP_RES_PATH'], kb_dir, 'parse', USER_SETTINGS['LOCAL_SUMMARY'], llm_apis[api_name], llm_histories, USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, model_config, know_df_cols)

        elif '.png' in raw_file_path or '.jpg' in raw_file_path or '.jpeg' in raw_file_path:
            know_title = kwargs['know_title'] if kwargs['know_title'] else os.path.splitext(filename)[0]
            # NOTE(review): on the local-scanning path above, file_obj is the
            # handle from a closed 'with' block here — confirm parse_image
            # only needs it on the web-upload path.
            json_know, failed_paths = parse_image(file_obj, raw_file_path, know_title=know_title, call_llm=llm_apis[api_name], llm_histories=llm_histories, KB_PATH=USER_SETTINGS['KB_PATH'], kb_dir=kb_dir)

        elif '.md' in raw_file_path:
            json_know = parse_md(raw_file_path, kb_dir, call_llm=llm_apis[api_name], llm_histories=llm_histories, local_llm_name=USER_SETTINGS['LOCAL_LLM_NAME'], local_llm=local_llm, local_llm_tz=local_llm_tz, model_config=model_config, local_summary=USER_SETTINGS['LOCAL_SUMMARY'])
        # if len(failed_paths)==0:
        #     with open(os.path.join(USER_SETTINGS['TEMP_RES_PATH'], 'Parsed_files.txt'), 'a', encoding='utf-8') as f:
        #         f.write(raw_file_path + '\n')

        # os.remove(raw_file_path)
        # Refresh the in-memory KB caches and mark the import as finished.
        update_kb(add_dir=add_dir, add_filename=filename)
        values = (100, datetime.now(), filename, add_dir)
        sql = 'update import_progress set progress=?, end_time=? where file_name=? and dir_path=?'
        SqliteDB().insert(sql, values)
    return json_know, kb_dir


def checkerboard_inject_search(content, know_file_name, llm_histories=llm_histories, api_name='qwen_api', use_content=True, summary_know=False, alpha=1):
    """Find the knowledge slices inside one file that best match *content*.

    :param content: input knowledge text to match against the file's slices.
    :param know_file_name: absolute path of the knowledge file to search.
    :param llm_histories: reserved for future LLM-assisted matching.
    :param api_name: reserved for future LLM-assisted matching.
    :param use_content: forwarded to vectorize_know (match on contents too).
    :param summary_know: reserved — input summarization is UNDER DEVELOPMENT.
    :param alpha: reserved weighting factor, currently unused.
    :return: response dict carrying the reply text, matched slice paths, the
        query vector (converted to a list for JSON) and the raw content.
    """
    display_file = '-->'.join(know_file_name.split(os.sep)[USER_SETTINGS['ROOT_LEN'] : ])
    print('\t成功检索知识文件-->{}\t\t进入知识切片匹配...'.format(display_file))

    # UNDER DEVELOPMENT: optionally summarize the input knowledge first.

    # Fix: the original called vectorize_texts(kg_summary, ...) with
    # kg_summary still unassigned (NameError at runtime); vectorize the raw
    # input content instead.
    kg_summary, kg_vector = vectorize_texts(content, tokenizer, model)
    parent_keys, know_key_vecs, parsed_paths, key_ = vectorize_know(know_file_name, vectorize_texts, model, tokenizer, use_content)
    sim_inner_keys, sim_ids, similarities, _ = find_closest(parent_keys, know_key_vecs, kg_vector, topk=3)

    # Prefix every matched inner key with the display path of its file.
    current2inject = [display_file + '-->' + sim_key for sim_key in sim_inner_keys]
    current2inject = remove_duplicates_orderkept(current2inject)
    kg_reply = create_reply(current2inject, kg_summary)

    kg_response = {
        'reply' : kg_reply,
        'sim_contents' : current2inject,
        'intentions' : kg_summary,
        'q_vector' : kg_vector.tolist(), # convert numpy to list for json serializing
        'inject_fill_signal': True,
        'full_content': content
    }

    return kg_response


def checkerboard_judge(act_markers):
    """Map a one-hot action-marker list to its action label.

    :param act_markers: sequence of 0/1 markers; the position of the first 1
        selects the action.
    :return: the matching action label, or the default '返回' when no marker
        is set (or the set marker falls outside the known action table).
    """
    # position -> action label
    act_dic = {0: '重写',
               1: '输出',
               2: '提问',
               3: '缩写',
               4: '扩写'}
    try:
        act = act_dic[act_markers.index(1)]
    except (ValueError, KeyError):
        # Fix: narrowed from a bare except. ValueError: no marker equals 1;
        # KeyError: the marker position has no entry in act_dic.
        print('error, probably all markers are reasoned as 0, use default action...')
        act = '返回'
    return act


def _load_record(record_pth):
    # Load a link->filename record json (encrypted or plain); {} when absent.
    record = {}
    if os.path.exists(record_pth):
        if encryptor.encrypt:
            record = encryptor.load_from_file(record_pth)
        else:
            with open(record_pth, 'r', encoding='utf-8') as f:
                record = json.load(f)
    return record


def _save_record(record, record_pth):
    # Persist a link->filename record json (encrypted or plain).
    if encryptor.encrypt:
        encryptor.save_to_file(record, record_pth)
    else:
        with open(record_pth, 'w', encoding='utf-8') as f:
            json.dump(record, f, ensure_ascii=False, indent=4)


def load_resource(resource, kb_dir, inner_key):
    """Persist the table / image payloads attached to a knowledge fragment.

    For every TABLE_* link the dataframe is written as CSV, for every
    IMAGE_* link the base64 payload is decoded and written as a binary file;
    in both cases a link->filename record json in kb_dir is kept up to date
    so repeated injections reuse the same on-disk name.

    :param resource: dict mapping link markers to {'data': ..., 'name': ...}.
    :param kb_dir: knowledge-base directory the resources are written into.
    :param inner_key: fragment path key (currently unused here; kept for
        interface compatibility with callers).
    """
    for link, data in resource.items():
        if link.startswith('TABLE_'):
            tb_record_pth = os.path.join(kb_dir, 'table_record.json')
            tb_record = _load_record(tb_record_pth)
            tb_df = data['data']
            if link in tb_record:
                tb_name = tb_record[link]
            else:
                tb_name = data['name']
                tb_record.update({link : tb_name})
                _save_record(tb_record, tb_record_pth)

            tb_path = os.path.join(kb_dir, tb_name)
            if encryptor.encrypt:
                encryptor.save_to_file(tb_df, tb_path)
            else:
                tb_df.to_csv(tb_path, encoding='utf-8', index=False)

        elif link.startswith('IMAGE_'):
            img_record_pth = os.path.join(kb_dir, 'image_record.json')
            img_record = _load_record(img_record_pth)

            # data['data'] is a data-URL; keep only the base64 payload part.
            img_base64 = data['data'].split(',')[-1]
            if link in img_record:
                image_name = img_record[link]
            else:
                image_name = data['name']
                img_record.update({link : image_name})
                _save_record(img_record, img_record_pth)

            image_output_path = os.path.join(kb_dir, image_name)
            img_bin = base64.b64decode(img_base64)
            if encryptor.encrypt:
                encryptor.save_to_file(img_bin, image_output_path)
            else:
                with open(image_output_path, 'wb') as image_file:
                    image_file.write(img_bin)


def checkerboard_create_know(dir_name, content, inner_key, resource={}, api_name='qwen_api', know_df_cols=know_df_cols):
    '''
        :function add new individual knowledge or inject knowledge to existing structure

        :param dir_name: target KB sub-directory (SPLIT_CHAR separated).
        :param content: raw knowledge text; may embed TABLE_/IMAGE_ link markers.
        :param inner_key: hierarchical path key of the knowledge fragment.
        :param resource: optional link->payload dict of tables/images to persist
            (only read here, so the shared mutable default is harmless).
        :param api_name: llm_apis key; overridden by 'local_api' when enabled.
        :param know_df_cols: column layout of the knowledge dataframe.
    '''

    if USER_SETTINGS['USE_LOCAL_LLM']: # this function supports local_llm
        api_name = 'local_api'

    kb_dir = os.path.join(KB_PATH, dir_name.replace(SPLIT_CHAR, os.path.sep))
    # Fix: makedirs(+exist_ok) instead of mkdir — dir_name may describe a
    # nested path whose parents do not exist yet.
    os.makedirs(kb_dir, exist_ok=True)

    kb_path = os.path.join(kb_dir, 'KB_PTXT.csv')
    if not os.path.exists(kb_path):
        doc_df = pd.DataFrame(columns=know_df_cols)
    else:
        if encryptor.encrypt:
            doc_df = encryptor.load_from_file(kb_path)
        else:
            doc_df = pd.read_csv(kb_path, index_col=False, encoding='utf-8')

    # Collect the table/image link markers embedded in the content.
    pattern = re.compile(r'(TABLE_.*?_TABLE|IMAGE_.*?_IMAGE)')
    matches = pattern.findall(content)
    if len(matches)==0:
        matches = 'NOLINK'
    else:
        matches = '\n'.join(matches)

    keywords = []
    local_summary = ''
    if len(content)>USER_SETTINGS['SUMMARY_THRESHOLD'] and USER_SETTINGS['LOCAL_SUMMARY']:
        keywords, local_summary = extact_local_keywords2summary(content, llm_apis[api_name], llm_histories, USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, model_config)

    know_dic, know_id = process_full_contents(content, inner_key, split_char='$$&&$&$&$$$')
    know_dic = json.dumps(know_dic, ensure_ascii=False, indent=4)

    keywords_str = ' '.join(keywords)
    if inner_key in doc_df['path'].values:
        # *****UNDER DEVELOPMENT****** path already exists: update the row in place
        doc_df.loc[doc_df['path'].astype(str) == inner_key, ['content', 'linkage', 'summary', 'keywords', 'know_id']] = [know_dic, matches, local_summary, keywords_str, know_id]
    else:
        # path not present yet: append a new row
        temp_df = pd.DataFrame({'path':[inner_key],
                                'content':[know_dic],
                                'linkage':[matches],
                                'summary':[local_summary],
                                'keywords':[keywords_str],
                                'know_id':[know_id]})
        doc_df = pd.concat([doc_df, temp_df], ignore_index=True)

    doc_df = process_dup_paths_df(doc_df)
    logger.info('kb_path:{}', kb_path)
    if encryptor.encrypt:
        encryptor.save_to_file(doc_df, kb_path)
    else:
        # Fix: always drop the index. The original wrote the index column when
        # the file was new, which re-appeared as 'Unnamed: 0' on the next read.
        doc_df.to_csv(kb_path, encoding='utf-8', index=False)
    load_resource(resource, kb_dir, inner_key)
    update_kb(add_dir=dir_name)
    print('\t know file write successfully...')


def table_structure_recog(filename=None, tb_path=None):
    """Recognize the header structure of a fill-in spreadsheet.

    :param filename: unused; kept for interface compatibility with callers.
    :param tb_path: path of the spreadsheet whose headers are parsed.
    :return: (search_keys, tb_structure) — the flat list of fillable keys and
        the graph restored from the header paths.
    """
    header_df = parse_headers(tb_path=tb_path, mode='fill')
    _, header_paths, search_keys = parse_tb_contents(header_df, mode='fill', return_lst=True)
    tb_structure, _ = restore_graph_by_paths(header_paths)
    return search_keys, tb_structure
    
    
def update_kb(add_dir='', add_filename='', remove_dirs=None):
    """Re-encode the user knowledge base and refresh the META module caches.

    :param add_dir: SPLIT_CHAR-separated sub-directory that was added/changed.
    :param add_filename: name of the file that was added, if any.
    :param remove_dirs: directories to drop from the KB; defaults to none.
    """
    # Fix: avoid a shared mutable default argument ({}); normalize instead.
    if remove_dirs is None:
        remove_dirs = {}
    _, update_full_path_vectors, update_full_path_ref, update_all_contents_df, update_all_vec, tb_record, img_record = encode_user_kb(KB_PATH, USER_SETTINGS, vectorize_texts, tokenizer, model, know_df_cols, all_df_cols, stopwords, add_dir, add_filename, remove_dirs)
    META.full_path_ref = update_full_path_ref
    META.full_paths = list(update_full_path_ref.keys())
    if len(update_full_path_ref) > 0:
        META.full_path_tokens = [d['tokens'].split('->') for d in update_full_path_ref.values()]
    META.full_path_vectors = update_full_path_vectors
    META.all_contents_df = update_all_contents_df
    if len(update_all_contents_df) > 0:
        META.all_contents_tokens = [str(t).split('->') for t in update_all_contents_df['tokens'].tolist()]
    META.all_vec = update_all_vec
    META.tb_record = tb_record
    META.img_record = img_record


def save_res(res_df, res_path, queries, pred_topks, pred_answers, used_times):
    """Fill the evaluation-result dataframe and dump it to an Excel sheet.

    Mutates res_df in place (columns 'query', 'Top-K', 'answer', 'time'),
    then writes it to res_path, sheet 'res', without the index.
    """
    res_df['query'] = queries
    res_df['Top-K'] = pred_topks
    res_df['answer'] = pred_answers
    res_df['time'] = used_times
    # Fix: DataFrame.to_excel no longer accepts 'encoding' (removed in
    # pandas 2.0); passing it raises TypeError on modern pandas.
    res_df.to_excel(res_path, sheet_name='res', index=False, engine='openpyxl')

    
if __name__ == "__main__":
    # Smoke test: recognize the structure of a fill-in spreadsheet, extract a
    # value for each search key from the delivery-docket text via the LLM,
    # then write the filled table back out as Excel and open it.
    info = '''
        送貨單號 Docket No. ZG5250120000700005
        Truck No. 車牌: PE4699
        Time of water cement added: 09:45
        Time arrive site: 11:13
        完成落貨時間 Discharge completed time: 12:10
        立方米 Cubic metres: 7
        今日訂貨量 Qty of Order: 190 
        塌度 Target slamp: 150
    '''

    tb_path = r"C:\Users\chengke\Desktop\testdir\待生成\test2fill.xlsx"
    out_tb_path = r"C:\Users\chengke\Desktop\testdir\待生成\test_filled.xlsx"
    search_keys, tb_structure = table_structure_recog(tb_path=tb_path)
    key_vals = {}

    # NOTE(review): checkerboard_filling_tb is defined earlier in this file;
    # presumably it asks the LLM to pull each key's value out of `info`.
    for key in search_keys:
        val = checkerboard_filling_tb(key, info, llm_histories=llm_histories, api_name='qwen_api', model='qwen-max')
        key_vals.update({key:val})

    df = pd.DataFrame(key_vals)
    # Write out as an Excel file (note: the path must end with .xlsx)
    df.to_excel(out_tb_path, index=False)
    os.startfile(out_tb_path)


    # from utlis import remove_spaces
    # import time
    #
    # st = time.time()
    # entry_path = r'C:\Users\DELL\Desktop\testdir'
    # files2parse = []
    # target_suffixs = ['.docx', '.xlsx', '.pdf']
    #
    # for root, dirs, files in os.walk(entry_path):
    #     for file in files:
    #         file_path = os.path.join(root, file)            
    #         file_suffix = os.path.splitext(file_path)[1]
    #         if file_suffix in target_suffixs:
    #             files2parse.append(file_path)
    #
    # for file_path in files2parse:
    #     file_size = (os.path.getsize(file_path)/1024)/1024
    #     print('*********** FILE SIZE {} mb **************'.format(file_size))
    #     if file_size<=5: # 10
    #         json_know, kb_dir = checkerboard_inject_parse(file_full_path=file_path)
    #
    #     kb_path = r"D:\Prototype\Checkerboard\KB_TEMPS_DEMO\all_contents.csv"
    #     kb_size = (os.path.getsize(kb_path)/1024)/1024
    #     print('************************* KB SIZE {} mb**************************'.format(kb_size))
    #
    # print('\t ', str(time.time()-st))
    
    # data_path = r'D:\OneDrive\Code Warehouse\RAG4CM\data\all_queries_info.xlsx'
    # res_path = r'D:\OneDrive\Code Warehouse\RAG4CM\data\temp.xlsx'
    #
    # finetune_queries_path = r'D:\OneDrive\Code Warehouse\RAG4CM\data\finetune queries.txt'
    # checkerboard_res_path = 'D:\OneDrive\Code Warehouse\RAG4CM\data\checkerboard_16_files_base_voting2path.xlsx'
    #
    # data_df = pd.read_excel(data_path, sheet_name='all_queries')
    # data_df.columns =['query', 'answer', 'node path', 'true answer', 'engineering prob', 'source']
    # # data_df.set_index('idx', inplace=True)
    #
    # '''
    #     meta analysis for the testing data
    # '''
    #
    # q_types = data_df['engineering prob'].value_counts()
    # print('the query type distribution is as follows: \n', q_types)
    #
    # # data_df['length'] = data_df['query'].str.len() + data_df['response'].str.len()
    #
    # num_tables = data_df['source'].str.contains('TABLE').sum()
    # num_images = data_df['source'].str.contains('IMAGE').sum()
    # num_texts = data_df['source'].str.contains('TEXT').sum()
    # print('the data contain {} text paragraphs {} tables and {} images'.format(num_texts, num_tables, num_images))
    #
    # '''
    #     data testing
    # '''
    # # checkerboard_res_df = pd.read_excel(checkerboard_res_path, sheet_name='res')
    # # CKB_current = checkerboard_res_df[pd.notna(checkerboard_res_df['answer'])]
    # # CKB_df = checkerboard_res_df[pd.isna(checkerboard_res_df['answer'])]
    # # rm_queries = CKB_df['query'].tolist()
    #
    # res_df = pd.DataFrame(columns=['query', 'answer', 'Top-K', 'time'])
    # topk = 6
    # # finetune_mode = 'both' # 'use content'
    # # finetue_queries = []
    # # finetune_ratio = 0.6
    # # all_queries = data_df['query'].values
    # #
    # # for qe in all_queries:
    # #     if random.random()<=0.6:
    # #         finetue_queries.append(qe)
    # #
    # # with open(finetune_queries_path, 'w') as f:
    # #     for qe in finetue_queries:
    # #         f.write(f"{qe}\n")
    # #
    # # with open(finetune_queries_path, 'r') as f:
    # #     finetue_queries = f.readlines()
    # # finetue_queries = [qe.strip() for qe in finetue_queries]
    #
    # issues = []
    # queries = []
    # pred_answers = []
    # pred_topks = []
    # used_times = []
    # for i, row in data_df.iterrows():
    #     try:
    #         print('\t begin the QA at {}s RFI'.format(i))
    #         st_time = time.time()   
    #
    #         rfi = row['query']
    #         # if not rfi in rm_queries:
    #         #     continue
    #
    #         true_top1 = remove_spaces(row['node path'].replace('<--', '').replace('-->', '').split('//')[-1], handle_punctuation=True)
    #         true_response = row['true answer']
    #         pred_response = ''
    #
    #         reply = checkerboard_find(rfi, topk)
    #         # txts4answer = checkerboard_answer(rfi, reply['merged_paths'][ : topk], gen_doc=False)
    #         # txts4answer = txts4answer[ : 5999]
    #         # pred_response = checkerboard_simple_ask(rfi, txts4answer, llm_histories=llm_histories, api_name='qwen_api')
    #         ed_time = time.time()
    #
    #         sim_contents = reply['merged_paths']
    #         pred_tops = [remove_spaces(re.sub(r'_[0-9]+$', '', p.split('-->')[-1]), handle_punctuation=True) for p in sim_contents[ : topk]]
    #
    #         # if not rfi in finetue_queries:
    #         #     continue
    #         # if true_top1 in pred_tops:
    #         #     user_selected_ids = pred_tops.index(true_top1)
    #         #     local_train_data = gen_train_data_from_interactions(USER_SETTINGS, 
    #         #                                       rfi, 
    #         #                                       sim_contents, 
    #         #                                       [user_selected_ids],
    #         #                                       all_contents_df=META.all_contents_df,
    #         #                                       mode=finetune_mode,
    #         #                                       add_neg=10)
    #
    #         try:
    #             top_loc = pred_tops.index(true_top1)
    #         except:
    #             top_loc = -1
    #         print(pred_tops, '\t', true_top1, '\t', str(top_loc))
    #
    #         queries.append(rfi)
    #         used_times.append(np.round(ed_time-st_time, 3))
    #         pred_answers.append(pred_response)
    #         pred_topks.append(top_loc)    
    #         assert len(queries)==len(pred_answers) and len(pred_answers)==len(pred_topks) and len(pred_topks)==len(used_times)
    #
    #     except Exception as e:
    #         print(e)
    #         save_res(res_df, res_path, queries, pred_topks, pred_answers, used_times)
    #         issues.append(rfi)
    #
    # save_res(res_df, res_path, queries, pred_topks, pred_answers, used_times)
    # print(issues)
    #
    # # from layout_parser import merge_title_levels
    # # root = os.path.join(META.KB_PATH , 'Supplementary Files')
    # #
    # # file_name = '4. 工程标书测试_桩基施工方案.docx'
    # # # file_path = os.path.join(root, '毕节县志提纲.txt')
    # # # file_path = os.path.join(root, '豆美1号_选育报告.docx')
    # # file_path = os.path.join(root, file_name)
    # #
    # # kb_dir = os.path.join(META.KB_PATH, file_name)
    # # auto_recog = False
    # # save = True
    # # start_text='解析开始行'
    # # end_text='解析结束行'
    # #
    # # try:
    # #     os.mkdir(kb_dir)
    # #     print('\tcreating KB directory at {}'.format(kb_dir))
    # # except:
    # #     pass
    
    
    

    
    
    
    
    

    


    
    



