import shutil
import re
import io
import os
import copy
import json
import time
import sys               
import ctypes
import base64
import numpy as np
import pandas as pd
from loguru import logger
from urllib.parse import unquote
from datetime import datetime, timedelta
from werkzeug.exceptions import HTTPException
from flask_cors import CORS
from file_encryptor import encryptor
from sqlite_pool import SqliteDB
from flask import Flask, render_template, request, jsonify, Response, make_response, send_file

from QA_moduled import checkerboard_find, checkerboard_answer, checkerboard_autofill, checkerboard_reason, checkerboard_create_know, checkerboard_judge, table_structure_recog
from QA_moduled import checkerboard_qlabel, checkerboard_learn, checkerboard_inject_search, checkerboard_inject_parse, update_kb, answer_stream, answer_stream_doc
from txt2doc import generate_doc_dic, generate_doc_paths, transform_to_hierarchy
from META import USER_SETTINGS, settings_,KB_PATH, model_config, llm_apis, user, model, tokenizer, vectorize_texts
from utlis import path_handle, clean_file, SPLIT_CHAR, EMPTY_DIR_PLACEHOLDER, is_valid_windows_filename, match_df_cols
from utlis import know_df_cols, parse_fragment_path, tokenize2stw_remove, merge_df, gen_str_codes, process_path_texts, parser_context
from tree import get_dir_tree, get_file_tree, parse_kb_ptxt
from txt2doc import write_table



app = Flask(__name__)
CORS(app)  # 对整个应用启用 CORS
pd.set_option('display.max_columns', None)
import META
from kb_encoder import remove_from_kb, g_lock, get_injection_paths

def init_table():
    """Create the app's bookkeeping tables and seed the default directory.

    Idempotent: relies on CREATE TABLE IF NOT EXISTS / INSERT OR IGNORE,
    so it is safe to run on every startup.
    """
    ddl_statements = (
        """
CREATE TABLE IF NOT EXISTS import_progress (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  file_name TEXT NOT NULL,-- 文件名
  dir_path TEXT NOT NULL,-- 目录路径
  progress INTEGER DEFAULT 0, -- 进度0-100
  error TEXT DEFAULT '',
  start_time DATETIME DEFAULT NULL,
  end_time DATETIME DEFAULT NULL,
  UNIQUE(file_name, dir_path)
);""",
        """
CREATE TABLE IF NOT EXISTS empty_dir (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
  empty_dir_path TEXT NOT NULL,--空目录路径
  UNIQUE(empty_dir_path)
);""",
    )
    for ddl in ddl_statements:
        SqliteDB().execute(ddl)

    # Register the default directory as an "empty dir" and make sure it
    # actually exists on disk under the current user's KB root.
    default_dir = '默认目录'
    SqliteDB().insert(
        'insert or ignore into empty_dir(empty_dir_path) values(?)',
        (default_dir + SPLIT_CHAR + EMPTY_DIR_PLACEHOLDER, ),
    )
    real_dir_path = os.path.join(META.KB + META.user,
                                 default_dir.replace(SPLIT_CHAR, os.path.sep))
    os.makedirs(real_dir_path, exist_ok=True)

init_table()

@app.before_request
def log_request_info():
    """Stamp the request with its arrival time for latency measurement."""
    # Read back by log_response_info / handle_global_exception.
    setattr(request, 'init_time', time.time())

@app.after_request
def log_response_info(response):
    """Log one access line per request: addr, method, path, status, size, latency."""
    status_code = response.status_code
    # 500s are logged (with traceback) by the global exception handler instead.
    if status_code == 500:
        return response
    # Skip bodies streamed straight from disk — except SSE streams.
    if response.direct_passthrough and 'event-stream' not in response.content_type:
        return response
    # The progress endpoint is polled constantly; logging it is pure noise.
    if request.method == 'GET' and request.endpoint == 'progress':
        return response

    elapsed_ms = (time.time() - request.init_time) * 1000
    logger.info('{} {} {} {} {}bytes {:.2f}ms {}',
                request.remote_addr, request.method,
                unquote(request.full_path), status_code,
                response.content_length, elapsed_ms, {})
    return response

@app.errorhandler(Exception)
def handle_global_exception(e):
    """Turn any uncaught exception into a JSON error response.

    HTTPExceptions keep their own status code and payload; anything else is
    logged with a traceback and returned as a generic 500.
    """
    if isinstance(e, HTTPException):
        has_data = hasattr(e, 'data') and e.data
        payload = e.data if has_data else {"message": e.description}
        return jsonify(payload), e.code

    elapsed_ms = (time.time() - request.init_time) * 1000
    logger.exception('{} {} {} {} e:{} {:.2f}ms {}',
                     request.remote_addr, request.method,
                     unquote(request.full_path), 500, e, elapsed_ms, {})
    return jsonify({"message": 'Internal Server Error'}), 500


'''
    :PART1 knowledge searching and retrieval
'''
@app.route('/')
def index():
    """Serve the single-page front end."""
    page = render_template('index.html')
    return page


@app.route('/ask', methods=['POST'])
def ask():
    """Answer a user question via KB retrieval (or a canned labeler reply).

    JSON body: message, ifQlabel, optional topk and data_type.
    Returns the retrieval result dict with feedback_required set to False.
    """
    message = request.json['message']
    qlabel = request.json['ifQlabel']
    topk = request.json.get('topk')              # None -> retriever default
    data_type = request.json.get('data_type', 1)

    labeled_queries = checkerboard_qlabel(message, qlabel)
    response_data = None
    # NOTE(review): each iteration overwrites response_data, so only the LAST
    # labeled query's answer is returned — presumably intentional for the
    # current single-query flow; confirm before relying on multi-query input.
    for q in labeled_queries:
        if q['predefined_res'] == '':  # normal retrieval path
            response_data = checkerboard_find(q['query'], topk, data_type=data_type)
        else:  # the labeler short-circuited with a predefined reply
            response_data = {
                'reply': q['predefined_res'],
                'sim_contents': [],
                'merged_paths': [],
                'intentions': q['query'],
                'q_vector': [],
                'inject_fill_signal': False,
            }
    if response_data is None:
        # Fixed: previously raised NameError (-> 500) when the labeler
        # returned no queries at all.
        return jsonify({'reply': '', 'sim_contents': [], 'merged_paths': [],
                        'intentions': message, 'q_vector': [],
                        'inject_fill_signal': False, 'feedback_required': False})
    response_data.update({'feedback_required': False})
    return jsonify(response_data)


@app.route('/process_satisfactory_reply', methods=['POST'])
def process_satisfactory_reply():
    """Answer from the user-selected knowledge entries and learn from the choice.

    The user may select several of the retrieved candidates ('indices' is a
    comma-separated, 1-based list). Returns the generated reply.
    """
    user_intention = request.json['intentions']
    sim_contents = request.json['sim_contents']
    save_session = request.json.get('save_session', False)

    user_selected_ids = []
    for id_ in request.json['indices'].split(','):
        selected_id = int(id_) - 1
        if not 0 <= selected_id < len(sim_contents):
            return jsonify({'reply': '未能找到匹配的知识'})
        user_selected_ids.append(selected_id)

    current_markers = request.json['current_markers']  # LLM-rewrite markers per step
    gen_doc = request.json['gen_doc']
    auto_fill = request.json['auto_fill']
    auto_reasoning = request.json['auto_reasoning']
    if auto_fill or auto_reasoning:
        gen_doc = False

    act = checkerboard_judge(current_markers[-1])  # [-1] = current marker set
    # Spreadsheet templates always use fill-in mode.
    # Fixed: was a bare `except: pass`, which also swallowed unrelated errors.
    file_template_path = request.json.get('file_template_path', '')
    if isinstance(file_template_path, str) and 'xls' in file_template_path:
        act = '填空'

    selected_contents = [sim_contents[i] for i in user_selected_ids]
    reply = checkerboard_answer(user_intention, selected_contents, gen_doc, act,
                                save_session=save_session)
    checkerboard_learn(reply, user_intention, sim_contents, user_selected_ids,
                       current_markers)
    return jsonify(reply)


@app.route('/auto_fill_keys', methods=['POST'])
def auto_fill_keys():
    """Upload a template (.doc/.docx or .xls/.xlsx) and extract its search keys.

    Returns the template structure plus the key list the front end iterates
    over during auto-fill ('TEMP_HOLDER' is a sentinel terminator).
    """
    clean_file(USER_SETTINGS['MATCH_DF'], mode='clean')
    file = request.files['file']
    filename = file.filename
    background_term = request.form['background']

    # Fixed: validate the filename BEFORE saving — with an empty filename the
    # old code first attempted to save onto the template directory itself.
    if filename == '':
        return jsonify({'reply': '请检查文件名和文件内容不为空'})

    file_path = os.path.join(USER_SETTINGS['TEMPLATE_DIR'], filename)
    file.save(file_path)

    if '.doc' in filename:
        use_cache = True  # request.form.get('useCache') == 'true'
        # Convert the docx into a hierarchical tree; the paths down to the
        # leaf nodes become the 'search keys'.
        tree, file_path = generate_doc_dic(
            use_cache, filename, call_llm=llm_apis['gpt_api'],
            template_dir=USER_SETTINGS['TEMPLATE_DIR'],
            cache_path=USER_SETTINGS['DOC_INFO_PATH'],
            model_config=model_config, special_doc_type=None)
        doc_paths = generate_doc_paths(tree['ROOT'])

        search_keys = [path_handle(pth, mode='convert').split(';')[-1]
                       for pth in doc_paths] + ['TEMP_HOLDER']
        tree_transformed = transform_to_hierarchy(tree)
        tree_str = json.dumps(tree, ensure_ascii=False)
        return jsonify({'search_keys': search_keys, 'structure': tree_str,
                        'trans_structure': tree_transformed[0],
                        'file_path': file_path,
                        'copy_keys': copy.deepcopy(search_keys)})

    if '.xls' in filename:
        search_keys, tb_structure = table_structure_recog(filename)
        search_keys += ['TEMP_HOLDER']
        return jsonify({'search_keys': search_keys, 'structure': tb_structure,
                        'trans_structure': tb_structure, 'file_path': file_path,
                        'copy_keys': copy.deepcopy(search_keys)})

    return jsonify({'reply': '遇到目前不支持的自动填充文件类型...'})


@app.route('/next_key_fill', methods=['POST'])
def next_key_fill():
    """Pop the next pending search key off the client-held queue."""
    search_keys = request.json['searchKeys']
    print('\tinput search keys for fill: ', search_keys)
    if not search_keys:
        return jsonify({'error':'search_keys 不存在'}), 400

    # The current key is consumed; the remainder goes back to the client.
    current_key = search_keys.pop(0)
    print('\tafter pop search keys for fill: ', search_keys)
    return jsonify({
        'search_keys': search_keys,
        'current_key': current_key,
        'keys_len': len(search_keys),
    })


@app.route('/auto_fill', methods=['POST'])
def auto_fill():
    """Retrieve candidate answers for the current search key."""
    payload = request.json
    current_key = payload['currentKey']
    topk = payload.get('topk')              # None -> retriever default
    data_type = payload.get('data_type', 1)
    return jsonify(checkerboard_find(current_key, topk, data_type=data_type))


@app.route('/auto_reasoning', methods=['POST'])
def auto_reasoning():
    """Auto-reason the next steps to take, via LLM planning / local learner."""
    payload = request.json
    intention = payload['message']
    sim_contents = payload['sim_contents']
    merged_paths = payload['merged_paths']
    mode = payload['mode']

    seq_term, seq_id, markers = checkerboard_reason(intention, sim_contents,
                                                    merged_paths, mode)
    marker_str = ','.join(str(m) for m in markers)
    print('current reasoning model is {} \n auto reasoned seq ids --> {}, seq terms --> {}, markers --> {}'.format(mode, seq_id, seq_term, marker_str))
    return jsonify({'reasoned_term': seq_term,
                    'reasoned_id': seq_id,
                    'reasoned_markers': markers})


@app.route('/merge_auto_fill', methods=['POST'])
def merge_auto_fill():
    """Once every search key has an answer, generate the filled document."""
    payload = request.json
    filled_contents = payload['filled_contents']
    filled_ids = payload['filled_ids']
    doc_tree = json.loads(payload['doc_tree'])
    copy_keys = payload['copy_keys']
    filled_markers = payload['filled_markers']
    file_template_path = payload['file_path']

    # 'SKIP' answers become empty strings in the (key, value) pairs.
    path_val_pairs = [
        (copy_keys[idx], '' if content == 'SKIP' else content)
        for idx, content in enumerate(filled_contents)
    ]

    checkerboard_autofill(doc_tree, file_template_path, filled_contents,
                          filled_ids, filled_markers,
                          path_val_pairs=path_val_pairs)
    return jsonify({'doc_file':'Report_doc.docx', 'tb_file':'Res_tb.xlsx'})


@app.route('/reply', methods=['POST'])
def reply():
    """Streaming variant of /process_satisfactory_reply: SSE answer stream.

    Same request contract as that endpoint, but the answer is streamed as
    text/event-stream and no learning step is performed here.
    """
    user_intention = request.json['intentions']
    sim_contents = request.json['sim_contents']

    user_selected_ids = []
    for id_ in request.json['indices'].split(','):
        selected_id = int(id_) - 1
        if not 0 <= selected_id < len(sim_contents):
            return jsonify({'reply': '未能找到匹配的知识'})
        user_selected_ids.append(selected_id)

    current_markers = request.json['current_markers']  # LLM-rewrite markers per step
    gen_doc = request.json['gen_doc']
    auto_fill = request.json['auto_fill']
    auto_reasoning = request.json['auto_reasoning']
    if auto_fill or auto_reasoning:
        gen_doc = False

    act = checkerboard_judge(current_markers[-1])  # [-1] = current marker set
    # Spreadsheet templates always use fill-in mode.
    # Fixed: was a bare `except: pass`, which also swallowed unrelated errors.
    file_template_path = request.json.get('file_template_path', '')
    if isinstance(file_template_path, str) and 'xls' in file_template_path:
        act = '填空'

    selected_contents = [sim_contents[i] for i in user_selected_ids]
    return Response(answer_stream(user_intention, selected_contents, gen_doc, act),
                    content_type='text/event-stream')


@app.route('/replyDoc', methods=['POST'])
def reply_doc():
    """Stream an LLM transformation of document content over SSE.

    `act` must be one of 提问/重写/缩写/扩写 (ask/rewrite/shorten/expand).
    """
    content = request.json['content']
    act = request.json['act']
    user_intention = request.json.get('user_intention', '')

    # Strip inline <img> tags — only the text is sent to the model.
    cleaned_content = re.sub(r'<img[^>]*>', '', content)
    if act not in ('提问', '重写', '缩写', '扩写'):
        # Fixed: the error message omitted 提问 even though it is accepted.
        return jsonify({'error': '仅支持提问、重写、缩写、扩写'}), 400
    return Response(answer_stream_doc(user_intention, cleaned_content, act),
                    content_type='text/event-stream')


from docx import Document
from docx.text.paragraph import Paragraph
from docx.shared import Cm, Pt, RGBColor
from docx.oxml.ns import qn
from docx.enum.text import WD_ALIGN_PARAGRAPH


# def write_table(document, tb_df):
#     table = document.add_table(rows=tb_df.shape[0]+1, cols=tb_df.shape[1], style='Table Grid')
#     table.autofit = True
    
#     # add column headers
#     for j, col in enumerate(tb_df.columns.fillna('')):
#         table.cell(0, j).text = col
    
#     # add the DataFrame data to the table
#     for i in range(tb_df.shape[0]):
#         for j in range(tb_df.shape[1]):
#             table.cell(i+1, j).text = str(tb_df.iat[i, j])


RESOURCE_PATTERN = re.compile(r'(TABLE_.*?_TABLE|IMAGE_.*?_IMAGE)')


def _apply_songti(runs):
    """Set the 宋体 (SimSun) font on every run, incl. the east-Asian variant."""
    for run in runs:
        run.font.name = '宋体'
        run._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')


def build_docx(data):
    """Rebuild a .docx Document from a nested section dict.

    Section keys become headings; '__content__' text is split on the
    TABLE_/IMAGE_ placeholders and each placeholder is replaced by the
    matching entry from '__resource__' (a DataFrame for tables, raw image
    bytes for images).
    """
    doc = Document()

    def process_section(section, level=0):
        for key, text in section.items():
            if key == '__content__':
                matches = RESOURCE_PATTERN.findall(text)
                # re.split keeps the captured placeholders in the result list,
                # so every resource marker appears as its own part.
                for part in re.split(RESOURCE_PATTERN, text):
                    if part in matches and 'TABLE_' in part:
                        write_table(doc, section['__resource__'][part])
                    elif part in matches and 'IMAGE_' in part:
                        img_bin = section['__resource__'][part]
                        para = doc.add_paragraph()
                        para.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        para.add_run().add_picture(io.BytesIO(img_bin), width=Cm(10))
                        # Blank centered paragraph as spacing after the image.
                        para = doc.add_paragraph()
                        para.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    else:
                        _apply_songti(doc.add_paragraph(part).runs)
            elif key not in ('__know_id__', '__resource__'):
                _apply_songti(doc.add_heading(key, level=level + 1).runs)
                process_section(text, level + 1)

    process_section(data)
    return doc


# @app.route('/download2/<file_name>', methods=['GET'])
# def download(file_name):
#     type, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(META.user, KB_PATH, file_name)
#     file_name = os.path.basename(real_file_dir)
#     file_tree, know_id = get_file_tree(real_file_dir, file_dir, sub_path)
#     doc = build_docx(file_tree)
#     binary_stream = io.BytesIO()
#     doc.save(binary_stream)
#     binary_stream.seek(0)

#     file_data = binary_stream.getvalue()
#     response = Response(file_data, mimetype='application/octet-stream')
#     response.headers['Content-Disposition'] = f"attachment; filename*=UTF-8''{quote(file_name)}"

#     # Add cache control headers
#     response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
#     response.headers["Pragma"] = "no-cache"
#     response.headers["Expires"] = "0"
#     return response


def _load_binary(file_path):
    """Read a file's bytes, transparently decrypting when encryption is on."""
    if encryptor.encrypt:
        return encryptor.load_from_file(file_path)
    with open(file_path, 'rb') as fd:
        return fd.read()


@app.route('/download/<file_name>', methods=['GET'])
def download_file(file_name):
    """Download an image fragment, a generated result doc, or a rebuilt .docx.

    - image names are served inline from the KB directory;
    - the fixed result-file names come from the temp results directory;
    - anything else is treated as a fragment path and reassembled into a
      .docx via build_docx.
    """
    if file_name.endswith(('.png', '.jpg', '.jpeg')):
        _, real_file_dir, _, _, fragment_name = parse_fragment_path(
            META.user, KB_PATH, file_name)
        mimetype = 'image/png' if file_name.endswith('.png') else 'image/jpeg'
        file_path = os.path.join(os.path.abspath(KB_PATH), real_file_dir, fragment_name)
        return send_file(io.BytesIO(_load_binary(file_path)), mimetype=mimetype)

    if file_name in ('Res_doc.docx', 'Report_doc.docx'):
        # Generated result files live in the server-side temp results dir.
        res_dir = os.path.abspath(USER_SETTINGS['TEMP_RES_PATH'])
        file_path = os.path.join(res_dir, file_name)
        return send_file(io.BytesIO(_load_binary(file_path)),
                         mimetype='application/octet-stream',
                         as_attachment=True, download_name=file_name)

    # Rebuild the knowledge fragment into a .docx on the fly.
    _, real_file_dir, file_dir, sub_path, _ = parse_fragment_path(
        META.user, KB_PATH, file_name)
    download_name = os.path.splitext(os.path.basename(real_file_dir))[0] + '.docx'
    file_tree, know_id = get_file_tree(real_file_dir, file_dir, sub_path,
                                       img_bin=True, tbl_pd=True)
    doc = build_docx(file_tree)
    binary_stream = io.BytesIO()
    doc.save(binary_stream)
    binary_stream.seek(0)
    return send_file(binary_stream, mimetype='application/octet-stream',
                     as_attachment=True, download_name=download_name)

'''
    :PART2 knowledge injection
'''
@app.route('/inject_create', methods=['POST'])
def inject_create():
    """Create a single knowledge entry under the chosen KB directory."""
    payload = request.json
    inner_key = payload['inner_key']
    know_content = payload['know_content']
    resource = payload.get('resource', {})
    # Missing/empty kb_dir falls back to the default directory.
    kb_dir = payload.get('kb_dir') or '默认目录'

    checkerboard_create_know(kb_dir, know_content, inner_key, resource)
    return jsonify({'code' : 200})
    

@app.route('/inject_parse', methods=['POST'])
def inject_parse(data=None):
    """Parse an uploaded file (or plain-text knowledge) into structured form.

    `data` lets internal callers pass a request-like object directly; HTTP
    calls use flask's request.

    Fixed: (1) `data` now defaults to None — Flask invokes view functions
    with no positional arguments, so the mandatory parameter broke every real
    HTTP request; (2) the old conditional `request = data` made `request`
    function-local, raising UnboundLocalError on the normal HTTP path. The
    effective request object is now resolved into a separate local `req`.
    """
    req = data if data is not None else request

    if 'file' in req.files:
        file = req.files['file']
        know_title = req.form['know_title']

        # 'range' is 'START-->END'; a missing '-->' leaves both equal.
        start_symbol = req.form['range'].split('-->')[0]
        end_symbol = req.form['range'].split('-->')[-1]
        all_parse = req.form['all_parse']
        kb_dir = req.form.get('kb_dir')
        kb_path = req.form.get('kb_path', 'sys')
        req_user = req.form.get('user', 'sys')
        if not kb_dir:
            kb_dir = '默认目录'
        _, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(
            req_user, kb_path, kb_dir)
        # The target must be a directory node, not a knowledge title.
        if kb_dir != file_dir:
            return jsonify({'error': '请选中注入目录而非标题'}), 400

        if file.filename == '':
            return jsonify({'error': 'No selected file'}), 400

        if file:
            json_know, know_dir = checkerboard_inject_parse(
                filename=file.filename,
                start_symbol=start_symbol,
                end_symbol=end_symbol,
                know_title=know_title,
                kb_dir=kb_dir,
                file_obj=file,
                all_parse=all_parse)
            return jsonify({'json_know': json_know,
                            'know_dir': know_dir})
    else:
        # No multipart file: expect plain-text knowledge in the JSON body.
        try:
            plain_know = req.json['plain_know']
            know_title = req.json['know_title']

            json_know, _ = checkerboard_inject_parse(plain_text_know=plain_know,
                                                     know_title=know_title)
            return jsonify({'json_know': json_know})
        except Exception as e:
            logger.exception('checkerboard_inject_parse fail! e:{}', e)
            return jsonify({'error': 'bad request, No file part and No text knowledge'}), 400


@app.route('/inject_parse', methods=['DELETE'])
def cancel_inject_parse():
    """Cancel in-flight file injections and remove their partial KB data.

    Accepts either a bare JSON list of file names or {"file_names": [...]}.

    Fixed: the old `try: request.json / except: request.json['file_names']`
    could never take the fallback branch (if `request.json` raised, the
    except body re-raised the same error), and a dict payload was silently
    iterated by its keys. The payload shape is now checked explicitly.
    """
    payload = request.json
    if isinstance(payload, dict):
        file_names = payload.get('file_names', [])
    else:
        file_names = payload or []

    for file_name in file_names:
        sql = 'select progress, dir_path from import_progress where file_name=? order by end_time desc'
        row = SqliteDB().selectone(sql, (file_name, ))
        if not row:
            return jsonify({'code': 404, 'message': 'File name does not exist'})

        progress, dir_path = row[0], row[1]
        real_dir_path = dir_path.replace(SPLIT_CHAR, os.path.sep)
        if progress == 100:
            # Completed injections cannot be cancelled (delete via /fragment).
            return jsonify({'code': 403, 'message': 'Cannot cancel the injected file'})

        sql = 'delete from import_progress where file_name=? and dir_path=?'
        SqliteDB().delete(sql, (file_name, dir_path))

        # Drop the partially-written KB directory for this file.
        kb_dir = os.path.join(KB_PATH, real_dir_path)
        if os.path.exists(kb_dir):
            shutil.rmtree(kb_dir)
    return jsonify({'code': 200})


def _load_record(record_path):
    """Load a JSON record file ({} when absent), honoring encryption."""
    if not os.path.exists(record_path):
        return {}
    if encryptor.encrypt:
        return encryptor.load_from_file(record_path)
    with open(record_path, 'r', encoding='utf-8') as f:
        return json.load(f)


def _save_record(record, record_path):
    """Persist a JSON record file, honoring encryption."""
    if encryptor.encrypt:
        encryptor.save_to_file(record, record_path)
    else:
        with open(record_path, 'w', encoding='utf-8') as f:
            json.dump(record, f, ensure_ascii=False, indent=4)


def clean_asset(real_file_dir, linkages):
    """Delete image/table asset files referenced by removed fragments.

    `linkages` holds newline-separated placeholder links; each link maps
    through image_record.json / table_record.json to an asset file, which is
    deleted along with its record entry.

    Fixed: table_record.json used to be opened without an existence check
    (FileNotFoundError when absent), and a missing image_record.json aborted
    the table cleanup entirely. Each record is now handled independently.
    """
    img_record_path = os.path.join(real_file_dir, 'image_record.json')
    tbl_record_path = os.path.join(real_file_dir, 'table_record.json')
    img_dic = _load_record(img_record_path)
    tbl_dic = _load_record(tbl_record_path)

    def _drop(record, link):
        # Remove the record entry and its backing file (if still on disk).
        asset_path = os.path.join(real_file_dir, record.pop(link))
        if os.path.isfile(asset_path):
            os.remove(asset_path)

    for linkage in linkages:
        for link in linkage.split('\n'):
            if link in img_dic:
                _drop(img_dic, link)
            elif link in tbl_dic:
                _drop(tbl_dic, link)

    # Write back only records whose file actually existed, so absent record
    # files are not created empty.
    if os.path.exists(img_record_path):
        _save_record(img_dic, img_record_path)
    if os.path.exists(tbl_record_path):
        _save_record(tbl_dic, tbl_record_path)


@app.route('/fragment', methods=['DELETE'])
def fragment():
    """Delete knowledge fragments (standalone images or text entries).

    Expects JSON {'fragment_paths': [...]}. Image fragments are removed from
    disk and from image_record.json; text fragments are dropped from the
    per-file KB_PTXT.csv and any linked image/table assets are cleaned up via
    clean_asset. Finally the KB index is refreshed for every touched dir.
    """
    remove_dirs = set()
    fragment_paths = request.json['fragment_paths']
    for fragment_path in fragment_paths:
        type, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(META.user, KB_PATH, fragment_path)

        if '.png' in fragment_name or '.jpg' in fragment_name or '.jpeg' in fragment_name:
            # Image fragment: delete the file and drop it from the record.
            remove_dirs.add(file_dir)
            img_record_pth =  os.path.join(real_file_dir, 'image_record.json')
            if os.path.exists(img_record_pth):
                if encryptor.encrypt:
                    img_record = encryptor.load_from_file(img_record_pth)
                else:
                    with open(img_record_pth, 'r', encoding='utf-8') as f:
                        img_record = json.load(f)
                img_path = os.path.join(real_file_dir, fragment_name)
                if os.path.exists(img_path):
                    os.remove(img_path)
                # Keep every record entry that does not point at this image.
                filtered_data = {k: v for k, v in img_record.items() if v != fragment_name}
                if encryptor.encrypt:
                    encryptor.save_to_file(filtered_data, img_record_pth)
                else:
                    with open(img_record_pth, 'w', encoding='utf-8') as f:
                        json.dump(filtered_data, f, ensure_ascii=False, indent=4)
        else:
            # Text fragment: drop the row whose 'path' equals sub_path from
            # the per-file KB_PTXT.csv, then clean any assets it linked to.
            remove_dirs.add(file_dir)
            kb_ptxt_path =  os.path.join(real_file_dir, 'KB_PTXT.csv')
            if os.path.exists(kb_ptxt_path):
                
                if encryptor.encrypt:
                    ptxt_df = encryptor.load_from_file(kb_ptxt_path)
                else:
                    ptxt_df = pd.read_csv(kb_ptxt_path, index_col=False, encoding='utf-8')
                # 'linkage' ties a text row to its image/table placeholders;
                # collect them before filtering the row out.
                linkages = ptxt_df.loc[ptxt_df['path'].astype(str) == sub_path, 'linkage'].tolist()    
                ptxt_df = ptxt_df[ptxt_df['path'].astype(str) != sub_path]
                # if len(ptxt_df) == 0:
                #     sql = 'delete from import_progress where dir_path=?'
                #     SqliteDB().delete(sql, (file_dir, ))
                #     shutil.rmtree(os.path.dirname(kb_ptxt_path))
                # else:
                if encryptor.encrypt:
                    encryptor.save_to_file(ptxt_df, kb_ptxt_path)
                else:
                    ptxt_df.to_csv(kb_ptxt_path, encoding='utf-8', index=False)
                if linkages:
                    clean_asset(real_file_dir, linkages)
    # Rebuild the KB index for every directory that lost a fragment.
    update_kb(remove_dirs=remove_dirs)
    return jsonify({'code' : 200})

        
@app.route('/inject_search', methods=['POST'])
def inject_search():
    """Search the KB for where the new content should be injected."""
    payload = request.json
    current_markers = payload['current_markers']
    sim_contents = payload['sim_contents']
    content = payload['intentions']

    # Resolve the 1-based selected indices; only the first selection picks
    # the target knowledge file.
    selected_ids = [int(idx) - 1 for idx in payload['indices'].split(',')]
    know_file_name = [sim_contents[i] for i in selected_ids][0]

    kg_response = checkerboard_inject_search(content, know_file_name)
    return jsonify(kg_response)


# @app.route('/inject_fill', methods=['POST'])
# def inject_fill():
#     user_selected_id = [(int(id_)-1) for id_ in request.json['indices'].split(',')][0] # suppose injection only has one target
#     kg_summary = request.json['kg_summary']
#     content = request.json['full_content']
#     inject_paths = request.json['inject_paths']
#
#     current_markers = request.json['current_markers']
#
#     injected_file = checkerboard_inject_fill(content, inject_paths[user_selected_id])
#     return jsonify({'inject_file': injected_file})


@app.route('/inspect', methods=['POST'])
def inspect():
    """Placeholder endpoint — not implemented yet.

    Fixed: returning None from a Flask view raises a TypeError (-> 500);
    respond with an explicit 501 until the feature is built.
    """
    return jsonify({'message': 'not implemented'}), 501


@app.route('/progress', methods=['GET'])
def progress():
    """Return import-progress rows whose end_time is within the last hour."""
    cutoff = datetime.now() - timedelta(hours=1)
    sql = "select file_name, progress, dir_path, end_time from import_progress where end_time > ? order by end_time desc"
    data = []
    for file_name, prog, dir_path, _end_time in SqliteDB().selectall(sql, (cutoff, )):
        # Image entries are addressed by full path including the file name.
        if file_name.endswith(('.png', '.jpg', '.jpeg')):
            dir_path = dir_path + SPLIT_CHAR + file_name
        data.append({'file_name': file_name, 'progress': prog, 'dir_path': dir_path})
    return jsonify({'data': data})


@app.route('/dirTree', methods=['GET'])
def dir_tree(data=None):
    """Return the knowledge-base directory tree.

    `data` lets internal callers pass a request-like object; HTTP calls use
    flask's request.

    Fixed: (1) `data` now defaults to None — Flask calls view functions with
    no positional args, so the mandatory parameter broke every HTTP request;
    (2) the conditional `request = data` made `request` function-local,
    raising UnboundLocalError on the HTTP path; (3) `user` was undefined on
    that path (NameError).
    """
    if data is not None:
        req = data
        req_user = req.get_json().get('user')
    else:
        req = request
        # NOTE(review): the HTTP path never supplied a user before; falling
        # back to the session user from META — confirm this matches intent.
        req_user = META.user
    dir_path = req.args.get('dir_path', default='', type=str)
    max_depth = req.args.get('max_depth', default=None)

    # Empty directories exist only in the DB (they have no files on disk).
    rows = SqliteDB().selectall('select empty_dir_path from empty_dir')
    empty_dir_paths = [row[0] for row in rows]

    # Reading KB_path_dic.json would lose ordering after edits, so paths are
    # collected from the per-file injection records instead. Note: this also
    # exposes files whose injection has not finished yet.
    user_settings = settings_(req_user, None)
    current_all_contents_df, img_record, tb_record = get_injection_paths(
        KB_PATH, user_settings, know_df_cols)
    current_txt_paths = list(current_all_contents_df['path'].values)
    current_img_paths = list(img_record.values())
    # Table paths are intentionally excluded from the tree.
    current_paths = current_txt_paths + current_img_paths + empty_dir_paths

    tree = get_dir_tree(current_paths, dir_path, max_depth)
    # jsonify would sort dict keys, destroying the tree's insertion order.
    return Response(json.dumps({'dir_tree': tree}),
                    content_type='application/json; charset=utf-8')


@app.route('/fragments', methods=['GET'])
def fragments(data=None):
    """List the fragments directly under ``dir_path`` with their labels and
    display types (2=text, 3=image, 4=summary node).

    :param data: optional dict-like object for direct (non-HTTP) calls with
        keys ``dir_path``/``KB_PATH``/``user``; when None, parameters come
        from the Flask request and module defaults.
    """
    # BUG FIX: `data` had no default (TypeError under Flask dispatch) and
    # KB_PATH/user were read from `data` even when it was None
    # (AttributeError). Fall back to module-level defaults for HTTP calls.
    if data is None:
        dir_path = request.args.get('dir_path', type=str)
        kb_path = KB_PATH
        user_name = META.user
    else:
        dir_path = data.get('dir_path')
        kb_path = data.get('KB_PATH')
        user_name = data.get('user')

    # depth (number of path components) of the requested directory
    if dir_path == '':
        dir_count = dir_path.count(SPLIT_CHAR)
    else:
        dir_count = dir_path.count(SPLIT_CHAR) + 1
    files = list()
    labels = list()
    data_types = list()
    file_list = list()
    label_list = list()

    # KB_path_dic.json scrambles ordering after edits, so text fragments are
    # read from this file directory's KB_PTXT.csv instead.
    _, real_file_dir, file_dir, sub_path, _ = parse_fragment_path(user_name, kb_path, dir_path)
    ptxt_path = os.path.join(real_file_dir, 'KB_PTXT.csv')
    if os.path.isfile(ptxt_path):
        if encryptor.encrypt:
            ptxt_df = encryptor.load_from_file(ptxt_path)
        else:
            ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)
        ptxt_df = ptxt_df[ptxt_df['path'].str.startswith(sub_path)]
        txt_list = ptxt_df['path'].apply(lambda x: file_dir + SPLIT_CHAR + x).tolist()
        # rows without keywords get a default keyword set
        label_list = ptxt_df['keywords'].fillna('计算机 人工智能 网络安全 建筑工程').apply(lambda x: x.split(' ') if x else []).tolist()
        file_list += txt_list

    img_record = {}
    image_path = os.path.join(real_file_dir, 'image_record.json')
    if os.path.isfile(image_path):
        if encryptor.encrypt:
            img_record = encryptor.load_from_file(image_path)
        else:
            with open(image_path, 'r', encoding='utf-8') as f:
                img_record = json.load(f)

    img_label = {}
    img_label_pth = os.path.join(real_file_dir, 'image_label.json')
    if os.path.exists(img_label_pth):
        if encryptor.encrypt:
            img_label = encryptor.load_from_file(img_label_pth)
        else:
            with open(img_label_pth, 'r', encoding='utf-8') as f:
                img_label = json.load(f)

        # NOTE(review): uploaded images are only listed when image_label.json
        # exists — presumably both files are written together; confirm.
        img_list = []
        img_label_list = []
        for img_id, name in img_record.items():
            if img_id.startswith('IMAGE_UPLOAD_'):
                img_list.append(file_dir + SPLIT_CHAR + name)
                img_label_list.append(img_label[name])
        file_list += img_list
        label_list += img_label_list

    for path, label in zip(file_list, label_list):
        dirs = path.split(SPLIT_CHAR)
        # keep only entries directly under the requested directory
        if len(dirs) - 1 == dir_count:
            files.append(dirs[-1])
            labels.append(label)
            if '.png' in path or '.jpg' in path or '.jpeg' in path:
                data_type = 3
            elif '__摘要总结__' in path and path.endswith('__包括__'):
                data_type = 4
            else:
                data_type = 2
            data_types.append(data_type)
    return jsonify({'fragments': files, 'data_types': data_types, 'count': len(files), 'labels': labels})


@app.route('/fragmentContent', methods=['GET'])
def fragment_content():
    """Return one fragment's content: a base64-encoded image (type 3) or the
    parsed text of the matching KB_PTXT.csv row."""
    fragment_path = request.args.get("fragment_path", type=str)
    title = fragment_path.split(SPLIT_CHAR)[-1]
    if fragment_path.endswith(('.jpeg', '.png', '.jpg')):
        image_path = fragment_path.replace(SPLIT_CHAR, os.path.sep)
        res_dir = os.path.abspath(KB_PATH)
        file_path = os.path.join(res_dir, image_path)
        if encryptor.encrypt:
            file_data = encryptor.load_from_file(file_path)
        else:
            with open(file_path, 'rb') as fd:
                file_data = fd.read()

        base64_data = base64.b64encode(file_data).decode('utf-8')
        return jsonify({'title': title, 'content': base64_data, 'type': 3})

    else:
        frag_type, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(META.user, KB_PATH, fragment_path)
        ptxt_path = os.path.join(real_file_dir, 'KB_PTXT.csv')
        if encryptor.encrypt:
            ptxt_df = encryptor.load_from_file(ptxt_path)
        else:
            ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)
        ini_filtered_df = ptxt_df[ptxt_df['path'] == sub_path]
        # BUG FIX: when no row matched, `content_str` was never bound and the
        # return raised NameError (HTTP 500); answer with an explicit 404.
        if ini_filtered_df.empty:
            return make_response(jsonify({'message': f'片段 "{fragment_path}" 不存在'}), 404)
        for _, row in ini_filtered_df.iterrows():
            content = row['content']
            linkage = row['linkage']
            know_id, content_str, resource = parse_kb_ptxt(real_file_dir, file_dir, content, linkage, img_base64=True)

        # `resource` is intentionally not returned (empty dict, as before)
        return jsonify({'title': title, 'content': content_str, 'type': frag_type, 'resource': {}})


@app.route('/fragmentContent', methods=['PUT'])
def modity_fragment_content():
    """Rewrite a fragment's content.

    Removes the old fragment from the shared in-memory KB state (persisting
    vectors, content dataframe and path refs), deletes assets linked from the
    old version, then re-parses the submitted content and re-creates the
    fragment via checkerboard_create_know.
    """
    fragment_path = request.json.get('fragment_path')
    content = request.json.get('content')
    # resource = request.json.get('resource', {})
    type, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(META.user, KB_PATH, fragment_path)
    # Drop the old fragment from the shared KB state and persist the result;
    # g_lock serializes against concurrent KB encoders.
    with g_lock:
        META.all_contents_df, META.all_vec, META.full_path_vectors, META.full_path_ref = remove_from_kb([fragment_path], 
                                                                    META.full_path_ref,
                                                                    USER_SETTINGS,
                                                                    META.all_vec,
                                                                    META.full_path_vectors,
                                                                    META.all_contents_df)
        np.save(USER_SETTINGS['KB_VEC_PATH'], META.all_vec)
        np.save(USER_SETTINGS['KB_PATH_VEC_PATH'], META.full_path_vectors)
        if encryptor.encrypt:
            encryptor.save_to_file(META.all_contents_df, USER_SETTINGS['KB_CONTENT_PATH'])
        else:
            META.all_contents_df.to_csv(USER_SETTINGS['KB_CONTENT_PATH'], encoding='utf-8', index=False)
        
        if encryptor.encrypt:
            encryptor.save_to_file(META.full_path_ref, USER_SETTINGS['KB_PATH_JSON'])
        else:
            with open(USER_SETTINGS['KB_PATH_JSON'], mode='w', encoding='utf-8') as f:
                json.dump(META.full_path_ref, f ,ensure_ascii=False, indent=4)

    # Delete the image/table assets referenced by the old fragment version.
    ptxt_path = os.path.join(real_file_dir, 'KB_PTXT.csv')
    if encryptor.encrypt:
        ptxt_df = encryptor.load_from_file(ptxt_path)
    else:
        ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)
    linkages = ptxt_df.loc[ptxt_df['path'].astype(str) == sub_path, 'linkage'].tolist()
    clean_asset(real_file_dir, linkages)

    # Re-parse the new content: plain text is accumulated directly, while
    # inline images/tables are replaced by generated link ids and collected
    # into `resource` for checkerboard_create_know to materialize.
    img_i = 1
    tbl_i = 1
    new_content = ''
    last_sen = ''
    resource = {}
    for type, info in parser_context(content):
        if type=='text':
            new_content += info
            last_sen = info
        elif type=='image':
            # Name images after their index, fragment and preceding sentence.
            img_summary = f'图-{img_i}{fragment_name} {last_sen}'
            img_summary = process_path_texts(img_summary, last=30)
            link = 'IMAGE_' + gen_str_codes(img_summary) + '_IMAGE'
            resource[link] = {'name':img_summary + '.png', 'data':info}
            new_content += link
            img_i += 1
        elif type=='table':
            tbl_summary = f'表-{tbl_i}{fragment_name} {last_sen}'
            tbl_summary = process_path_texts(tbl_summary, last=50)
            link = 'TABLE_' + gen_str_codes(tbl_summary) + '_TABLE'
            resource[link] = {'name':tbl_summary + '.csv', 'data':info}
            new_content += link
            tbl_i += 1

    checkerboard_create_know(file_dir, new_content, sub_path, resource)
    return jsonify({'message':'success'})

# Create a new (empty) directory
@app.route('/dir', methods=['POST'])
def add_dir(data=None):
    """Create an empty directory in the knowledge base and record a
    placeholder row so it shows up in the directory tree.

    :param data: request-like object carrying ``dir_path``/``dir``/``user``
        in its ``json`` attribute; required for the handler to do anything.
    """
    # BUG FIX: without a default, Flask dispatch (which passes no arguments)
    # raised TypeError before the explicit None check below could run.
    if data is None:
        return {"error": "dir_path不能为空"}
    req = data
    dir_path = req.json.get('dir_path')
    if not is_valid_windows_filename(dir_path.split(SPLIT_CHAR)[-1]):
        return make_response(jsonify({'message': f'目录 "{dir_path}" 不符合命名规则'}), 400)

    real_dir_path = os.path.join(req.json.get('dir') + req.json.get('user', 'sys'), dir_path.replace(SPLIT_CHAR, os.path.sep))
    os.makedirs(real_dir_path, exist_ok=True)
    # a placeholder row makes the otherwise-empty dir visible in the tree
    sql = 'insert or ignore into empty_dir(empty_dir_path) values(?)'
    SqliteDB().insert(sql, (dir_path + SPLIT_CHAR + EMPTY_DIR_PLACEHOLDER,))
    return jsonify({'message': 'success'})
    

# def is_uploading(dir_path):
#     sql = 'select dir_path from knowledge_import where status in (1, 2)'
#     rows = SqliteDB().selectall(sql)
#     for row in rows:
#         upload_dir = row[0]
#         if dir_path.startswith(upload_dir):
#             return True
#     return False


@app.route('/dir', methods=['DELETE'])
def delete_dir():
    """Delete a directory from the knowledge base.

    If ``dir_path`` maps to a real directory on disk, the whole directory
    (and its progress rows) is removed; otherwise the matching rows are
    stripped from the containing file's KB_PTXT.csv and orphaned assets
    cleaned up. The KB index is updated in both cases.
    """
    dir_path = request.json.get('dir_path')
    if not dir_path:
        return make_response(jsonify({'message': f'你要删除的目录不存在'}), 400)

    # SECURITY FIX: dir_path is request-controlled — bind it as a parameter
    # instead of interpolating it into the SQL text (SQL injection).
    sql = 'delete from empty_dir where empty_dir_path like ?'
    SqliteDB().delete(sql, (dir_path + SPLIT_CHAR + '%', ))

    real_file_dir = os.path.join(KB_PATH, dir_path.replace(SPLIT_CHAR, os.path.sep))
    if os.path.isdir(real_file_dir):
        sql = 'delete from import_progress where dir_path=?'
        SqliteDB().delete(sql, (dir_path, ))
        shutil.rmtree(real_file_dir)
        remove_dirs = {dir_path}
        update_kb(remove_dirs=remove_dirs)
    else:
        # not a directory on disk: treat it as a fragment path inside a file
        _, real_file_dir, file_dir, sub_path, _ = parse_fragment_path(META.user, KB_PATH, dir_path)
        ptxt_path = os.path.join(real_file_dir, 'KB_PTXT.csv')
        if os.path.isfile(ptxt_path):
            if encryptor.encrypt:
                ptxt_df = encryptor.load_from_file(ptxt_path)
            else:
                ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)
            linkages = ptxt_df.loc[ptxt_df['path'].astype(str).str.startswith(sub_path), 'linkage'].tolist()
            ptxt_df = ptxt_df[~ptxt_df['path'].str.startswith(sub_path)]
            if encryptor.encrypt:
                encryptor.save_to_file(ptxt_df, ptxt_path)
            else:
                ptxt_df.to_csv(ptxt_path, encoding='utf-8', index=False)
            # drop image/table assets no longer referenced by any row
            clean_asset(real_file_dir, linkages)
            remove_dirs = {file_dir}
            update_kb(remove_dirs=remove_dirs)
    return jsonify({'message': 'success'})


@app.route('/fileTree', methods=['GET'])
def file_tree():
    """Return the fragment tree of the file that contains `fragment_path`."""
    fragment_path = request.args.get('fragment_path', type=str)
    _, real_file_dir, file_dir, sub_path, _ = parse_fragment_path(META.user, KB_PATH, fragment_path)
    tree, know_id = get_file_tree(real_file_dir, file_dir, sub_path)
    # json.dumps preserves insertion order (jsonify would sort keys)
    payload = json.dumps({'file_tree': tree, 'know_id': know_id})
    return Response(payload, content_type='application/json; charset=utf-8')


@app.route('/label', methods=['PUT'])
def modity_label():
    """Overwrite the keyword labels of a fragment (image or text)."""
    fragment_path = request.json.get('fragment_path')
    label = request.json.get('label')
    keywords_str = ' '.join(label)
    _, real_file_dir, file_dir, sub_path, fragment_name = parse_fragment_path(META.user, KB_PATH, fragment_path)
    if fragment_name.endswith(('.png', '.jpg', '.jpeg')):
        # image labels live in image_label.json, keyed by file name
        img_label_pth = os.path.join(real_file_dir, 'image_label.json')
        if os.path.exists(img_label_pth):
            if encryptor.encrypt:
                img_label = encryptor.load_from_file(img_label_pth)
            else:
                with open(img_label_pth, 'r', encoding='utf-8') as f:
                    img_label = json.load(f)

            img_label[fragment_name] = label
            if encryptor.encrypt:
                encryptor.save_to_file(img_label, img_label_pth)
            else:
                with open(img_label_pth, 'w', encoding='utf-8') as f:
                    json.dump(img_label, f, ensure_ascii=False, indent=4)
    else:
        # text labels live space-separated in KB_PTXT.csv's keywords column
        ptxt_path = os.path.join(real_file_dir, 'KB_PTXT.csv')
        if encryptor.encrypt:
            ptxt_df = encryptor.load_from_file(ptxt_path)
        else:
            ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)

        ptxt_df.loc[ptxt_df['path'].astype(str) == sub_path, 'keywords'] = keywords_str

        if encryptor.encrypt:
            encryptor.save_to_file(ptxt_df, ptxt_path)
        else:
            ptxt_df.to_csv(ptxt_path, encoding='utf-8', index=False)
    return jsonify({'message': 'success'})


@app.route('/moveDir', methods=['POST'])
def move_dir():
    """Move/rename a knowledge-base directory.

    Rewrites every in-memory reference under the old path (path refs, path
    vectors, image/table records, content dataframe), re-embeds moved paths,
    moves the directory on disk, and persists the updated state. Returns 400
    for the default directory, missing/duplicate targets, or empty sources.
    """
    old_dir_path = request.json.get('old_dir_path')
    if old_dir_path == '默认目录':
        return make_response(jsonify({'message': f'不允许移动默认目录'}), 400)
    new_dir_path = request.json.get('new_dir_path')
    old_real_file_dir = os.path.join(KB_PATH, old_dir_path.replace(SPLIT_CHAR, os.path.sep))
    new_real_file_dir = os.path.join(KB_PATH, new_dir_path.replace(SPLIT_CHAR, os.path.sep))
    if not os.path.isdir(old_real_file_dir):
        return make_response(jsonify({'message': f'你要移动或修改的目录不存在'}), 400)
    elif not os.path.isdir(os.path.dirname(new_real_file_dir)):
        return make_response(jsonify({'message': f'新目录上层目录不存在'}), 400)
    elif os.path.isdir(new_real_file_dir):
        return make_response(jsonify({'message': f'新目录早已存在'}), 400)
    # refuse to move empty directories
    if not os.listdir(old_real_file_dir):
        return make_response(jsonify({'message': f'不允许移动空目录'}), 400)
    old_dir_path += SPLIT_CHAR
    new_dir_path += SPLIT_CHAR
    system_old_dir = user + SPLIT_CHAR + old_dir_path
    system_new_dir = user + SPLIT_CHAR + new_dir_path
    system_old_image_dir = user + SPLIT_CHAR + 'images' + SPLIT_CHAR + old_dir_path
    system_new_image_dir = user + SPLIT_CHAR + 'images' + SPLIT_CHAR + new_dir_path
    # Rebuild the ref dict in order (delete-then-reinsert would change order).
    new_full_path_ref = dict()
    new_full_path_vectors = np.empty((0, 1024), dtype=np.float32)

    for (path_item, infos), path_vec in zip(META.full_path_ref.items(), META.full_path_vectors):
        if path_item.startswith(old_dir_path):
            new_path_item = path_item.replace(old_dir_path, new_dir_path)
            new_system_path = infos['system_path'].replace(system_old_dir, system_new_dir)
            new_system_path = new_system_path.replace(system_old_image_dir, system_new_image_dir)
            new_tokens = tokenize2stw_remove(new_path_item.split('-->'))
            new_full_path_ref[new_path_item] = {'system_path': new_system_path, 'tokens': '->'.join(new_tokens)}
            # moved paths must be re-embedded before being stored
            _, path_vec = vectorize_texts(new_system_path, tokenizer, model)
            logger.info('new move path is being converted into vector:{}', new_system_path)
        else:
            new_full_path_ref[path_item] = infos
        new_full_path_vectors = np.vstack((new_full_path_vectors, path_vec))

    # remap image / table records that lived under the old directory
    new_img_record = dict()
    for img_id, img_path in META.img_record.items():
        if img_path.startswith(old_dir_path):
            new_img_record[img_id] = img_path.replace(old_dir_path, new_dir_path)
        else:
            new_img_record[img_id] = img_path

    new_tb_record = dict()
    for tbl_id, tbl_path in META.tb_record.items():
        if tbl_path.startswith(old_dir_path):
            new_tb_record[tbl_id] = tbl_path.replace(old_dir_path, new_dir_path)
        else:
            new_tb_record[tbl_id] = tbl_path

    with g_lock:
        shutil.move(old_real_file_dir, new_real_file_dir)
        # SECURITY FIX: old_dir_path is request-controlled — bind it as a
        # parameter instead of interpolating it into the SQL text.
        sql = 'delete from empty_dir where empty_dir_path like ?'
        SqliteDB().delete(sql, (old_dir_path + '%', ))
        META.full_paths = list(new_full_path_ref.keys())
        META.full_path_ref = new_full_path_ref
        META.img_record = new_img_record
        META.tb_record = new_tb_record
        system_old_dir_escaped = re.escape(system_old_dir)
        META.all_contents_df['path'] = META.all_contents_df['path'].str.replace(f'^{system_old_dir_escaped}', system_new_dir, regex=True)
        system_old_image_dir_escaped = re.escape(system_old_image_dir)
        META.all_contents_df['path'] = META.all_contents_df['path'].str.replace(f'^{system_old_image_dir_escaped}', system_new_image_dir, regex=True)
        META.full_path_vectors = new_full_path_vectors

        np.save(USER_SETTINGS['KB_PATH_VEC_PATH'], new_full_path_vectors)
        if encryptor.encrypt:
            encryptor.save_to_file(META.all_contents_df, USER_SETTINGS['KB_CONTENT_PATH'])
        else:
            META.all_contents_df.to_csv(USER_SETTINGS['KB_CONTENT_PATH'], encoding='utf-8', index=False)

        if encryptor.encrypt:
            encryptor.save_to_file(META.full_path_ref, USER_SETTINGS['KB_PATH_JSON'])
        else:
            with open(USER_SETTINGS['KB_PATH_JSON'], mode='w', encoding='utf-8') as f:
                json.dump(META.full_path_ref, f, ensure_ascii=False, indent=4)
    return jsonify({'message': 'success'})


def find_sub_path(sub_path, paths):
    """Resolve `sub_path` against `paths`.

    Returns the path with a trailing separator when it is a prefix directory
    of some entry, unchanged when it matches an entry exactly, or None when
    it matches nothing.
    """
    prefix = sub_path + SPLIT_CHAR
    for candidate in paths:
        if candidate.startswith(prefix):
            return prefix
        if candidate == sub_path:
            return sub_path
    return None


@app.route('/moveFragments', methods=['POST'])
def move_fragments():
    """Move/rename a fragment (image or text) inside one file directory.

    Images are renamed on disk and remapped in image_record.json /
    image_label.json; text fragments get their path column rewritten in
    KB_PTXT.csv. The containing directory's KB entries are then rebuilt.
    """
    old_fragment_path = request.json.get('old_fragment_path')
    new_fragment_path = request.json.get('new_fragment_path')
    _, old_real_file_dir, file_dir, old_sub_path, old_fragment_name = parse_fragment_path(META.user, KB_PATH, old_fragment_path)
    _, new_real_file_dir, file_dir, new_sub_path, new_fragment_name = parse_fragment_path(META.user, KB_PATH, new_fragment_path)
    # moves are only supported inside a single file directory
    if old_real_file_dir != new_real_file_dir:
        return make_response(jsonify({'message': f'片段只能在同一文件目录下修改或移动'}), 400)
    
    if old_sub_path == new_sub_path:
        return jsonify({'message':'success'})

    if old_fragment_path.endswith('.png') or old_fragment_path.endswith('.jpg') or old_fragment_path.endswith('.jpeg'):
        # image fragment: load both record files, remap the entry, rename
        # the file on disk, then persist both files.
        img_record_path = os.path.join(old_real_file_dir, 'image_record.json')
        if encryptor.encrypt:
            img_dic = encryptor.load_from_file(img_record_path)
        else:
            with open(img_record_path, 'r', encoding='utf-8') as f:
                img_dic = json.load(f)
        
        img_label_path = os.path.join(old_real_file_dir, 'image_label.json')
        if encryptor.encrypt:
            img_label_dic = encryptor.load_from_file(img_label_path)
        else:
            with open(img_label_path, 'r', encoding='utf-8') as f:
                img_label_dic = json.load(f)

        # image_record maps id -> file name; find the id of the old name
        key = next((k for k, v in img_dic.items() if v == old_fragment_name), None)
        if key:
            img_dic[key] = new_fragment_name
            img_label_dic[new_fragment_name] = img_label_dic.pop(old_fragment_name)
            os.rename(os.path.join(old_real_file_dir, old_fragment_name), os.path.join(new_real_file_dir, new_fragment_name))

            if encryptor.encrypt:
                encryptor.save_to_file(img_label_dic, img_label_path)
            else:
                with open(img_label_path, 'w', encoding='utf-8') as f:
                    json.dump(img_label_dic, f, ensure_ascii=False, indent=4)

            if encryptor.encrypt:
                encryptor.save_to_file(img_dic, img_record_path)
            else:
                with open(img_record_path, 'w', encoding='utf-8') as f:
                    json.dump(img_dic, f, ensure_ascii=False, indent=4)
    else:
        # text fragment: rewrite the path prefix in KB_PTXT.csv
        ptxt_path = os.path.join(old_real_file_dir, 'KB_PTXT.csv')
        if encryptor.encrypt:
            ptxt_df = encryptor.load_from_file(ptxt_path)
        else:
            ptxt_df = pd.read_csv(ptxt_path, encoding='utf-8', index_col=False)

        # resolve whether old_sub_path is a directory prefix or exact entry
        old_sub_path = find_sub_path(old_sub_path, ptxt_df["path"].values)
        if not old_sub_path:
            return make_response(jsonify({'message': f'你要移动或修改的片段不存在'}), 400)
        
        if old_sub_path.endswith(SPLIT_CHAR):
            new_sub_path += SPLIT_CHAR
        
        if new_sub_path in ptxt_df["path"].values:
            return make_response(jsonify({'message': f'新片段早已存在'}), 400)

        old_sub_path_escaped = re.escape(old_sub_path)
        ptxt_df['path'] = ptxt_df['path'].str.replace(f'^{old_sub_path_escaped}', new_sub_path, regex=True)
        ptxt_df['path'] = ptxt_df['path'].str.replace(f'^__摘要总结__{old_sub_path_escaped}', f'__摘要总结__{new_sub_path}', regex=True)
        if encryptor.encrypt:
            encryptor.save_to_file(ptxt_df, ptxt_path)
        else:
            ptxt_df.to_csv(ptxt_path, encoding='utf-8', index=False)
    
    # rebuild this directory's KB entries (remove then re-add)
    update_kb(remove_dirs={file_dir})
    update_kb(add_dir=file_dir)
    return jsonify({'message':'success'})
    

def process_match_df(know_df, KB_PATH, tb_record, img_record):
    """Flatten matched knowledge rows into one text blob plus a resource map.

    PTXT rows contribute raw text; TABLE_* rows are rendered to HTML from the
    recorded CSV; IMAGE_* rows contribute their link id to the text and their
    base64 data to `resource`. Rows from different paths are joined with a
    newline.
    """
    pieces = []
    previous_path = ''
    resource = {}
    for _, row in know_df.iterrows():
        kg_contents_ = row['content']
        # skip empty rows and path-like rows (containing '-->')
        if pd.isna(kg_contents_) or not kg_contents_ or '-->' in kg_contents_:
            continue
        row_type = row['type']

        path = row['path']
        if path != previous_path:
            if previous_path:
                pieces.append('__HHF__')
            previous_path = path

        if row_type == 'PTXT':
            pieces.append(kg_contents_)
        elif row_type.startswith('TABLE_'):
            tb_path = os.path.join(KB_PATH, tb_record[row_type].replace('-->', os.path.sep))
            if encryptor.encrypt:
                tb_df = encryptor.load_from_file(tb_path)
            else:
                tb_df = pd.read_csv(tb_path, encoding='utf-8', index_col=False)
            # blank out pandas' auto-generated "Unnamed:" column headers
            tb_df.columns = ['' if 'Unnamed:' in str(col) else col for col in tb_df.columns]
            html = tb_df.to_html(escape=True, index=False, na_rep='', justify='center').replace('\n', '').replace('\\n', '')
            pieces.append(html)
        elif row_type.startswith('IMAGE_'):
            img_path = os.path.join(KB_PATH, img_record[row_type].replace('-->', os.path.sep))
            if encryptor.encrypt:
                image_binary_data = encryptor.load_from_file(img_path)
            else:
                with open(img_path, 'rb') as fd:
                    image_binary_data = fd.read()
            resource[row_type] = base64.b64encode(image_binary_data).decode('utf-8')
            pieces.append(row_type)
    content = ''.join(pieces).replace('__HHF__', '\n')
    return content, resource


@app.route('/docTree', methods=['POST'])
def doc_tree():
    """Fill the posted document skeleton (ROOT) with matched knowledge."""
    tree = request.json['ROOT']
    match_df_path = USER_SETTINGS['MATCH_DF']
    if os.path.isfile(match_df_path):
        if encryptor.encrypt:
            match_dfs = encryptor.load_from_file(match_df_path)
        else:
            match_dfs = pd.read_csv(match_df_path, encoding='utf-8')
    else:
        match_dfs = pd.DataFrame(columns=match_df_cols)
    # group the matches by intention and let fill_tree attach them to leaves
    fill_tree(tree, match_dfs.groupby('intention', sort=False))
    # json.dumps preserves the tree's key order (jsonify would sort it)
    return Response(json.dumps({'doc_tree': tree}), content_type='application/json; charset=utf-8')


def fill_tree(tree, match_groups):
    """Recursively replace every leaf title in `tree` with its matched
    content block {'__content__': ..., '__resource__': ...}, using a
    placeholder message for titles without any matched knowledge."""
    for title, child in tree.items():
        if child and isinstance(child, dict):
            fill_tree(child, match_groups)
            continue
        if title in match_groups.groups.keys():
            subgroup = match_groups.get_group(title)
            merged = merge_df(subgroup)
            content, resource = process_match_df(merged, KB_PATH, META.tb_record, META.img_record)
        else:
            content, resource = '(未找到知识，请补充)', {}
        tree[title] = {'__content__': content, '__resource__': resource}


@app.route('/docContent', methods=['PUT'])
def modity_doc_content():
    """Replace the stored knowledge match of one intention with new content.

    Any previous rows for the intention are dropped, then a single PTXT row
    with the submitted content is appended and the match file is saved.
    """
    intentions = request.json.get('intentions')
    content = request.json.get('content')
    path = request.json.get('path')

    match_df_path = USER_SETTINGS['MATCH_DF']
    if os.path.isfile(match_df_path):
        if encryptor.encrypt:
            match_dfs = encryptor.load_from_file(match_df_path)
        else:
            match_dfs = pd.read_csv(match_df_path, encoding='utf-8')
        # drop all existing rows for this intention before re-adding
        match_ids = match_dfs.index[match_dfs['intention'] == intentions].tolist()
        if match_ids:
            match_dfs = match_dfs.drop(match_ids).reset_index(drop=True)
    else:
        match_dfs = pd.DataFrame(columns=match_df_cols)

    new_row = {'path': path, 'content': content, 'type': 'PTXT', 'length': len(content),
               'keywords': '', 'summary': '', 'tokens': '',
               'know_id': gen_str_codes(content), 'intention': intentions}
    match_dfs = pd.concat([match_dfs, pd.DataFrame([new_row])], ignore_index=True)

    if encryptor.encrypt:
        encryptor.save_to_file(match_dfs, match_df_path)
    else:
        match_dfs.to_csv(match_df_path, index=False, encoding='utf-8', header=True)

    return jsonify({'message': 'success'})


@app.route('/docContent', methods=['DELETE'])
def delete_doc_content():
    """Remove all stored knowledge matches for one intention and persist
    the remaining match dataframe."""
    intentions = request.json.get('intentions')
    match_df_path = USER_SETTINGS['MATCH_DF']
    if os.path.isfile(match_df_path):
        if encryptor.encrypt:
            match_dfs = encryptor.load_from_file(match_df_path)
        else:
            match_dfs = pd.read_csv(match_df_path, encoding='utf-8')
        drop_ids = match_dfs.index[match_dfs['intention'] == intentions].tolist()
        if drop_ids:
            match_dfs = match_dfs.drop(drop_ids).reset_index(drop=True)
    else:
        # nothing stored yet: write out an empty dataframe with the schema
        match_dfs = pd.DataFrame(columns=match_df_cols)

    if encryptor.encrypt:
        encryptor.save_to_file(match_dfs, match_df_path)
    else:
        match_dfs.to_csv(match_df_path, index=False, encoding='utf-8', header=True)

    return jsonify({'message': 'success'})



if __name__ == "__main__":
    # Entry point: configure rotating file logging, then guard against a
    # second instance with a named Windows mutex before starting Flask.
    host = USER_SETTINGS['HOST']
    port = USER_SETTINGS['PORT']
    server_name = f'llm_{port}'
    logger.add(f'log/{server_name}.log',
    level="INFO", format='{time} | {level} | {file}:{function}:{line} - {message}',
    rotation='1 day', enqueue=False, encoding='utf-8', retention='30 days', backtrace=False, diagnose=True)
    # Create a named mutex so only one instance of this server runs at a
    # time (Windows-only: relies on ctypes.windll / kernel32).
    mutex = ctypes.windll.kernel32.CreateMutexW(None, False, server_name)
    if ctypes.windll.kernel32.GetLastError() == 183:  # ERROR_ALREADY_EXISTS
        logger.error("Another instance of this application is already running.")
        sys.exit(0)
    app.run(host=host, port=port, debug=False)
    
    

    


