import pdfplumber
import pymupdf
import os
import json
import numpy as np
import pandas as pd
import re

from META import KB_PATH, USER_SETTINGS, model_config, llm_apis
from utlis import find_frequent, restore_graph_by_paths, use_llm_api, process_dup_paths_df, gen_str_codes, remove_duplicates_orderkept, path_handle
from txt_parser import postprocess_leaf_dics
from table_parser import parse_headers
from layout_parser import pred_titles_binary
from knowledge_generator import process_full_contents
from file_encryptor import encryptor



def extract_page_info(page):
    """Collect per-line text/font/position info for one pymupdf page.

    Returns a dict keyed by ``page.number`` holding the outlier-trimmed
    min/max line start/end x-positions, the most common font size on the
    page, and the list of per-line info dicts (block index, concatenated
    text, span sizes/colors, line bounding-box x-span).
    """
    def flexible_slice(list_, start=3, end=-3):
        # Trim up to `start` items from each end as outliers, widening the
        # window symmetrically until the slice is non-empty (or the indices
        # cross zero and the loop gives up).
        while start >= 0 and -end >= 0 and len(list_[start:end]) == 0:
            start -= 1  # Decrease the start index
            end += 1    # Increase the end index (in negative)
        return list_[start:end]

    line_pos_begin = []
    line_pos_end = []
    txt_sizes = []

    blocks = page.get_text('dict')['blocks']
    line_infos = []
    for b, block in enumerate(blocks):
        if block['type'] == 0: # type 0 means the block contains text
            for line in block['lines']:
                text_info = {
                    'block' : b+1,
                    'text' : '',
                    'size' : [],
                    'color' : [],
                    'xspan' : (np.floor(line['bbox'][0]), np.floor(line['bbox'][2]))  # bounding box of the entire line, not span
                }

                # One flag per span; the line is kept unless every span is
                # pure whitespace.
                skip_line = []
                text_spans = line['spans']
                for s, span in enumerate(text_spans):
                    text_ = span['text']
                    if not text_.strip():
                        skip_line.append(True)
                    else:
                        text_info['text'] += span['text']  # concatenate spans as-is (no separator added)
                        text_info['color'].append(span['color'])
                        text_info['size'].append( np.floor(span['size']) )  # font size
                        line_pos_begin.append(text_info['xspan'][0])
                        line_pos_end.append(text_info['xspan'][1])

                        txt_sizes.append(find_frequent(text_info['size']))
                        skip_line.append(False)

                if all(skip_line):
                    continue
                else:
                    line_infos.append(text_info)

    # NOTE(review): if the page has no non-blank text these lists stay empty
    # and np.min/np.max below will raise — confirm callers never hit that.
    line_pos_begin.sort()
    line_pos_begin = flexible_slice(line_pos_begin)
    line_pos_end.sort()
    line_pos_end = flexible_slice(line_pos_end)

    page_infos = {
        page.number : {
            'min_pos_begin':np.min(line_pos_begin),
            'max_pos_begin':np.max(line_pos_begin),
            'min_pos_end':np.min(line_pos_end),
            'max_pos_end':np.max(line_pos_end),
            'common_size':find_frequent(txt_sizes),
            'lines':line_infos,
        }
    }
    return page_infos
    

def eval_pdf_headings(page_infos, all_table_txts, pre_define_titles, call_llm, local_llm_name, local_llm, local_llm_tz, model_config):
    """Classify each extracted line as title / non-title.

    Lines are scored by the binary title classifier, then overridden by two
    PDF-specific rules: text belonging to a table is never a title; text
    matching a predefined (TOC) title always is.

    Returns the classifier's DataFrame (columns include 'line', 'title').
    """
    all_line_txts = []
    for i, page_info in enumerate(page_infos):
        # NOTE(review): page_info is keyed by page number; indexing with the
        # list position i only works when extraction started at page 0 — confirm.
        line_txts = [line['text'].strip() for line in page_info[i]['lines']]
        all_line_txts.extend(line_txts)
    all_candi_titles = pred_titles_binary(all_line_txts, call_llm, local_llm_name, local_llm, local_llm_tz, model_config)

    # UNDER DEVELOPMENT: additional conditions specific to PDF files.
    for i, row in all_candi_titles.iterrows():
        if row['line'] in all_table_txts:
            all_candi_titles.loc[i, 'title'] = False
        if row['line'].upper().strip() in pre_define_titles:
            all_candi_titles.loc[i, 'title'] = True
    return all_candi_titles


def get_leaf_node_paths(dictionary, split='-->'):
    """Return every root-to-leaf key path of a nested dict, joined by *split*.

    A leaf is either an empty dict or any non-dict value; only the keys on
    the way down are recorded, not the leaf values themselves.
    """
    def walk(node, prefix):
        # Non-empty dicts are interior nodes; everything else terminates
        # the current path.
        if isinstance(node, dict) and node:
            for key, child in node.items():
                yield from walk(child, prefix + [key])
        else:
            yield prefix

    return [split.join(path) for path in walk(dictionary, [])]


def extract_pdf_tables(file_path, kb_dir):
    """Extract every table from the PDF at *file_path* and persist each one.

    Each table is parsed, saved via handle_table_pdf, and registered.

    Returns:
        all_table_infos: {'<page>--><header row>': table_id} so a table can
            be located later by the text line that begins it;
        all_table_txts: flat list of all table cell texts (used to filter
            table content out of the running body text).
    """
    all_table_infos = {}
    all_table_txts = []
    with pdfplumber.open(file_path) as pdf:
        for num, page in enumerate(pdf.pages):
            tables = page.extract_tables()
            for j, table_data in enumerate(tables):
                table = pd.DataFrame(table_data)
                table = parse_headers(tb_df=table, temp_path=os.getcwd(), mode='parse')
                tb_begin, tb_cells, table_id = handle_table_pdf(table, kb_dir)
                # BUG FIX: the stored key is '<page>--><begin>', so the
                # duplicate check must test that combined key; checking the
                # bare tb_begin never matched, so cell texts got duplicated.
                key = str(num) + '-->' + tb_begin
                if key not in all_table_infos:
                    all_table_infos[key] = table_id
                    all_table_txts.extend(tb_cells)
    return all_table_infos, all_table_txts


def extract_pdf_images(file_path, kb_dir):
    """Extract image info from the PDF at *file_path* (under development).

    NOTE(review): stub — it walks every image on every page but only prints
    a blank line per image and always returns empty containers; kb_dir is
    currently unused. The call site in parse_pdf is commented out.
    """
    all_image_infos = {}
    all_image_txts = []
    with pdfplumber.open(file_path) as pdf:
        for num, page in enumerate(pdf.pages):
            images = page.images
            for k, image in enumerate(images):# print(image) will print the image metadata
                print()
    return all_image_infos, all_image_txts

    
def handle_table_pdf(table, kb_dir, local_summary=False):
    """Persist one parsed table under *kb_dir* and register it globally.

    Saves the table as a CSV (encrypted when the global encryptor is on),
    then updates the shared 'table_record.json' mapping a content-derived
    table id to the table's storage location.

    Args:
        table: parsed table DataFrame (columns are the header row).
        kb_dir: knowledge-base directory for this document.
        local_summary: when True the summary is left empty
            (presumably filled by an LLM elsewhere — TODO confirm).

    Returns (header_row_text, unique_cell_texts, table_id).
    """
    if not local_summary:
        tb_summary = ','.join(table.columns.astype(str))
    else:
        tb_summary = ''

    # 1. unique cell texts (order kept) + a stable id derived from content
    tb_cell_txts = remove_duplicates_orderkept(table.values.flatten().tolist())
    tb_df = pd.DataFrame(table)
    table_id = 'TABLE_' + gen_str_codes(tb_df.to_csv(index=False)) + '_TABLE'

    # 2. save the table itself under a filesystem-safe name
    csv_name = path_handle(tb_summary, mode='sanitize') + '.csv'
    tb_path = os.path.join(kb_dir, csv_name)
    if encryptor.encrypt:
        encryptor.save_to_file(tb_df, tb_path)
    else:
        tb_df.to_csv(tb_path, encoding='utf-8', index=False)

    # 3. update global table directory for reusing
    tb_record_pth = os.path.join(kb_dir, 'table_record.json')
    try:
        if encryptor.encrypt:
            tb_record = encryptor.load_from_file(tb_record_pth)
        else:
            with open(tb_record_pth, 'r', encoding='utf-8') as f:
                tb_record = json.load(f)
    except Exception:
        # First table of this KB (or unreadable record): start fresh.
        tb_record = {}

    # BUG FIX: record the sanitized file name actually written above; the
    # raw summary may contain characters stripped by path_handle, which
    # previously made the recorded path point to a non-existent file.
    tb_record.update({table_id : kb_dir.split(os.sep)[-1] + '-->' + csv_name})
    if encryptor.encrypt:
        encryptor.save_to_file(tb_record, tb_record_pth)
    else:
        with open(tb_record_pth, 'w', encoding='utf-8') as f:
            json.dump(tb_record, f, ensure_ascii=False, indent=4)

    return ','.join(table.columns.astype(str)), tb_cell_txts, table_id


def refine_page_infos(all_page_infos, toc_titles):
    """Merge title lines that PDF extraction split across 2–3 physical lines.

    Consecutive lines whose joined text (upper-cased) matches an entry in
    *toc_titles* are collapsed into their first line; the absorbed lines are
    removed. Mutates and returns *all_page_infos*.

    NOTE(review): each element is keyed by page number; indexing with the
    list position k assumes extraction started at page 0 — confirm.
    """
    for k, info in enumerate(all_page_infos):
        remove_ids = []
        page_lines = info[k]['lines']
        for i, line in enumerate(page_lines):
            # Try a 2-line join first ...
            if i < len(page_lines) - 1:
                toc_check_1 = ' '.join([line['text'], page_lines[i + 1]['text']])
                if toc_check_1.upper().strip() in toc_titles:
                    line['text'] = toc_check_1
                    remove_ids.append(i + 1)
            # ... then a 3-line join (note: uses the possibly-updated text).
            if i < len(page_lines) - 2:
                toc_check_2 = ' '.join([line['text'], page_lines[i + 1]['text'], page_lines[i + 2]['text']])
                if toc_check_2.upper().strip() in toc_titles:
                    line['text'] = toc_check_2
                    remove_ids.extend([i + 1, i + 2])
        # BUG FIX: pop from the highest index down and dedupe — popping in
        # ascending order shifted later indices, removing the wrong lines
        # (or raising IndexError) when several titles merged on one page.
        # Also removed a leftover debug print hook on a hard-coded string.
        for j in sorted(set(remove_ids), reverse=True):
            info[k]['lines'].pop(j)
    return all_page_infos


def parse_pdf(file_path, kb_dir, start_page=0, end_page=None, local_summary=False, call_llm=None, llm_histories=[], local_llm_name=None, local_llm=None, local_llm_tz=None, model_config=None):
    """Parse a PDF into a list of ([heading], {'heading', 'content'}) tuples.

    Pipeline: extract per-page line/font info, persist embedded tables,
    detect headings (TOC titles + binary classifier), then group the
    running text under the most recent heading, page by page.

    NOTE(review): llm_histories has a mutable default list (not mutated
    here, but shared across calls) — consider None + local default.
    NOTE(review): end_page is accepted but never used.
    NOTE(review): with start_page > 0 the list index into all_page_infos no
    longer equals page.number (earlier pages are skipped), so the lookups
    below would break — confirm callers always pass 0.
    """
    merge_df = pd.DataFrame(columns=['path', 'content_lst', 'path_identifier'])  # NOTE(review): unused
    pdf_doc = pymupdf.open(file_path)
    
    # 1. extract infos and handle images and tables
    all_page_infos = []    
    for page in pdf_doc:
        print('\tprocessing PDF page {}'.format(page.number))
        if page.number<start_page :
            continue
        page_infos = extract_page_info(page)
        all_page_infos.append(page_infos)
    
    all_table_infos, all_table_txts = extract_pdf_tables(file_path, kb_dir)
    # all_image_infos, all_image_txts = extract_pdf_images(file_path, kb_dir)
    
    # TOC titles (upper-cased) drive both line-merging and title overrides.
    toc = pdf_doc.get_toc()
    toc_titles = [t[1].upper().strip() for t in toc]
    all_page_infos = refine_page_infos(all_page_infos, toc_titles)
    
    # 2. evaluate candidate titles
    info_with_titles = eval_pdf_headings(all_page_infos, all_table_txts, toc_titles, call_llm, local_llm_name, local_llm, local_llm_tz, model_config)
    info_with_titles.to_csv(os.path.join(os.getcwd(), 'temp_title_res.csv'), encoding='utf-8')
    
    # 3. organize contents
    all_table_begins = list(all_table_infos.keys())
    current_heading = file_path.split(os.sep)[-1].split('.')[0] # use file name as the initial title
    current_contents = []
    dict_list = []
    
    for page in pdf_doc:
        if page.number<start_page :
            continue
        page_info_lines = all_page_infos[page.number][page.number]['lines']
        current_contents = []   
        for lid, line in enumerate(page_info_lines):
            line_txt = line['text'].strip()
            # A line that starts a known table is replaced by the table id;
            # any other line belonging to a table is skipped entirely.
            tb_begin_key = (str(page.number)+'-->'+line_txt)
            if tb_begin_key in all_table_begins:
                current_contents.append(all_table_infos[tb_begin_key])
            if line_txt in all_table_txts:
                continue
            
            # All classifier rows matching this text must agree it is a title.
            ref_ = info_with_titles[info_with_titles['line'].isin([line_txt])]['title'].values
            if all(list(ref_))==True:
                print('\t next heading: ', line_txt)
                # Either extend the previous entry for the same heading, or
                # flush the accumulated contents under the current heading.
                if len(current_contents)>0 and len(dict_list)>0 and dict_list[-1][-1]['heading']==current_heading:
                    dict_list[-1][-1]['content'].extend(current_contents)
                    current_contents = []
                else:
                    if len(current_contents)>0:
                        dict_list.append(([current_heading], {'heading':current_heading, 'content':current_contents}))
                        current_contents = []
                current_heading = line_txt
            else:
                current_contents.append(line_txt)
            # if the last line is reached, append the remaining contents
            if lid==len(page_info_lines)-1:
                dict_list.append(([current_heading], {'heading':current_heading, 'content':current_contents}))
    return dict_list


def convert_pdf2dics(leaf_dics, save, kb_dir, model_config, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, know_df_cols=[], local_summary=False, split_char='-->'):
    """Post-process parsed leaf dicts into the knowledge-base CSV and graph.

    When *save* is True, writes 'KB_PTXT.csv' and 'graph.json' under
    *kb_dir* (encrypted when the global encryptor is on).

    Returns (graph_path, failed_lst); graph_path is None when save is False.

    NOTE(review): know_df_cols only ever initialized a placeholder DataFrame
    that was discarded; the parameter is kept for interface compatibility.
    """
    leaf_dics = postprocess_leaf_dics(leaf_dics, model_config, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, local_summary)

    doc_name = os.path.splitext(kb_dir.split(os.sep)[-1])[0]
    # For debug; note the except below re-raises, so a non-empty list is
    # only ever observable in a debugger.
    failed_lst = []
    # BUG FIX: graph_path was only assigned inside `if save`, so returning
    # it with save=False raised UnboundLocalError.
    graph_path = None
    if save==True:
        df_list = []
        path_keys = []

        for i, know_item in enumerate(leaf_dics):
            key = know_item[0]
            if key.strip()=='':
                continue

            path_keys.append((doc_name + split_char + key))
            know_val = '\n'.join(know_item[1]['content'])

            inner_key = key.split(split_char)[-1]
            # Collect table/image ids embedded in the content as linkage.
            pattern = re.compile(r'(TABLE_.*?_TABLE|IMAGE_.*?_IMAGE)')
            matches = pattern.findall(know_val)
            if len(matches)==0:
                matches = ['NOLINK']
            else:
                matches = ['\n'.join(matches)]

            # Summary nodes carry their own '-**-'-joined linkage payload.
            if '__摘要总结__' in key:
                matches = ['__SUMMARY__'] + know_val.split('-**-')
                matches = ['-**-'.join(matches)]

            try:
                know_dic, know_id = process_full_contents(know_val, inner_key)
                know_dic = json.dumps(know_dic, ensure_ascii=False, indent=4)
                know_keywords = ' '.join(know_item[2])
                know_summary = know_item[-1]
                df_list.append(pd.DataFrame({'path': [key],
                                             'content': [know_dic],
                                             'linkage':matches,
                                             'summary': know_summary,
                                             'keywords': know_keywords,
                                             'know_id':know_id}))

            except Exception as e:
                print(e)
                failed_lst.append(key)
                raise

        doc_df = pd.concat(df_list, ignore_index=True)
        doc_df = process_dup_paths_df(doc_df)
        if encryptor.encrypt:
            encryptor.save_to_file(doc_df, os.path.join(kb_dir, 'KB_PTXT.csv'))
        else:
            doc_df.to_csv(os.path.join(kb_dir, 'KB_PTXT.csv'), encoding='utf-8')

        doc_graph, dic_texts = restore_graph_by_paths(path_keys, split_char, '__摘要总结__')
        graph_path = os.path.join(kb_dir, 'graph.json')
        if encryptor.encrypt:
            encryptor.save_to_file(doc_graph, graph_path)
        else:
            with open(graph_path, 'w', encoding='utf-8') as f:
                json.dump(doc_graph, f, ensure_ascii=False, indent=4)
    return graph_path, failed_lst



if __name__ == "__main__":
    # Manual smoke test: parse one local PDF into the demo knowledge base.
    # Paths below are developer-machine specific.
    from META import USER_SETTINGS, model_config, llm_apis, llm_histories, local_llm, local_llm_tz
    import pymupdf4llm
    import pathlib
  
    pdf_path = r'C:\Users\DELL\Desktop\testdir\测试文件.pdf'
    # md_text = pymupdf4llm.to_markdown(pdf_path)
    # pathlib.Path(os.path.join(os.getcwd(), "md_res.md")).write_bytes(md_text.encode())
    
    # The knowledge-base subdirectory is named after the source file.
    file_name = pdf_path.split(os.sep)[-1]
    kb_path = r'D:\Prototype\Checkerboard\知识固化库_DEMO'
    kb_dir = os.path.join(kb_path, file_name)
    if not os.path.exists(kb_dir):
        os.mkdir(kb_dir)
    
    save = True
    know_df_cols = ['path', 'content', 'linkage', 'summary', 'keywords', 'know_id']
    
    leaf_dics = parse_pdf(pdf_path, kb_dir)
    convert_pdf2dics(leaf_dics, save, kb_dir, model_config, llm_apis['local_api'], llm_histories, USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, know_df_cols=know_df_cols)
    

    print('parsing finished...')
        



