import os
import json
import re
import zipfile
import numpy as np
import pandas as pd

from docx import Document
from docx.oxml.text.paragraph import CT_P
from docx.oxml.table import CT_Tbl
from docx.oxml.ns import qn
from docx.table import Table
from docx.text.paragraph import Paragraph

from utlis import remove_spaces, find_matches_parsing
from utlis import remove_duplicates_orderkept, gen_str_codes, process_dup_paths_df, process_path_texts, restore_graph_by_paths
from knowledge_generator import process_full_contents
from txt_parser import postprocess_leaf_dics
from image_parser import detect_images
from layout_parser import pred_titles_binary, pred_titles_level, merge_title_levels
from file_encryptor import encryptor
import xml.etree.ElementTree as ET



def get_leaf_dics(node, path=None, summary_term=''):
    '''
        Flatten the parsed document tree into a list of bottom-level knowledge
        pieces; each element is a (path, node) tuple where path is the list of
        headings from the root down to that node.

        :param node: a parsed structure dict ({'heading', 'content', ...}) or a
                     plain content item (e.g. a string) found between levels
        :param path: heading path accumulated so far (None -> start at root);
                     kept as a parameter for the recursive calls
        :param summary_term: reserved for future use, currently ignored
        :return: list of (path_list, node_dict) tuples
    '''
    if path is None:
        path = []  # avoid the shared-mutable-default pitfall
    leaf_dic_paths = []

    if isinstance(node, dict) and 'content' in node:
        current_path = path + [node['heading']] if 'heading' in node else path
        if any(isinstance(item, dict) for item in node['content']):
            # at least one child is a sub-section: recurse into every item
            for item in node['content']:
                leaf_dic_paths.extend(get_leaf_dics(item, current_path))
        else:
            # all children are plain content: this node is a leaf
            leaf_dic_paths.append((current_path, node))

    # if there is no 'content' key, it exists between higher-level and the lower-level sections
    else:
        iso_node = {'heading': path, 'content': [node]}
        leaf_dic_paths.append((path, iso_node))

    return leaf_dic_paths


def handle_image(docx_path, image_files, output_directory, headings_stack, current_heading, image_index, image_file_info):
    """Extract the next embedded image from the .docx archive, save it into the
    knowledge-base directory, register it in ``image_record.json`` and append
    its placeholder id to the current heading's content list.

    :param docx_path: path of the source .docx file (read as a zip archive)
    :param image_files: zip-info list of image members detected in the docx
    :param output_directory: knowledge-base directory for images and the record
    :param headings_stack: live parsing stack; last element receives the img id
    :param current_heading: text of the heading currently being parsed
    :param image_index: 0-based index of the next image to consume
    :param image_file_info: zip-info object of the image member to extract
    :return: (headings_stack, image_index) with image_index advanced on success
    """
    # 1. load the per-KB image record; best effort — a missing or unreadable
    # record simply starts a fresh one
    img_record_pth = os.path.join(output_directory, 'image_record.json')
    try:
        if encryptor.encrypt:
            img_record = encryptor.load_from_file(img_record_pth)
        else:
            with open(img_record_pth, 'r', encoding='utf-8') as f:
                img_record = json.load(f)
    except Exception:
        img_record = {}

    if image_index < len(image_files):
        # 2. build a human-readable summary used as both file name and id seed;
        # the last content item of the current section often captions the image
        try:
            add_im_heading = headings_stack[-1]['content'][-1]
        except (IndexError, KeyError, TypeError):
            add_im_heading = ''

        combined_heading = (current_heading + ' ' + add_im_heading).strip()
        if combined_heading == '':
            image_summary = '图-' + str(image_index+1) + output_directory # OR use LVM to summarize
        else:
            image_summary = '图-' + str(image_index+1) + combined_heading

        image_summary = process_path_texts(image_summary, last=30)
        img_id = 'IMAGE_' + gen_str_codes(image_summary) + '_IMAGE'
        headings_stack[-1]['content'].append(img_id)

        # save the image bytes from the docx zip to the file system
        image_extension = os.path.splitext(image_file_info.filename)[-1]
        image_output_path = os.path.join(output_directory, f'{image_summary}{image_extension}')

        with zipfile.ZipFile(docx_path, 'r') as zip_ref:
            if encryptor.encrypt:
                encryptor.save_to_file(zip_ref.read(image_file_info.filename), image_output_path)
            else:
                with open(image_output_path, 'wb') as image_file:
                    image_file.write(zip_ref.read(image_file_info.filename))
        image_index += 1

        # 3. update the global image directory so identical images can be reused
        img_record.update({img_id : image_summary + image_extension})
        if encryptor.encrypt:
            encryptor.save_to_file(img_record, img_record_pth)
        else:
            with open(img_record_pth, 'w', encoding='utf-8') as f:
                json.dump(img_record, f, ensure_ascii=False, indent=4)
    return headings_stack, image_index


def handle_table(block, output_directory, headings_stack, current_heading):
    """Convert a docx table block to CSV, save it into the knowledge-base
    directory, register it in ``table_record.json`` and append its placeholder
    id to the current heading's content list.

    :param block: python-docx Table object
    :param output_directory: knowledge-base directory for tables and the record
    :param headings_stack: live parsing stack; last element receives the table id
    :param current_heading: text of the heading currently being parsed
    :return: the (possibly updated) headings_stack
    """
    # 1. load the per-KB table record; best effort — a missing or unreadable
    # record simply starts a fresh one
    tb_record_pth = os.path.join(output_directory, 'table_record.json')
    try:
        if encryptor.encrypt:
            tb_record = encryptor.load_from_file(tb_record_pth)
        else:
            with open(tb_record_pth, 'r', encoding='utf-8') as f:
                tb_record = json.load(f)
    except Exception:
        tb_record = {}

    # 2. generate a table id seeded by the surrounding heading/caption text
    try:
        add_tb_heading = headings_stack[-1]['content'][-1]
    except (IndexError, KeyError, TypeError):
        add_tb_heading = ''

    tb_df = table2df_doc(block)
    if len(tb_df) == 0 or tb_df.empty or tb_df.isna().all().all():
        return headings_stack  # nothing usable in this table

    if add_tb_heading == '':
        # no caption available: fall back to the column headers as summary text
        tb_summary = ('表-' + current_heading + ' ' + (' '.join(tb_df.columns))).strip()
    else:
        tb_summary = ('表-' + current_heading + ' ' + add_tb_heading).strip()
    tb_summary = process_path_texts(tb_summary, last=50)

    tb_str = tb_df.to_csv(index=False)
    table_id = 'TABLE_' + gen_str_codes(tb_str) + '_TABLE'
    headings_stack[-1]['content'].append(table_id)

    # 3. save the table to the file system as CSV
    tb_path = os.path.join(output_directory, tb_summary + '.csv')
    if encryptor.encrypt:
        encryptor.save_to_file(tb_df, tb_path)
    else:
        tb_df.to_csv(tb_path, encoding='utf-8', index=False)

    # 4. update the global table directory so identical tables can be reused
    tb_record.update({table_id : tb_summary + '.csv'})
    if encryptor.encrypt:
        encryptor.save_to_file(tb_record, tb_record_pth)
    else:
        with open(tb_record_pth, 'w', encoding='utf-8') as f:
            json.dump(tb_record, f, ensure_ascii=False, indent=4)
    return headings_stack


def iter_block_items(doc):
    """Yield the top-level blocks of *doc* in document order.

    Yields tuples of two shapes:
      (Paragraph, 'IMAGE', image_path) for every embedded image found in a
        paragraph (image_path may be None when the relationship lookup fails);
      (Paragraph, 'PTXT') for a paragraph containing no image;
      (Table, 'TABLE') for a top-level table.
    """
    for child in doc.element.body:
        if isinstance(child, CT_P):
            paragraph = Paragraph(child, doc)
            text = paragraph.text.strip()  # NOTE(review): computed but never used
        
            image_found = False
            for run in paragraph.runs:
                # A run whose XML contains DrawingML graphic data embeds a picture.
                if 'graphicData' in run._element.xml:
                    ET.register_namespace('a', 'http://schemas.openxmlformats.org/drawingml/2006/main')
                    ET.register_namespace('r', 'http://schemas.openxmlformats.org/officeDocument/2006/relationships')

                    # Try to extract image relationship ID
                    blip = run._element.xpath('.//a:blip/@r:embed')
                    if blip:
                        image_id = blip[0]
                        # Get the image path from the document's relationships
                        image_part = doc.part.related_parts.get(image_id)
                        image_path = image_part.partname if image_part else None
                        yield (paragraph, 'IMAGE', image_path)
                        image_found = True
            if not image_found:
                yield (paragraph, 'PTXT')
                    
        elif isinstance(child, CT_Tbl):
            yield (Table(child, doc), 'TABLE')


def matches_number_dot_pattern(text):
    """Detect dotted-numbering titles such as '1.2.3 Overview' or '3.'.

    :param text: candidate heading text
    :return: (matched, level) where level = dot count + 1 for dotted patterns
             like '1.2.3' (-> 3), 1 for a bare 'N.' prefix, and 0 otherwise
    """
    dotted = re.match(r"^([A-Za-z0-9]+(\s*\.\s*[A-Za-z0-9]+)+)(.*)?$", text)
    if dotted is not None:
        # Heading depth is one more than the number of dots in the numbering.
        return True, dotted.group(1).count('.') + 1

    # Fall back to a single trailing-dot number, e.g. '3.' or '3.  Intro'.
    if re.match(r"^(\d+\.)(\s+.+)?$", text) is not None:
        return True, 1

    return False, 0


def table2df_doc(table):
    """Convert a python-docx table into a DataFrame, using the first row as
    the column header.

    :param table: object exposing ``rows``, each with ``cells`` of ``.text``
                  (a python-docx Table in practice)
    :return: DataFrame of the remaining rows; empty DataFrame for an empty table
    """
    data = [[cell.text.strip() for cell in row.cells] for row in table.rows]
    if not data:
        # guard: df.iloc[0] below would raise IndexError on a zero-row table
        return pd.DataFrame()

    df = pd.DataFrame(data)
    df.columns = df.iloc[0] # convert the first row into columns (by default the columns are 0, 1, 2...
    df = df.drop(df.index[0])
    # drop rows that are entirely missing
    df = df.dropna(how='all')
    # drop columns that are entirely missing
    df = df.dropna(axis=1, how='all')
    df.reset_index(drop=True, inplace=True)
    return df

        
def parse_doc_headings(para, text, special_doc_type=None):
    """Return the 1-based outline level of a paragraph, or None for body text.

    Detection order:
      1. Word heading styles ('Heading N' / '标题 N');
      2. an explicit w:outlineLvl paragraph property (0-based, converted);
      3. doc-type specific text heuristics (currently only 'standard').

    :param para: python-docx Paragraph object
    :param text: raw paragraph text used by the regex heuristics
    :param special_doc_type: enables extra heuristics, e.g. 'standard'
    :return: int outline level or None
    """
    level = None
    style_name = para.style.name

    # locate an explicit outline-level property, if the paragraph carries one
    pPr = para._element.find(qn('w:pPr'))
    plvl = pPr.find(qn('w:outlineLvl')) if pPr is not None else None

    # 1. check .docx style settings
    if style_name.startswith('Heading') or style_name.startswith('标题'):
        try:
            return int(style_name.split(' ')[1])
        except (IndexError, ValueError) as e:
            # style name without a numeric suffix (e.g. bare 'Heading')
            print(e)
    # 2. check .docx paragraph numbering settings
    elif plvl is not None:
        return int(plvl.get(qn('w:val'))) + 1
    # 3. check special titles based on doc types
    else:
        if special_doc_type == 'standard':
            matched, num = matches_number_dot_pattern(text)
            if matched:
                return num
        else: # allow for adding regular expressions for other doc types
            pass
    return level

# If the entire paragraph is bold, treat it as a level-1 heading (heuristic, currently disabled)
# def bold_headings(para):
#     for run in para.runs:
#         if not run.bold:
#             return None
#     return 1

def parse_docx(docx_path, kb_dir=None, special_doc_type=None, start_text="", end_text=""):
    """Parse a .docx file into a nested heading/content structure.

    :param docx_path: path of the source .docx file
    :param kb_dir: knowledge-base directory for images, tables and train data
    :param special_doc_type: optional doc type enabling extra heading heuristics
    :param start_text: if non-empty, skip paragraphs before the first one
                       containing this substring
    :param end_text: if non-empty, stop at the first paragraph containing it
    :return: {'content': nested list of {'heading', 'content', 'level'} dicts}
    """
    doc = Document(docx_path)
    doc_structure = []

    heading_data = pd.DataFrame(columns=['text', 'level'])  # training data for title classifiers
    headings_stack = [{'level': -1, 'content': doc_structure}]  # sentinel root (no 'heading' key)
    current_heading = ''
    image_index = 0

    # process the whole document unless an explicit start/end window is given
    start_processing = start_text == "" and end_text == ""

    image_files = detect_images(docx_path, source_type='docx')

    # fix bug if there is no title: pre-scan all paragraphs for any heading
    doc_titles = {}
    for block_tuple in iter_block_items(doc):
        block = block_tuple[0]
        if isinstance(block, Paragraph):
            text = remove_spaces(re.sub(r'\n', '__HHF__', block.text.strip()))
            outline_level = parse_doc_headings(block, block.text.strip(), special_doc_type)
            doc_titles.update({text : outline_level})

    all_none = all(item is None for item in doc_titles.values())
    if all_none:
        # no heading anywhere: use the file name (without extension) as top title
        text = docx_path.split(os.sep)[-1].split('.')[0]
        outline_level = 1
        heading_data.loc[len(heading_data)] = [text, outline_level]
        current_heading = text
        new_content = {'heading': text, 'content': [], 'level': outline_level}
        headings_stack[-1]['content'].append(new_content)  # Add to parent's content
        headings_stack.append(new_content)  # Push onto the stack
        print('\tthere is no title in the document, use doc name as the initial top title: ', text)

    # now go into detailed parsing with at least one default title
    for block_tuple in iter_block_items(doc):
        block = block_tuple[0]
        label = block_tuple[1]

        if isinstance(block, Paragraph):
            text = remove_spaces(re.sub(r'\n', '__HHF__', block.text.strip()))
            if start_text != "" and start_text in text.strip():
                start_processing = True

            if end_text != "" and end_text in text.strip():
                break

            if start_processing:
                if len(text.strip()) == 0 and label == 'PTXT':
                    continue

                outline_level = parse_doc_headings(block, block.text.strip(), special_doc_type)
                heading_data.loc[len(heading_data)] = [text, outline_level]

                if text.strip():
                    if outline_level is not None:
                        print('found title: ', text, ' level is ', outline_level)
                        try:
                            # skip a heading identical to the one just pushed
                            if headings_stack[-1]['heading'] == text:
                                continue
                        except KeyError:
                            pass  # sentinel root carries no 'heading'

                        while headings_stack[-1]['level'] >= outline_level:
                            headings_stack.pop()  # Find the correct parent level

                        current_heading = text
                        new_content = {'heading': text, 'content': [], 'level': outline_level}
                        headings_stack[-1]['content'].append(new_content)  # Add to parent's content
                        headings_stack.append(new_content)  # Push onto the stack
                    else:
                        headings_stack[-1]['content'].append(text)

                if label == 'IMAGE':
                    image_path = block_tuple[2]
                    # image_path is None when the relationship lookup failed
                    if image_path is not None:
                        for image_file in image_files:
                            if image_path.endswith(image_file.filename):
                                headings_stack, image_index = handle_image(
                                    docx_path, image_files, kb_dir, headings_stack,
                                    current_heading, image_index, image_file)
                                break

        elif isinstance(block, Table):
            if start_processing:
                headings_stack = handle_table(block, kb_dir, headings_stack, current_heading)

    # record training data (1 = heading, 0 = body text)
    heading_data['binary_level'] = heading_data['level'].apply(lambda x: 0 if x is None else 1)
    if encryptor.encrypt:
        encryptor.save_to_file(heading_data, os.path.join(kb_dir, 'table_train.csv'))
    else:
        heading_data.to_csv(os.path.join(kb_dir, 'table_train.csv'), encoding='utf-8')
    return {'content' : doc_structure}
  
    
def convert_doc2dics(parsed_structure, save=True, kb_dir=None, ISSUE_FILES_PATH=None, model_config=None, call_llm=None, llm_histories=None, local_llm_name=None, local_llm=None, local_llm_tz=None, local_summary=False, know_df_cols=[], split_char='-->'):
    """Flatten a parsed document structure into knowledge items and persist
    them as KB_PTXT.csv plus a path graph (graph.json) under *kb_dir*.

    :param parsed_structure: output of parse_docx ({'content': [...]})
    :param save: when True, write the knowledge DataFrame and graph to disk
    :param kb_dir: knowledge-base directory; its basename is the doc name
    :param know_df_cols: column names reserved for the knowledge DataFrame
                         (read-only here; never mutated despite the [] default)
    :param split_char: separator joining path segments in the key strings
    :return: (doc_graph, failed_lst) — the restored path graph and the keys
             that failed content processing (collected for debugging)

    NOTE(review): the remaining parameters are forwarded verbatim to
    postprocess_leaf_dics for LLM-based summarization.
    """
    leaf_dics = get_leaf_dics(parsed_structure)
    leaf_dics = postprocess_leaf_dics(leaf_dics, model_config, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, local_summary)

    doc_name = os.path.splitext(kb_dir.split(os.sep)[-1])[0]
    failed_lst = [] # for debug
    doc_graph = {}
    if len(leaf_dics) == 0:
        return doc_graph, failed_lst

    if save == True:
        df_list = []
        path_keys = []

        # know_item has 4 parameters: path_identifier {head content} keywords summary
        for know_item in leaf_dics:
            key = know_item[0]
            if key.strip() == '':
                continue

            path_keys.append(doc_name + split_char + key)
            know_val = '\n'.join(know_item[1]['content'])
            inner_key = key.split(split_char)[-1]
            matches = find_matches_parsing(know_val, key)

            try:
                know_dic, know_id = process_full_contents(know_val, inner_key)
                know_dic = json.dumps(know_dic, ensure_ascii=False, indent=4)
                know_keywords = ' '.join(know_item[2])
                know_summary = know_item[-1]
                df_list.append(pd.DataFrame({'path': [key],
                                             'content': [know_dic],
                                             'linkage': matches,
                                             'summary': know_summary,
                                             'keywords': know_keywords,
                                             'know_id': know_id}))
            except Exception as e:
                print(e)
                failed_lst.append(key)
                raise

        if not df_list:
            # every key was blank: pd.concat([]) would raise ValueError
            return doc_graph, failed_lst

        doc_df = pd.concat(df_list, ignore_index=True)
        doc_df = process_dup_paths_df(doc_df)
        if encryptor.encrypt:
            encryptor.save_to_file(doc_df, os.path.join(kb_dir, 'KB_PTXT.csv'))
        else:
            doc_df.to_csv(os.path.join(kb_dir, 'KB_PTXT.csv'), encoding='utf-8')

        doc_graph, _ = restore_graph_by_paths(path_keys, split_char, '__摘要总结__')
        graph_path = os.path.join(kb_dir, 'graph.json')
        if encryptor.encrypt:
            encryptor.save_to_file(doc_graph, graph_path)
        else:
            with open(graph_path, 'w', encoding='utf-8') as f:
                json.dump(doc_graph, f, ensure_ascii=False, indent=4)

        # encryptor.save_to_file(failed_lst, ISSUE_FILES_PATH)
        # with open(ISSUE_FILES_PATH, 'a', encoding='utf-8') as f:
        #     for ff in failed_lst:
        #         f.write(ff + '\n')

    return doc_graph, failed_lst
        
        

if __name__ == "__main__":
    # Manual smoke-test entry point: prepare paths and options for parsing a
    # sample .docx into a knowledge-base directory.
    # NOTE(review): parse_docx/convert_doc2dics are not invoked here; the
    # variables below are staged for interactive use.
    from META import USER_SETTINGS, llm_apis, llm_histories, local_llm, local_llm_tz
    from utlis import know_df_cols, all_df_cols
    
    filename= '测试文件.docx'  # sample document (Chinese: "test file")
    special_doc_type = None # 'standard'
    raw_file_path = os.path.join(USER_SETTINGS['KB_PATH'], 'Supplementary Files', filename)
    kb_dir = os.path.join(USER_SETTINGS['KB_PATH'], filename)
    
    # create the KB output directory; ignore failures (e.g. permissions)
    try:
        os.makedirs(kb_dir, exist_ok=True)
        print('\t creating KB directory at {}'.format(kb_dir))
    except:
        pass
    
    # parsing window and output options (see parse_docx / convert_doc2dics)
    start_line = ''
    end_line = ''
    auto_recog = False
    save = True
    local_summary =True
    
            
    print('')

       
        
        
        
        



