import os
import re
import json
import io

from docx import Document
from docx.text.paragraph import Paragraph
from docx.shared import Pt, RGBColor
from docx.shared import Pt
from docx.shared import Cm
from docx.oxml.ns import qn
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.enum.text import WD_COLOR_INDEX

from utlis import *
from md_parser import convert_markdwon2docx
from doc_parser import iter_block_items, parse_doc_headings



def erase_contents(docx_path):
    """Remove a previously generated docx so it can be rebuilt from scratch.

    A no-op when *docx_path* does not exist as a regular file.
    """
    file_present = os.path.isfile(docx_path)
    if file_present:
        os.remove(docx_path)


def find_title_level(title, doc_tree, current_level=0):
    """Depth-first search for *title* among the keys of a nested dict tree.

    Parameters
    ----------
    title : str
        Heading text to look for.
    doc_tree : dict
        Nested mapping of title -> subtree.
    current_level : int
        Depth of *doc_tree* relative to the root (internal recursion arg).

    Returns
    -------
    int | None
        0-based depth at which *title* first appears as a key, or None
        when it is absent from the tree.
    """
    if title in doc_tree:
        return current_level
    for value in doc_tree.values():
        # ROBUSTNESS FIX: leaf values are not always dicts; recursing into a
        # string/scalar previously raised AttributeError on .items().
        if not isinstance(value, dict):
            continue
        level = find_title_level(title, value, current_level + 1)
        if level is not None:
            return level
    return None


def generate_doc_paths(tree, path=None, link=';'):
    """Flatten a nested title tree into joined path strings (e.g. 'a;b;c').

    Two tree layouts are supported: the new one, where each value is a dict
    carrying a 'children' mapping (plus e.g. a 'serial' field), and the legacy
    one of plainly nested dicts. A path terminates at leaves: empty children,
    empty dicts, or non-dict values.

    Parameters
    ----------
    tree : dict
        Mapping of title -> subtree.
    path : list | None
        Titles accumulated from the root (internal recursion arg).
    link : str
        Separator used to join titles into one path string.

    Returns
    -------
    list[str]
        One joined string per root-to-leaf path.
    """
    # BUG FIX: the default was a shared mutable list (path=[]), which also
    # made the `path is None` guard dead code; use the None sentinel idiom.
    if path is None:
        path = []

    paths = []
    for key, value in tree.items():
        current_path = path + [key]
        if isinstance(value, dict) and "children" in value:
            # new layout: node is {'serial': ..., 'children': {...}}
            children = value["children"]
            if children:
                paths.extend(generate_doc_paths(children, current_path, link))
            else:
                paths.append(link.join(current_path))
        elif isinstance(value, dict):
            # legacy layout: plain nested dict
            if value:
                paths.extend(generate_doc_paths(value, current_path, link))
            else:
                paths.append(link.join(current_path))
        else:
            paths.append(link.join(current_path))
    return paths


def generate_doc_dic(use_cache, file_name, call_llm=None, template_dir=None, cache_path=None, model_config=None, special_doc_type=None):
    """Build (or load from cache) the heading tree of a docx template.

    Parameters
    ----------
    use_cache : bool
        When True, read the tree from *cache_path* instead of parsing the docx.
    file_name : str
        Template file name, joined onto *template_dir*.
    call_llm, model_config
        LLM callable and its config for online title parsing; on failure the
        local parser is used instead.
    cache_path : str
        Location of the cached tree (encrypted blob or plain JSON, depending
        on the global ``encryptor``).
    special_doc_type
        Passed through to parse_doc_headings to adjust heading detection —
        semantics defined in doc_parser; not inspected here.

    Returns
    -------
    (tree, file_path)
        The heading tree and the full template path.
    """
    file_path = os.path.join(template_dir, file_name)
    
    if use_cache:
        # `encryptor` comes in via the wildcard import from `utlis`; it
        # decides whether on-disk caches are encrypted blobs or plain JSON.
        if encryptor.encrypt:
            tree = encryptor.load_from_file(cache_path)
        else:
            with open(cache_path, 'r', encoding='utf-8') as f:
                tree = json.load(f)
    else:
        doc = Document(file_path)
        doc_texts_levels = []  # accumulates (text, outline_level) tuples
        
        for block_tuple in iter_block_items(doc):            
            block = block_tuple[0]  # the docx object itself
            label = block_tuple[1]  # block-type tag produced by iter_block_items
            
            if isinstance(block, Paragraph):
                # current version only supports filling empty documents, not supporting documents with partially filled contents
                text = remove_spaces(block.text.strip())
                if label=='PTXT' and len(text.strip())>0:
                    outline_level = parse_doc_headings(block, text, special_doc_type)
                    doc_texts_levels.append((text, outline_level))
        
        try:
            # Preferred path: let the LLM reconstruct the title hierarchy from
            # the flat text dump. cache_path is forwarded, presumably so the
            # result gets cached — confirm inside use_llm_api.
            full_text = '\n'.join([t[0] for t in doc_texts_levels])
            tree, _ = use_llm_api(call_llm,
                                    histories=[],
                                    paras={'task':'parse-titles', 'query':'', 'texts':full_text, 'cache_path':cache_path},
                                    config=model_config)
        except Exception as e:
            # Fallback: rebuild the tree locally from the parsed outline levels.
            print('\ttry using online level parsing fail due to ', e, 'using local parser instead...')
            tree = restore_graph_by_texts(doc_texts_levels)
    return tree, file_path


def generate_doc_from_txt(KB_PATH, know_df, docx_pth, txt_path, tb_record, img_record, font_lst=None, tree=None, call_llm=None, local_llm=None, local_llm_name=None, local_llm_tz=None, llm_histories=None, model_config=None, rewrite_threshold=None):
    """Render the knowledge DataFrame into a new docx and persist it.

    The document body is produced by process_lines_for_doc(); the result is
    saved to *docx_pth* directly, or serialized through an in-memory buffer
    and handed to the encryptor when encryption is enabled.
    """
    # Build the document content row by row from the knowledge table.
    document = Document()
    document = process_lines_for_doc(know_df, document, KB_PATH, tb_record, img_record,
                                     font_lst, tree, call_llm, local_llm, local_llm_name,
                                     local_llm_tz, llm_histories, model_config,
                                     rewrite_threshold)

    if not encryptor.encrypt:
        document.save(docx_pth)
    else:
        # Serialize to memory first so the encryptor can persist raw bytes.
        buffer = io.BytesIO()
        document.save(buffer)
        buffer.seek(0)
        encryptor.save_to_file(buffer.getvalue(), docx_pth)

    # UNDERDEVELOPMENT: generating other file types


def rewrite_(full_texts, call_llm, local_llm_name, local_llm, local_llm_tz, llm_histories, model_config, task=None, add_paras={}, rewrite_fields=[]):
    """Ask the LLM backend to rewrite *full_texts* under the given *task*.

    Extra context is threaded through: every field named in *rewrite_fields*
    is copied from *add_paras* (empty string when absent) into the request
    parameters. Returns the rewritten content, or None when the call fails
    for any reason (the error is printed, not raised).
    """
    try:
        request = {field: add_paras.get(field, '') for field in rewrite_fields}
        request['task'] = task
        request['texts'] = full_texts
        request['query'] = ''
        request['local_model_name'] = local_llm_name
        request['local_model'] = local_llm
        request['local_tz'] = local_llm_tz

        res_content, llm_histories = use_llm_api(call_llm,
                                                 histories=llm_histories,
                                                 paras=request,
                                                 config=model_config)
        return res_content
    except Exception as e:
        # best-effort: callers treat None as "rewrite unavailable"
        print('\trewrite texts fails...', e)
        return None


def handle_paras(document, agg_texts, font_lst, call_llm, local_llm_name, local_llm, local_llm_tz, llm_histories, model_config, rewrite_threshold, rewrite_histories, last_section_contents='', theme='', trace_num=2):
    """Flush accumulated paragraph texts into *document*.

    The buffered texts are split on the '__HHF__' section marker, rewritten
    via rewrite_() (falling back to the originals when the rewrite fails),
    logged into *rewrite_histories*, and appended as body paragraphs.

    Returns
    -------
    (document, agg_texts, rewrite_histories)
        agg_texts is reset to [] after a flush; all three are returned so the
        caller can rebind its locals.
    """
    if len(agg_texts) == 0:
        return document, agg_texts, rewrite_histories

    temp_contents = ''.join(agg_texts).split('__HHF__')
    contents = [c.strip().replace('\n', '') for c in temp_contents if not c.strip() == '']
    if not contents:
        # buffer held only markers/whitespace: nothing to write, just clear it
        return document, [], rewrite_histories

    # BUG FIX: rewrite_() accepts extra context only through its
    # add_paras/rewrite_fields parameters (it has no last_section_contents/
    # theme/trace_num/rewrite_threshold keywords) and returns a single value,
    # not a pair — the previous call raised TypeError before any rewriting.
    extra = {'last_section_contents': last_section_contents,
             'theme': theme,
             'trace_num': trace_num,
             'rewrite_threshold': rewrite_threshold}
    rewritten = rewrite_(contents, call_llm, local_llm_name, local_llm, local_llm_tz,
                         llm_histories, model_config, task='rewrite-paras-whole',
                         add_paras=extra, rewrite_fields=list(extra))
    if rewritten is None:
        rewrite_contents = contents        # rewrite failed: keep the originals
    elif isinstance(rewritten, str):
        rewrite_contents = [rewritten]     # normalize a single-string result
    else:
        rewrite_contents = rewritten

    # Log originals vs. rewritten text; the scalar 'rewrite' value is
    # broadcast by pandas across the 'origin' rows.
    rewrite_update = pd.DataFrame({'origin': contents,
                                   'rewrite': '\n'.join(rewrite_contents).strip()})
    rewrite_histories = pd.concat([rewrite_histories, rewrite_update], ignore_index=True)

    document = add_paras(document, rewrite_contents, title_level=-1, font_lst=font_lst)
    return document, [], rewrite_histories
    

def process_lines_for_doc(know_df, document, KB_PATH, tb_record, img_record, font_lst=None, tree=None, call_llm=None, local_llm=None, local_llm_name=None, local_llm_tz=None, llm_histories=None, model_config=None, rewrite_threshold=None, trace_num=1):  
    """Walk the knowledge DataFrame row by row and materialize it into *document*.

    Plain-text rows ('PTXT') are buffered and flushed through handle_paras()
    (LLM rewrite) whenever a non-text row arrives; 'TABLE_*' rows are loaded
    from CSV (or the encrypted store) and rendered via write_table(); 'IMAGE_*'
    rows are inserted centered at 10 cm width. Rows whose content is NaN,
    empty, or contains '-->' are skipped. Returns the mutated Document.
    """
    agg_texts = []               # pending paragraph texts awaiting a flush
    current_subtitle = ''        # last seen section title (tail of row['path'])
    last_section_contents = ''   # context handed to the rewriter
    rewrite_histories = pd.DataFrame(columns=['origin', 'rewrite'])
    
    for _, row in know_df.iterrows():
        kg_contents_ = row['content']
        if pd.isna(kg_contents_) or not kg_contents_ or '-->' in kg_contents_:
            continue
        
        # NOTE(review): `type` shadows the builtin; here it is the row's block tag.
        type = row['type']
        current_subtitle = row['path'].split('-->')[-1]            
        if type=='PTXT':
            agg_texts.append(kg_contents_)
            # for type, info in parser_context(kg_contents_):
            #     if type=='text':
            #         agg_texts.append(info)
            #     elif type=='image':
            #         document, agg_texts = handle_paras(document, agg_texts, font_lst, call_llm, llm_histories, rewrite_threshold)
            #         img_base64 = info.split(',')[-1]
            #         img_data = base64.b64decode(img_base64)
            #         img_path = io.BytesIO(img_data)
            #         para = document.add_paragraph()
            #         para.alignment = WD_ALIGN_PARAGRAPH.CENTER
            #         para.add_run().add_picture(img_path, width=Cm(10))
            #         para = document.add_paragraph()
            #         para.alignment = WD_ALIGN_PARAGRAPH.CENTER
            #     elif type=='table':
            #         document, agg_texts = handle_paras(document, agg_texts, font_lst, call_llm, llm_histories, rewrite_threshold)
            #         write_table(document, info)
        else:
            # A non-text row ends the current paragraph run: flush it first.
            # The last trace_num history rows (positional tail slice) provide
            # rewriting context. NOTE(review): 'origin' is used here but
            # 'rewrite' below at the final flush — confirm which is intended.
            last_section_contents = '\n'.join(rewrite_histories[-trace_num:]['origin'].to_list())
            document, agg_texts, rewrite_histories = handle_paras(document, agg_texts, font_lst, call_llm, local_llm_name, local_llm, local_llm_tz, llm_histories, model_config, rewrite_threshold, rewrite_histories, last_section_contents=last_section_contents, theme=current_subtitle, trace_num=trace_num)
            if 'TABLE_' in type:
                # tb_record maps the tag to a '-->'-joined relative path under KB_PATH
                tb_path = tb_record[type].replace('-->', os.path.sep)
                tb_path = os.path.join(KB_PATH, tb_path)
                if encryptor.encrypt:
                    tb_df = encryptor.load_from_file(tb_path)
                else:
                    tb_df = pd.read_csv(tb_path, encoding='utf-8')
                write_table(document, tb_df)
                
            elif 'IMAGE_' in type:
                img_path = img_record[type].replace('-->', os.path.sep)
                img_path = os.path.join(KB_PATH, img_path)
                if encryptor.encrypt:
                    # decrypt to bytes and feed the picture from memory
                    image_binary_data = encryptor.load_from_file(img_path)
                    img_path = io.BytesIO(image_binary_data)
                
                para = document.add_paragraph()
                para.alignment = WD_ALIGN_PARAGRAPH.CENTER
                para.add_run().add_picture(img_path, width=Cm(10))
                # trailing empty centered paragraph as spacing below the image
                para = document.add_paragraph()
                para.alignment = WD_ALIGN_PARAGRAPH.CENTER
            
            elif 'EQUATION_' in type:
                pass # UNDER DEVELOPMENT
    
    # final flush for any trailing text rows
    if len(agg_texts)>0:
        last_section_contents = '\n'.join(rewrite_histories[-trace_num:]['rewrite'].to_list())
        document, agg_texts, rewrite_histories = handle_paras(document, agg_texts, font_lst, call_llm, local_llm_name, local_llm, local_llm_tz, llm_histories, model_config, rewrite_threshold, rewrite_histories, last_section_contents=last_section_contents, theme=current_subtitle, trace_num=trace_num)
    
    # NOTE(review): hard-coded debug outputs; fails if ../KB_TEMPS_DEMO is absent.
    rewrite_histories.to_csv('../KB_TEMPS_DEMO/rewrite_debug.csv')
    document.save('../KB_TEMPS_DEMO/res_doc.docx')
    return document
    
    
def add_paras(document, input_txts, title_level=-1, font_lst=None, dic_opt='general_1'):
    """Append each string in *input_txts* to *document* as a justified paragraph.

    Formatting comes from a built-in preset keyed by *dic_opt* and the heading
    level *title_level* (-1 = body text). When *font_lst* is given, its d-th
    entry overrides the body-text (-1) spec for the d-th paragraph — it must
    be at least as long as *input_txts*.

    Parameters
    ----------
    document : docx.Document
    input_txts : sequence of str
    title_level : int
        Key into the preset; unknown levels fall back to the body (-1) spec.
    font_lst : list[dict] | None
        Per-paragraph overrides merged into the -1 spec.
    dic_opt : str
        Which preset table to use.

    Returns
    -------
    The same Document, mutated in place.
    """
    font_dics = {
        'general_1' : {
                1 : { 'size':16, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                2 : { 'size':14, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                3 : { 'size':14, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                4 : { 'size':14, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                5 : { 'size':14, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                6 : { 'size':14, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                7 : { 'size':12, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':True, 'italic':False, 'hc': False },
                -1 : { 'size':12, 'color':RGBColor(0, 0, 0), 'cn_font':u'宋体', 'en_font':'Times New Roman',  'bold':False, 'italic':False, 'hc': False }
                # can be continuously enriched
            }
        }

    default_font = font_dics[dic_opt]
    for d, txt in enumerate(input_txts):
        # IDIOM FIX: `is not None` instead of `not font_lst==None`.
        if font_lst is not None:
            default_font[-1].update(font_lst[d])

        # ROBUSTNESS FIX: unknown title_level previously raised KeyError when
        # applying run formatting; fall back to the body-text spec instead.
        spec = default_font.get(title_level, default_font[-1])

        # BUG FIX: the former bare `except:` swallowed every error just to
        # handle a missing optional 'style' key — use an explicit .get().
        if spec.get('style') == 'List Bullet':
            para_ = document.add_paragraph(txt, style='List Bullet')
        # elif ...  (further named styles can be added here)
        else:
            para_ = document.add_paragraph(txt)
        para_.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY

        for run in para_.runs:
            run.font.size = Pt(spec['size'])
            run.font.color.rgb = spec['color']
            run.font.name = spec['en_font']
            # east-Asian font must be set on the raw XML element
            run._element.rPr.rFonts.set(qn('w:eastAsia'), spec['cn_font'])
            run.bold = spec['bold']
            run.italic = spec['italic']
            run.font.highlight_color = WD_COLOR_INDEX.YELLOW if spec['hc'] == True else None

    return document
    

def parse_fonts(para, title_level, cn_font=u'宋体'):
    """Read font attributes off the first run of *para*.

    Returns {title_level: attrs} with the run's bold/italic/size/font/color
    plus the paragraph text, or None when the paragraph has no runs or any
    attribute is missing (the error is printed, not raised).
    """
    try:
        lead_run = para.runs[0]
        attrs = {
            'text': para.text,
            'bold': lead_run.bold,
            'italic': lead_run.italic,
            'size': lead_run.font.size.pt,
            'en_font': lead_run.font.name,
            'cn_font': cn_font,
            'color': lead_run.font.color.rgb,
        }
        return {title_level: attrs}
    except Exception as exc:
        print(exc)
        return None


def write_table(document, tb_df):
    """Render a pandas DataFrame into *document* as a grid table.

    Row 0 holds the column labels (NaN labels become empty strings); the
    remaining rows hold the stringified cell values.
    """
    n_rows, n_cols = tb_df.shape
    table = document.add_table(rows=n_rows + 1, cols=n_cols, style='Table Grid')
    table.autofit = True

    # header row
    for j, col_name in enumerate(tb_df.columns.fillna('')):
        table.cell(0, j).text = col_name

    # data rows, offset by one for the header
    for i in range(n_rows):
        row_offset = i + 1
        for j in range(n_cols):
            table.cell(row_offset, j).text = str(tb_df.iat[i, j])
            
            
def transform_to_hierarchy(data):
    """Convert a nested dict tree into a list of JSON-style nodes.

    Each key becomes {'name': key}; a non-empty subtree adds a 'children'
    list produced recursively. Leaves (empty dicts) carry no 'children' key.
    """
    nodes = []
    for name, subtree in data.items():
        node = {"name": name}
        sub_nodes = transform_to_hierarchy(subtree)
        if sub_nodes:
            node["children"] = sub_nodes
        nodes.append(node)
    return nodes

              
if __name__ == "__main__":

    # Placeholder entry point: this module is meant to be imported; running it
    # directly only prints a blank line.
    print()

    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    