import numpy as np
import os
import pandas as pd
import re
import json
from file_encryptor import encryptor

from utlis import know_df_cols, gen_str_codes, find_matches_parsing, restore_graph_by_paths
from knowledge_generator import process_full_contents
from txt_parser import extact_local_keywords2summary
from image_parser import image_summary_url
from table_parser import tb_summarize, identify_tables, extract_tables_by_forms



def eval_md_headings(line, call_llm=None, local_llm_name=None, local_llm=None, local_llm_tz=None, model_config=None):
    """Detect an ATX markdown heading in *line*.

    Returns (title_text, level) when the line is a heading, where level is
    the number of leading '#' characters; otherwise returns (None, None).
    The llm-related parameters are kept for interface compatibility and are
    currently unused (see the UNDERDEVELOPMENT note below).
    """
    # all_candi_titles = pred_titles_binary(all_line_txts, call_llm, local_llm_name, local_llm, local_llm_tz, model_config)
    # UNDERDEVELOPMENT, additional conditions specific to md files
    match = re.match(r'^\s*(#+)\s*(.*)$', line)  # allow spaces (or nothing) between '#' and the title text
    if match:
        level = len(match.group(1))  # number of '#' marks the heading level
        # fix: take the captured title instead of line.lstrip('#'), which
        # failed to drop the hashes when the line had leading whitespace
        title_text = match.group(2).strip()
        return title_text, level
    return None, None
    

def split_txt4markdown(text):
    """Split markdown *text* into cleaned segments, keeping the markers.

    The text is split on runs of '#'/newlines and on '-', the markdown
    symbols are kept, and a symbols-only piece is glued onto the following
    piece (e.g. '## ' + 'Title' -> '##Title').  Empty pieces are dropped.
    """
    # Step 1: split on '#' runs and newlines, keeping the delimiters
    split_text = re.split(r'(#+\s*|\n+)', text)

    # Step 2: split each piece on '-' as well, keeping the '-'
    split_result = []
    for section in split_text:
        split_result.extend(re.split(r'(-)', section))

    # Step 3: drop empty pieces and merge marker-only pieces with the next one
    cleaned_result = []
    buffer = ""

    for line in split_result:  # fix: the enumerate index was never used
        cleaned_line = line.strip()
        if not cleaned_line:  # skip empty pieces
            continue
        if re.match(r'^[#-]+$', cleaned_line):  # piece contains only markdown symbols
            buffer = cleaned_line  # hold it until the text it decorates arrives
        else:
            if buffer:
                cleaned_result.append(buffer + cleaned_line)  # merge marker with its text
                buffer = ""
            else:
                cleaned_result.append(cleaned_line)
    # NOTE(review): a trailing marker-only piece is silently dropped here —
    # preserved as-is since downstream callers rely on this output shape.
    return cleaned_result


def convert_markdwon2docx(md_text):
    """Convert markdown text into (lines, font_info_list) for docx rendering.

    Each returned line is stripped of its markdown markers; the parallel
    font_info dict records bold/italic flags and a 'Heading' style for '#'
    segments.  The two returned lists are index-aligned.
    """
    bold_pattern = r'\*\*(.*?)\*\*'
    italic_pattern = r'\*(.*?)\*'
    segments = split_txt4markdown(md_text)

    lines = []
    font_info_list = []

    for segment in segments:
        segment = segment.strip()
        if not segment:
            continue

        font_info = {
            'bold': False,
            'italic': False,
            'style' : 'Not Specified'
        }

        if re.match(r'(#+)', segment):
            segment = re.sub(r'(#+)', '', segment).strip()
            font_info['style'] = 'Heading'

        # Check for bold and italic text
        if '**' in segment:
            font_info['bold'] = True  # fix: was False, so bold was never flagged
            segment = re.sub(bold_pattern, r'\1', segment)

        if '*' in segment and not '**' in segment:  # avoid treating bold as italic
            font_info['italic'] = True  # fix: was False, so italic was never flagged
            segment = re.sub(italic_pattern, r'\1', segment)

        # Handle lists starting with '-'
        if segment.startswith('-'):
            # fix: str.replace(r'^-', '') looked for the literal substring '^-'
            # and did nothing; strip the leading dash with a regex instead
            segment = re.sub(r'^-', '', segment)
            # font_info['style'] = 'List Bullet'

        lines.append(segment)
        font_info_list.append(font_info)
    return lines, font_info_list


def clean_md_table_lines(table_lines, start_line_num):
    """Normalize every markdown table row to the header's column count.

    Rows with surplus columns are truncated; rows with missing columns are
    padded with empty trailing cells.  Returns the repaired rows and the
    absolute line numbers (offset from start_line_num) of rows that were
    touched.
    """
    expected_columns = table_lines[0].count('|') - 1
    repaired = []
    touched = []  # absolute line numbers of rows that needed repairing

    for offset, row in enumerate(table_lines):
        found_columns = row.count('|') - 1
        if found_columns == expected_columns:
            repaired.append(row)
            continue

        touched.append(start_line_num + offset)
        if found_columns > expected_columns:
            # Too many cells: keep only the first expected_columns cells.
            cells = row.split('|')
            repaired.append('|'.join(cells[:expected_columns + 1]))
        else:
            # Too few cells: pad the row with empty trailing cells.
            repaired.append(row + '|' * (expected_columns - found_columns))

    return repaired, touched


def update_df_list(df_list, content, inner_key, path, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, local_summary, add_req=''):
    """Serialize one section's content into a one-row DataFrame and append it.

    Returns the (mutated) df_list, an emptied content string for the caller
    to continue accumulating into, and the knowledge id of this section.
    """
    knowledge, know_id = process_full_contents(content, inner_key)
    serialized = json.dumps(knowledge, ensure_ascii=False, indent=4)
    linkage = find_matches_parsing(content, path)

    # Optional local LLM summarization; otherwise leave keywords/summary blank.
    keywords, summary = '', ''
    if local_summary:
        keywords, summary = extact_local_keywords2summary(
            content, call_llm, llm_histories, local_llm_name, local_llm,
            local_llm_tz, model_config, add_req=add_req)

    row = pd.DataFrame({
        'path': [path],
        'content': [serialized],
        'linkage': linkage,
        'summary': summary,
        'keywords': ' '.join(keywords),
        'know_id': know_id,
    })
    df_list.append(row)
    return df_list, '', know_id
    

def parse_md(file_path, kb_dir, call_llm=None, llm_histories=None, local_llm_name=None, local_llm=None, local_llm_tz=None, model_config=None, local_summary=False, split_char='-->'):
    """Parse a markdown file into a knowledge dataframe and a heading graph.

    Headings build a hierarchical path; image lines are replaced with IMAGE_
    ids, table lines with TABLE_ ids, and the plain text in between is
    grouped per heading.  Results are written under kb_dir (KB_PTXT.csv,
    graph.json, image_record.json, table_record.json); the restored document
    graph is returned.
    """
    # --- helper functions ---------------------------------------------------
    def is_image_md(line):
        # All image urls referenced via markdown image syntax on this line.
        md_img_pattern = r'!\[.*?\]\(([^)\s]+)\)'
        imgs = re.findall(md_img_pattern, line)
        return imgs

    def _load_record(record_pth):
        # Best-effort load of a json side-car record; missing/unreadable -> {}.
        try:
            if encryptor.encrypt:
                return encryptor.load_from_file(record_pth)
            with open(record_pth, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            return {}

    def _save_record(record, record_pth):
        # Persist a json side-car record, honoring the global encryption flag.
        if encryptor.encrypt:
            encryptor.save_to_file(record, record_pth)
        else:
            with open(record_pth, 'w', encoding='utf-8') as f:
                json.dump(record, f, ensure_ascii=False, indent=4)

    # --- read .md lines -----------------------------------------------------
    with open(file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    total_len = len(lines)

    # initialize-- open the local directories for images and tables
    img_record_pth = os.path.join(kb_dir, 'image_record.json')
    img_record = _load_record(img_record_pth)
    tb_record_pth = os.path.join(kb_dir, 'table_record.json')
    tb_record = _load_record(tb_record_pth)

    # initialize-- parsing state
    df_list = []
    path_stack = []
    paths = []
    error_line_numbers = []  # lines whose md table rows needed repairing

    table_lines = []
    current_pg_num = 0
    base_level = None  # level of the shallowest heading seen so far
    content = ''
    # fix: give inner_key/path defaults so content appearing before the first
    # heading no longer raises NameError inside update_df_list
    inner_key = ''
    path = ''

    # proceed parsing
    for i, line in enumerate(lines):
        print('\tparsing progress {}'.format(np.round(i/total_len, 4)))
        line = line.strip()

        if '<!--' in line and '-->' in line:  # html comment line
            if 'page' in line and 'number' in line:
                current_pg_num += 1
            line = ''  # this can be done in a more flexible manner using LLM, e.g., extract note information

        current_heading, current_heading_level = eval_md_headings(line)
        if current_heading_level is not None:  # indicate a new path should be evaluated or added
            if not content.strip() == '':  # record contents of the last path and reset content
                df_list, content, _ = update_df_list(df_list, content, inner_key, path, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, local_summary)

            # update path based on path name and level
            inner_key = current_heading
            if base_level is None or current_heading_level < base_level:
                base_level = current_heading_level

            adjusted_level = current_heading_level - base_level + 1  # the top level always starts from 1
            while len(path_stack) >= adjusted_level:
                path_stack.pop()
            path_stack.append(current_heading)
            path = '-->'.join(path_stack)
            paths.append(path)

        else:  # no path change, remain in the same hierarchy
            # a. handle lines containing images
            imgs = is_image_md(line)
            if len(imgs) > 0:
                for img_line in imgs:
                    # generate image summary and id
                    image_summary = image_summary_url(img_line, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, local_summary)
                    # ****UNDERDEVELOPMENT**** save images to the file system (based on this output path)
                    image_output_path = os.path.join(kb_dir, f'{image_summary}')
                    img_id = 'IMAGE_' + gen_str_codes((img_line + image_summary)) + '_IMAGE'

                    # update the local image dictionary
                    img_record.update({img_id: image_summary})  # in md file, the url often contains the .format
                    _save_record(img_record, img_record_pth)
                    # generate content used in KB_PTXT
                    content = content + '\n' + img_id + '\n'

            # b. handle lines containing tables
            tb_bool, form, _ = identify_tables(line)
            if tb_bool:  # now first support general .md table, HTML-based table should be included
                table_lines.append(line)

                # fix: guard the lookahead so the last file line cannot raise
                # IndexError (the original indexed lines[i+1] before checking
                # the end-of-file condition)
                tb_bool_next = False
                if i + 1 < total_len:
                    tb_bool_next, _, _ = identify_tables(lines[i + 1].strip())
                if not tb_bool_next:  # the table block ends on this line
                    tb_df = None
                    tb_str = ''
                    if form == 'md':
                        cleaned_table_lines, error_lines = clean_md_table_lines(table_lines, start_line_num=i)
                        tb_str = '\n'.join(cleaned_table_lines)
                        error_line_numbers.extend(error_lines)
                        try:
                            tb_df = extract_tables_by_forms(tb_str, 'md')
                        except Exception as e:
                            print('parsing table fails because ', e, ' file line ', i)

                    elif form == 'html':
                        pass  # ****UNDER DEVELOPMENT****

                    # fix: only record the table when extraction succeeded; the
                    # original dereferenced tb_str/tb_df even when they were
                    # unbound (html form, or a failed md extraction)
                    if tb_df is not None:
                        table_id = 'TABLE_' + gen_str_codes(tb_str) + '_TABLE'
                        tb_summary, _ = tb_summarize(tb_df, inner_key, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, local_summary)

                        tb_path = os.path.join(kb_dir, tb_summary + '.csv')
                        if encryptor.encrypt:
                            encryptor.save_to_file(tb_df, tb_path)
                        else:
                            tb_df.to_csv(tb_path, encoding='utf-8', index=False)

                        # update global table directory for reusing
                        tb_record.update({table_id: tb_summary + '.csv'})
                        _save_record(tb_record, tb_record_pth)

                        content = content + '\n' + table_id + '\n'
                    table_lines = []  # reset table_lines after the table block ends

            # c. handle plain texts
            if len(imgs) == 0 and not tb_bool:
                content = content + '\n' + line.strip() + '\n'

    if not content.strip() == '':  # handle the remaining contents, append them to the last section
        df_list, content, _ = update_df_list(df_list, content, inner_key, path, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, local_summary)

    # fix: pd.concat raises on an empty list, so fall back to an empty frame
    if df_list:
        doc_df = pd.concat(df_list, ignore_index=True)
    else:
        doc_df = pd.DataFrame(columns=know_df_cols)
    doc_df.to_csv(os.path.join(kb_dir, 'KB_PTXT.csv'), encoding='utf-8')
    doc_graph, _ = restore_graph_by_paths(paths, split_char, '__摘要总结__')
    _save_record(doc_graph, os.path.join(kb_dir, 'graph.json'))
    return doc_graph


if __name__ =="__main__":
    
    # test_text = '''
    #     ### 图1.4.2-9 混凝土灌注示意图
        
    #     #### B. 开阀打开漏斗阀门，放下封底砼。首批砼灌入孔底后，立即探测孔内砼面高度，计算出导管内埋置深度。如符合要求，即可正常灌注。
        
    #     #### C. 水下混凝土浇灌桩基混凝土采用罐车运输泵送配合导管灌注。灌注开始后，紧凑连续地进行，严禁中途停顿。
        
    #     #### D. 混凝土灌注操作要点：
        
    #     1. **防止拌和物溢出**：在灌注过程中，防止混凝土拌和物从漏斗顶溢出或从漏斗外掉入孔底，以确保测探的准确性。      
    #     2. **观察管内情况**：注意观察管内混凝土下降和孔内水位升降情况，及时测量孔内混凝土面高度，正确指挥导管的提升和拆除。      
    #     3. **埋置深度控制**：导管的埋置深度控制在2～4m，经常测探孔内混凝土面的位置，及时调整导管埋深。     
    #     4. **提升导管**：保持轴线竖直和位置居中，逐步提升。如出现导管法兰卡挂钢筋骨架，转动导管使其脱开后，再移到钻孔中心。拆除导管动作要快，时间控制在15min内，同时防止螺栓、橡胶垫和工具等掉入孔中。       
    #     5. **清洗导管**：已拆下的管节立即用清水清洗干净，堆放整齐。导管循环使用4～8次后，重新进行水密性试验。      
    #     6. **防止高压气囊形成**：灌注过程中，当导管内混凝土不满，含有空气时，后续混凝土要徐徐灌入，不可整斗地灌入漏斗和导管，以免在导管内形成高压气囊，挤出管节间的橡皮垫，而使导管漏水。
    #     7. **防止钢筋骨架被顶托上升**：
    #        - 尽量缩短混凝土总的灌注时间，防止顶层混凝土进入钢筋骨架时混凝土的流动性过小。
    #        - 当混凝土面接近和初进入钢筋骨架时，使导管底口处于钢筋笼底口3m以下和1m以上处，并慢慢灌注混凝土，以减小混凝土从
    # '''
    # lines, font_dics = convert_markdwon2docx(test_text)
    
    
    KB_PATH = '..' + os.sep + '知识固化库_DEMO\默认目录'
    # file_path = r'D:\Local codes\知识固化库_DEMO\Supplementary Files\Biodesign 医疗器械创业指南-661-856.json'
    # file_path = r'C:\Users\chengke\Desktop\testdir\三农问答.md'
    file_path = r'C:\Users\DELL\Desktop\testdir\三农问答.md'
    
    # doc_df = parse_json(file_path, KB_PATH)
    parse_md(file_path, KB_PATH)
            
    print()
    
    
    