import json
import os
import sys
import re
import pandas as pd

from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
from transformers import BertModel, BertTokenizer

sys.path.append(os.path.abspath('../Layout Parser/'))
# from application.model import train_model, load_model, save_model, predict
from utlis import use_llm_api


# class TitleDetectionModel(nn.Module):
#     def __init__(self, encoder_path):
#         super(TitleDetectionModel, self).__init__()
#         self.bert = BertModel.from_pretrained(encoder_path)
#         self.classifier = nn.Linear(768, 1)
#         self.level_classifier = nn.Linear(768, 7)
#
#     def forward(self, input_ids, attention_mask):
#         outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
#         cls_output = outputs.last_hidden_state[:, 0, :]
#         title_output = self.classifier(cls_output).squeeze(-1)
#         level_output = self.level_classifier(cls_output)
#         return title_output, level_output
#
#
# class TitleDetectionDataset(Dataset):
#     def __init__(self, dataframe, encoder_path):
#         self.data = dataframe
#         self.tokenizer = BertTokenizer.from_pretrained(encoder_path)
#
#     def __len__(self):
#         return len(self.data)
#
#     def __getitem__(self, idx):
#         row = self.data.iloc[idx]
#         previous_sentence = row['previous_sentence']
#         current_sentence = row['current_sentence']
#         next_sentence = row['next_sentence']
#
#         inputs = self.tokenizer(previous_sentence, current_sentence, next_sentence,
#                                 return_tensors='pt', max_length=512, padding='max_length', truncation=True)
#         title_label = torch.tensor(row['binary_level']).float()
#         level_label = torch.tensor(row['level']).long()
#
#         if title_label.item() == 0:
#             level_label = torch.tensor(-1).long()
#
#         return inputs, title_label, level_label
#
#
# def train_title_model(model, model_path):
#     file = request.files['file']
#     file_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
#     file.save(file_path)
#
#     if os.path.exists(model_path):
#         model = load_model(model, model_path)
#
#     model = train_model(file_path, model)
#     save_model(model, model_path)
#     return jsonify({"message": "Model trained and saved successfully"})


def divide_titles(input_titles, chunk_size=30, overlap_size=0):
    """Split ``input_titles`` into consecutive chunks of at most ``chunk_size``.

    Adjacent chunks share ``overlap_size`` items (the tail of one chunk is
    repeated at the head of the next), which lets downstream LLM calls keep
    context between batches.

    Args:
        input_titles: sequence of title strings (or any sliceable sequence).
        chunk_size: maximum number of items per chunk; must be positive.
        overlap_size: number of items repeated between consecutive chunks;
            must be smaller than ``chunk_size`` so the window advances.

    Returns:
        List of chunks (lists) covering the whole input; ``[]`` for empty input.

    Raises:
        ValueError: if ``chunk_size <= 0`` or ``overlap_size >= chunk_size``,
            either of which would previously make the loop spin forever.
    """
    if chunk_size <= 0 or overlap_size >= chunk_size:
        raise ValueError("require chunk_size > 0 and overlap_size < chunk_size")

    sub_titles = []
    start = 0

    while start < len(input_titles):
        end = start + chunk_size
        sub_titles.append(input_titles[start:end])
        if end >= len(input_titles):
            # Last chunk reached; stepping back by the overlap here would emit
            # a redundant tail chunk that is a pure suffix of this one.
            break
        start = end - overlap_size

    return sub_titles


def retrieve_examples(data, retrieve_count=20):
    """Return up to ``retrieve_count`` consecutive entries of ``data``,
    starting at the first key whose value equals the minimum value.

    Relies on dict insertion order; raises ValueError on an empty dict
    (from ``min``), matching the previous behavior.
    """
    pairs = list(data.items())
    floor = min(data.values())

    # Scan in insertion order for the first entry holding the minimum value,
    # then slice a window of entries from that position onward.
    for pos, (_, value) in enumerate(pairs):
        if value == floor:
            return dict(pairs[pos:pos + retrieve_count])


def predict_titles(model, tokenizer, previous_sentence, current_sentence, next_sentence):
    # Thin wrapper delegating to the model-level `predict` helper, returning
    # the (is-title, title-level) pair for the current sentence given its
    # neighbors.
    # NOTE(review): `predict` is not defined in this module — its import
    # (`from application.model import ... predict`, L12) is commented out,
    # so calling this function currently raises NameError. Confirm whether
    # this wrapper is still used or should be removed/re-wired.
    title_prediction, level_prediction = predict(model, tokenizer, previous_sentence, current_sentence, next_sentence)
    return title_prediction, level_prediction


def pred_titles_binary(record_texts, call_llm=None, local_llm_name=None, local_llm=None, local_llm_tz=None, model_config=None, doc_type=None):
    """Rule-based binary title detection over a list of text lines.

    Each line is scored against positive regex features (Arabic/Chinese
    numbering, lettered items, "第N" headings, markdown headers). Candidate
    titles are then filtered by negative regexes (URLs, long digit runs,
    "a.m./p.m." times) and a duplicate-pattern heuristic: two candidates with
    the identical feature vector and no plain-text line between them are both
    dropped.

    Args:
        record_texts: iterable of raw text lines.
        call_llm, local_llm_name, local_llm, local_llm_tz, model_config:
            reserved for the LLM-based filtering stage, which is currently
            disabled; kept for interface compatibility.
        doc_type: document-type tag; types in ``docs_no_length_conditions``
            are meant to skip the (currently disabled) length heuristic.

    Returns:
        pandas.DataFrame with columns ['line', 'feature', 'lang', 'title'];
        one row per retained input line, 'title' holding the final verdict.
    """
    # doc types exempt from the length heuristic (to be enriched)
    docs_no_length_conditions = ['EXAM']

    # 1. positive title features
    regex_num_en = r"^\d+(?:\s*[\.、]\s*\d+)*\s*(?=\s|[\.、]|$)"  # Arabic numbering, incl. dotted forms like "1.2"
    regex_num_cn = r"^[一二三四五六七八九十]+(?:\s*[、；，\.,]\s*[一二三四五六七八九十]+)*\s*"  # Chinese numerals; maybe too flexible
    regex_letter = r"^[A-Za-z][\.\)\）、,、>][\s]?.*"  # letter + delimiter, avoids matching sentences like 'a good day...'
    regex_num_special = r"^第([一二三四五六七八九十百千万\d]+)"  # "第N..." style headings
    pos_regex_conditions = [regex_num_en, regex_num_cn, regex_letter, regex_num_special]

    # 2. negative title features
    neg_condition_num = r"^\d{3,}"  # long leading digit runs are rarely headings
    neg_condition_specific = r"(?i)(https?://\S+|www\.\S+|P\.S|(?:\b\d{0,2}\s*(?:a\.m|p\.m)\b))"
    neg_conditions = [neg_condition_num, neg_condition_specific]

    # 3. document-specific features (markdown headers / bold-prefixed lines)
    regex_md = r'^(#{1,6}|\*\*)\s*.*$'

    # 4. CJK character class, compiled once instead of once per line
    cn_pattern = re.compile(r'[\u4e00-\u9fff\u3000-\u303f\uff00-\uffef]')

    # 5. drop obvious non-text lines (html-like tags and markdown images)
    texts_processed = []
    for txt in record_texts:
        if txt.startswith('<') and txt.endswith('>'):
            continue
        if txt.startswith('!['):
            continue
        texts_processed.append(txt)

    # 6. per-line evaluation against positive features
    all_condi_res = pd.DataFrame(columns=['line', 'feature', 'lang', 'title'])
    for txt in texts_processed:
        txt = txt.strip()

        # 6.1 regular-expression conditions
        condi_res = [bool(re.match(regex, txt)) for regex in pos_regex_conditions]

        # 6.2 document type-specific condition (markdown header)
        condi_res.append(bool(re.match(regex_md, txt)))

        # 6.3 language detection via CJK-character ratio (whitespace removed);
        # guard: an empty/whitespace-only line would otherwise divide by zero
        compact = re.sub(r'\s+', '', txt)
        cn_chars = cn_pattern.findall(compact)
        cn_ratio = len(cn_chars) / len(compact) if compact else 0.0
        judge_lang = '中文' if cn_ratio >= 0.5 else '非中文'

        # NOTE: a length heuristic (short lines treated as titles, skipped for
        # doc_type in docs_no_length_conditions) is currently disabled.

        # 6.4 initial verdict: any positive feature marks a title candidate
        all_condi_res.loc[len(all_condi_res)] = [txt, condi_res, judge_lang, any(condi_res)]

    # 7. filter initially selected titles
    for i, row in all_condi_res.iterrows():
        if not row['title']:
            continue

        # handle .md lines by first removing the leading '#' markers
        line = re.sub(r"^#+\s*", "", row['line'])

        # 7.1 negative regex conditions
        condi_neg_res = [bool(re.search(regex, line)) for regex in neg_conditions]

        # 7.2 same-pattern condition: if two candidates share the exact same
        # feature vector with no plain text in between, both are removed.
        # (Series.isin cannot hash list-valued cells — TypeError: unhashable
        # type 'list' — so compare element-wise instead.)
        row_feature = row['feature']
        same_condi_res = all_condi_res[all_condi_res['feature'].apply(lambda f: f == row_feature)]
        same_condi_res = same_condi_res[same_condi_res.index > i]

        if len(same_condi_res) > 0:
            nearest_idx = same_condi_res.index[0]

            # lines strictly between the two candidates; all() over an empty
            # slice is True, so adjacent duplicates are removed as well
            val_between = all_condi_res.iloc[i + 1:nearest_idx]['title'].values
            if all(val_between):
                condi_neg_res.append(True)
                all_condi_res.loc[nearest_idx, 'title'] = False

        # 7.3 LLM-based filtering (reference-line detection and sentence
        # completeness checks) is currently disabled; see version history for
        # the prompt parameters ('judge-refs' / 'judge-complete').

        # 7.4 final verdict
        if any(condi_neg_res):
            all_condi_res.loc[i, 'title'] = False

    return all_condi_res


def pred_titles_level(titles, llm_histories, model_config, llm_apis):
    """Infer hierarchy levels for detected titles via an LLM, chunk by chunk.

    ``titles`` is split into chunks of 30 (see ``divide_titles``); each chunk
    is sent to the LLM together with the tail of the previously parsed
    structure so the model can continue the hierarchy consistently.

    Returns:
        List with one parsed title structure (dict) per chunk.
    """
    titles_list = divide_titles(titles)
    all_his_titles = []
    
    for j, current_titles in enumerate(titles_list):
        if len(all_his_titles)>0:
            # Use the last parsed chunk as a few-shot example: take its final
            # top-level entry and flatten it back into plain title lines that
            # are prepended to the next request as context.
            # NOTE(review): assumes each LLM result is a dict with a 'ROOT'
            # key, and relies on `flatten_dic_dfs`, which is not defined in
            # this module — confirm it is provided by an import elsewhere.
            example = all_his_titles[-1]
            last_key, last_dic = list(example['ROOT'].items())[-1]
            his_titles = [last_key] + flatten_dic_dfs(last_dic)
            
            his_ref_text = '\n'.join(his_titles).strip()
            example = json.dumps(example, ensure_ascii=False, indent=4)
        else:
            example = ''
            his_ref_text= ''
        
        # prepend the historical tail so the LLM sees the preceding context
        title_text = his_ref_text + '\n' + '\n'.join(current_titles)        
        title_res = pred_titles_LLM(title_text, example, llm_histories, model_config, llm_apis)
        all_his_titles.append(title_res)
    return all_his_titles


def pred_titles_LLM(title_text, example, llm_histories, config, llm_apis, api_name='gpt_api'):
    """Send one batch of title lines to the LLM and return the parsed structure.

    ``llm_histories`` is forwarded to the API helper; the updated history it
    returns is discarded here (the request runs with use_his=False).
    """
    paras = {
        'task': 'parse-titles',
        'query': '',
        'texts': title_text,
        'example': example,
        'use_his': False,
    }
    title_structure, _ = use_llm_api(llm_apis[api_name],
                                     histories=llm_histories,
                                     paras=paras,
                                     config=config)
    return title_structure


def merge_title_levels(dicts_list):
    """Merge per-chunk title structures into a single ordered dict.

    Consecutive chunks overlap by one entry: the last key of the accumulated
    dict re-appears (possibly re-worded) as the first key of the next chunk.
    The tail entry is therefore renamed to the next chunk's first key before
    the merge; its value is then overwritten by the incoming chunk's value
    via ``update``, so the overlap appears exactly once.

    Args:
        dicts_list: list of title-structure dicts in document order.

    Returns:
        The merged dict; ``{}`` for an empty input (previously IndexError).
    """
    if not dicts_list:
        return {}

    merged_dict = dicts_list[0].copy()

    for current_dict in dicts_list[1:]:
        last_key = list(merged_dict.keys())[-1]
        first_key = list(current_dict.keys())[0]
        # Rename the overlapping tail entry; update() below replaces its
        # value with the incoming chunk's version and appends the rest.
        merged_dict[first_key] = merged_dict.pop(last_key)
        merged_dict.update(current_dict)

    return merged_dict
    

def parse_outline_hier(markdown_text):
    """Convert an indented Markdown bullet list into a nested outline.

    Every bullet ("- ", "* ", "+ ") becomes a node
    ``{"chapter": title, "children": [...], "serial": 1-based position among
    its siblings}``. Two spaces of indentation equal one nesting level
    (adjust if the source uses a different indent width). Lines that are
    blank or not bullets are skipped. Suited to the one-off strategy.
    """
    root = []
    stack = []  # (level, node) pairs along the branch currently being built

    for raw_line in markdown_text.strip().splitlines():
        # strip stray 'markdown' tokens that models sometimes emit in fences
        raw_line = raw_line.replace('markdown', '')
        stripped = raw_line.lstrip()
        if not stripped:
            continue

        bullet = re.match(r"[-*+] (.+)", stripped)
        if bullet is None:
            continue

        depth = (len(raw_line) - len(stripped)) // 2  # two spaces per level
        node = {"chapter": bullet.group(1).strip(), "children": [], 'serial': 1}

        if depth == 0:
            # top-level entry: restart the branch stack from here
            node['serial'] = len(root) + 1
            root.append(node)
            stack = [(depth, node)]
            continue

        # unwind to the nearest shallower ancestor, then attach
        while stack and stack[-1][0] >= depth:
            stack.pop()
        if stack:
            parent = stack[-1][1]
            node['serial'] = len(parent['children']) + 1
            parent["children"].append(node)
        stack.append((depth, node))

    return root


def outline_to_markdown(nodes, level=1, keep_list=False):
    """Render a nested outline structure as Markdown headings.

    Args:
        nodes: list of ``{"chapter": ..., "children": [...]}`` dicts.
        level: heading level for this depth (1 -> "# ").
        keep_list: when True, return the collected lines as a list instead
            of a joined string. NOTE: child subtrees are appended pre-joined
            (one multi-line string per subtree), matching prior behavior.

    Returns:
        str by default; list of str when ``keep_list`` is True.
    """
    rendered = []
    hashes = "#" * level

    for entry in nodes:
        rendered.append(f"{hashes} {entry['chapter']}")
        children = entry.get("children")
        if children:
            # recurse one level deeper; the subtree comes back joined
            rendered.append(outline_to_markdown(children, level + 1))

    return rendered if keep_list else "\n".join(rendered)


if __name__ == "__main__":
    # Smoke test: run rule-based title detection over a local text file and
    # dump the per-line results to CSV for manual inspection.

    # import re
    #
    # line ='一、二测试'
    # regex_letter = r"^[A-Za-z][\)\）\.,、\s]+"
    # regex_num = r"^\d+(?:\s*[\.,]\s*\d+)*\s*"
    #
    # res = re.match(regex_num_cn, line)
    # print(res)
    
    from META import USER_SETTINGS, llm_apis, local_llm, local_llm_tz, model_config
    # NOTE(review): hard-coded developer path; 'testdir' reads like a
    # directory name but is opened as a file below — confirm the target.
    path_ = r'C:\Users\DELL\Desktop\testdir'
    doc_type = ''
    
    # utf-8-sig tolerates a BOM at the start of the file
    with open(path_, 'r', encoding='utf-8-sig') as f:
        contents = f.readlines()
    
    # drop blank lines before prediction
    contents = [c.strip() for c in contents if not c.strip()=='']
    all_condi_res = pred_titles_binary(contents, llm_apis['local_api'], USER_SETTINGS['LOCAL_LLM_NAME'], local_llm, local_llm_tz, model_config)
    
    # persist the per-line verdicts next to the working directory
    all_condi_res.to_csv(os.path.join(os.getcwd(), 'temp_title_res.csv'), encoding='utf-8')
    
    # def pred_outlines(docx_path, title_model, title_tokenizer, llm_histories=None, model_config=None, llm_apis=None, cache_path=None, rule_cut=35, auto_recog=False):
    #     doc = Document(docx_path)
    #     doc_name = docx_path.split(os.sep)[-1]
    #
    #     record_texts = []
    #     for block_tuple in iter_block_items(doc):
    #         block = block_tuple[0]
    #         label = block_tuple[1]
    #
    #         if isinstance(block, Paragraph):
    #             if label=='IMAGE':
    #                 text = '这是一张图，不是标题'
    #             else:
    #                 text = remove_spaces(block.text.strip())
    #         elif isinstance(block, Table):
    #             text = '这是一张表格，不是标题'
    #         record_texts.append(text)
    #
    #     record_texts = [t.strip() for t in record_texts if not t.strip()=='']
    #     pred_titles = []
    #     if auto_recog:
    #         try:
    #             if encryptor.encrypt:
    #                 cache_data = encryptor.load_from_file(cache_path)
    #             else:
    #                 with open(cache_path, 'r', encoding='utf-8') as f:
    #                     cache_data = json.load(f)
    #         except:
    #             cache_data = {}
    #
    #         raw_title_structures = cache_data.get(doc_name, None)
    #         if raw_title_structures==None:
    #             pred_bi_titles = pred_titles_binary(record_texts, title_model, title_tokenizer, rule_cut)
    #             pred_bi_titles = remove_duplicates_orderkept(pred_bi_titles)
    #             raw_title_structures = pred_titles_level(pred_bi_titles, llm_histories, model_config, llm_apis)
    #
    #             cache_data[doc_name] = raw_title_structures
    #             if encryptor.encrypt:
    #                 encryptor.save_to_file(cache_data, cache_path)
    #             else:
    #                 with open(cache_path, 'w', encoding='utf-8') as f:
    #                     json.dump(cache_data, f, ensure_ascii=False, indent=4)
    #
    #         raw_title_structures = [d['ROOT'] for d in raw_title_structures]
    #         all_title_structure = merge_title_levels(raw_title_structures)
    #         pred_titles = get_key_levels(all_title_structure)
    #
    #     return pred_titles
    
        
        
        
        
        



