import re
import pandas as pd
import os
from bs4 import BeautifulSoup

from knowledge_generator import process_full_contents
from utlis import remove_spaces, use_llm_api



def clean_texts_by_form(text, form='html'):
    """Strip markup from *text* and return whitespace-normalized plain text.

    Only HTML is handled at the moment (via BeautifulSoup); the ``form``
    argument is accepted for future formats but is not yet consulted.
    """
    # Parse as HTML and flatten the document to space-separated text.
    parsed = BeautifulSoup(text, "html.parser")
    cleaned = parsed.get_text(separator=" ", strip=True)

    # TODO: handle other input formats; optionally use an LLM to remove
    # remaining unexpected characters (under development).
    return cleaned


def parse_text_knowlege(text=None, source_txt_path=None, know_title='UNK', call_llm=None, llm_histories=None, summary_threshold=50):
    """Parse raw text (or a UTF-8 text file) into structured knowledge.

    If ``source_txt_path`` is given, the file is read and ALL whitespace is
    stripped from every line before the lines are concatenated; otherwise
    ``text`` is used as-is. The concatenated content is handed to
    ``process_full_contents``.

    Returns a tuple ``(json_know, [])``.

    NOTE(review): ``call_llm``, ``llm_histories`` and ``summary_threshold``
    are currently unused — kept for interface compatibility.
    """
    # Avoid the shared-mutable-default pitfall (was ``llm_histories=[]``);
    # the parameter is unused in the body, so behavior is unchanged.
    if llm_histories is None:
        llm_histories = []

    if source_txt_path is not None:
        with open(source_txt_path, encoding='utf-8') as f:
            # Remove every whitespace character from each line, then join
            # all lines into one continuous string.
            txt_lines = ''.join(re.sub(r'\s', '', line) for line in f)
    else:
        txt_lines = text

    json_know, know_id = process_full_contents(txt_lines, know_title)

    return json_know, []


def divide_long_contents(texts, max_threshold=2000, min_threshold=500):
    """Greedily pack *texts* (in order) into sublists of at most roughly
    ``max_threshold`` characters each.

    A new sublist is started whenever adding the next text would push the
    running character count past ``max_threshold``. If the final sublist
    totals fewer than ``min_threshold`` characters it is merged back into
    the previous sublist.

    :param texts: iterable of strings.
    :returns: ``(sublists, len(sublists))``; ``([], 0)`` for empty input.
    """
    sublists = []
    current_sublist = []
    current_word_count = 0

    for text in texts:
        word_count = len(text)
        # Only split when the current chunk is non-empty; otherwise a first
        # text longer than max_threshold would produce a spurious empty
        # leading sublist (bug fix).
        if current_sublist and current_word_count + word_count > max_threshold:
            sublists.append(current_sublist)
            current_sublist = [text]
            current_word_count = word_count
        else:
            current_sublist.append(text)
            current_word_count += word_count

    if current_sublist:
        sublists.append(current_sublist)

    # Guard against empty input: indexing sublists[-1] below would raise
    # IndexError (bug fix).
    if not sublists:
        return [], 0

    # Merge an undersized trailing chunk into its predecessor.
    last_count = sum(len(text) for text in sublists[-1])
    if len(sublists) > 1 and last_count < min_threshold:
        sublists[-2].extend(sublists[-1])
        sublists.pop()

    return sublists, len(sublists)


def extact_local_keywords2summary(texts, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, out_limit=50, kw_num=3, add_req=''):
    """Ask the LLM backend for keywords and a plain-text summary of *texts*.

    Two ``use_llm_api`` calls are made: one with task ``summary-keywords``
    and one with task ``summary-plain``.

    :returns: ``(keywords, summary)`` where ``keywords`` is a list of
        strings (empty on failure) and ``summary`` is a string (empty on
        failure).
    """
    response, _ = use_llm_api(call_llm,
                                        histories=llm_histories,
                                        paras={ 'task':'summary-keywords',
                                                'query':'',
                                                'texts':texts,
                                                'local_model_name':local_llm_name,
                                                'out_limit':out_limit,
                                                'kw_number':kw_num,
                                                'local_model':local_llm, 
                                                'local_tz': local_llm_tz,
                                                'use_his':False,
                                                'add_req':add_req},
                                        config=model_config)
    if response is None:
        keywords = []
    else:
        try:
            # Expected shape: {'keywords': 'k1;k2;...'} — split on both
            # ASCII and full-width semicolons.
            keywords = re.split(r'[;；]', response['keywords'])
        except (TypeError, KeyError):
            # Some backends return the keyword string directly (narrowed
            # from a bare ``except:`` so real errors still surface).
            keywords = re.split(r'[;；]', response)

    summary, _ = use_llm_api(call_llm,
                                    histories=llm_histories,
                                    paras={ 'task':'summary-plain',
                                            'query':'',
                                            'texts':texts,
                                            'local_model_name':local_llm_name,
                                            'model':'qwen-plus',
                                            'out_limit':out_limit,
                                            'local_model':local_llm, 
                                            'local_tz': local_llm_tz,
                                            'use_his':False,
                                            'add_req':add_req},
                                    config=model_config) 
    if summary is None:
        summary = ''
    return keywords, summary


def summary_sections(df, level, max_depth, records, local_summary=False, call_llm=None, llm_histories=None, local_llm_model=None, local_llm=None, local_llm_tz=None, model_config=None, split_char='-->'):
    """Create one summary row per group of sections sharing a parent path at
    depth ``level``.

    Rows whose ``path`` is at least ``level`` deep (and whose identifier is
    not already in ``records``) are grouped by their path prefix of length
    ``level - 1``. For each multi-row group a merged "summary" row is built;
    when ``local_summary`` is true, keywords and a summary are generated via
    the LLM helpers.

    :param df: DataFrame with columns ['path', 'content_lst',
        'path_identifier', 'keywords', 'local_summary'].
    :param records: list of already-summarized path identifiers; mutated in
        place and also returned.
    :returns: ``(summary_df, records)`` — new rows with the same columns as
        the filtered input.

    NOTE(review): ``max_depth`` and ``llm_histories`` are accepted but the
    visible body only forwards ``llm_histories`` to the keyword/summary
    helper; ``max_depth`` is unused here.
    """
    # Keep only rows deep enough to have a parent at this level, excluding
    # ones already summarized.
    filter_df = df[df['path'].apply(len) >= level]
    filter_df = filter_df[~filter_df['path_identifier'].isin(records)]
    # Group by the parent prefix (one level higher). df['path'] aligns to
    # filter_df's index positions via pandas index alignment.
    grouped = filter_df.groupby(df['path'].apply(lambda x: tuple(x[:level-1])), sort=False) # group by one level higher
    
    summary_data = []
    for parent, group in grouped:
        parent = list(parent)
        current_bottom_paths = group['path_identifier']
        
        # Only summarize groups with more than one child and a non-root parent.
        if len(group)>1 and len(parent)>0:
            # Wrap the parent's last segment with summary markers (the
            # literal marker strings are part of the data format — do not change).
            parent[-1] = "__摘要总结__" + parent[-1] + '__包括__'
            merged_path = parent
            path_identifier = split_char.join(merged_path)
            
            if not path_identifier in records:
                print('\tsummary path at ' + path_identifier)
                records.append(path_identifier)
                
                # Child headings at this level, with any summary markers
                # stripped; de-duplicated while preserving order.
                temp_merge_content = [t.removeprefix('__摘要总结__').removesuffix('__包括__') for t in group['path'].apply(lambda x: x[level-1])]
                merged_content = '-**-'.join(list(dict.fromkeys(temp_merge_content)))
                
                if local_summary:
                    # Feed "heading:child-summary" pairs to the LLM to get
                    # merged keywords and a merged summary for the parent.
                    pre_summaries = '\n'.join([f"{x}:{y}" for x, y in zip(temp_merge_content, group['local_summary'].tolist())])
                    merged_keywords, merged_local_summary = extact_local_keywords2summary(pre_summaries, call_llm, llm_histories, local_llm_model, local_llm, local_llm_tz, model_config)
                    summary_data.append((merged_path, [merged_content], path_identifier, merged_keywords, merged_local_summary))
                else:
                    summary_data.append((merged_path, [merged_content], path_identifier, [], '')) # May need modification
        
    # Tuple order must match filter_df's column order.
    summary_df = pd.DataFrame(summary_data, columns=filter_df.columns)
    return summary_df, records

    
def postprocess_leaf_dics(dict_list, model_config, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, local_summary, merge_key='heading', content_key='content', split_char='-->'):    
    '''
        Post-process leaf-level (identifier, section-dict) pairs into a flat
        list enriched with keywords and hierarchical summaries.

        Pipeline:
          1. merge bottom-level contents sharing the same path identifier;
          2. divide over-long content lists into "第N部分" sub-sections;
          3. generate local keywords/summaries for bottom-level sections
             (only when local_summary is true and content is long enough);
          4. generate summary rows for each hierarchy level, deepest first.

        :param dict_list: iterable of (path_list, {merge_key: ..., content_key: [...]}).
        :returns: list of (path_identifier, {'heading', 'content'}, keywords,
            local_summary) tuples.
    '''
    # Step 1: merge bottom-level contents with the same key (heading).
    merged_dict = {} 
    for identifier, d in dict_list:
        identifier = split_char.join(identifier)
            
        if identifier in merged_dict:
            merged_dict[identifier][content_key].extend(d[content_key])
        else:
            # Copy the content list so later extend() calls don't mutate the input.
            merged_dict[identifier] = {merge_key: d[merge_key], content_key: list(d[content_key])}
    
    # NOTE(review): uses the literal key 'content' here rather than
    # content_key — works only for the default value; confirm intent.
    merged_list = [(identifier, v['content']) for identifier, v in merged_dict.items()]
    merge_df = pd.DataFrame(merged_list, columns=['path_identifier', 'content_lst'])
    merge_df['path'] = merge_df['path_identifier'].apply(lambda x:x.split(split_char))
    merge_df = merge_df[['path', 'content_lst', 'path_identifier']]    

    '''
        :function 2 dividing long contents
    '''
    df_with_divides = pd.DataFrame(columns=['path', 'content_lst', 'path_identifier'])
    for i, row in merge_df.iterrows():
        if len(row['path'])==0:
            continue

        local_contents = row['content_lst']
        if len(local_contents)>0:
            sublists, num = divide_long_contents(local_contents)
        else:
            num = 0
            
        if num<=1:
            # Short enough: keep the row as-is.
            df_with_divides.loc[len(df_with_divides)] = row
        else:
            # Too long: emit one child row per chunk, suffixed "第k部分".
            head = row['path_identifier']
            if not head:
                head = '序言'
            for k in range(num):
                sub_head = head + '-->' + head.split('-->')[-1] + " 第" + str(k+1) + "部分"
                df_with_divides.loc[len(df_with_divides)] = {'path':sub_head.split(split_char), 'content_lst':sublists[k], 'path_identifier':sub_head}
    
    '''
        :function 3 generate local labels (summary) for bottom-level sections
    '''
    df_with_labels = pd.DataFrame(columns=['path', 'content_lst', 'path_identifier', 'keywords', 'local_summary'])
    # Strip embedded table/image placeholders before summarizing.
    pattern = re.compile(r'(TABLE_.*?_TABLE|IMAGE_.*?_IMAGE)')
    for i, row in df_with_divides.iterrows():
        contents_ = '。'.join(row['content_lst'])
        contents_ = re.sub(pattern, '', contents_)
        
        keywords = []
        summary = ''
        # Only summarize sections with more than 200 characters of real text.
        if len(contents_)>200 and local_summary:
            keywords, summary = extact_local_keywords2summary(contents_, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config)
        df_with_labels.loc[len(df_with_labels)] = {'path': row['path'], 
                                                   'content_lst': row['content_lst'], 
                                                   'path_identifier': row['path_identifier'],
                                                   'keywords': keywords,
                                                   'local_summary': summary}
          
    '''
        :function 3 generate summaries at different hierarchical levels
    '''
    def path_depth(path):
        # Depth of a section = number of path segments.
        return len(path)
    
    max_depth = df_with_labels['path'].apply(path_depth).max()
    final_df = df_with_labels.copy()
    records = []
    # Walk from the deepest level up so child summaries exist before
    # their parents are summarized.
    for level in range(max_depth, 0, -1):
        summary_df, records  = summary_sections(final_df, level, max_depth, records, local_summary, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config)
        final_df = pd.concat([final_df, summary_df]).reset_index(drop=True)
        
    # Flatten back to (identifier, section-dict, keywords, summary) tuples.
    merged_list_with_summary = []
    for i, row in final_df.iterrows():
        merged_list_with_summary.append((row['path_identifier'], {'heading': row['path'][-1].strip(), 'content':row['content_lst']}, row['keywords'], row['local_summary']))
    return merged_list_with_summary



if __name__ == "__main__":
    # Manual smoke test for parse_text_knowlege against the local knowledge base.
    import os
    from META import KB_PATH, USER_SETTINGS, model_config, llm_apis
    
    test_text = '杨之乐是副研究员，博士，女王大学毕业。' #'杨之乐是副研究员，博士，女王大学毕业。'  <--杨之乐 -->
    # NOTE(review): KB_PATH is passed as the `text` positional argument but is
    # ignored since source_txt_path is supplied; also parse_text_knowlege
    # returns a (json_know, []) tuple, so `json_know` here is that tuple — confirm.
    json_know = parse_text_knowlege(KB_PATH, source_txt_path=os.path.join(KB_PATH, 'Supplementary Files/Text_knowledge.txt'), call_llm=llm_apis['qwen_api'])
    
    print(json_know)
        
        
        
        
        
        
        
        
        



