import os
import re
import json
import pandas as pd
import numpy as np
import datetime
from bs4 import BeautifulSoup

from collections import OrderedDict
from txt_parser import extact_local_keywords2summary
from utlis import gen_str_codes, flatten_dic2paths, remove_spaces, restore_graph_by_paths
from knowledge_generator import process_full_contents
from file_encryptor import encryptor
import threading
# Lock intended to serialize temp-file based table I/O; within this file it is
# only referenced from the commented-out block in parse_headers, so no active
# code path acquires it at present.
g_tbl_lock = threading.Lock()



def html_table_to_dataframe(html_text):
    """Parse the first <table> element in *html_text* into a DataFrame.

    Header cells (<th>) anywhere in the table become the column names; each
    <tr> that contains <td> cells becomes one data row (header-only rows hold
    <th> cells and are therefore skipped by the `if cells` guard).

    :param html_text: HTML markup expected to contain a <table> element
    :returns: pandas DataFrame of the table body
    :raises ValueError: when no <table> element is present (previously this
        crashed with an opaque AttributeError on ``None.find_all``)
    """
    soup = BeautifulSoup(html_text, 'html.parser')
    table = soup.find('table')
    if table is None:
        raise ValueError('no <table> element found in the given HTML text')

    headers = [th.get_text(strip=True) for th in table.find_all('th')]
    rows = []
    for tr in table.find_all('tr'):
        cells = [td.get_text(strip=True) for td in tr.find_all('td')]
        if cells:
            rows.append(cells)
    return pd.DataFrame(rows, columns=headers if headers else None)


def identify_tables(line):
    """Detect whether *line* contains a table and report its format.

    Returns a 3-tuple ``(found, form, tables)``:
      * HTML table(s) present  -> ``(True, 'html', [<table>...</table> snippets])``
      * markdown pipe row      -> ``(True, 'md', [])``
      * neither                -> ``(False, None, None)``
    """
    html_matches = re.findall(r'<table.*?>.*?</table>', line, re.DOTALL)
    if html_matches:
        return True, 'html', html_matches

    # A markdown table row is delimited by pipes at both ends.
    if line.startswith('|') and line.endswith('|'):
        return True, 'md', []

    return False, None, None
    # UNDER DEVELOPMENT other forms of tables...


def extract_tables_by_forms(tb_txt, form):
    if form=='html':
        tb_df = html_table_to_dataframe(tb_txt)
    elif form=='md':
        tb_df = pd.read_table(pd.io.common.StringIO(tb_txt), sep='|', engine='python', on_bad_lines='skip')
        tb_df = tb_df.drop(columns=tb_df.columns[0])  # Drop extra leading column
        tb_df = tb_df.drop(columns=tb_df.columns[-1]) # Drop extra trailing column
        tb_df.columns = tb_df.columns.str.strip()  # Clean up headers
        tb_df = tb_df.applymap(lambda x: x.strip() if isinstance(x, str) else x)
    else:
        tb_df = None # UNDER DEVELOPMENT other forms of tables...
    return tb_df


def tb_summarize(tb_df, current_heading, call_llm=None, llm_histories=None, local_llm_name=None, local_llm=None, local_llm_tz=None, model_config=None, local_summary=False, max_len=30):
    """Produce a short summary string and keyword list for a table.

    When ``local_summary`` is set, the table's CSV dump is sent through the
    local keyword/summary extractor; otherwise the column names double as
    keywords and the summary is "表-<heading> <columns...>" truncated to
    ``max_len`` characters.
    """
    if local_summary:
        csv_text = tb_df.to_csv(index=False)
        tb_keywords, tb_summary = extact_local_keywords2summary(csv_text, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config)
        return tb_summary, tb_keywords

    tb_keywords = list(tb_df.columns)
    joined_cols = ' '.join(tb_df.columns)
    tb_summary = ('表-' + current_heading + ' ' + joined_cols).strip()[:max_len]
    return tb_summary, tb_keywords


def parse_headers(tb_path=None, tb_df=None, temp_path=None, mode='fill'):
    """Normalize a table's header row(s) and return the re-headed DataFrame.

    Exactly one of ``tb_path`` / ``tb_df`` supplies the table:
      * ``tb_path`` -- path to a .csv/.xlsx file (loaded via ``encryptor`` when
        encryption is enabled, else ``pd.read_excel``) and cleaned by
        ``postprocess_tb``
      * ``tb_df``   -- an already-loaded, non-empty DataFrame (used when
        ``tb_path`` is None). NOTE: the supplied frame is modified in place
        (its column names are prepended as a data row).

    ``mode='fill'`` treats every non-empty row as part of a hierarchical
    header (used downstream to build a path tree over the whole table);
    ``mode='parse'`` promotes only the first non-empty row to the header.

    :param temp_path: kept for interface compatibility; currently unused.
    :returns: DataFrame with the new header(s) applied and the header rows
        removed, or None when no header row can be found.
    :raises ValueError: for unsupported file extensions, missing input, or an
        unknown ``mode`` (previously these paths hit a bare ``raise`` /
        unbound-variable NameError).

    Fixes over the previous version: mutable default ``tb_df={}`` replaced by
    None; ``== None`` comparisons replaced by identity checks; bare ``raise``
    with no active exception replaced by explicit ValueError; header rows are
    indexed via ``header_rows[i]`` rather than the loop counter.
    """
    # 1. read table dataframe from the given path, or use the supplied frame
    if tb_path is not None:
        if tb_path.endswith('.xlsx') or tb_path.endswith('.csv'):
            if encryptor.encrypt:
                df_temp = encryptor.load_from_file(tb_path)
            else:
                df_temp = pd.read_excel(tb_path)
            df_temp = postprocess_tb(df_temp)
        else:
            raise ValueError('currently only support .csv and .xlsx!')
    elif tb_df is not None and len(tb_df) > 0:
        df_temp = tb_df
    else:
        print('\tcurrently only support .csv and .xlsx!')
        raise ValueError('no table data provided: pass tb_path or a non-empty tb_df')

    # 2. push the current column names down as the first data row (index 0
    #    after the shift) so header detection sees them like any other row,
    #    then blank the column labels for downstream parsing
    df_temp.loc[-1] = df_temp.columns
    df_temp.index = df_temp.index + 1
    df_temp = df_temp.sort_index()
    df_temp.columns = [np.nan] * df_temp.shape[1]

    # 3. collect candidate header rows according to the mode
    if mode == 'fill':
        # every row carrying any value participates in the header tree
        header_rows = []
        for idx, row in df_temp.iterrows():
            if not row.isnull().all():
                header_rows.append(idx)
    elif mode == 'parse':
        # ****UNDER-DEVELOPMENT**** WE NEED TO AUTO-RECOGNIZE THE COLUMNS
        non_na_row = df_temp[df_temp.notna().any(axis=1)].head(1)
        non_na_idx = non_na_row.index[0] if not non_na_row.empty else None
        header_rows = [non_na_idx]
    else:
        # unknown mode: previously left header_rows unbound (NameError)
        raise ValueError(f'unknown mode: {mode!r}')

    if len(header_rows) == 0 or all(h is None for h in header_rows):
        return None
    elif len(header_rows) > 1:
        head_lst = []
        for hr in header_rows:
            # BUGFIX: index by the recorded header-row position, not the loop
            # counter (they differ whenever header rows are non-contiguous)
            head_lst.append(df_temp.iloc[hr].ffill().bfill().tolist())
        new_header = pd.MultiIndex.from_arrays(np.array(head_lst))
    else:
        new_header = df_temp.iloc[header_rows[-1]].ffill().bfill().tolist()

    df_temp.columns = new_header
    df_temp = df_temp.iloc[(header_rows[-1]) + 1:]
    df_temp = df_temp.reset_index(drop=True)
    return df_temp


def parse_tb_contents(df_temp, mode, parent_dic=None, file_name='', sheet_name='', return_lst=False):
    '''Convert a parsed table into a column tree, flat paths, and text chunks.

    :param df_temp: DataFrame whose headers were normalized by parse_headers
    :param mode: 'fill' -> tb_strs are the flattened header-tree paths;
                 'parse' -> tb_strs are one "col: value, ..." sentence per row
    :param parent_dic: optional dict the column tree is grafted into; a fresh
        dict is created when omitted. (Previously this was a mutable default
        ``{}`` which dataframe_columns_to_structure mutates when file/sheet
        names are given, leaking state across calls.)
    :param file_name, sheet_name: keys under which the tree is nested
    :param return_lst: return tb_strs as a list instead of a joined string
    :returns: (tb_res DataFrame, tb_paths list, tb_strs — used for generating uuid)
    :raises ValueError: for an unknown mode (previously an unbound-variable
        NameError at the return statement)
    '''
    if parent_dic is None:
        parent_dic = {}

    def dict2sentence(d):
        # "k1: v1, k2: v2" built from the non-null cells of one record
        parts = [f"{key}: {value}" for key, value in d.items() if pd.notna(value)]
        return ', '.join(parts)

    tb_tree, tb_res = dataframe_columns_to_structure(df_temp, parent_dic, file_name, sheet_name)
    tb_paths = flatten_dic2paths(tb_tree)

    if mode == 'fill':
        tb_strs = tb_paths if return_lst else '\n'.join(tb_paths)
    elif mode == 'parse':
        tb_res = tb_res.fillna('').infer_objects(copy=False)
        tb_res_list = tb_res.to_dict(orient='records')  # each row as a record
        tb_strs = [dict2sentence(tbd) for tbd in tb_res_list]
        tb_strs = [t for t in tb_strs if not t.strip() == '']
        if not return_lst:
            tb_strs = '\n'.join(tb_strs)
    else:
        raise ValueError(f'unknown mode: {mode!r}')
    return tb_res, tb_paths, tb_strs


def dataframe_columns_to_structure(df, parent_dic, file_name, sheet_name):
    """Build a tree structure from df's columns and graft it into parent_dic.

    Duplicate column labels are renamed level by level; MultiIndex columns
    become a nested dict, flat columns a single-level dict of empty dicts.
    The tree is attached under file_name / sheet_name keys when those are
    non-empty; with both empty, the tree itself is returned as parent_dic.
    Returns (parent_dic, df) with df's columns rewritten to the de-duplicated
    labels.
    """
    if isinstance(df.columns, pd.MultiIndex):
        # De-duplicate each level of the MultiIndex, then nest it as a tree.
        level_frame = pd.DataFrame(df.columns.tolist())
        for lvl in range(level_frame.shape[1]):
            level_frame[lvl] = process_duplicate_cols(level_frame[lvl])
        new_columns = pd.MultiIndex.from_frame(level_frame)
        tree_structure = multiindex_to_tree(new_columns)
    else:
        # Flat columns: each column maps to an empty subtree.
        new_columns = process_duplicate_cols(df.columns)
        tree_structure = {col: {} for col in new_columns}

    df.columns = new_columns

    # Graft the tree under whichever of the file/sheet names are provided.
    if file_name != '' and sheet_name != '':
        parent_dic[file_name][sheet_name] = tree_structure
    elif sheet_name != '':
        parent_dic[sheet_name] = tree_structure
    elif file_name != '':
        parent_dic[file_name] = tree_structure
    else:
        parent_dic = tree_structure
    return parent_dic, df
        

def multiindex_to_tree(multiindex):
    """Convert a MultiIndex into a nested plain-dict tree, one dict level per
    index level; leaves are empty dicts. Insertion order follows the index."""
    root = {}
    for key_tuple in multiindex:
        node = root
        for key in key_tuple:
            node = node.setdefault(key, {})
    return root


def postprocess_tb(df, drop=False):
    """Clean a raw table for downstream parsing.

    Optionally drops all-NaN rows/columns (reporting dropped columns), strips
    newlines from headers and string cells, blanks pandas' auto-generated
    'Unnamed: N' headers to NaN, and stringifies datetime cells.
    """
    if drop:
        # Remove rows and columns that are entirely NaN and report which
        # columns were discarded.
        before_cols = set(df.columns)
        df = df.dropna(how='all').dropna(axis=1, how='all')
        dropped_columns = before_cols - set(df.columns)
        print("\t被删除的列:", list(dropped_columns))
        df.reset_index(drop=True, inplace=True)

    # Newlines in headers or cells can break downstream parsing — strip them.
    df.columns = [str(col).replace('\n', '') for col in df.columns]
    # pandas names headerless columns 'Unnamed: N'; turn those into NaN so the
    # header-detection step treats them as missing values.
    df.columns = [np.nan if 'Unnamed' in col else col for col in df.columns]
    df = df.map(lambda cell: cell.replace('\n', '') if isinstance(cell, str) else cell)
    df = process_datetime_cells(df)
    return df


def process_nan4records(df):
    """Serialize a DataFrame to a list of row dicts, with datetimes rendered
    as strings and NaN replaced by None (JSON-friendly)."""
    converted = process_datetime_cells(df).astype(object)
    converted = converted.where(pd.notnull(converted), None)
    return converted.to_dict(orient="records")


def process_datetime_cells(df):
    """Return a copy of *df* in which every datetime-like cell (pd.Timestamp
    or datetime.datetime) is formatted as 'YYYY-mm-dd HH:MM:SS'; all other
    cells pass through unchanged."""
    def _fmt(cell):
        if isinstance(cell, (pd.Timestamp, datetime.datetime)):
            return cell.strftime("%Y-%m-%d %H:%M:%S")
        return cell

    out = df.copy()
    return out.apply(lambda column: column.map(_fmt))


def process_duplicate_cols(columns):
    """Return column labels with duplicates renamed to '<name>_<k>'.

    The first occurrence of a label keeps its original name; later
    occurrences get a numeric suffix starting at 1 (a, a, a -> a, a_1, a_2).

    Fix over the previous version: generated names are now checked against
    every label emitted so far, so inputs such as ['a', 'a', 'a_1'] can no
    longer produce the same output label twice.
    """
    next_suffix = {}   # base label -> next suffix number to try
    emitted = set()    # every label produced so far (originals + renamed)
    new_columns = []
    for col in columns:
        if col not in next_suffix and col not in emitted:
            # first sighting of this label and no collision: keep as-is
            next_suffix[col] = 1
            new_columns.append(col)
            emitted.add(col)
        else:
            k = next_suffix.get(col, 1)
            candidate = f"{col}_{k}"
            # skip suffixes that collide with labels already present
            while candidate in emitted:
                k += 1
                candidate = f"{col}_{k}"
            next_suffix[col] = k + 1
            new_columns.append(candidate)
            emitted.add(candidate)
    return new_columns
    

def parse_xlsx4inject(file_path, temp_path, kb_dir, mode='fill', local_summary=False, call_llm=None, llm_histories=None, local_llm_name=None, local_llm=None, local_llm_tz=None, model_config=None, know_df_cols=[]):
    """Parse every sheet of an .xlsx workbook into the knowledge base.

    For each sheet: clean the table (postprocess_tb), normalize headers
    (parse_headers), build the column tree / row sentences
    (parse_tb_contents), and summarize (tb_summarize). In 'parse' mode the
    table is additionally persisted as a CSV under kb_dir, registered in
    table_record.json, and collected as a knowledge record; finally the
    accumulated paths are restored into a graph and the graph, registry and
    knowledge CSV are written out.

    :param file_path: path of the .xlsx workbook to ingest
    :param temp_path: forwarded to parse_headers (currently unused there)
    :param kb_dir: output directory for CSVs, graph.json and table_record.json
    :param mode: 'fill' or 'parse' (only 'parse' persists artifacts)
    :returns: (tb_graph, failed_files)

    NOTE(review): `know_df_cols=[]` is a mutable default argument; it is only
    passed to pd.DataFrame here (never mutated), but a None default would be
    safer — confirm before changing the signature.
    """
    failed_files = []
    # sheet_name=None makes read_excel return {sheet_name: DataFrame} for all sheets
    sheets_dict = pd.read_excel(file_path, sheet_name=None)
    table_df = pd.DataFrame(columns=know_df_cols)
    file_name = os.path.splitext(os.path.basename(file_path))[0]

    df_list = []
    all_sheets = sheets_dict.items()
    print('\tthere are {} sheets in the table'.format(len(all_sheets)))

    # Load the global table registry (tb_id -> csv name); start fresh when the
    # file is absent or unreadable.
    tb_record_pth = os.path.join(kb_dir, ('table_record.json'))
    try:
        if encryptor.encrypt:
            tb_record = encryptor.load_from_file(tb_record_pth)
        else:
            with open(tb_record_pth, 'r', encoding='utf-8') as f:
                tb_record = json.load(f)
    except Exception as e:
        tb_record = {}

    all_tb_paths = []
    temp_json_graph = []
    for sheet_name, sheet_content in all_sheets:
        tb_path = f"{sheet_name}"
        # ****UNDER DEVELOPMENT**** analyzing table structure vertical/horizontal, get sub tables (multiple tables in one sheet)
        sheet_tbs = [sheet_content] # currently, by default, one sheet corresponds to one table

        for tb in sheet_tbs:
            try:
                tb = postprocess_tb(tb, drop=True) # the returned tb does not contain any rows/columns that are all nan values
                if len(tb)==0 or tb.empty or tb.isna().all().all():
                    continue  # nothing left after cleaning -> skip this table

                tb_df = parse_headers(tb_df=tb, temp_path=temp_path, mode=mode)
                tb_res, tb_paths, tb_strs = parse_tb_contents(tb_df, mode, parent_dic={file_name:{sheet_name:{}}}, file_name=file_name, sheet_name=sheet_name)
                tb_summary, keywords = tb_summarize(tb_res, sheet_name, call_llm, llm_histories, local_llm_name, local_llm, local_llm_tz, model_config, local_summary)

                if mode=='parse':
                    # 1. persist the parsed table as CSV (encrypted when enabled)
                    tb_name = remove_spaces('表-' + sheet_name) + '.csv'
                    if encryptor.encrypt:
                        encryptor.save_to_file(tb_res, os.path.join(kb_dir, tb_name))
                    else:
                        tb_res.to_csv(os.path.join(kb_dir, tb_name), encoding='utf-8', index=False)
                    
                    # 2. update global table directory for reusing
                    tb_id = 'TABLE_' + gen_str_codes(tb_strs[:1000]) + '_TABLE'
                    tb_record.update({tb_id : tb_name})
                    
                    # 3. process contents as knowledge dictionary
                    know_dic, know_id = process_full_contents(tb_id, tb_path)
                    know_dic = json.dumps(know_dic, ensure_ascii=False, indent=4)
                    df_list.append(pd.DataFrame({'path':[tb_path], 'content':[know_dic], 'linkage':[tb_id], 'summary':tb_summary, 'keywords':' '.join(keywords), 'know_id':know_id}))
                    
                    tb_js_data = process_nan4records(tb_res)
                    temp_json_graph.append({'tb_name':sheet_name, 'tb_data':tb_js_data, 'tb_path':os.path.join(kb_dir, tb_name), 'tb_summary':tb_summary, 'tb_keywords':' '.join(keywords)}) #tb_res.to_dict(orient="records")
                
            except Exception as e:
                print('\tparse table fails, because ', e)
                failed_files.append(tb_path)
                # NOTE(review): this re-raise aborts the whole run on the first
                # bad sheet, so failed_files never reaches the caller populated
                # — confirm whether the raise should be dropped for best-effort
                # ingestion.
                raise
            all_tb_paths.extend(tb_paths)
    
    # Rebuild the hierarchical graph from all collected column-tree paths.
    tb_graph, _ = restore_graph_by_paths(all_tb_paths)
    if mode=='parse':
        graph_path = os.path.join(kb_dir, 'graph.json')
        temp_json_graph_path = os.path.join(kb_dir, 'temp_json_graph.json')
        # NOTE(review): in the encrypted branch only the graph is saved —
        # temp_json_graph is never persisted; confirm this asymmetry is intended.
        if encryptor.encrypt:
            encryptor.save_to_file(tb_graph, graph_path)
        else:
            with open(graph_path, 'w', encoding='utf-8') as f:
                json.dump(tb_graph, f, ensure_ascii=False, indent=4)
            with open(temp_json_graph_path, 'w', encoding='utf-8') as f:
                json.dump(temp_json_graph, f, ensure_ascii=False, indent=4)

        # Persist the updated table registry.
        if encryptor.encrypt:
            encryptor.save_to_file(tb_record, tb_record_pth)
        else:
            with open(tb_record_pth, 'w', encoding='utf-8') as f:
                json.dump(tb_record, f, ensure_ascii=False, indent=4)
        
        # Aggregate the per-table knowledge rows into one CSV.
        # NOTE(review): pd.concat raises on an empty df_list (every sheet
        # skipped); confirm whether an empty workbook should be tolerated here.
        table_df = pd.concat(df_list, ignore_index=True)
        if encryptor.encrypt:
            encryptor.save_to_file(table_df, os.path.join(kb_dir, 'KB_PTXT.csv'))
        else:
            table_df.to_csv(os.path.join(kb_dir, 'KB_PTXT.csv'), encoding='utf-8')
    return tb_graph, failed_files



if __name__ == "__main__":
    from META import USER_SETTINGS, llm_apis, llm_histories, model_config #, local_llm, local_llm_tz, 
    from utlis import know_df_cols

    filename= '斯迪克财务模型'
    suffix = '.xlsx'
    
    api_name = 'qwen_api'
    local_summary = USER_SETTINGS['LOCAL_SUMMARY']

    raw_file_path = r"C:\Users\chengke\Desktop\testdir\斯迪克财务模型.xlsx"
    # raw_file_path = r"C:\Users\DELL\Desktop\testdir\斯迪克财务模型.xlsx"
    kb_dir = os.path.join(USER_SETTINGS['KB_PATH'], (filename + suffix))
    os.makedirs(kb_dir, exist_ok=True)

    _, failed_paths = parse_xlsx4inject(raw_file_path, USER_SETTINGS['TEMP_RES_PATH'], kb_dir, 'parse', local_summary, llm_apis[api_name], [], None, None, None, model_config, know_df_cols)
    # _, failed_paths = parse_xlsx4inject(raw_file_path, USER_SETTINGS['TEMP_RES_PATH'], kb_dir, 'parse', local_summary)
    
        
        
        
        



