import json
import os

from preprocess.data_utils import traverse_dir
import pandas as pd
import streamlit as st
from preprocess.file_paser.ofd_paser.invoice import Invoice
from preprocess.file_paser.ofd_paser.ofd_utils import unzip_file
from preprocess.file_paser.xlsx_parser.xlsx_parse import prepare_excel_table
from preprocess.file_paser.paser_utils import re_filter


def multi_parse(ofd_dir, unzip_path):
    """
    Batch-parse every .ofd file under a directory into one DataFrame.

    Each archive is extracted into ``unzip_path/<file_name>`` and parsed as
    an Invoice; the rows from all invoices are concatenated.

    :param ofd_dir: directory containing the .ofd files to parse
    :param unzip_path: directory under which each archive is extracted
    :return: ``(df, exception_ofd_path)`` where ``df`` is a DataFrame of all
        parsed rows (with a leading ``file_name`` column when non-empty) and
        ``exception_ofd_path`` lists extracted paths that failed to parse
    """
    exception_ofd_path = []
    data_list = []
    file_name_list = []
    for f, f_name in traverse_dir(ofd_dir, ['ofd']):
        file_path = unzip_file(zip_path=f, unzip_path=os.path.join(unzip_path, f_name))
        try:
            # Materialize first so a mid-iteration failure leaves data_list intact.
            rows = list(Invoice(file_path).result_list)
            data_list.extend(rows)
            # Tag every extracted row with the originating file name.
            file_name_list.extend([f_name] * len(rows))
        except Exception:
            # Parsing is best-effort: record the failure and keep going;
            # failed paths are handled later by the exception pipeline.
            exception_ofd_path.append(file_path)
    df = pd.DataFrame(data_list)
    if file_name_list:
        df.insert(0, 'file_name', file_name_list)
    return df, exception_ofd_path


def streamlit_main():
    """
    Streamlit demo: upload .ofd files and parse the in-memory streams
    directly, without saving them to disk first.

    Because this is a demo, uploads are limited to 100 files per batch.
    :return: None
    """
    uploaded_files = st.file_uploader("Choose a file", accept_multiple_files=True)
    # The documented demo limit is 100 files; the old check used 1000.
    if len(uploaded_files) <= 100:
        invoice_datas = []
        for uploaded_file in uploaded_files:
            invoice_datas.extend(Invoice(uploaded_file).result_list)
        st.dataframe(pd.DataFrame(invoice_datas))
    else:
        st.markdown(":red[[Warning]: Too many files.]")


def to_row_string(row):
    """
    Render a pandas Series as newline-joined "header：value" pairs.

    :param row: Series whose values are cell contents and whose index
        entries are the corresponding headers
    :return: one string, one pair per line, e.g.::

        发票号码：23312000000002136011
        开票日期：2023年01月29日
        ...
        合计税额：53235.0
    """
    return '\n'.join(f'{label}：{value}' for label, value in row.items())


def process_df(df, block_max_len):
    """
    Merge the rows belonging to the same source file and convert the result
    into a list of per-file dictionaries.

    :param df: DataFrame of parsed invoice rows; must contain a
        ``file_name`` column plus the invoice columns sliced below
    :param block_max_len: maximum block length forwarded to
        ``prepare_excel_table`` when splitting oversized tables
    :return: list of dicts with keys ``file_name``, ``image_info``,
        ``table_info`` and ``content``
    """
    # Label-based (inclusive) column slices: the line-item table, the
    # descriptive fields before it, and the fields after it.
    table = df.loc[:, '项目名称':'税额']
    desc_before_table = df.loc[:, '购买方名称':'合计税额']
    desc_after_table = df.loc[:, '备注':]

    # Boundaries of each run of consecutive identical file names:
    # rows lens[i]:lens[i+1] belong to unique_files[i].
    files_duplicates = df['file_name'].tolist()
    lens = [0]
    unique_files = [files_duplicates[0]]
    for row_idx in range(1, len(files_duplicates)):
        if files_duplicates[row_idx] != files_duplicates[row_idx - 1]:
            lens.append(row_idx)
            unique_files.append(files_duplicates[row_idx])
    lens.append(len(files_duplicates))

    file_list = []
    for idx in range(len(lens) - 1):
        start, stop = lens[idx], lens[idx + 1]
        # Line-item rows for this file, chunked via the excel table splitter.
        cur_table = table.iloc[start:stop, :]
        table_ls, extra_data_ls = prepare_excel_table(cur_table, block_max_len)
        table_info = {}
        table_labels = []
        for table_id, sub_table in enumerate(table_ls):
            table_split = sub_table.split('\n')
            # First two lines are the (merged) header, the rest are data rows.
            table_head, row_data = table_split[0:2], table_split[2:]
            table_info[f'table_{idx}_{table_id}'] = {'row_data': row_data, 'table_head': table_head}
            table_labels.append(f'<table:table_{idx}_{table_id}>')
        # Descriptive text is taken from the first row of this file's span.
        text_before = to_row_string(desc_before_table.iloc[start, :])
        text_after = to_row_string(desc_after_table.iloc[start, :])
        if extra_data_ls:
            text_after = text_after + '\n' + '\n'.join(extra_data_ls)
        content = text_before + text_after + ''.join(table_labels)
        file_list.append(
            {"file_name": unique_files[idx], 'image_info': {}, 'table_info': table_info,
             'content': re_filter(content)})
    return file_list


def parse_ofd(ofd_dir, out_dir, unzip_dir, block_max_len):
    """
    End-to-end .ofd pipeline: parse every archive under ``ofd_dir`` and dump
    the combined result to ``out_dir/ofd_content.json``.

    :param ofd_dir: directory containing the .ofd archives
    :param out_dir: directory receiving ``ofd_content.json`` (created if missing)
    :param unzip_dir: scratch directory where archives are extracted
    :param block_max_len: max table-block length forwarded to ``process_df``
    :return: None
    """
    print('正在提取并划分ofd文件...')
    df, exception_ofd_path = multi_parse(ofd_dir, unzip_dir)
    # Invoice-type ofd files go through the table-merging pipeline.
    file_list = [] if df.empty else process_df(df, block_max_len)
    if exception_ofd_path:
        # Lazy import: the fallback handler is only needed when parsing failed.
        from preprocess.file_paser.ofd_paser.process_ofd_exception import process_exception
        for path in exception_ofd_path:
            file_name = os.path.basename(path)
            content = re_filter(process_exception(path))
            file_list.append({'content': content, 'table_info': {}, 'image_info': {}, 'file_name': file_name})

    res = {'file_num': len(file_list), 'file_suffix': '.ofd', 'file_list': file_list}

    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'ofd_content.json'), 'w', encoding='utf-8') as f:
        json.dump(res, f, ensure_ascii=False)


if __name__ == "__main__":
    parse_ofd('../../dataset/ofd_files', f'../out/parse_split/parse_out', '../out/ofd_unziped')
