
import xml.etree.ElementTree as ET
from translator import DataProcessor
from comm.comm import get_db_tables
from pathlib import Path
from conf.settings import logger
import pandas as pd

# Directory containing this module; all relative data paths hang off it.
BASE_DIR = Path(__file__).resolve().parent


def convert_ts_to_csv(file_path):
    """Convert a Qt Linguist .ts file into a CSV of source/translation pairs.

    Parses the .ts file as XML, collects the text of every
    ``<message>/<source>`` and ``<message>/<translation>`` pair, and writes
    them to ``to_be_checked/csv_files/<stem>.csv`` next to this module.
    Files yielding no messages produce no CSV.

    NOTE(review): the columns are named 'cn'/'en', while check_translations
    reads 'src'/'trans' -- confirm which producer feeds that audit.

    :param file_path: path to the .ts file (str or Path)
    :return: None
    """
    file_stem = Path(file_path).stem

    # .ts files are XML; parse the whole document up front.
    with open(file_path, 'r', encoding='utf-8') as file:
        tree = ET.parse(file)
        root = tree.getroot()

    # Walk every <message> element and collect its text pair.
    src_list = []
    trans_list = []
    for message in root.findall('.//message'):
        source = message.find('source')
        translation = message.find('translation')
        # Guard against malformed entries: a missing child element would
        # otherwise raise AttributeError on `.text`.
        if source is None or translation is None:
            logger.warning(f'skipping malformed <message> in {file_path}')
            continue
        src_list.append(source.text)
        trans_list.append(translation.text)

    # Build the DataFrame; the scalar file_path is broadcast to every row.
    data = {"cn": src_list, "en": trans_list, "file_path": str(Path(file_path))}
    df = pd.DataFrame(data)
    dest_path = BASE_DIR / 'to_be_checked' / 'csv_files' / f'{file_stem}.csv'
    if not df.empty:
        # Ensure the destination folder exists before the first write.
        dest_path.parent.mkdir(parents=True, exist_ok=True)
        # utf_8_sig adds a BOM so Excel renders the Chinese text correctly.
        df.to_csv(dest_path, encoding='utf_8_sig', index=False)


def convert_ts_by_folder(folder_path):
    """Convert every .ts file directly inside *folder_path* to CSV.

    :param folder_path: directory to scan (non-recursively) for .ts files
    :return: None
    """
    for ts_file in Path(folder_path).glob('*.ts'):
        convert_ts_to_csv(ts_file)


def check_translations(folder_path):
    """Find source strings that are translated inconsistently across CSVs.

    Loads every ``*.csv`` directly under *folder_path*, concatenates them,
    keeps the rows whose 'src' value maps to more than one distinct 'trans'
    value, and exports those rows (sorted by 'src') to
    ``inconsistent_translations.xlsx`` in the parent folder.

    NOTE(review): convert_ts_to_csv in this module writes columns named
    'cn'/'en', not 'src'/'trans' -- confirm which producer feeds this
    folder before relying on the column names below.

    :param folder_path: directory containing the CSV files to audit
    :return: None
    """
    folder = Path(folder_path)
    csv_files = list(folder.glob('*.csv'))

    # utf-8-sig transparently strips the BOM written by utf_8_sig producers
    # and reads plain UTF-8 unchanged; plain 'utf-8' would leave the BOM
    # glued onto the first column name.
    df_list = [pd.read_csv(filename, encoding='utf-8-sig') for filename in csv_files]

    # pd.concat raises ValueError on an empty list; nothing to audit then.
    if not df_list:
        logger.warning(f'no CSV files found in {folder}')
        return

    # Merge everything into one big DataFrame.
    big_df = pd.concat(df_list, ignore_index=True)

    # Keep only the 'src' values mapped to more than one distinct 'trans'.
    cn_en_counts = big_df.groupby('src')['trans'].nunique()
    cn_with_multiple_en = cn_en_counts[cn_en_counts > 1].index
    inconsistent_df = big_df[big_df['src'].isin(cn_with_multiple_en)]

    # Sort so identical source strings end up adjacent in the report.
    inconsistent_df = inconsistent_df.sort_values(by='src')

    # Export the findings to Excel next to the audited folder.
    inconsistent_df.to_excel(folder.parent / 'inconsistent_translations.xlsx', index=False)


def compare_peer_csv(csv_file_1, csv_file_2, key_index: int,
                     group_col: str = 'iNodeType',
                     out_path='highlighted_diffs.xlsx'):
    """Compare two CSV files (row count, column count, cell values).

    Rows unique to either file are grouped by *group_col*; within a group,
    cells holding more than one distinct value are joined with '|!|' and
    highlighted yellow. Rows whose key value exists in only one file are
    highlighted red, as are columns present in only one file. The styled
    diff is written to *out_path*.

    :param csv_file_1: first CSV file
    :param csv_file_2: second CSV file
    :param key_index: primary-key column index, starting from 0
    :param group_col: column to group differing rows by (default keeps the
                      previously hard-coded 'iNodeType')
    :param out_path: destination Excel file (default keeps the previously
                     hard-coded 'highlighted_diffs.xlsx')
    :return: None
    """

    df1 = pd.read_csv(csv_file_1)
    row_num1, col_num1 = df1.shape

    df2 = pd.read_csv(csv_file_2)
    row_num2, col_num2 = df2.shape

    # Row-count check: when it differs, log which key values appear in
    # only one of the two files.
    sy_diff_row = set()
    if row_num1 != row_num2:
        logger.warning(f'存在{abs(row_num1 - row_num2)}行差异：{row_num1} VS {row_num2}')
        df1_first_col_vals = set(df1[df1.columns[key_index]].to_list())
        df2_first_col_vals = set(df2[df2.columns[key_index]].to_list())

        sy_diff_row = df1_first_col_vals.symmetric_difference(df2_first_col_vals)
        logger.warning(f'存在差异的首列字段值：{sy_diff_row}')

    # Column-count check: when it differs, log the non-shared column names.
    sy_diff_col = set()
    if col_num1 != col_num2:
        logger.warning(f'存在{abs(col_num1 - col_num2)}列差异：{col_num1} VS {col_num2}')
        df1_col_names = set(df1.columns)
        df2_col_names = set(df2.columns)

        sy_diff_col = df1_col_names.symmetric_difference(df2_col_names)
        logger.warning(f'存在差异的列名：{sy_diff_col}')

    # Drop rows identical in both files; whatever remains differs somewhere.
    diff = pd.concat([df1, df2]).drop_duplicates(keep=False)

    # Merge each group's differing cells into one '|!|'-joined string so a
    # difference shows up inside a single row.
    # NOTE(review): raises KeyError if *group_col* is absent from both
    # inputs, and set() makes the join order non-deterministic -- confirm
    # both are acceptable for these files.
    df_merged = diff.astype(str)
    df_merged = df_merged.groupby(group_col, as_index=False).agg(lambda x: '|!|'.join(set(x)))

    # Yellow: cells whose merged value contains the '|!|' separator.
    def highlight_diffs(s):
        return ['background-color: yellow' if '|!|' in str(val) else '' for val in s]

    # Red row: the key value exists in only one of the two files.
    def highlight_rows(row):
        highlight = row.iloc[key_index] in list(map(str, sy_diff_row))
        return ['background-color: red' if highlight else '' for _ in row]

    # Red column: the column exists in only one of the two files.
    def highlight_cols(column):
        return ['background-color: red' for _ in column]

    # Build the Styler and layer the three highlight passes; the yellow
    # pass skips the first (group) column.
    df_styled = df_merged.style
    df_styled = df_styled.apply(highlight_diffs, subset=pd.IndexSlice[:, df_merged.columns[1:]])
    df_styled = df_styled.apply(highlight_rows, axis=1)
    df_styled = df_styled.apply(highlight_cols, axis=0, subset=list(sy_diff_col))

    # Export the styled diff to Excel.
    df_styled.to_excel(out_path, engine='openpyxl', index=False)


def compare_peer_db():
    """Compare the current and previous MFCONFIG databases.

    Reports tables present in only one of the two databases, then (once
    completed) converts each database's tables to CSV and diffs them.

    TODO: incomplete skeleton -- the DataProcessor arguments and the
    compare_peer_csv file paths below are still empty placeholders.

    :return: None
    """
    # todo

    # Fixed locations of the new (current) and old (prev) database copies.
    db_curr = BASE_DIR / 'to_be_checked/db_files/current/iAppsSupport/NewTable/MFCONFIG_bak.db'
    db_prev = BASE_DIR / 'to_be_checked/db_files/prev/iAppsSupport/NewTable/MFCONFIG_bak.db'

    # Compare the table sets as a whole: names present in only one DB.
    tables_curr = set(get_db_tables(db_curr))
    tables_prev = set(get_db_tables(db_prev))
    tables_diff = tables_curr.symmetric_difference(tables_prev)
    if tables_diff:
        logger.warning(f'数据库表存在差异：{tables_diff}')

    # Compare the contents of each same-named table.
    # NOTE(review): all arguments below are empty placeholders; this block
    # does nothing useful until they are filled in.
    trans = DataProcessor(db_path='',
                          table_name='',
                          origin_col='',
                          target_col='',
                          rule='')
    trans.convert_db_to_csv(table_list=[], csv_folder='')
    trans.convert_db_to_csv(table_list=[], csv_folder='')

    compare_peer_csv('', '', key_index=0)


# Script entry point; currently just a smoke-test print.
if __name__ == '__main__':
    print('hello lumos')