import sqlite3
import shutil
import pandas as pd
from enum import Enum
from pathlib import Path
from comm.comm import alter_path
from comm.rc4_decrypt import decryptSQLite
from conf.settings import logger
from zhconv import convert
from pygtrans import Translate


class Rule(Enum):
    # Translation rules: each member's value is the target-language code
    # passed to the backend (zhconv locale code or Google Translate target).
    CN_TO_HK = 'zh-hk'  # Simplified Chinese -> Traditional (Hong Kong) via zhconv
    CN_TO_EN = 'en'  # Chinese -> English via Google Translate (pygtrans)


class Translator:
    """Translate a list of strings according to a Rule, in fixed-size chunks.

    CN_TO_EN goes through the Google Translate API (network access required);
    CN_TO_HK is an offline character conversion via zhconv.
    """

    def __init__(self, rule: Rule, chunk_size: int = 1000):
        """
        :param rule: translation rule (target language / variant)
        :param chunk_size: number of texts sent to the backend per batch
        """
        self.rule = rule
        self.chunk_size = chunk_size

    def translate(self, data_list):
        """Translate ``data_list`` and return a single-column DataFrame.

        :param data_list: list of source strings (falsy entries map to '')
        :return: DataFrame whose only column is named after the rule
                 (``self.rule.name``); empty on error or unsupported rule.
        """
        df = pd.DataFrame()
        texts_all = []
        client = None  # lazily-created Google Translate client, reused across batches
        for i in range(0, len(data_list), self.chunk_size):
            curr_list = data_list[i:i + self.chunk_size]

            if self.rule is Rule.CN_TO_EN:
                # Translate to English via the Google Translate API
                # (requires a working connection to Google).
                logger.info('正在进行中文转英文翻译...')
                try:
                    if client is None:
                        client = Translate()
                    result = client.translate(curr_list, target=self.rule.value)
                    texts = [x.translatedText for x in result]
                except Exception as e:
                    logger.error(f"翻译过程中发生错误: {e}")
                    return df
            elif self.rule is Rule.CN_TO_HK:
                # Offline simplified -> traditional conversion.
                logger.info('正在进行中文转繁体翻译...')
                texts = [convert(text, self.rule.value) if text else '' for text in curr_list]
            else:
                logger.error(f"不支持的翻译规则: {self.rule.value}")
                return df

            texts_all.extend(texts)

            # bug fix: `i` is an element offset (0, 1000, 2000, ...), not a batch
            # index -- report the 1-based batch number instead of `i + 1`.
            logger.info(f'正在处理第{i // self.chunk_size + 1}批，每批{self.chunk_size}条')

        # Collect all batches into one column named after the rule.
        df[self.rule.name] = pd.Series(data=texts_all)

        return df


class DataProcessor:
    """Pipeline for one database table: back up the SQLite file, dump the
    table to CSV, translate one column, then emit UPDATE statements that
    write the translations back."""

    def __init__(self,
                 db_path: str,
                 table_name: str,
                 origin_col: str,
                 target_col: str,
                 rule: str):
        """
        :param db_path: path to the SQLite database file
        :param table_name: table containing the column to translate
        :param origin_col: name of the source-text column
        :param target_col: column the generated SQL will update
        :param rule: Rule member name, case-insensitive (e.g. 'cn_to_en')
        :raises KeyError: if ``rule`` is not a Rule member name
        """
        self.db_path = Path(db_path)
        self.table_name = table_name
        self.origin_col = origin_col
        self.target_col = target_col
        self.rule = Rule[rule.upper()]

        self.translated_col = self.rule.name     # column holding translation results
        self.chunk_size = 1000          # translation batch size
        self.drop_translated = False    # drop a pre-existing result column before re-translating

        base_path = Path(__file__).resolve().parent
        # NOTE(review): `Path(self.db_path).resolve()` yields an absolute path,
        # so joining onto base_path discards base_path entirely -- presumably
        # db_path is meant to be relative to this file; confirm with callers.
        self.db_folder = (base_path / Path(self.db_path).resolve()).parent
        self.csv_file = f'{self.db_folder}/{self.db_path.name}#{self.table_name}.csv'

    @classmethod
    def from_config(cls, config):
        """Build one DataProcessor per row of a CSV config file.

        :param config: path to a CSV with columns db_file, table_name,
                       origin_col, target_col, rule
        :return: list of DataProcessor instances (empty when the file has no rows)
        """
        # bug fix: the `config` argument was ignored in favour of a
        # hard-coded 'conf/db_conf0502.csv'.
        df = pd.read_csv(config)
        if df.empty:
            logger.error(f'数据库文件 {config} 为空')
            # bug fix: returning None here made callers crash when iterating.
            return []

        return [cls(db_path=row['db_file'],
                    table_name=row['table_name'],
                    origin_col=row['origin_col'],
                    target_col=row['target_col'],
                    rule=row['rule'])
                for _, row in df.iterrows()]

    def process(self):
        """Run the full pipeline: backup, CSV dump, translate, SQL generation."""
        logger.info(f'正在处理文件 {self.db_path}')

        self.backup_db()
        # From here on, operate on the *_bak copy so the original stays untouched.
        self.db_path = alter_path(self.db_path, tail='_bak')

        self.convert_db_to_csv(table_list=[self.table_name])

        self.do_translate()
        self.generate_update_sql()

    def backup_db(self):
        """Back up every ``*.db`` file under the database folder.

        Unencrypted databases are copied to ``<stem>_bak.db``; files that fail
        the schema probe are assumed RC4-encrypted and decrypted into the
        backup path instead. Existing backups and ``*_bak`` files are skipped.
        """
        logger.info(f'正在备份数据库文件 {self.db_folder}...')

        db_files = list(self.db_folder.glob('**/*.db'))
        logger.info(f'发现 {len(db_files)} 个数据库文件')

        for db_path in db_files:
            src_path = Path(db_path)
            des_path = src_path.parent / f'{src_path.stem}_bak{src_path.suffix}'

            if des_path.exists():
                logger.warning(f'{src_path} 备份已存在，跳过')
                continue

            if src_path.stem.endswith('_bak'):
                logger.warning(f'{src_path} 已是备份文件，跳过')
                continue

            conn = None
            try:
                # Probe the schema: this raises on an encrypted (non-SQLite) file,
                # while connect() alone succeeds lazily.
                conn = sqlite3.connect(db_path)
                conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
            except Exception:
                logger.warning('数据库可能已加密，尝试解密处理...')
                password = b'MobileForensic@2013@'
                ret = decryptSQLite(db_path, des_path, password)
                if ret:
                    logger.info(f'{db_path} 解密成功')
                else:
                    logger.warning(f'{db_path} 解密失败')
            else:
                # Database opened normally: a plain file copy is a valid backup.
                logger.info('数据库未加密，直接复制文件')
                shutil.copy(db_path, des_path)
            finally:
                # bug fix: the probe connection was never closed.
                if conn is not None:
                    conn.close()

    def convert_db_to_csv(self, table_list=None, csv_folder=None, with_ref=True):
        """Dump tables of ``self.db_path`` to CSV files.

        :param table_list: tables to convert; defaults to ``[self.table_name]``
        :param csv_folder: output folder; defaults to the database's own folder
        :param with_ref: when True (default), add a 'file_ref' column recording
                         ``<db path>#<table>``
        :return: None (side effect: updates ``self.csv_file`` to the last file written)
        """
        tables = table_list or [self.table_name]

        if csv_folder:
            csv_folder = Path(csv_folder)
            csv_folder.mkdir(parents=True, exist_ok=True)
        else:
            csv_folder = self.db_path.parent

        # bug fix: `with sqlite3.connect(...)` only manages transactions, it
        # never closes the connection; open once and close explicitly.
        conn = sqlite3.connect(self.db_path)
        try:
            for table_name in tables:
                df = pd.read_sql_query(f"SELECT * FROM {table_name}", conn)
                if with_ref:
                    df['file_ref'] = f'{str(self.db_path)}#{table_name}'

                # Save as CSV (utf_8_sig so Excel opens it correctly).
                self.csv_file = csv_folder / f'{self.db_path.name}#{table_name}.csv'
                df.to_csv(self.csv_file, index=False, encoding='utf_8_sig')

                logger.info(f'{self.db_path.stem}的{self.csv_file.name} 已保存至 {csv_folder}')
        finally:
            conn.close()

    def do_translate(self, drop_translated: bool = False):
        """Translate the origin column of ``self.csv_file`` and save in place.

        :param drop_translated: drop an existing result column before
                                translating (also honoured via
                                ``self.drop_translated``)
        :return: None
        """
        df = pd.read_csv(self.csv_file)
        df = df.fillna('')

        # bug fix: the `drop_translated` argument used to be silently ignored.
        if drop_translated or self.drop_translated:
            if self.rule.name in df.columns:
                df.drop(self.rule.name, axis=1, inplace=True)
                logger.info(f'已删除原有列 {self.rule.name}')

        try:
            data_list = df[self.origin_col].tolist()
            translated_df = Translator(self.rule, self.chunk_size).translate(data_list)

            if translated_df.empty:
                logger.warning('翻译结果为空，请检查输入数据')
                return

            # Insert the new column just before the last one (keeps the
            # trailing 'file_ref' column last).
            original_columns = df.columns.tolist()
            original_columns.insert(len(original_columns) - 1, translated_df.columns[0])
            df_result = pd.concat([df, translated_df], axis=1)[original_columns]
        except Exception as e:
            logger.error(f'翻译过程出错：{e}')
            return

        df_result.to_csv(self.csv_file, index=False, encoding='utf_8_sig')
        logger.info(f'翻译结果已保存至 {self.csv_file}')

    def generate_update_sql(self):
        """Write one UPDATE statement per translated row to a ``.sql`` file
        next to ``self.csv_file``. Rows with an empty translation are skipped.

        :return: None
        """
        df = pd.read_csv(self.csv_file)
        df = df.fillna('')

        dataset = list(zip(df[self.origin_col].tolist(), df[self.translated_col].tolist()))
        total = len(dataset)  # hoisted: was recomputed on every iteration

        sql_file = alter_path(self.csv_file, suffix='.sql')
        with open(sql_file, 'w+', encoding='utf-8') as f:

            count = 0
            for origin, translated in dataset:
                logger.info(f'Generating {count + 1} of {total} ...')

                if translated:
                    # bug fix: values containing '"' produced broken SQL;
                    # double the embedded quotes per SQL quoting rules.
                    # SECURITY NOTE: this is still string-built SQL -- prefer
                    # parameterized queries if these values can be hostile.
                    t = str(translated).replace('"', '""')
                    o = str(origin).replace('"', '""')
                    sql = f'UPDATE {self.table_name} SET {self.target_col}="{t}" WHERE {self.origin_col}="{o}";'

                    f.write(sql + '\n')
                    count += 1
        logger.info(f'SQL文件已生成，共 {count} 条语句')


def main():
    """Entry point: build processors from the CSV config and run each one."""
    config_file = 'conf/db_conf0502.csv'
    processor_list = DataProcessor.from_config(config_file)

    # bug fix: from_config can return None for an empty config, which made
    # this loop raise TypeError; guard against a falsy result.
    for processor in processor_list or []:
        processor.process()


# Script entry point: run the pipeline only when executed directly.
if __name__ == '__main__':
    main()
