#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
@Author: Phoenix
@Date: 2020-08-10 23:33:35

工具集
'''

import os
import re
import shutil
from configparser import ConfigParser
from datetime import datetime
from hashlib import md5
from itertools import cycle
from json import dumps, load
from os import path
from subprocess import Popen
from sys import exit


def print_err(value: str):
    '''
    Print an error message in red via ANSI escape codes.
    '''
    text = f'ERROR: {value}'
    print('\033[0;31;40m' + text + '\033[0m')


def print_info(value: str):
    '''
    Print a success/info message in green via ANSI escape codes.
    '''
    text = f'INFO: {value}'
    print('\033[0;32;40m' + text + '\033[0m')


def print_warn(value: str):
    '''
    Print a warning message in yellow via ANSI escape codes.
    '''
    text = f'WARNING: {value}'
    print('\033[0;33;40m' + text + '\033[0m')


# Auto-install chardet on first run if it is missing.
try:
    import chardet
except ImportError:
    print_err('未安装chardet模块，即将开始安装，期间请勿进行其他操作！')
    # Pass the command as an argument list: a single command string is only
    # interpreted by the shell on Windows; on POSIX, Popen would treat the
    # whole string as the executable name and fail with FileNotFoundError.
    subp = Popen(
        ['pip3', 'install', 'chardet', '-i', 'https://mirrors.aliyun.com/pypi/simple/'],
        bufsize=-1,
    )
    subp.wait()
    if subp.poll() == 0:
        import chardet

        print_info('chardet模块安装完成！')
    else:
        print_err('chardet模块安装超时，请稍后重试！')
        subp.kill()
        exit(0)

# Auto-install langid on first run if it is missing.
try:
    import langid
except ImportError:
    print_err('未安装langid模块，即将开始安装，期间请勿进行其他操作！')
    # Argument-list form works on both Windows and POSIX (a plain command
    # string would only work on Windows).
    subp = Popen(
        ['pip3', 'install', 'langid', '-i', 'https://mirrors.aliyun.com/pypi/simple/'],
        bufsize=-1,
    )
    subp.wait()
    if subp.poll() == 0:
        import langid

        print_info('langid模块安装完成！')
    else:
        print_err('langid模块安装超时，请稍后重试！')
        subp.kill()
        exit(0)

# try:
#     from ftlangdetect import detect
# except ImportError:
#     print_err('未安装fasttext-langdetect模块，即将开始安装，期间请勿进行其他操作！')
#     subp = Popen('pip3 install fasttext-langdetect -i https://mirrors.aliyun.com/pypi/simple/', bufsize=-1)
#     subp.wait()
#     if subp.poll() == 0:
#         from ftlangdetect import detect
#     else:
#         print_err('fasttext-langdetect模块安装超时，请稍后重试！')
#         subp.kill()
#         exit(0)

# Error-code table (messages shown to the user)
MY_ERROR_CODES = {
    'ERR_10001': '翻译API接口出现重复性错误！请根据API提供的报错信息检查相关设置。',
    'ERR_10002': '输入文本长度超过了翻译API接口限制！请适当缩短文本。',
}

# "To-do" marker; hard-coded so user edits cannot make text files non-portable
TODO_MARK = 'TODO'
# "Ignore" marker; hard-coded so user edits cannot make text files non-portable
NONE_MARK = 'NONE'

# Global runtime settings, populated from config.ini by get_config()
GLOBAL_DATA = {
    'google_rate': 1,
    'pass_filter': [],
    'json_max_cache': 0,
    'rpg_white_list': [],
    'rpg_duplicate_removal_list': [],
    'rpg_type_array_object': [],
    'rpg_script_regexp': [],
    'rpy_update_old_abspath': '',
    'rpy_update_new_abspath': '',
    'rpy_update_bap_max_cache': 0,
    'rpy_trans_input_abspath': '',
    'rpy_trans_bap_max_cache': 0,
    'todo_mark': False,
}

# Absolute path of the program directory (current working directory)
BASE_ABSPATH = path.abspath('.')

RPGM_INPUT_ABSPATH = os.path.join(BASE_ABSPATH, 'rpgm-input')
RPGM_OUTPUT_ABSPATH = os.path.join(BASE_ABSPATH, 'rpgm-output')

RPGM_GAME_TXT = 'gameText.json'
RPGM_BAK_GAME_TXT = 'bak_gameText.json'

# Extraction mode
EXTRACT = 'EXTRACT'
# Write-in mode
WRITEIN = 'WRITEIN'

# JSON update-marker key (see change_phoenix_mark)
KEY_PHOENIX = '__Phoenix__'

# End-of-group marker for a standard original/translation pair in .rpy files
END_SAY = '-*- END -*-'

KEY_MARK1 = '=====*'
KEY_MARK2 = '*====='

# Blank line
PATTERN_EMPTY_LINE = re.compile(r'^\s*$')

# .rpy "translate" identifier regex
PATTERN_IDENTIFIER = re.compile(r'^\s*translate\s*.*\s(.*):')
# .rpy "old" line regex
PATTERN_OLD = re.compile(r'^\s*old\s*"(.*)"')
# .rpy "new" line regex
PATTERN_NEW = re.compile(r'^\s*new\s*"(.*)"')
# .rpy say original-text regex (the commented-out source line)
PATTERN_OLD_SAY = re.compile(r'^\s*#+\s*(".*?[^\\]"|[\S\s]*?)\s*"(.*)"')
# .rpy say translated-text regex
# TODO translation lines cannot yet be distinguished from comments
PATTERN_NEW_SAY = re.compile(r'^\s*(".*?[^\\]"|[\S\s]*?)\s*"(.*)"\s*(.*)')
# .rpy quoted speaker ("who") regex
PATTERN_WHO = re.compile(r'^"(.*?[^\\])"')

# RPG Maker MapNNN file-name regex
PATTERN_MAP = re.compile(r'^Map\d{3}$')


def merge_dict(dict1: dict, dict2: dict) -> dict:
    '''
    Merge two dicts into a new dict; on key collision dict2 wins.
    None/empty inputs are treated as empty dicts.
    '''
    left = dict1 or {}
    right = dict2 or {}
    return {**left, **right}


def del_key_from_dict(datas: dict, key: str) -> bool:
    '''
    Delete the given key from the dict in place.

    Returns True when the key was present and removed; False when the key
    is empty/blank/None, the dict is empty/None, or the key is absent.
    '''
    # Blank or missing key is never deletable.
    if not key or not key.strip():
        return False

    # `key not in` is the idiomatic membership test (was `not key in`).
    if not datas or key not in datas:
        return False

    del datas[key]
    return True


def get_md5(parm_str: str, cut=False) -> str:
    '''
    Return the 32-char hex MD5 of a string, or the middle 16 chars when cut=True.
    Non-string input prints an error and yields ''.
    '''
    if not isinstance(parm_str, str):
        print_err('md5加密错误，传入参数非字符串！')
        return ''

    digest = md5(parm_str.encode('utf-8')).hexdigest()
    # cut keeps the middle 16 hex chars (drops 8 from each end)
    return digest[8:-8] if cut else digest


def read_json(_file: str, mode='r', encod='utf-8-sig') -> dict:
    '''
    Read a JSON file and return its content as a dict.

    - _file: absolute path of the file
    - mode: file open mode
    - encod: file encoding

    Returns {} when the file does not exist or cannot be parsed.
    '''

    datas = {}
    if not path.isfile(_file):
        return datas

    try:
        # `with` closes the handle automatically; the explicit close() was redundant.
        with open(_file, mode, encoding=encod) as j:
            datas = load(j)
    except Exception as e:
        datas = {}
        print_err(f'{_file}读取JSON数据异常：{e.args[0]}')
    # Plain return instead of `finally: return`, which silently swallowed
    # SystemExit/KeyboardInterrupt and any other in-flight exception.
    return datas


def write_json(_file: str, datas: dict, indent=4, bak=False):
    '''
    Write a dict to a JSON file, optionally backing up the existing file first.

    - _file: absolute path of the file (must end in .json)
    - datas: dict payload
    - indent: JSON indentation width
    - bak: back up the existing file with a timestamp suffix before overwriting
    '''

    _path, _filename = path.split(_file)
    # endswith() is the idiomatic check (was a slice comparison).
    if not _filename.endswith('.json'):
        print_err(f'{_filename}不是JSON文件！')
        return

    file_is_exist = path.exists(_file)
    # The original file exists and a backup was requested.
    if bak and file_is_exist:
        stamp = datetime.now().strftime('%Y_%m_%d_%H%M%S')
        shutil.copy(
            _file,
            path.join(_path, _filename[:-5] + '_' + stamp + _filename[-5:]),
        )

    # NOTE(review): for a new file get_file_encoding falls back to utf-8,
    # so new files are always written as utf-8.
    with open(_file, 'w', encoding=get_file_encoding(_file)) as fp:
        if file_is_exist:
            print(f'正在更新 {_filename}，请稍候……')
        else:
            print(f'正在创建 {_filename}，请稍候……')
        # `with` handles closing; the explicit close() was redundant.
        fp.write(dumps(datas, indent=indent, ensure_ascii=False))
        if file_is_exist:
            print_info(f'{_filename} 已更新！\n')
        else:
            print_info(f'{_filename} 已创建！\n')


def to_int(val: any) -> int:
    '''
    Convert a numeric string to int (floats are truncated).
    Returns 0 for strings that are neither int nor float.
    '''

    # Try plain int first, then the float route for strings like '3.5'.
    for convert in (int, lambda v: int(float(v))):
        try:
            return convert(val)
        except ValueError:
            continue
    # Neither an int string nor a float string.
    return 0


def is_int(val: any) -> bool:
    '''
    Return True when val can be converted by int() (int-like string, int, float).

    Also catches TypeError so non-numeric objects such as None or lists
    return False instead of raising (int(None) raises TypeError, not
    ValueError, which the original did not handle).
    '''

    try:
        int(val)
        return True
    except (ValueError, TypeError):
        return False


def check_langs(txt: str) -> str:
    '''
    Detect the language of txt; result is an ISO 639-1 code.
    Blank input defaults to 'en'.
    '''

    if not txt.strip():
        return 'en'

    lang_code, _confidence = langid.classify(txt)
    return lang_code
    # return detect(txt, low_memory=False).lang


def matching_langs(txt: str, langs: str) -> bool:
    '''
    Return True when the detected language of txt is in the given language list.
    Detection accuracy is limited by the underlying language detector.

    - txt: text to match
    - langs: comma-separated language codes, e.g. 'zh,ru'

    Blank text or a blank language list always matches (returns True).
    '''

    # Blank text or blank language list: treat as a match.
    if txt.strip() == '' or langs.strip() == '':
        return True

    # Classification does not depend on the candidate language, so run it
    # once instead of once per entry (it was recomputed inside the loop).
    try:
        detected = langid.classify(txt)[0]
    except Exception:
        # Original behavior: a failing classification never matches.
        return False

    wanted = {lang.strip() for lang in langs.split(',') if lang.strip()}
    return detected in wanted


def full_2_half(txt: str) -> str:
    '''
    Convert full-width characters in a string to their half-width equivalents.
    '''

    pieces = []
    for ch in txt:
        code = ord(ch)
        if code == 12288:
            # Ideographic space maps to a plain ASCII space.
            pieces.append(' ')
        elif 65281 <= code <= 65374:
            # Full-width ASCII block is offset by 0xFEE0 from ASCII.
            pieces.append(chr(code - 65248))
        else:
            pieces.append(ch)
    return ''.join(pieces)


def half_2_full(txt: str) -> str:
    '''
    Convert half-width (ASCII) characters in a string to full-width equivalents.
    '''

    pieces = []
    for ch in txt:
        code = ord(ch)
        if code == 32:
            # ASCII space maps to the ideographic space.
            pieces.append('\u3000')
        elif 33 <= code <= 126:
            # Printable ASCII shifts up by 0xFEE0 into the full-width block.
            pieces.append(chr(code + 65248))
        else:
            pieces.append(ch)
    return ''.join(pieces)


def zhpun_2_enpun(txt: str) -> str:
    '''
    Convert common Chinese punctuation in a string to English punctuation.
    '''

    # One C-level pass via str.translate for the common punctuation pairs.
    table = str.maketrans('，。！？：；【】（）《》“”‘’', ',.!?:;[]()<>""\'\'')
    return txt.translate(table)


def enpun_2_zhpun(txt: str, no_blank=False) -> str:
    '''
    Convert English punctuation in a string to Chinese punctuation.

    Double quotes alternate between opening “ and closing ”. Single quotes
    are left untouched because English apostrophes (possessives and
    contractions) appear singly and cannot be paired reliably.

    - no_blank: when True, also strip all spaces from the result.
    '''

    # BUG FIX: the original called next(cycle([...])) inside a helper that
    # built a NEW cycle on every call, and then passed its (constant) string
    # result to re.sub — so every double quote became '“'. Create the cycle
    # once and use a callable replacement so the quotes actually alternate.
    quote_cycle = cycle(['“', '”'])
    txt = re.sub(r'"', lambda _m: next(quote_cycle), txt)

    # Strip spaces from the string when requested.
    if no_blank:
        txt = txt.replace(' ', '')
    return txt


def has_upper_letter(txt: str) -> bool:
    '''
    Return True when the string contains at least one uppercase ASCII letter.
    '''

    if not txt.strip():
        return False

    # search() short-circuits on the first hit instead of building a full
    # findall() result list.
    return re.search(r'[A-Z]', txt) is not None


def has_lower_letter(txt: str) -> bool:
    '''
    Return True when the string contains at least one lowercase ASCII letter.
    '''

    if not txt.strip():
        return False

    # search() short-circuits on the first hit instead of building a full
    # findall() result list.
    return re.search(r'[a-z]', txt) is not None


def copy_json(file_1: str, file_2: str):
    '''
    Copy values from one JSON file into another whose keys differ but
    correspond positionally (e.g. multi-language variants). Keys are matched
    by position, so both files must have their keys in the same order.
    The result is written next to file_2 with a '_new' suffix.
    '''

    print('扫描中，请稍等……')
    src = read_json(file_1)
    dst = read_json(file_2)

    dst_keys = list(dst.keys())
    dst_count = len(dst_keys)

    for idx, src_key in enumerate(src.keys()):
        # Never copy the internal update-marker entry.
        if src_key == KEY_PHOENIX:
            continue
        # Skip blank source values and positions past the destination's end.
        if src[src_key].strip() == '' or idx >= dst_count:
            continue
        # Only fill destination slots that are still blank.
        target_key = dst_keys[idx]
        if dst[target_key].strip() == '':
            dst[target_key] = src[src_key]

    out_dir, out_name = path.split(file_2)
    write_json(path.join(out_dir, out_name[:-5] + '_new' + out_name[-5:]), dst)


def remove_escape(txt: str) -> str:
    '''
    Remove escape sequences from text so cloud translation APIs are not
    confused into skipping or mistranslating segments. Curly quotes are
    normalized to straight quotes.
    '''

    if not txt.strip():
        return txt

    # TODO this handling is fairly crude and needs further refinement.
    # Order matters: escaped quotes are unescaped before other sequences.
    replacements = (
        (r'\"', '"'),
        (r"\'", "'"),
        (r'\a', ''),
        (r'\b', ''),
        (r'\n', ''),
        (r'\v', ''),
        (r'\t', ''),
        (r'\r', ''),
        (r'\f', ''),
        ('‘', "'"),
        ('’', "'"),
        ('“', '"'),
        ('”', '"'),
    )
    for old, new in replacements:
        txt = txt.replace(old, new)
    return txt


def change_phoenix_mark(datas: dict, mark=False):
    '''
    Set the update-marker entry (KEY_PHOENIX) on the dict in place.
    Empty or None dicts are left untouched.
    '''

    if datas:
        datas[KEY_PHOENIX] = mark


def switch_change_mark(base=False, change=False) -> bool:
    '''
    Combine two change flags: the new flag wins when truthy,
    otherwise the existing base flag is kept.
    '''
    return change or base


def get_file_encoding(file_path: str) -> str:
    '''
    Detect the text encoding of a file.

    BOM signatures take priority; otherwise chardet's guess is used, falling
    back to utf-8 when the file cannot be read or detection is inconclusive.

    NOTE(review): the original's trailing `else` overwrote chardet's result
    with 'utf-8' for every non-BOM file, making the chardet call dead code;
    this restores chardet as the intended fallback.
    '''
    try:
        with open(file_path, 'rb') as f:
            raw_data = f.read()
    except OSError:
        # Unreadable/missing file: default to utf-8 (matches old behavior).
        return 'utf-8'

    # BOM checks take priority over statistical detection.
    if raw_data.startswith(b'\xef\xbb\xbf'):
        return 'utf-8-sig'
    if raw_data.startswith((b'\xff\xfe', b'\xfe\xff')):
        return 'utf-16'

    try:
        encoding = chardet.detect(raw_data)['encoding']
    except Exception:
        encoding = None
    # chardet may return None for undecidable content.
    return encoding or 'utf-8'


def is_renpy_translation_file(file_path: str) -> bool:
    '''
    Return True when the file looks like a Ren'Py translation file. This is a
    weak check: it only looks for a 'translate' identifier line and cannot
    handle some complex edge cases.

    - file_path: absolute path of the file
    '''

    # `with` guarantees the handle is closed even if reading raises (the
    # original used open/readlines/close and leaked the handle on error);
    # iterating the file also avoids loading every line into memory.
    with open(file_path, 'r', encoding=get_file_encoding(file_path)) as inp:
        for line in inp:
            # A translate identifier match means it is a translation file.
            if PATTERN_IDENTIFIER.match(line) is not None:
                return True
    return False


def get_config():
    '''
    Read config.ini from the program directory and populate GLOBAL_DATA.
    Silently returns when config.ini does not exist.
    '''

    ini_file = path.join(BASE_ABSPATH, 'config.ini')
    if not path.isfile(ini_file):
        return

    parser = ConfigParser()
    # Keep option names case-sensitive (default lower-cases them).
    parser.optionxform = lambda option: option
    parser.read(ini_file, encoding=get_file_encoding(ini_file))

    GLOBAL_DATA['pass_filter'] = (
        parser.get('filter_texts', 'pass_filter').upper().split(',')
    )

    cache = parser.getint('json_trans_tool', 'json_max_cache')
    if cache > 0:
        GLOBAL_DATA['json_max_cache'] = cache

    # All rpg_trans_tool list options share the same comma-split handling.
    for option in (
        'rpg_white_list',
        'rpg_duplicate_removal_list',
        'rpg_type_array_object',
        'rpg_script_regexp',
    ):
        GLOBAL_DATA[option] = parser.get('rpg_trans_tool', option).split(',')

    GLOBAL_DATA['rpy_update_old_abspath'] = parser.get(
        'rpy_update_tool', 'rpy_old_abspath'
    )
    GLOBAL_DATA['rpy_update_new_abspath'] = parser.get(
        'rpy_update_tool', 'rpy_new_abspath'
    )
    cache = parser.getint('rpy_update_tool', 'rpy_bap_max_cache')
    if cache > 0:
        GLOBAL_DATA['rpy_update_bap_max_cache'] = cache

    GLOBAL_DATA['rpy_trans_input_abspath'] = parser.get(
        'rpy_trans_tool', 'rpy_input_abspath'
    )
    cache = parser.getint('rpy_trans_tool', 'rpy_bap_max_cache')
    if cache > 0:
        GLOBAL_DATA['rpy_trans_bap_max_cache'] = cache

    GLOBAL_DATA['todo_mark'] = parser.get('common_settings', 'todo_mark') == '1'


# Populate GLOBAL_DATA from config.ini as a module-import side effect.
get_config()