# Baidu general-purpose translation API client. Dictionary lookup, TTS speech
# synthesis and similar resources are NOT included; contact
# translate_api@baidu.com for those services.
# coding=utf-8

import hashlib
import http.client
import json
import math
import os
import random
import re
import urllib
import urllib.parse

from bs4 import BeautifulSoup

import HOL4inWeb.password


def translate_api(q, appid=HOL4inWeb.password.RP_BDtranslate_ID, secretKey=HOL4inWeb.password.RP_BDtranslate_key, httpClient = None, myurl = '/api/trans/vip/translate', fromLang = 'auto', toLang = 'zh'):
    """Call the Baidu general translation API and return its parsed JSON reply.

    Parameters:
        q: text to translate.
        appid / secretKey: Baidu API credentials (defaults come from the
            project password module).
        httpClient: ignored placeholder kept for interface compatibility;
            a fresh HTTPConnection is always created.
        myurl: API endpoint path.
        fromLang / toLang: source and target language codes.

    Returns the decoded JSON response dict, or None when the request or the
    JSON decode fails (the exception is printed, not re-raised).
    """
    # Bug fix: without this initialisation, a failed request left `result`
    # unbound and `return result` raised UnboundLocalError.
    result = None
    salt = random.randint(32768, 65536)
    # Signature per the Baidu API spec: MD5(appid + query + salt + secret).
    sign = hashlib.md5((appid + q + str(salt) + secretKey).encode()).hexdigest()
    myurl = (myurl + '?appid=' + appid + '&q=' + urllib.parse.quote(q)
             + '&from=' + fromLang + '&to=' + toLang
             + '&salt=' + str(salt) + '&sign=' + sign)
    try:
        httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
        httpClient.request('GET', myurl)
        # response is an HTTPResponse object.
        response = httpClient.getresponse()
        result = json.loads(response.read().decode("utf-8"))
    except Exception as e:
        print(e)
    finally:
        if httpClient:
            httpClient.close()
    return result

# print(translate_api('balance'))

def translate_migate(text):
    """Translate *text* via translate_api and join all segments with newlines.

    Each entry of the API's 'trans_result' list contributes its 'dst'
    (translated) field; segments are separated by '\\n' with no trailing
    newline, exactly as the original manual loop produced.

    Raises if translate_api returned None or an error payload without a
    'trans_result' key (caller translate_split catches this).
    """
    t = translate_api(text)
    # '\n'.join replaces the original enumerate/append loop and its
    # quadratic string concatenation.
    return '\n'.join(each['dst'] for each in t['trans_result'])

def translate_split(text):
    """Translate *text* while copying '{* ... *}' example blocks through verbatim.

    Text outside the markers is sent to translate_migate; each marked block is
    appended to the output untranslated.  Any exception (including failures
    from the translation API) is swallowed: the failure is printed and
    whatever has been accumulated so far is returned.
    """
    result = ''
    try:
        if text.find('{*') >= 0:
            # Translate the prefix before the first block, copy the first
            # block verbatim, then drop both from the working text.
            result += translate_migate(text[:text.find('{*')])
            result += text[text.find('{*'):text.find('*}')+2]
            text = text[text.find('*}')+2:]
            # NOTE: '> 0' (not '>= 0') — a block starting at offset 0 of the
            # remaining text ends the loop; the trailing `else` then handles
            # what is left.  Preserved as-is from the original logic.
            while text.find('{*') > 0:
                result += translate_migate(text[:text.find('{*')])
                if text.find('*}') > 0:
                    # Well-formed block: copy it plus a newline, advance past it.
                    result += text[text.find('{*'):text.find('*}')+2] + '\n'
                    text = text[text.find('*}')+2:]
                else: 
                    # Unterminated block: fall back to a '\n*' boundary and
                    # close the marker with '}' ourselves, or copy the rest
                    # of the text verbatim and stop.
                    if text.find('\n*') > 0:
                        result += text[text.find('{*'):text.find('\n*')+2] + '}\n'
                        text = text[text.find('\n*')+2:]
                    else:
                        result += text[text.find('{*'):]
                        text = []  # empty list: ends the while loop; len()==0 below
            else:
                # while/else: runs when the loop condition goes false (no
                # break exists).  Translate any plain-text tail.
                if len(text) > 0:
                    result += translate_migate(text)
        else:
            # No protected blocks at all: translate the whole text.
            result += translate_migate(text)
    except:
        print("translate fail:",text)
    return result

def translate_premium(text):
    """Translate *text* while protecting '%*...*%' spans from the translator.

    Every distinct '%*...*%' span is replaced by a purely numeric placeholder
    (digits pass through machine translation unchanged), the text is run
    through translate_split, and the placeholders are swapped back afterwards.

    Returns the translated text with all protected spans restored.
    """
    # Distinct protected spans; set() dedupes so each span maps to exactly
    # one placeholder.  (Removed: unused CHR_START constant and dead
    # commented-out code from the original.)
    func = list(set(re.findall(r'%\*.*?\*%', text)))
    # '40082020' prefix + zero-padded index, same scheme as before.
    # NOTE(review): the pad width ceil(len/10) only grows at 10x steps —
    # presumably enough for the observed span counts; verify for large pages.
    alphabet = ['40082020' + str(x).zfill(math.ceil(len(func) / 10))
                for x in range(len(func))]
    for placeholder, span in zip(alphabet, func):
        text = text.replace(span, placeholder)
    result = translate_split(text)
    for placeholder, span in zip(alphabet, func):
        result = result.replace(placeholder, span)
    return result

def get_a_text(url):
    """Parse a HOL4 Docfiles HTML page into a dict of named sections.

    Parameters:
        url: filesystem path to the HTML file (despite the name it is read
             with open(), not fetched over the network).

    Returns a dict mapping section names (STRUCTURE, SYNOPSIS, LIBRARY, ...)
    to their text, or None for absent sections, plus:
        'SEEALSO'  -> dict {link text: path joined onto url's directory}
        'DATATIME' -> the file's modification time (st_mtime)
    """
    attributes = ['STRUCTURE', 'SYNOPSIS', 'LIBRARY', 'DESCRIPTION', 'FAILURE', 'EXAMPLE', 'COMMENTS', 'USES', 'SEEALSO'][::-1]
    soup = BeautifulSoup(open(url), "html.parser")
    # b = re.sub(r'\s+', ' ', b)
    try:
        # Wrap bracketed code spans as '%*...*%' so translate_premium shields
        # them; newlines/spaces are encoded as '/n\' and '/b\' to survive the
        # whitespace normalisation below.
        brkt = soup.find_all(class_='BRKT')
        for br in brkt:
            br.string = '%%*%s*%%'%br.text.replace('\n', '/n\\').replace(' ', '/b\\')
        # Wrap example blocks as '{*...*}' so translate_split copies them verbatim.
        xmpl = soup.find_all(class_='XMPL')
        for xm in xmpl:
            xm.string = '{*%s*}'%xm.text.replace('\n', '/n\\').replace(' ', '/b\\')
        # Tag each section header as '$*NAME*$' for the positional search below.
        field_name = soup.find_all(class_='FIELD-NAME')
        for fn in field_name:
            fn.string = '$*%s*$'%fn.text
    except:
        print('no brkt or xmpl:%s'%url)
    # Collapse all whitespace to single spaces, turn remaining runs into
    # newlines, then restore the encoded newlines/spaces of protected blocks.
    b = re.sub('\s\s+', '\n', re.sub('\s', ' ', soup.body.dl.text)).replace('/n\\', '\n').replace('/b\\', ' ')
    
    # First pass: record the start offset of each '$*NAME*$' marker in b
    # (0 when the section is missing).
    sortResult = {}
    for x in attributes:
        if re.search('\$\*%s\*\$'%x, b) and re.search('\$\*%s\*\$'%x, b).span()[0] >= 0:
            sortResult[x] = re.search('\$\*%s\*\$'%x, b).span()[0]
        else:
            sortResult[x] = 0
    # Process sections from the end of the document backwards so that
    # truncating b after extracting a section cannot disturb earlier offsets.
    attributes = [x[0] for x in sorted(sortResult.items(), key=lambda x: x[1], reverse=True)]
    for x in attributes:
        if re.search('\$\*%s\*\$'%x, b) and re.search('\$\*%s\*\$'%x, b).span()[0] >= 0:
            # Section text runs from just past the marker to the end of b;
            # then cut b off at the marker for the next (earlier) section.
            sortResult[x] = b[re.search('\$\*%s\*\$'%x, b).span()[1]:].strip()
            b = b[:re.search('\$\*%s\*\$'%x, b).span()[0]]
        else:
            sortResult[x] = None
    # Resolve SEEALSO link texts to files next to the current document.
    seealsoDir = {}
    if sortResult['SEEALSO']:
        href = soup.find_all(href=re.compile('.*'))
        for hr in href:
            if hr.text and hr.text in sortResult['SEEALSO']:
                seealsoDir[hr.text]=os.path.join(os.path.dirname(url), hr['href'])
    sortResult['SEEALSO'] = seealsoDir
    # Fold a LIBRARY name that differs from STRUCTURE into SYNOPSIS as a
    # protected '%*...*%' note so it survives translation.
    if sortResult['LIBRARY'] and sortResult['LIBRARY'] != sortResult['STRUCTURE']:
        sortResult['SYNOPSIS'] += '%%* LIBRARY: %s *%%'%sortResult['LIBRARY']
    sortResult['DATATIME'] = os.stat(url).st_mtime
    return sortResult

if __name__ == "__main__":
    # Smoke test: parse one HOL4 doc page and translate its DESCRIPTION.
    # [7:] strips the leading 'file://' scheme to leave a filesystem path.
    url = 'file:///home/satone/HOL/help/Docfiles/HTML/bossLib.wlog_tac.html'[7:]
    sortResult = get_a_text(url)
    print(sortResult)
    result = translate_premium(sortResult['DESCRIPTION'])
    print(result)