# -*- coding:utf-8 -*-

import requests,re

# Legacy card markup for one translated sentence; superseded by
# TRANSLATION_HTML_TEMPLATE below and not referenced anywhere in this file.
# Placeholders (str.format): {no}, {source_sen}, {translation_sen}, {vocabulary_sen}.
# NOTE(review): "aria-expended" is a typo for "aria-expanded" — kept as-is here
# since this template is unused.
TRANSLATION_HTML_TEMPLATE_old = '''
<div id="translationcontent-{no}" class="card">
    <div class="card-header">
        <span>{no}</span>&nbsp;<span>{source_sen}</span>
    </div>
    <div class="card-body">
        <ul class="list-group">
            <li class="list-group-item">{translation_sen}</li>
            <li class="list-group-item" data-bs-toggle="collapse" data-bs-target="#vocabulary-{no}" aria-expended="false" aria-controls="vocabulary-{no}">
                <span class="btn btn-outline-secondary btn-sm">vocabulary</span>
            </li>
            <div class="card collapse form-floating" id="vocabulary-{no}">
                <textarea class="form-control" style="height:200px" readonly>{vocabulary_sen}</textarea>
            </div>
        </ul>
    </div>
</div>'''
# Bootstrap card markup for one translated sentence.  str.format placeholders:
#   {no}              1-based sentence number (also used to build element ids)
#   {source_sen}      original source sentence
#   {translation_sen} translated sentence
#   {vocabulary_sen}  pre-rendered HTML vocabulary list
# Fix: the ARIA attribute is "aria-expanded" (was misspelled "aria-expended"),
# so assistive technology now receives the collapse state correctly.
TRANSLATION_HTML_TEMPLATE = '''        <div id="translationcontent-{no}" class="card">
            <div class="card-header">
                <span>{no}</span>&nbsp;<span>{source_sen}</span>
            </div>
            <div class="card-body">
                <ul class="list-group">
                    <li class="list-group-item">{translation_sen}</li>
                    <li class="list-group-item" >
                        <span class="btn btn-outline-secondary btn-sm" data-bs-toggle="collapse" data-bs-target="#vocabulary-{no}" aria-expanded="false" aria-controls="vocabulary-{no}" onclick="javascript:showOrHide('showOrHideVocabulary-{no}');">vocabulary</span>
                        <div class="card collapse form-floating" style="border:0;height:20px" id="vocabulary-{no}">
{vocabulary_sen}
                         </div>
                         <span id="showOrHideVocabulary-{no}" onclick="javascript:showOrHide('showOrHideVocabulary-{no}');" class="btn btn-outline-secondary btn-sm" data-bs-toggle="collapse" data-bs-target="#vocabulary-{no}" aria-expanded="false" aria-controls="vocabulary-{no}" style="display:none">close</span>
                    </li>
                </ul>
            </div>
        </div>'''
# ---------------------------------------------------------------------------
# Request headers sent with every dictionary lookup; a desktop-browser
# User-Agent keeps dict.youdao.com from rejecting the scraper.
HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
}


# Youdao Japanese dictionary endpoint ('%s' is substituted with the query word).
DICT_URL_JP = 'https://dict.youdao.com/result?word=%s&lang=ja'

#-------------ENGLISH begin----------------#
# Youdao English dictionary endpoints ('%s' is the word / sentence to look up).
DICT_URL = 'http://dict.youdao.com/w/%s/'
DICT_FOR_SENTENCE_URL = 'http://dict.youdao.com/w/%s/#keyfrom=dict2.top'
# Regexes applied to result pages AFTER newlines/tabs are stripped (see
# getWordMean / getSentenceMean), hence the literal-space '[ ]*?' gaps.
pattern_phonetic = '<span class="phonetic">(.+?)</span>'
pattern_dict = '<div class="trans-container">[ ]*<ul>[ ]*?(<li>.+?</li>)*[ ]*?</ul>'
pattern_dict_addition = '<p class="additional">\\[(.+?)\\]</p>[ ]*?</div>'
pattern_wordgroup = '<span class="contentTitle"><a class="search-js" href=".+?">(.+?)</a></span>[ ]*?(.+?)[ ]*?</p>'
pattern_sentence = '<div class="trans-container"> <p>(.+?)</p> <p>(.+?)</p>'
# Sentence segmentation.
from nltk.tokenize import sent_tokenize


def getsentencelist(context):
    """Split *context* into a list of sentences via NLTK's sent_tokenize."""
    sentences = sent_tokenize(context)
    return sentences

# Word tokenization.
from nltk.tokenize import word_tokenize


def getwordlist(sentences):
    """Split *sentences* into a list of word tokens via NLTK's word_tokenize."""
    tokens = word_tokenize(sentences)
    return tokens

# Stop-word lookup table (project-local constant module).
from .myconstantnlp import stopwordDict
# Porter stemming algorithm, e.g. ps.stem('running') -> 'run'.
from nltk.stem.porter import PorterStemmer

ps = PorterStemmer()


def psstem(word):
    """Return the Porter stem of *word*."""
    return ps.stem(word)

# Lemmatization: reduce an inflected word to its dictionary form.
from nltk.stem import WordNetLemmatizer

wn = WordNetLemmatizer()


def wnlem(word):
    """Return the WordNet lemma of *word*."""
    return wn.lemmatize(word)

# Lancaster stemming algorithm, e.g. ls.stem('multiply') -> 'multiply'.
from nltk.stem.lancaster import LancasterStemmer

ls = LancasterStemmer()


def lsstem(word):
    """Return the Lancaster stem of *word*."""
    return ls.stem(word)
# Second WordNet lemmatizer wrapper.
# NOTE(review): duplicates wnlem above — kept because callers in this file
# (processSentence) use this name.
from nltk.stem.wordnet import WordNetLemmatizer

wnl = WordNetLemmatizer()


def wnllem(word):
    """Return the WordNet lemma of *word* (same behaviour as wnlem)."""
    return wnl.lemmatize(word)

# Snowball ("Porter2") stemming algorithm for English,
# e.g. snowball_stemmer.stem('multiply') -> 'multipli'.
# Fix: removed a stray `snowball_stemmer.stem('multiply')` expression whose
# result was discarded — it only burned CPU at import time.
from nltk.stem import SnowballStemmer

snowball_stemmer = SnowballStemmer('english')

def getTargetList(exp, content):
    """Return every non-overlapping match of regex *exp* found in *content*.

    With one capture group the result is a list of strings; with several it
    is a list of tuples (standard ``re.findall`` semantics).
    """
    return re.findall(exp, content)

# Fetch the HTML of a word's definition page.
def getWebByWord(dicturlpattern:str,word:str):
    """GET ``dicturlpattern % word`` with the scraper HEADERS; return body text."""
    url = dicturlpattern % word
    return requests.get(url, headers=HEADERS).text
# Look up one word's definition.
def getWordMean(word):
    """Scrape phonetic, definitions and additional info for *word*.

    Returns a text fragment of the form ``<li>[...]</li>\\n[...]\\n<li>[...]</li>\\n``.
    NOTE(review): the ``%s`` formatting embeds the Python *repr* of each match
    list (brackets and quotes included); processSentence later strips those
    artifacts with chained ``.replace`` calls — do not "fix" this in isolation.
    """
    # Newlines/tabs are stripped so the single-line regex patterns can match.
    html = getWebByWord(DICT_URL,word).replace('\n','').replace('\t','').replace('\r','')
    text = '<li>%s</li>\n%s\n<li>%s</li>\n' % (getTargetList(pattern_phonetic,html),getTargetList(pattern_dict,html),getTargetList(pattern_dict_addition,html))
    # Word-group scraping is currently disabled (always the empty list),
    # so the loop below never runs.
    wordGroupList = []#getTargetList(pattern_wordgroup,html)#word groups
    for _e,_c in wordGroupList:
        #print('%s-%s' % (_e,_c.replace(' ','')))
        text = '{0}{1:<25}--->{2}\n'.format(text,_e,_c.replace(' ',''))
    return text
# Fetch the HTML of a sentence's translation page.
def getWebBySentence(dicturlpattern:str,sentence:str):
    """GET ``dicturlpattern % sentence``; return the response body text.

    Fix: this was a byte-for-byte duplicate of getWebByWord — now delegates
    to it so the request logic (headers, URL formatting) lives in one place.
    """
    return getWebByWord(dicturlpattern, sentence)
# Translate one sentence.
def getSentenceMean(dicturlpattern:str,sentence:str):
    """Scrape every (source, translation) pair for *sentence* and return the
    translations joined together, one per line (newline-terminated)."""
    # Strip newlines/tabs so the single-line pattern_sentence regex matches.
    page = getWebBySentence(dicturlpattern, sentence)
    page = page.replace('\n', '').replace('\t', '').replace('\r', '')
    pairs = getTargetList(pattern_sentence, page)
    return ''.join('%s\n' % pair[1] for pair in pairs)
'''分解一个句字的单词'''
# Accumulated per-sentence records ({'sen', 'cn_sen', 'phonetic', 'paraphrase'})
# used to build an Excel export.  NOTE(review): module-level and never cleared,
# so it keeps growing across successive mainProcessOfEnglish calls.
senList = []
# Character repeated to draw the '----sentence----' divider in text output.
divide_mark = '-'
def processSentence(sentence):
    '''
    Look up one sentence: translate it and build a vocabulary list for its
    non-stop-word tokens.  Result structure (a dict):
    resultData = {
            dataDict:{source_sen:"source",translation_sen:"translate",vocabulary_sen:"cihui"},
            dataTxt:'text',
            dataExcel:senList
    }
    Side effect: appends one record to the module-level senList.
    '''
    # Words already looked up in this sentence (token -> occurrence count),
    # so duplicates are skipped.
    word_record = {}
    text = divide_mark*25+'sentence'+divide_mark*25
    text = '%s\n%s' % (text,sentence)
    tokens = getwordlist(sentence)
    text = '%s\n%s' % (text,'*'*10+'words'+'*'*10)
    # Sentence + definitions accumulated for the Excel export.
    excel_sentence = ''
    vocabularyElement = ''
    for token in tokens:
        # Strip quote characters the tokenizer may leave on tokens.
        token = token.replace("'","").replace('"','')
        if word_record.get(token) != None :
            word_record[token] += 1
            continue
        # First sighting: record the token so repeats are skipped.
        else :
            word_record[token] = 1
        if not stopwordDict.__contains__(token.lower()):
            _word = wnllem(token)            # lemma (dictionary form)
            ps_word = psstem(token)          # Porter stem
            vocabularyElement = '%s\n<ul><li>%s</li>' % (vocabularyElement,'{0:<20}--->{1:<20}|{2}'.format(token,ps_word,_word))
            #text = '%s\n%s' % (text,'{0:<20}{1:>6}--->{2:<20}|{3}'.format(token,ps_word,_word))
            # NOTE(review): this appends the WHOLE accumulated vocabularyElement,
            # so text repeats earlier entries on every new word — looks like a
            # bug; confirm intended output before changing.
            text = '%s\n%s' % (text,vocabularyElement)
            # Accumulate for the Excel export.
            excel_sentence = '%s\n%s' % (excel_sentence,'{0:<20}--->{1:<20}|{2}'.format(token,ps_word,_word))
            wordMean = getWordMean(_word)
            # An (almost) empty result looks like "<li>[]</li>\n[]\n<li>[]</li>\n";
            # fall back to looking up the Porter stem instead of the lemma.
            if len(wordMean)<7: #wordMean-->[,],\n,[,],\n
                wordMean = getWordMean(ps_word)
            vocabularyElement = '%s\n%s</ul><hr>' % (vocabularyElement,wordMean)
            text = '%s\n%s' % (text,wordMean)
            # Accumulate for the Excel export.
            excel_sentence = '%s\n%s' % (excel_sentence,wordMean)
    # English -> Chinese translation of the whole sentence.
    cn_sentence = getSentenceMean(DICT_FOR_SENTENCE_URL,sentence)
    # Record for the Excel export (phonetic column not populated yet).
    senDict = {'sen':sentence,'cn_sen':cn_sentence,'phonetic':'','paraphrase':excel_sentence}
    senList.append(senDict)
    # The chained replaces strip the Python list-repr artifacts ("['", "']", ...)
    # that getWordMean embeds via %s formatting.
    dataDict = {"source_sen":sentence,"translation_sen":cn_sentence,"vocabulary_sen":vocabularyElement.replace("['",'').replace("']",'').replace("[]",'').replace("]', '[",'], [')}
    return {'dataDict':dataDict,'dataTxt':text+cn_sentence+'\n','dataExcel':senList}

# Main entry point for querying English text.
def mainProcessOfEnglish(sentences:str):
    '''
    Split *sentences* into individual sentences, look each one up, and return
    a list of dict objects:
    resultData = [
        {source_sen:"source",translation_sen:"translate",vocabulary_sen:"cihui"},
        ...
    ]
    Fixes: removed the unused local ``resultData``, replaced the manual
    sentence counter with ``enumerate``, and dropped the commented-out
    result.txt test code.
    '''
    dataDictList = []
    output_text = ''
    for sentenceNo, sen in enumerate(getsentencelist(sentences), start=1):
        sentenceDict = processSentence(sen)
        dataDictList.append(sentenceDict['dataDict'])
        # Inject the 1-based sentence number into the plain-text divider
        # ("----sentence----" -> "----sentence N----").
        # NOTE(review): output_text is built but never returned; it previously
        # fed a (now removed) dump to result.txt.
        output_text = output_text + sentenceDict['dataTxt'].replace('sentence'+divide_mark,'sentence '+str(sentenceNo)+divide_mark)
    return dataDictList
#-------------ENGLISH end----------------#

if __name__ == "__main__":
    # Manual smoke test: performs live lookups against dict.youdao.com
    # (requires network access); result is discarded.
    #print(TRANSLATION_HTML_TEMPLATE.format(no="15",source_sen="Bank of England launches biggest interest rate hike in 27 years, predicts lengthy recession.",translate_sen="zhongwen",vocabulary="cihui"))
    mainProcessOfEnglish('Having trouble? We’d like to help!')