# -*- coding=utf-8 -*-
import requests
import re
#from bs4 import BeautifulSoup

# Browser-like request headers so dict.cn serves the normal HTML page.
HEADERS = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
          ,'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' # content types the client can accept
          ,'Accept-Language': 'en-US,en;q=0.5' # languages the browser accepts
          #,'Connection': 'keep-alive' # whether a persistent connection is required
          }
# Regexes for scraping the (whitespace-flattened) dict.cn result page.
pattern_word_cont = '<h1 class="keyword" tip="音节划分：(.+?)">'  # headword with syllable split
pattern_word_phonetic = '<bdo lang="EN-US">(.+?)</bdo>'  # phonetic notation
#pattern_dict_basic = '<ul class="dict-basic-ul">([\s\S]*)<span>(.+?)</span>([\s\S]*)<strong>(.+?)</strong>([\s\S]*)</ul>'
pattern_dict_basic = '<li><span>(.+?)</span><strong>(.+?)</strong></li>'  # (part of speech, meaning) pairs
pattern_dict_basic_addition = '<li><strong>(.+?)</strong></li>'  # meaning-only entries
pattern_dict_detail = ''
# Compile an expression into a regex pattern object.
def getRePattern(_express):
    """Return the compiled regex pattern for *_express*."""
    compiled = re.compile(_express)
    return compiled

# Use the compiled pattern to collect every matching string from the source text.
def getTargetList(_express,_source_str):
    """Return all non-overlapping matches of *_express* found in *_source_str*."""
    return re.compile(_express).findall(_source_str)
def getHaiciPageByWord(word):
    """Fetch the dict.cn search page for *word* and return its HTML text."""
    url = 'https://dict.cn/search?q=' + word
    response = requests.get(url, headers=HEADERS)
    return response.text

def generateFile(text):
    """Write *text* to 'web.html' as UTF-8 bytes.

    Best-effort: any failure is printed rather than raised, matching the
    original behaviour. Uses a context manager so the handle is always
    closed -- the old ``finally: _file.close()`` raised NameError when
    ``open()`` itself failed, because ``_file`` was never bound.
    """
    try:
        # Binary mode + explicit encode keeps newlines untranslated,
        # exactly like the original 'wb' write.
        with open('web.html', 'wb') as out:
            out.write(text.encode('utf-8'))
    except Exception as ex:
        print('generateFile exception::: ' + str(ex))
def readFile(_file):
    """Read *_file* and return its contents decoded as UTF-8.

    Returns None (after printing the error) when the file cannot be
    opened or decoded. The old version raised NameError from its
    ``finally`` block when ``open()`` failed, because ``_f`` was never
    bound; the ``with`` statement fixes the leak and the crash.
    """
    try:
        with open(_file, 'rb') as fh:
            return fh.read().decode('utf-8')
    except Exception as ex:
        print('readFile exception::: ' + str(ex))
        return None
#import pprint
def getWordMean(word):
    """Look up *word* on dict.cn and return a three-part text summary.

    The result is: the syllable-split headword list, the phonetic list,
    and the basic dictionary entries, each joined with newlines.
    """
    # Flatten the page onto one line so the single-line regexes can match.
    page = getHaiciPageByWord(word).replace('\t','').replace('\n','').replace('\r','')
    entries = getTargetList(pattern_dict_basic, page)
    extras = getTargetList(pattern_dict_basic_addition, page)
    # One entry per line; basic pairs first, then the meaning-only extras.
    body = ''.join(str(item) + '\n' for item in entries + extras)
    headword = str(getTargetList(pattern_word_cont, page)).replace('&#183;', "'")
    phonetic = str(getTargetList(pattern_word_phonetic, page))
    return '%s\n%s\n%s' % (headword, phonetic, body)


if __name__ == '__main__':
    # Demo lookup; report any failure instead of letting it crash.
    try:
        print(getWordMean('run'))
    except Exception as ex:
        print('__main__::: ' + str(ex))