import requests
from bs4 import BeautifulSoup
import os

def bd_cidian(zc):
    """Look up *zc* on Baidu Hanyu and print each dictionary/poem section
    found on the result page.

    Args:
        zc: word, character, or idiom (str) to query.

    Side effects: one HTTP GET to hanyu.baidu.com; results go to stdout.
    A '---------' separator is printed for every section (matching the
    original output layout); the section body is printed only when the
    corresponding element exists on the page.
    """
    url = 'https://hanyu.baidu.com/s#detailmean'
    r = requests.get(url, params={'wd': zc, 'ptype': 'zici'})
    r.encoding = 'utf-8'  # page is UTF-8; force it so r.text decodes correctly
    soup = BeautifulSoup(r.text, 'lxml')

    def show_div(div_id):
        """Print a separator, then the stripped text of div#div_id if present."""
        print('---------')
        node = soup.find('div', attrs={'id': div_id})
        if node is not None:
            print(node.get_text().strip())

    # Header block (word + basic info)
    show_div('word-header')

    # Pinyin: first <b> on the page (the original prints no separator here)
    pinyin = soup.find('b')
    if pinyin is not None:
        print(pinyin.get_text().strip())

    # Word / idiom sections
    show_div('basicmean-wrapper')    # basic meaning
    show_div('zuci-wrapper')         # compound words
    show_div('miyu-wrapper')         # riddles

    # Detailed meanings: one line per <li> entry
    print('---------')
    jiexi = soup.find('div', attrs={'id': 'detailmean-wrapper'})
    if jiexi is not None:
        for item in jiexi.find_all('li'):
            print(item.get_text().strip())

    show_div('source-wrapper')       # origin / source
    show_div('liju-wrapper')         # example sentences
    show_div('syn_ant_wrapper')      # synonyms / antonyms
    show_div('jielong-wrapper')      # idiom chain (idioms only)
    show_div('baike-wrapper')        # encyclopedia entry
    show_div('fanyi-wrapper')        # English translation

    print('----------------------------------------')

    # Poem sections (present when the query matches a poem)
    show_div('poem-detail-header')   # poem text + author line
    show_div('means_zhushi_div')     # translation / annotations
    show_div('poem-detail-shangxi')  # appreciation / commentary
    show_div('poem-detail-author')   # author introduction
# NOTE(review): the triple-quoted block below is dead code parked as a bare
# module-level string literal (it referenced `soup`, downloaded the
# pronunciation .mp3 via requests, and wrote it to the current directory).
# It has no runtime effect and is the only user of `import os`; either
# restore it inside bd_cidian or delete it. Kept byte-identical here.
'''
    duyin=soup.find('a',href='#')
    htmllist=soup.find_all('div',class_='tab-content')
    print(len(htmllist))
    for dd in htmllist:
        print(dd.get_text().strip())
    
    dyurl=duyin.get('url').strip()
    r_dy=requests.get(dyurl)
    pathname=os.getcwd()#当前路径
    yci=ci.get_text().strip()
    dyname=yci+'.mp3'
    with open(pathname+'/'+dyname,'wb') as f:
        f.write(r_dy.content)
        print('已保存读音文件')

    
    
    
'''

if __name__ == '__main__':
    # Simple interactive loop: prompt for a word and look it up.
    while True:
        print('----------------------------------------')
        print('百度汉语')
        try:
            input_ci = input('输入字词：')
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of dumping a traceback.
            break
        if input_ci:  # ignore empty input and re-prompt
            bd_cidian(input_ci)
