import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup

def get_url():
    """Prompt the user for a keyword and build the Baidu Baike item URL.

    Returns:
        str: URL of the form ``https://baike.baidu.com/item/<quoted keyword>``.
    """
    # input() already returns str — the original wrapped it in a redundant str().
    keyword = input('请输入关键词：')
    # quote() percent-encodes the (typically CJK) keyword for use in the path.
    return 'https://baike.baidu.com/item/' + urllib.parse.quote(keyword)
def open_url(url):
    """Download *url* with a desktop-browser User-Agent and parse the HTML.

    Args:
        url (str): Page address to fetch.

    Returns:
        BeautifulSoup: Parsed document (``html.parser`` backend).
    """
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/79.0.3945.88 Safari/537.36')
    req = urllib.request.Request(url, headers={'User-Agent': user_agent})
    # Context manager closes the connection; the original leaked the response.
    with urllib.request.urlopen(req) as response:
        html_doc = response.read()
    return BeautifulSoup(html_doc, 'html.parser')
def get_ct(soup):
    """Extract the disambiguation link, sense count and page metadata.

    Args:
        soup: Parsed Baidu Baike item page (BeautifulSoup).

    Returns:
        tuple: ``(ct_url, yx, tag_1, tag_2)`` — the absolute URL of the
        disambiguation page, the number of senses ("义项") as an int, the
        list of ``<a target="_blank">`` tags, and the str() of the fourth
        ``<meta>`` tag (the page description).
    """
    anchors = soup.find_all('a', target="_blank")
    # NOTE(review): index 38 is position-dependent on Baike's markup — verify.
    marker = str(anchors[38])
    # Match '/item...' up to '" target=', then strip the 9 trailing chars
    # of the match ('" target=') to keep just the relative path.
    rel_path = re.search(r'/item.+" target=', marker).group()
    ct_url = 'https://baike.baidu.com' + rel_path[:-9]
    # '共N个义项' -> N (drop leading '共' and trailing '个义项').
    sense_text = re.search(r'共.+个义项', marker).group()
    yx = int(sense_text[1:-3])
    metas = soup.find_all('meta')
    return ct_url, yx, anchors, str(metas[3])

def print_yx(ct_url, yx):
    """Print every sense ("义项") link from the disambiguation page, 10 at a time.

    Args:
        ct_url (str): Absolute URL of the disambiguation page.
        yx (int): Number of senses listed on that page.

    Bug fixed: the original reassigned its int counter ``t`` to the str
    returned by input(), so the next ``t < 10`` raised TypeError, which a
    blanket ``except TypeError: pass`` silently swallowed on every remaining
    iteration; the "continue" path also dumped all remaining links at once
    instead of the next batch of 10.
    """
    # Close the connection when done (the original leaked the response).
    with urllib.request.urlopen(ct_url) as response:
        html_doc = response.read()
    soup = BeautifulSoup(html_doc, 'html.parser')

    tags = soup.find_all('a', target="_blank")
    links = []
    # NOTE(review): offset 38 mirrors get_ct's position-dependent index.
    for i in range(yx):
        rel_path = re.search(r'/item.+" target=', str(tags[38 + i])).group()
        # ' -> ' separator made consistent with print_all's output.
        links.append(str(tags[38 + i].string)
                     + ' -> https://baike.baidu.com' + rel_path[:-9])

    shown = 0
    for link in links:
        if shown == 10:
            # Pause every 10 links; 'q' aborts, anything else continues.
            answer = input('请输入任何字符继续打印，输入q退出：')
            if answer == 'q':
                return
            shown = 0
        print(link)
        shown += 1

def print_all():
    """Entry point: look up a keyword, print its description and sense links.

    Prints '百度百科尚未收录词条' when the expected tags are missing, which
    indicates the keyword has no Baike entry.
    """
    try:
        url = get_url()
        soup = open_url(url)
        # Tuple unpacking instead of the original's manual index access.
        ct_url, yx, tag_1, tag_2 = get_ct(soup)
        # tag_2 is str(<meta ... content="...">); the fixed slice strips the
        # surrounding markup to leave the description text.
        print(tag_2[15:-22] + '\n\n\n下边打印相关链接：')
        print(str(tag_1[38].string) + ' -> ' + ct_url)
        print_yx(ct_url, yx)
    except IndexError:
        # Raised by the magic-index lookups when the page lacks the tags.
        print('百度百科尚未收录词条')
    



# Guard the entry point so importing this module has no side effects.
if __name__ == '__main__':
    print_all()

