import urllib.request
import urllib.parse
import re
from bs4 import BeautifulSoup as bs


# 0
# 根据浏览器user-agent参数辨认，辨认各个浏览器

# 1
# 网络信息传输中会出现丢包现象，爬虫需考虑


# 2
# 请求由客户端发出，服务器接收

# 3
# 如此加入header信息
# request.addheaders = [('User-agent', client_version)]
# request.add_header()

# 4
# 代理：多一个中间人意味着多一层丢包可能

# 5
# get +post

# 6
# cookie鉴定登录

def main():
    """Fetch one Baidu Baike article and print every in-site 'view' link.

    For each anchor whose href matches 'view', prints the link text followed
    by the absolute URL built from the site root.
    """
    url = "http://baike.baidu.com/view/284853.htm"
    # Use the response as a context manager so the socket is closed
    # deterministically instead of leaking until GC.
    with urllib.request.urlopen(url) as response:
        html = response.read()
    soup = bs(html, 'html.parser')  # stdlib default parser, no extra dependency

    for each in soup.find_all(href=re.compile('view')):
        # join() is used instead of '+' concatenation: one pass, no
        # intermediate string objects.
        print(each.text, '->', ''.join(['http://baike.baidu.com', each['href']]))

def main2():
    """Search Baidu Baike for a user-supplied keyword and list the results.

    Prompts for a keyword, submits it to the site's search endpoint, then for
    every result link prints: result title, the linked page's first <h2> text
    (if the page has one), '->', and the result URL.
    """
    keyword = input('请输入关键词：')
    # urlencode turns {"word": keyword} into 'word=<percent-encoded value>'
    # for use as the search URL's query string.
    query = urllib.parse.urlencode({"word": keyword})
    # Context managers close each HTTP response promptly; the original left
    # every connection open (one leak per result in the loop below).
    with urllib.request.urlopen("http://baike.baidu.com/search/word?%s" % query) as response:
        html = response.read()
    soup = bs(html, "html.parser")

    for each in soup.find_all(href=re.compile("view")):
        # e.g. href="/view/615796.htm" -> http://baike.baidu.com/view/615796.htm
        url2 = ''.join(['http://baike.baidu.com', each['href']])

        # Fetch the linked article page itself.
        with urllib.request.urlopen(url2) as response2:
            html2 = response2.read()
        soup2 = bs(html2, 'html.parser')

        # Collect the pieces and join once at the end — same output as the
        # original chain of ''.join calls, without rebuilding the string.
        parts = [each.text]
        if soup2.h2:
            # e.g. <h2 class="block-title">目录</h2>
            parts.append(soup2.h2.text)
        parts.extend(['->', url2])
        print(''.join(parts))

# Script entry point: runs the interactive keyword-search demo.
# main() (the fixed-page link lister) is kept for manual invocation.
if __name__ == '__main__':
    main2()
