import requests
import urllib.request

from lxml import etree


# Get related words for a query by scraping Baidu search results.

def getHTML(keyword):
    """Fetch the Baidu search result page for *keyword*.

    Parameters
    ----------
    keyword : str
        Search term; URL-encoding is handled by requests.

    Returns
    -------
    str
        The result page's HTML, decoded as UTF-8.

    Raises
    ------
    requests.RequestException
        On network failure or timeout.
    """
    # Browser-like headers so Baidu serves the normal result page
    # rather than a bot-detection response.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
    }
    # Let requests build and encode the query string. The original code
    # called urllib.parse.urlencode while only importing urllib.request,
    # which worked only because CPython's urllib.request happens to pull
    # in urllib.parse as a side effect.
    response = requests.get(
        'https://www.baidu.com/s',
        params={'wd': keyword},
        headers=headers,
        timeout=10,  # don't hang forever on a stalled connection
    )

    # Decode bytes -> str explicitly; the page is served as UTF-8.
    return response.content.decode("utf-8")


def processData(data):
    """Extract related-search terms from a Baidu result page.

    Parameters
    ----------
    data : str
        HTML of a Baidu search result page (as returned by getHTML).

    Returns
    -------
    list[str]
        Related keywords in page order; empty if the panel is missing.
    """
    html = etree.HTML(data)
    # Locate the related-words container. This XPath is tied to Baidu's
    # current page layout and will break if the markup changes.
    rows = html.xpath('//*[@id="con-ar"]/div[1]/div/div/div[2]/div')
    res = []
    for row in rows:
        # Each row holds up to four entries; each entry contains three
        # child divs — [image, name, description] — the 2nd is the name.
        for entry in row.xpath('./div'):
            name_div = entry.xpath('./div[2]')
            if not name_div:
                # Skip malformed entries instead of raising IndexError.
                continue
            texts = name_div[0].xpath('./a/text()')
            if texts:
                res.append(texts[0])
    return res


def getRelatedWords(keyword):
    """Convenience wrapper: search *keyword* on Baidu and return the
    related words extracted from the result page."""
    return processData(getHTML(keyword))


if __name__ == "__main__1":
    # 测试，搜索关键词，保存到文件
    data = getHTML(keyword="氨基酸")
    with open("test1.html", "w", encoding="utf-8") as f:
        f.write(data)
    print("finish")

if __name__ == "__main__2":
    # 测试，从文件中读取，并解析
    with open("test1.html", "r", encoding="utf-8") as f:
        x = f.read()
        res = processData(x)

if __name__ == "__main__":
    print("\n")
    keyword = "漂亮"
    print("搜索关键词为：", keyword)
    res = getRelatedWords(keyword=keyword)
    print("相关词为:", res)
    print("\n")