# -*- encoding: utf-8 -*-
# @Author: XieYinJie @ProjectName: 毕设爬虫 @DateTime: 2020/8/28 17:03

import requests
from lxml import etree
import random
from webSpiderFacility.HeadersAndProxy.SpiderHeaders import HEADERS_VALUES

# Baidu search URL template; {} is replaced by the query string.
url = 'https://www.baidu.com/s?wd={}'
# Candidate queries; one is chosen at random for each request.
searchLst = ['嘿嘿', 'Hadoop集群', 'xpath语法', '工作', '知乎', '你好', '哈哈', 'abcdefg']

# Smoke-test each User-Agent header: issue one search with a random query,
# then report the HTTP status, body length and the result titles scraped
# from the first page.
for userAgent in HEADERS_VALUES:
    searchStr = random.choice(searchLst)
    print(f'请求头：{userAgent}\n\t搜索 {searchStr}')
    # timeout prevents a stalled connection from hanging the whole run.
    html = requests.get(url.format(searchStr),
                        headers={'User-Agent': userAgent},
                        timeout=10)
    contents = html.text
    print(f'\t状态：{html.status_code}\n\t内容长度：{len(contents)}')
    htmlPage = etree.HTML(contents)
    # Each organic result sits in a div.c-container under #content_left.
    contentLeft = htmlPage.xpath('//div[@id="content_left"]/div[contains(@class, "c-container")]')
    # xpath() returns a list for node-set expressions; the guard is kept
    # from the original as a defensive check.
    if isinstance(contentLeft, list):
        # Result titles are the concatenated text of each container's h3/a.
        contentLst = ["".join(node.xpath('./h3/a//text()')).strip()
                      for node in contentLeft]
        print(f'\t获取到的 {contentLst}')
    else:
        print(type(contentLeft))

