# -*- encoding: utf-8 -*-
# @Author: XieYinJie @ProjectName: 毕设爬虫 @DateTime: 2020/8/28 15:24
import random
import requests
from lxml import etree
# from webSpiderFacility.HeadersAndProxy import SpiderHeaders
import SpiderHeaders


class SearchBaiDu:
    """Scrape Baidu web-search result pages for titles, URLs and abstracts."""

    def __init__(self):
        # URL template: first {} is the search keyword, second {} is the
        # result offset (Baidu's `pn` parameter = page * 10).
        self.module_url = 'https://www.baidu.com/s?wd={}&pn={}'
        # Fallback headers used when the random-header helper returns nothing.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36 Edg/84.0.522.63'}

    def requestSearch(self, search, page, retries=3):
        """Fetch one result page of Baidu search results.

        :param search: keyword to search for
        :param page: zero-based page index (offset sent is page * 10)
        :param retries: remaining retry attempts on a non-200 response
        :return: page HTML as text, or None when every attempt fails

        Bug fixes vs. the original: the retry recursion's result was
        discarded (caller always got None on a non-200 first response),
        and there was no retry cap, so a persistently failing request
        recursed forever. A timeout is also added so a stalled connection
        cannot hang indefinitely.
        """
        headers = SpiderHeaders.random_header()  # pick a random request header
        print(headers)
        headers = headers if headers else self.headers  # fall back to default header
        html = requests.get(self.module_url.format(search, page * 10),
                            headers=headers, timeout=10)
        if html.status_code == 200:
            return html.text
        if retries > 0:
            # Propagate the retry's result (previously dropped).
            return self.requestSearch(search, page=page, retries=retries - 1)
        return None  # exhausted all retries

    # Parse the search-result list
    def parseInfoList(self, search, page=1):
        """Search Baidu and parse one page of results.

        :param search: keyword to search for
        :param page: one-based page number as seen by the caller
        :return: list of dicts with title/url/content/page/searchBy keys
                 (possibly empty), or None when the page could not be
                 fetched or parsed
        """
        page = page - 1  # convert to Baidu's zero-based page index
        htmlPage = self.requestSearch(search, page=page)
        if htmlPage is None:  # request failed after all retries
            return None
        htmlPage = etree.HTML(htmlPage)
        contentLeft = htmlPage.xpath('//div[@id="content_left"]/div[contains(@class, "c-container")]')
        contentsList = []
        if isinstance(contentLeft, list):
            for contents in contentLeft:
                title = "".join(contents.xpath('./h3/a//text()')).strip()  # result title text
                url = "".join(contents.xpath('./h3/a/@href'))  # result link
                info = "".join(contents.xpath('.//div[@class="c-abstract"]//text()'))  # abstract/summary
                if title and url:  # abstract may legitimately be empty
                    data = {
                        'title': title, 'url': url, 'content': info, 'page': page, 'searchBy': '百度'
                    }
                    contentsList.append(data)
            return contentsList
        else:
            return None


if __name__ == '__main__':
    # Pick a random keyword and run one Baidu search as a smoke test.
    keywords = ['我的大学', '生活', 'Python', '工作', '知乎', '你好', '哈哈', 'abcdefg']
    keyword = random.choice(keywords)
    print(f'搜索{keyword}')
    spider = SearchBaiDu()
    results = spider.parseInfoList(keyword)
    print(results)


