# -*- encoding: utf-8 -*-
# @Author: XieYinJie @ProjectName: 毕业设计非flask环境 @DateTime: 2020/9/2 14:07

from webSpiderFacility.HeadersAndProxy import SpiderHeaders
import requests
from lxml import etree
import random


class SearchBiYing:
    """Scrape Bing (cn.bing.com) search results for a query.

    Typical use: ``parseInfoList(search, page)`` fetches one result page and
    returns a list of dicts with keys ``title``/``url``/``content``/``searchBy``.
    """

    # Maximum consecutive failed requests before giving up.
    _MAX_FAILURES = 5

    def __init__(self):
        # Placeholders: query, time-filter fragment, 1-based result offset.
        self.module_url = 'https://cn.bing.com/search?q={}{}first={}'
        # Fallback User-Agent used when SpiderHeaders has nothing to offer.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36 Edg/84.0.522.63'}
        # "Past year" filter fragment; dropped (replaced by plain '&') after
        # two consecutive failed requests.
        self.modle = '&filters=ex1%3a"ez5_18142_18507"&'
        self.count = 0      # consecutive failed-request counter
        self.nextPage = ''  # absolute "next page" URL, filled in by parseData

    def requestsSearch(self, search, page):
        """Fetch one Bing result page for *search*.

        Returns the HTML text on success, or None after repeated failures.
        Retries recursively, loosening the time filter after two failures.
        """
        if self.count == self._MAX_FAILURES:
            # BUG FIX: the original reset the counter and kept retrying,
            # which recursed without bound on persistent failure.
            self.count = 0
            return None
        headers = SpiderHeaders.random_header()  # random request header
        headers = headers if headers else self.headers
        # After two failures, drop the "past year" filter and search broadly.
        self.modle = self.modle if self.count < 2 else '&'
        html = requests.get(self.module_url.format(search, self.modle, page * 10 + 1), headers=headers)
        if html.status_code == 200:
            self.count = 0  # success resets the consecutive-failure counter
            return html.text
        self.count += 1
        # BUG FIX: the original dropped the recursive result, so a
        # successful retry still returned None to the caller.
        return self.requestsSearch(search, page)

    def parseData(self, htmlPage):
        """Parse a Bing results page into a list of result dicts.

        Also records the "next page" link in ``self.nextPage``.
        Returns None when *htmlPage* is empty/None (failed fetch).
        """
        if not htmlPage:
            # BUG FIX: etree.HTML(None) raises; a failed fetch now yields None.
            return None
        htmlPage = etree.HTML(htmlPage)
        contentLists = htmlPage.xpath('//ol[@id="b_results"]/li')  # result items
        dataLists = []
        self.nextPage = 'https://cn.bing.com' + "".join(htmlPage.xpath('//li/a//div[contains(text(), "下一页")]/../@href'))
        if isinstance(contentLists, list):
            for content in contentLists:
                title = "".join(content.xpath('.//h2/a//text()'))  # result title
                url = "".join(content.xpath('.//h2/a/@href'))      # result URL
                info = "".join(content.xpath('./div[@class="b_caption"]//p//text()'))
                if title and url:
                    data = {
                        'title': title, 'url': url, 'content': info, 'searchBy': '必应'
                    }
                    dataLists.append(data)
            return dataLists
        else:
            return None

    def parseInfoList(self, search, page=1):
        """Fetch and parse result page *page* (1-based) for *search*."""
        page = page - 1  # convert to 0-based offset for requestsSearch
        htmlPage = self.requestsSearch(search, page)
        datas = self.parseData(htmlPage)
        return datas

    def nextPageDef(self, flag=0):
        """Fetch the page stored in ``self.nextPage``.

        Returns the HTML text, or None if both the first attempt (flag=0)
        and one retry (flag=1) fail.
        """
        headers = SpiderHeaders.random_header()
        headers = headers if headers else self.headers
        # BUG FIX: headers were passed positionally into requests.get's
        # `params` argument, so no custom headers were ever sent.
        htmlPage = requests.get(self.nextPage, headers=headers)
        if htmlPage.status_code == 200:
            return htmlPage.text
        if flag == 0:
            # BUG FIX: retry exactly once and propagate its result; the
            # original recursed unconditionally and dropped the return value.
            return self.nextPageDef(flag=1)
        return None


if __name__ == '__main__':
    # Candidate queries; a fixed one is used below.
    candidate_queries = ['我的大学', '生活', 'Python', '工作', '知乎', '你好', '哈哈', 'abcdefg']
    # query = random.choice(candidate_queries)  # alternative: pick at random
    query = '哈哈'
    print(f'搜索{query}')

    spider = SearchBiYing()
    results = spider.parseInfoList(query, page=2)
    print(results)
