# -*- coding: utf-8 -*-
import json
import re
import sys
import urllib.parse

import requests
from bs4 import BeautifulSoup

class SearchKey:
    """Scrape Baidu News search results and collect article URLs for a keyword."""

    def __init__(self):  # search keyword, engine
        # self.tag = tag  # keyword
        # self.engines = engines  # engine
        # Browser-like headers so Baidu serves the regular HTML page.
        # BUG FIX: the key used to be 'User-Agent:' (stray colon), which is
        # not a valid header name, so the UA was never sent correctly.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, compress',
            'Accept-Language': 'en-us;q=0.5,en;q=0.3',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
        }  # header dict, disguised as a browser

    def GetBaiDuUrlList(self, tag):
        """Return a de-duplicated list of article URLs from Baidu News for *tag*.

        Pages through the results 10 at a time; a page with fewer than
        9 hits is treated as the last page. Returns [] if the result
        counter cannot be found (e.g. blocked or zero results).
        """
        headers = self.headers
        quoted_tag = urllib.parse.quote_plus(tag)
        url = ('http://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news'
               '&rsv_dl=ns_pc&pn=0&word=' + quoted_tag)
        # BUG FIX: requests.get's 2nd positional argument is `params`, not
        # headers -- the headers dict was being sent as query parameters.
        # Pass it by keyword so the headers are actually transmitted.
        response = requests.get(url, headers=headers, timeout=5)
        page_html = response.text
        # Total result count, e.g. '1,234' inside <span class="nums">...</span>.
        nums_pattern = r'<span class="nums">.+?((\d+,)*\d+).+?</span>'
        match = re.search(nums_pattern, page_html)
        if match is None:
            # No counter on the page: nothing to paginate over.
            return []
        total_results = int(match.group(1).replace(",", ""))
        url_list = []
        page_index = 0
        while page_index <= total_results:
            page_url = ('http://www.baidu.com/s?rtt=4&bsst=1&cl=2&tn=news'
                        '&rsv_dl=ns_pc&pn=' + str(page_index) +
                        '&word=' + quoted_tag)
            page_response = requests.get(page_url, headers=headers, timeout=5)
            all_html = page_response.text
            # Restrict parsing to the result-list region of the page.
            dom_begin = all_html.find('<div id="content_left">')
            dom_end = all_html.find('<div id="gotoPage">')
            soup = BeautifulSoup(all_html[dom_begin:dom_end], 'lxml')
            title_tags = soup.find_all("h3", class_="c-title")
            for title in title_tags:
                # BUG FIX: de-duplicate on the extracted href string. The
                # original compared the bs4 Tag object against a list of
                # strings, so the membership test never matched and every
                # URL was appended, duplicates included.
                href = title.find('a').get('href')
                if href not in url_list:
                    url_list.append(href)
            if len(title_tags) < 9:
                # Short page => last page of results.
                break
            page_index += 10
        return url_list


if __name__ == "__main__":
    # Smoke test: fetch result URLs for a sample query and report how many.
    searcher = SearchKey()
    result_urls = searcher.GetBaiDuUrlList("百度上市")
    print(len(result_urls))

# # user1 = User('tom huang','19930101')
# print(user1.age())
# print(user1.names,user1.names2,user1.shengris)