# coding: utf-8
# 文件名称: baidu_spider.py
# 创建时间: 2021/6/5 11:06

"""
需求；
1. 采集某个关键词前10页的搜索结果
"""
import requests
from lxml import etree
import time
import random
import re
from bs4 import BeautifulSoup

# Sample search URL (the wd= value is KEY, URL-encoded).
# NOTE(review): appears unused — spider() builds its own URLs; kept for reference.
url  = "https://www.baidu.com/s?ie=UTF-8&wd=%E9%A4%90%E9%A5%AE%E5%BA%97%E5%A6%82%E4%BD%95%E5%BC%95%E6%B5%81"
# Search keyword: "how does a restaurant attract customers".
KEY = '餐饮店如何引流'
# Request headers: desktop Chrome UA plus a captured Baidu session cookie —
# presumably needed so Baidu serves normal result pages instead of an
# anti-bot verification page; TODO confirm. The cookie is session-bound and
# will expire.
HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36 Edg/90.0.818.66',
    'Cookie':'BAIDUID=D1B7F7E5067E408E8FF2D5B56AF1F0C9:FG=1; BIDUPSID=D1B7F7E5067E408E8FF2D5B56AF1F0C9; PSTM=1616743631; BD_UPN=12314753; __yjs_duid=1_f87b59dec35da9e663fd3923d795e6621619352934897; H_WISE_SIDS=107320_110085_127969_128699_131423_154212_165135_165935_166148_169066_169882_170817_170872_170936_171573_171711_172471_172644_172679_173414_173592_173601_173610_173625_173829_173921_173923_174194_174196_174322_174443_174446_174477_174771_174806_174814_175098_175215; MSA_WH=400_700; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDUSS=5uRG85UktHalRZemhVSlRscm14QUMwdlMwZUFVeTM1ZUJKZ3hSbFhSRnRZTmxnSVFBQUFBJCQAAAAAAAAAAAEAAAAB7asmbGFueXVucWluZzIwMTIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAG3TsWBt07Fgfk; BDUSS_BFESS=5uRG85UktHalRZemhVSlRscm14QUMwdlMwZUFVeTM1ZUJKZ3hSbFhSRnRZTmxnSVFBQUFBJCQAAAAAAAAAAAEAAAAB7asmbGFueXVucWluZzIwMTIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAG3TsWBt07Fgfk; H_PS_PSSID=31253_34004_33676_33607_26350; ab_sr=1.0.0_MGNhNDU5ZmE4Y2Q4OThkNTViNTY0Y2EzMzUxYzI3ZDViYmY5Mzk5MTRiYmM0OGJiZGVjOTljYjIyYWEyYmQ0YTM0OTJmZWU0NjFjMzA0MmE2MjE5NTg1NTU2NzgzN2U5; delPer=0; BD_CK_SAM=1; PSINO=7; sug=3; sugstore=0; ORIGIN=0; bdime=0; H_PS_645EC=ee5a%2B7md%2FeGecNrWNEgkYETJI9QJkDrjAUjJ6OEliVY%2FQW1Dl9WgVqSXDoQ; BA_HECTOR=2la4042gah242h057p1gbmhng0r; BDSVRTM=118; COOKIE_SESSION=9259_0_8_5_20_16_1_3_7_3_14_4_777_0_0_0_1622876822_0_1622886129%7C9%230_0_1622886129%7C1'
}

def get_lis_description(url):
    """Fetch one Baidu result page and return the abstract text of each hit.

    Parameters:
        url: full Baidu search-results URL for a single page.

    Returns:
        list[str]: one joined abstract string per result entry (may be empty
        if Baidu returned a verification page or the markup changed).
    """
    # Timeout keeps the spider from hanging forever on a stalled connection.
    response = requests.get(url, headers=HEADERS, timeout=10)
    text = response.text

    # Each result abstract lives in a <div class="c-abstract">. The first
    # string fragment of each div is skipped — presumably a date stamp or
    # similar prefix; TODO confirm against live markup — and the remaining
    # fragments are joined into a single description line.
    soup = BeautifulSoup(text, 'lxml')
    return [''.join(list(div.strings)[1:])
            for div in soup.find_all('div', class_='c-abstract')]

def spider(pages=3):
    """Crawl `pages` Baidu result pages for KEY and print one random abstract per page.

    Parameters:
        pages: number of result pages to crawl (Baidu paginates 10 results
            per page via the ``pn`` query parameter). Defaults to 3, matching
            the original behavior.
    """
    urls = [f'https://www.baidu.com/s?wd={KEY}&pn={i * 10}' for i in range(pages)]
    descriptions = []
    for url in urls:
        description = get_lis_description(url)
        # Pick one random abstract from this page.
        # Bug fix: the original used random.randint(1, len(description)) as an
        # index, which never selects item 0 and raises IndexError when the
        # roll equals len(description); the bare `except` then mislabeled
        # that crash as a fetch failure. random.choice is uniform and safe.
        if description:
            descriptions.append(random.choice(description))
        else:
            print('添加失败！')

        # Random pause between pages to throttle requests.
        time.sleep(random.randint(4, 10))

    print('\n'.join(descriptions))


if __name__ == '__main__':
    spider()