import scrapy
import sys

from scrapy import Selector, Request

from Baidu_GF.items import BaiduGfItem

class BaiduSpider(scrapy.Spider):
    """Crawl Baidu search-result pages for a keyword and yield one item
    per result link (title + href).

    The keyword and the number of result pages are read from stdin at
    import time — kept as-is for backward compatibility with the
    original script-style usage, though a real deployment should pass
    them via ``scrapy crawl Baidu -a keyword=...`` arguments instead.
    """

    name = "Baidu"
    allowed_domains = ["www.baidu.com"]
    keyword = input("请输入搜索关键字：")
    page_num = int(input("请输入所需爬取网页数量："))
    start_urls = 'https://www.baidu.com/baidu?wd={}'.format(keyword)  # wd = search keyword
    base_url = 'https://www.baidu.com'
    num = 0   # count of result links scraped so far (across all pages)
    page = 1  # result page currently being parsed (1-based)

    def start_requests(self):
        """Yield one request per result page.

        Baidu paginates with the ``pn`` offset parameter (10 results per
        page), so each page gets a distinct URL.  The original code
        yielded the identical first-page URL ``page_num - 1`` times,
        which Scrapy's duplicate filter collapsed to a single request,
        and ``range(1, page_num)`` was off by one.
        """
        for page in range(1, self.page_num + 1):
            url = '{}&pn={}'.format(self.start_urls, (page - 1) * 10)
            # Carry the page number in meta so parse() can report it.
            yield Request(url, callback=self.parse, meta={'page': page})

    def parse(self, response, **kwargs):
        """Extract title and URL of every search result on one page.

        Pagination is handled entirely by start_requests(), so the old
        next-page extraction — which built an invalid URL from
        ``str(list)`` and indexed the pager with the *item* counter —
        and the ``sys.exit(0)`` (which would kill the whole reactor)
        are no longer needed.
        """
        self.page = response.meta.get('page', self.page)
        print("您当前所在页码:" + str(self.page))
        # Each result title is an <h3><a> under the #content_left column.
        for a in response.xpath('//div[@id="wrapper_wrapper"]/div[@id="container"]'
                                '/div[@id="content_left"]//h3/a'):
            self.num += 1
            print("正在爬取第{}个网页:".format(self.num))
            item = BaiduGfItem()
            # Title text is split across <em> highlight tags and plain
            # text nodes; join both to recover the full title.
            title = ''.join(a.xpath('./em/text() | ./text()').extract())
            item['title'] = title.strip()
            item['visit_url'] = a.xpath('@href').extract()
            yield item


