import scrapy
from scrapy import Request
import time
class NewsSpider(scrapy.Spider):
    """Spider for BUAA campus news (zhxw section).

    Walks the paginated list pages, follows every article link found on
    them, and yields one dict per article with ``url``, ``title`` and
    ``content``.
    """

    name = 'news'
    allowed_domains = ['buaa.edu.cn']
    start_urls = ['https://news.buaa.edu.cn/zhxw/1.htm']
    # Template for list pages; filled with the page number below.
    page_url = 'https://news.buaa.edu.cn/zhxw/%d.htm'
    # Next list-page index to request; pages page+1 .. 1111 are crawled.
    page = 1000
    # Throttle politely through Scrapy's scheduler. The original used
    # time.sleep(1) inside the callback, which blocks the Twisted reactor
    # and stalls *all* concurrent requests — DOWNLOAD_DELAY achieves the
    # same 1-second pacing without blocking.
    custom_settings = {'DOWNLOAD_DELAY': 1}

    def parse(self, response):
        """Dispatch on URL type.

        List pages (under /zhxw/) yield Requests for each article plus a
        Request for the next list page; article pages yield the item dict.
        """
        if 'https://news.buaa.edu.cn/zhxw/' in response.url:
            hrefs = response.xpath('//div[@class="listleftop1 auto"]/h2/a/@href').extract()
            for href in hrefs:
                # Links appear to be relative with a leading '../'
                # (hence href[3:]); rebuild an absolute URL.
                # NOTE(review): assumes every href starts with '../' — verify.
                href = 'https://news.buaa.edu.cn/' + href[3:]
                self.logger.debug(href)
                yield Request(href)

            if self.page < 1111:  # adjust the bound here to crawl more/fewer pages
                self.page += 1
                new_url = self.page_url % self.page
                # A new URL discovered during parsing is fetched by
                # yielding a scrapy.Request back into this same callback.
                yield scrapy.Request(url=new_url, callback=self.parse)

        else:
            # Article detail page: extract title and body paragraphs.
            title = response.xpath('//div[@class="newslefttit auto"]/h1/text()').extract_first()
            content = response.xpath('//div[@class="v_news_content"]//p/text()').extract()
            content = ''.join(content)
            if title is not None:  # skip pages with no recognizable title
                yield {
                    'url': response.url,
                    'title': title,
                    'content': content,
                }




