# -*- coding: utf-8 -*-
import scrapy
from lxml import etree
from fenghuang.items import FenghuangItem


class NewsUrlSpider(scrapy.Spider):
	"""Crawl ifeng.com rolling-news ("rtlist") pages.

	Yields one FenghuangItem (title, news_url, time) per list entry,
	follows the in-page "next page" link, and when a day's pages are
	exhausted falls back to the previous day's list via the backDay link.
	"""
	name = 'news_url'
	allowed_domains = ['news.ifeng.com']
	start_urls = ['http://news.ifeng.com/listpage/11502/20190615/1/rtlist.shtml']

	def parse(self, response):
		"""Parse one rolling-news list page.

		:param response: Scrapy Response for a rtlist.shtml page.
		:yields: FenghuangItem for each news entry, then a Request for
			the next page (or the previous day's first page).
		"""
		urls = response.xpath('//div[@class="newsList"]//li/a/@href').extract()
		titles = response.xpath('//div[@class="newsList"]//li/a/text()').extract()
		times = response.xpath('//div[@class="newsList"]//li/h4/text()').extract()
		# zip stops at the shortest list, so one malformed <li> (e.g. a
		# missing <h4>) cannot raise IndexError as the index-based loop could.
		for url, title, pub_time in zip(urls, titles, times):
			item = FenghuangItem()
			item['title'] = title
			item['news_url'] = url
			item['time'] = pub_time
			yield item

		# Look for the pager's "next page" link; extract_first() returns a
		# default instead of raising IndexError when a span has no <a>.
		for span in response.xpath('//div[@class="m_page"]//span'):
			link_text = span.xpath('.//a/text()').extract_first(default='')
			if '下一页' in link_text:
				next_url = span.xpath('.//a/@href').extract_first()
				if next_url:
					# urljoin makes relative hrefs absolute.
					yield scrapy.Request(response.urljoin(next_url), callback=self.parse)
				# Next page found -- do not also jump to the previous day.
				return
		# No "next page" on this page: continue with the previous day's list.
		prev_day = response.xpath('//div[@id="backDay"]/a/@href').extract_first()
		if prev_day:
			yield scrapy.Request(response.urljoin(prev_day), callback=self.parse)
