from copy import deepcopy
from scrapy_redis.spiders import RedisSpider
from scrapy import Request
from ..items import LianjiaIndexItem


class IndexSpider(RedisSpider):
	"""Distributed (scrapy-redis) spider for Lianjia rental listing index pages.

	Start URLs are pulled from the redis list named by ``redis_key``.
	``parse`` walks every pagination page of the current listing index,
	yields one detail-page Request per non-apartment listing, and — once
	the last page is reached for the first time — fans the crawl out to
	one start page per district via ``urbanArea``.
	"""

	name = 'index'
	redis_key = "index:start_urls"

	# True until urbanArea() has run once. Prevents the per-district
	# fan-out from being re-enqueued every time a crawl branch reaches
	# its final pagination page.
	sign = True

	def parse(self, response):
		"""Parse one listing index page.

		Yields:
			Request: one per listing detail page (flagged "details"),
				plus either the next pagination page or, on the final
				page, one request per district start URL.
		"""
		listings = response.xpath('//*[@class="content__list"]/div')
		# data-curpage / data-totalpage drive pagination.
		# NOTE(review): assumes both attributes exist on every index
		# page — int(None) would raise on an empty result page; confirm.
		cur_page = int(response.xpath('//*[@class="content__article"]/div/@data-curpage').extract_first())
		page_count = int(response.xpath('//*[@class="content__article"]/div/@data-totalpage').extract_first())
		# Use the spider logger (lazy %-formatting) instead of print().
		self.logger.debug("page %s of %s", cur_page, page_count)

		for listing in listings:
			detail_url = listing.xpath('.//*[@class="twoline"]/@href').extract_first()
			if "apartment" in str(detail_url):
				# Apartment listings use a different page layout — skip.
				# logger.warning(): .warn() is a deprecated alias.
				self.logger.warning(f"是公寓{detail_url}")
			else:
				items = LianjiaIndexItem()
				# extract_first(): returns None on a missing node instead
				# of the IndexError that extract()[0] would raise.
				items['district'] = listing.xpath(
					'.//*[@class="content__list--item--des"]/a/text()').extract_first()
				next_url = response.urljoin(detail_url)
				# flags=["details"] marks detail requests so downstream code
				# can distinguish them from index pages.
				# NOTE(review): callback=None falls back to self.parse, which
				# only understands index pages — confirm whether a dedicated
				# detail callback was intended here.
				yield Request(url=next_url, callback=None, flags=["details"],
				              meta={"items": deepcopy(items)})

		if cur_page != page_count:
			# Not on the last page yet: follow the pagination.
			yield Request(url=response.urljoin(f"pg{cur_page + 1}"), callback=self.parse)
		elif self.sign:
			# First time the last page is reached: fan out one request
			# per district. urbanArea() flips self.sign to False so this
			# happens only once per crawl.
			for district_url in self.urbanArea(response):
				yield Request(url=district_url, callback=self.parse)
		# (Removed an unreachable branch that re-checked self.sign inside
		# the not-sign path — always False there — and called
		# self.start_requests() without consuming the generator, so it
		# could never have any effect.)

	def urbanArea(self, response):
		"""Yield one absolute start URL per district found on *response*.

		Also flips ``self.sign`` to False so the fan-out runs only once.
		"""
		self.logger.debug("fanning out districts from %s", response.url)
		self.sign = False
		for area in response.xpath('//*[@data-target="area"]/li'):
			# The predicate excludes the "不限" (no-filter) pseudo-link;
			# for that <li> extract_first() returns None, which the guard
			# below skips — response.urljoin(None) would raise.
			href = area.xpath('./a[text()!="不限"]/@href').extract_first()
			if href:
				yield response.urljoin(href)
