# -*- coding: utf-8 -*-
from urllib.parse import urljoin

import scrapy
from scrapy import Request

from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider, RedisSpider
from carhome.items import CarParamentsItem, CarhomeItem
from scrapy.linkextractors import LinkExtractor
import re


class MulticarSpider(RedisSpider):
	"""Distributed spider for autohome.com.cn car listings.

	As a scrapy-redis ``RedisSpider``, start URLs are popped from the redis
	list named by ``redis_key`` rather than from ``start_urls``.  The crawl
	walks: brand index page -> model page (``parse``) -> spec list
	(``parse_item``) -> per-spec car detail page (``parse_car``), yielding
	one ``CarhomeItem`` plus one ``CarParamentsItem`` per car.
	"""

	name = 'multicar'
	# allowed_domains still limits offsite requests even under RedisSpider.
	allowed_domains = ['www.autohome.com.cn']
	bash_url = 'https://www.autohome.com.cn/grade/carhtml/'
	redis_key = 'multicar:start_urls'

	# Patterns hoisted to class level (compiled once) and written as raw
	# strings to avoid invalid-escape warnings ('\d', '\#', '\/').
	# Captures the numeric spec id out of urls like .../spec/12345/...
	_SPEC_ID_RE = re.compile(r'.*/spec/(.*?)/.*')
	# Splits 'value(source note)' into value + parenthesised note.
	_PAREN_RE = re.compile(r'(.*?)(\(.*\))')

	def parse(self, response):
		"""Parse a brand index page and follow every model-page link.

		The anchors matched carry protocol-relative hrefs
		('//www.autohome.com.cn/<id>/#levelsource=...'), so the scheme is
		prepended before requesting.
		"""
		hrefs = response.xpath(
			r"//a[re:test(@href,'^//www.autohome.com.cn/\d*/\#levelsource=000000000_0&pvareaid=101594')]/@href"
		).extract()
		for href in hrefs:
			yield Request(urljoin('https:', href), callback=self.parse_item)

	def parse_item(self, response):
		"""Parse a model page: follow each spec (trim) link to its detail page."""
		speclist = response.xpath('//*[@id="speclist20"]')
		try:
			for entry in speclist.xpath('ul/li'):
				href = entry.xpath('div/div/p/a/@href').extract_first(default='N/A')
				url = urljoin('https://www.autohome.com.cn/', href)
				yield Request(url, callback=self.parse_car)
		except Exception:
			# Best-effort per page: log instead of silently dropping it.
			self.logger.exception('failed to extract spec links from %s', response.url)

	def parse_car(self, response):
		"""Parse one car detail page into a CarhomeItem and a CarParamentsItem."""
		self.logger.info('this is a car page: %s', response.url)
		car = CarhomeItem()
		breadnav = response.xpath('//*[@class="breadnav fn-left"]')
		detail = response.xpath('//div[@class="cardetail fn-clear"]/div[2]/div[2]/ul')
		header = response.xpath('//div[@class="cardetail fn-clear"]/div[2]/div[1]/ul')
		try:
			car['name'] = breadnav.xpath('a[3]/text()').extract_first()
			car['type'] = breadnav.xpath('a[2]/text()').extract_first(default='N/A')
			car['model'] = breadnav.xpath('a[last()]/text()').extract_first()

			# rate looks like '4.5<unit char>'; strip the trailing character.
			rate = detail.xpath('li[1]/a[2]/text()').extract_first(default=0)
			car['rate'] = rate[:-1] if isinstance(rate, str) and rate else 0

			# indictprice looks like '<label>:<price>'; keep the part after ':'.
			price = header.xpath('li[3]/span/text()').extract_first()
			try:
				car['indictprice'] = price.split(':')[1].strip() if price else 0
			except (AttributeError, IndexError):
				car['indictprice'] = 0

			car['allprice'] = header.xpath('li[3]/div[1]/span/a/text()').extract_first()

			second = header.xpath('li[3]/div[2]/a/text()').extract_first(default=0)
			car['secondprice'] = second[:-1] if isinstance(second, str) else ''

			match = self._SPEC_ID_RE.search(response.url)
			if match is None:
				# Without a spec id neither item can be keyed; skip the page.
				self.logger.warning('no spec id found in url: %s', response.url)
				return
			car_id = match.group(1)
			car['model_id'] = car_id
			self.logger.debug('yield %s', response.url)
			yield car

			carparaments = CarParamentsItem()
			# model_url doubles as the marker that parameters were inserted.
			carparaments['model_url'] = str(response.url)
			yield self.get_paraments(car_id, carparaments, detail)
		except Exception:
			# Page layout varies across models; log instead of dropping silently.
			self.logger.exception('failed to parse car page %s', response.url)

	def get_paraments(self, car_id, carparaments, detail):
		"""Fill *carparaments* from the detail <ul> selector.

		Best-effort: extraction errors are logged and whatever was filled so
		far is still returned, matching the caller's expectations.

		:param car_id: spec id parsed from the page url.
		:param carparaments: partially-filled CarParamentsItem to complete.
		:param detail: selector over the detail <ul> of the car page.
		:returns: the (possibly partially filled) CarParamentsItem.
		"""
		try:
			carparaments['model_id'] = car_id
			carparaments['size'] = detail.xpath('li[3]/text()').extract_first(default='N/A')

			gas = detail.xpath('li[4]/text()').extract_first(default='N/A')
			# gas looks like 'value(source note)'; drop the parenthesised note.
			match = self._PAREN_RE.search(gas)
			if match:
				gas = match.group(1) or ''
			carparaments['gas'] = gas

			# engine text is space-separated: '<intake> <horsepower> <cylinder>'.
			engine = detail.xpath('li[7]/text()').extract_first()
			parts = engine.split(' ') if engine else []
			if len(parts) >= 3:
				carparaments['intake'] = parts[0]
				carparaments['horsepower'] = parts[1]
				carparaments['cylinder'] = parts[2]

			carparaments['gear_box'] = detail.xpath('li[last()-2]/text()').extract_first(default='N/A')
			carparaments['driver'] = detail.xpath('li[last()-1]/text()').extract_first(default='N/A')
		except Exception:
			self.logger.exception('failed to extract parameters for model %s', car_id)
		return carparaments