# -*- coding: utf-8 -*-
import scrapy
from chezhi.items import ChezhiItem
from chezhi.chezhi import listcode


class TousuSpider(scrapy.Spider):
    """Scrape vehicle-quality complaints ("tousu") from www.12365auto.com.

    Listing pages are crawled first; each table row yields a partially
    filled item plus a follow-up request to the complaint's detail page,
    where the remaining fields are filled in.
    """
    name = 'tousu'
    allowed_domains = ['www.12365auto.com']

    def start_requests(self):
        """Yield requests for the first 50 listing pages.

        The ``{0}`` placeholder in the URL template is the 1-based page
        number; ``1878`` in the path is a fixed brand/series filter.
        """
        url = 'http://www.12365auto.com/zlts/0-1878-0-0-0-0_0-0-{0}.shtml'
        for page in range(1, 51):
            yield scrapy.Request(url.format(page), callback=self.pare)

    def pare(self, response):
        """Parse one listing page (one complaint per table row).

        For every row, build a partially populated ``ChezhiItem`` and yield
        a request for the complaint's detail page, carrying the item along
        in the request meta so ``pare_detail`` can finish it.
        """
        rows = response.xpath('//table//tr')
        for row in rows[1:]:  # rows[0] is the table header
            item = ChezhiItem()
            item['id'] = row.xpath("td[1]/text()")[0].extract()
            item['brand'] = row.xpath("td[2]/text()")[0].extract()
            item['car'] = row.xpath("td[3]/text()")[0].extract()
            item['car_style'] = row.xpath("td[4]/text()")[0].extract()
            item['content'] = row.xpath("td[5]/a/text()")[0].extract()
            url = row.xpath("td[5]/a/@href")[0].extract()
            # td[6] holds comma-separated problem codes; listcode maps each
            # code to a human-readable label.  The [:-1] drops the final
            # entry -- presumably the cell text ends with a trailing comma
            # that leaves an empty last code (TODO confirm against the
            # live page markup).
            question_codes = row.xpath("td[6]//text()")[0].extract().split(',')
            item['question'] = str([listcode.get(code) for code in question_codes][:-1])
            item['state'] = row.xpath("td[8]/em/text()")[0].extract()
            # dont_filter: keep every detail request even if Scrapy's dupe
            # filter has seen the URL before, since each carries its own item.
            yield scrapy.Request(url=url, meta={'items': item},
                                 callback=self.pare_detail, dont_filter=True)

    def pare_detail(self, response):
        """Parse a complaint detail page and complete the item from meta."""
        item = response.meta['items']
        item['time'] = response.xpath('//div[@class="jbqk"]/ul/li[5]/text()')[0].extract()
        item['detail_content'] = response.xpath('//div[@class="tsnr"]/p/text()')[0].extract()
        item['Reply'] = response.xpath('//div[@class="tshf"]/p/text()')[0].extract()
        yield item




