# -*- coding: utf-8 -*-
import scrapy
from daydayup_spider.items import DaydayupSpiderItem


class TtpaiSpider(scrapy.Spider):
    """Crawl used-car auction listings from ttpai.cn.

    Walks the paginated national listing (``/quanguo/list-pN``), follows
    each car's detail link, and yields one ``DaydayupSpiderItem`` per car.
    """

    name = 'ttpai'
    allowed_domains = ['www.ttpai.cn']

    # Site root, prepended to the relative hrefs found on listing pages.
    BASE_URL = 'https://www.ttpai.cn'
    # Container div on the detail page; p[1]..p[6] hold the car attributes.
    _DETAIL_BASE = '//div[@class="detail-car clearfix mlr10 mb30"]'

    def start_requests(self):
        """Request the first listing page plus pages 2..2484.

        Yields:
            scrapy.Request: one request per listing page, parsed by
            :meth:`middle_parse`.
        """
        yield scrapy.Request('https://www.ttpai.cn/quanguo/list',
                             callback=self.middle_parse)
        page_url = 'https://www.ttpai.cn/quanguo/list-p{0}'
        for page in range(2, 2485):
            # dont_filter: keep the dupe filter from dropping any of the
            # explicitly enumerated pagination URLs (e.g. on retries).
            yield scrapy.Request(page_url.format(page),
                                 callback=self.middle_parse,
                                 dont_filter=True)

    def middle_parse(self, response):
        """Extract per-car detail links from a listing page and follow them.

        Seeds each request's ``meta['items']`` with a ``DaydayupSpiderItem``
        carrying the detail URL; :meth:`detailParse` fills in the rest.
        """
        for href in response.xpath('//ul[@class="car-list "]/li/a/@href'):
            # Hoisted: extract() once instead of once per use.
            detail_url = self.BASE_URL + href.extract()
            items = DaydayupSpiderItem()
            items['url'] = detail_url
            yield scrapy.Request(detail_url,
                                 meta={'items': items},
                                 callback=self.detailParse,
                                 dont_filter=True)

    @staticmethod
    def _first_text(response, xpath, require_single=False):
        """Return the first text node matched by *xpath*, cleaned, or None.

        Cleaning removes literal ``"\\n"`` sequences and real newlines, then
        strips surrounding whitespace — same normalization the original
        per-field code applied.

        Args:
            response: the Scrapy response to query.
            xpath: XPath expression selecting text nodes.
            require_single: when True, reproduce the original strict rule
                used for title/update_time/price — return None unless
                EXACTLY one node matches.
        """
        nodes = response.xpath(xpath)
        if not nodes or (require_single and len(nodes) != 1):
            return None
        return nodes[0].extract().replace("\\n", "").replace("\n", "").strip()

    def detailParse(self, response):
        """Fill the item from ``meta`` with fields scraped off a detail page.

        Yields:
            DaydayupSpiderItem: the item seeded by :meth:`middle_parse`,
            with missing fields set to None.
        """
        items = response.meta['items']

        # These three originally demanded exactly one matching text node;
        # keep that behavior via require_single.
        items['title'] = self._first_text(
            response, '//h1[@class="title text-ellipsis"]//text()',
            require_single=True)
        items['update_time'] = self._first_text(
            response, '//p[@class="closing-date"]//text()',
            require_single=True)
        items['price'] = self._first_text(
            response, '//span[@class="price"]//text()',
            require_single=True)

        # p[1]..p[6] of the detail-car block map to these fields in order.
        for field, idx in (('place', 1), ('mileage', 2), ('sale_time', 3),
                           ('color', 4), ('type', 5), ('changes', 6)):
            items[field] = self._first_text(
                response,
                '{0}/p[{1}]/span[1]/text()'.format(self._DETAIL_BASE, idx))

        # Bug fix: the original guarded on '//text()' (any descendant text)
        # but extracted '/text()' (direct children only), so a <p> containing
        # only nested elements passed the guard and then raised IndexError.
        # Guard and extract with the same expression instead.
        items['detail'] = self._first_text(
            response, '//p[@class="list-line"]/text()')

        yield items
