# -*- coding: utf-8 -*-
import scrapy

from chengdu.items import ChengduItem


class LianjiaSpider(scrapy.Spider):
    """Crawl sold-listing ("chengjiao") records from Lianjia Chengdu.

    Walks the paginated index at ``base_url + "pgN/"``, optionally keeping
    only listings whose deal date is in ``allow_days``, and follows each
    listing to its detail page, yielding one ``ChengduItem`` per listing.
    """

    name = 'lianjia'
    allowed_domains = ['cd.lianjia.com']
    base_url = "https://cd.lianjia.com/chengjiao/"
    page_number = 1
    start_urls = [base_url + "pg" + str(page_number) + "/"]
    # Upper bound on index pages to follow (Lianjia caps listings at ~100 pages).
    page_max_number = 101
    # When True, only listings whose deal date matches an entry in allow_days
    # are followed, and pagination stops once a page has no matching listing.
    restrict_days_flag = False
    allow_days = ['2019.10.28', '2019.10.29', '2019.10.30', '2019.10.31']

    @staticmethod
    def _no_spaces(value):
        """Return *value* with all spaces removed; pass None through safely."""
        return value.replace(" ", "") if value is not None else None

    def parse(self, response):
        """Parse one index page: queue detail requests and the next index page.

        :param response: index-page response (``/chengjiao/pgN/``).
        :yields: ``scrapy.Request`` for detail pages and the next index page.
        """
        li_list = response.xpath('//ul[@class="listContent"]/li')
        has_next_page = True
        for li in li_list:
            # FIX: the original query started with '//', which is absolute and
            # ignores the `li` context — every iteration got the FIRST dealDate
            # on the page. './/' keeps the lookup relative to this <li>.
            deal_date = li.xpath('.//div[@class="dealDate"]/text()').get()
            if self.restrict_days_flag is True:
                # Listings are date-ordered; if nothing on this page matches,
                # later pages won't either, so stop paginating.
                has_next_page = False
                for day in self.allow_days:
                    print(deal_date)
                    print('day:' + day)
                    # Guard: deal_date may be None when the node is missing.
                    if deal_date is not None and deal_date.find(day) > -1:
                        print('进来')
                        detail_url = li.xpath('./a[1]/@href').get()
                        if detail_url:
                            yield scrapy.Request(detail_url, callback=self.parse_detail)
                        has_next_page = True
                        break
            else:
                detail_url = li.xpath('./a[1]/@href').get()
                if detail_url:
                    yield scrapy.Request(detail_url, callback=self.parse_detail)

        if has_next_page is True and self.page_number <= self.page_max_number:
            self.page_number += 1
            yield scrapy.Request(self.base_url + "pg" + str(self.page_number) + "/", callback=self.parse)

    def parse_detail(self, response):
        """Scrape one sold-listing detail page into a ``ChengduItem``.

        :param response: detail-page response for a single listing.
        :yields: a populated ``ChengduItem``.
        """
        print(response)
        item = ChengduItem()
        item['url'] = str(response.url)
        item['title'] = response.xpath('//h1[@class="index_h1"][1]/text()').get()
        item['sale_date'] = response.xpath('//div[@class="wrapper"][1]/span/text()').get()

        # Right-hand info panel: prices and transaction statistics.
        right_info = response.xpath('//div[@class="info fr"]')
        item['sale_price'] = right_info.xpath(
            './div[@class="price"]/span[@class="dealTotalPrice"]/i[1]/text()').get()
        item['publish_price'] = right_info.xpath('./div[@class="msg"]/span[1]/label[1]/text()').get()
        item['unit_price'] = right_info.xpath('./div[@class="price"]/b[1]//text()').get()
        item['transaction_days'] = right_info.xpath('./div[@class="msg"]/span[2]/label[1]/text()').get()
        item['change_time'] = right_info.xpath('./div[@class="msg"]/span[3]/label[1]/text()').get()
        item['look_times'] = right_info.xpath('./div[@class="msg"]/span[4]/label[1]/text()').get()
        item['scan_times'] = right_info.xpath('./div[@class="msg"]/span[6]/label[1]/text()').get()

        # "Base" attribute list (room type, floor, size, build year, ...).
        # Positional <li> indexes assume Lianjia's fixed field order — TODO
        # confirm against a live page if fields start coming back shifted.
        base_content = response.xpath('//div[@class="fl m-left"]/div[@class="newwrap baseinform"]/div[@class='
                                      '"introContent"]/div[@class="base"]/div[@class="content"]/ul[1]')
        # FIX: each .get() may return None; the original crashed on .replace().
        item['room_size'] = self._no_spaces(base_content.xpath('./li[3]/text()').get())
        item['room_type'] = self._no_spaces(base_content.xpath('./li[1]/text()').get())
        item['floor'] = self._no_spaces(base_content.xpath('./li[2]/text()').get())
        item['created_time'] = self._no_spaces(base_content.xpath('./li[8]/text()').get())

        item['base_info'] = self._join_fields(base_content.xpath('./li'))

        # "Transaction" attribute list (publish time, ownership, ...).
        transaction_content = response.xpath('//div[@class="fl m-left"]/div[@class="newwrap baseinform"]/div[@class='
                                             '"introContent"]/div[@class="transaction"]/div[@class="content"]/ul[1]')
        item['publish_time'] = self._no_spaces(transaction_content.xpath('./li[3]/text()').get())

        item['transaction_info'] = self._join_fields(transaction_content.xpath('./li'))
        yield item

    @staticmethod
    def _join_fields(lis):
        """Flatten an attribute <ul> into one "name：value，name：value，" string.

        Missing name/value nodes are treated as empty strings instead of
        raising TypeError on concatenation (as the original code did).
        """
        parts = []
        for li in lis:
            field_name = li.xpath('./span[1]/text()').get() or ""
            field_value = li.xpath('./text()').get() or ""
            parts.append(field_name + "：" + field_value + "，")
        return "".join(parts)
