# -*- coding: utf-8 -*-
import scrapy
from LianJiaSpider.items import LianjiaItem
from LianJiaSpider.settings import *
import json
import random
import time
import traceback
import logging
from LianJiaSpider import ipproxy


class LianjiaSpider(scrapy.Spider):
    """Crawl second-hand housing ("ershoufang") listings on sz.lianjia.com.

    Crawl flow:
        parse       -> area navigation pages (two-level district drill-down)
        parse_area  -> paginated listing pages for one sub-area
        parse_house -> a single listing's detail page, yielding a LianjiaItem

    Run with: python -m scrapy crawl lianjia -o items.json
    """

    name = 'lianjia'
    allowed_domains = ['lianjia.com']
    start_urls = ['https://sz.lianjia.com/ershoufang/']

    base_url = 'https://sz.lianjia.com'

    # Crawl bookkeeping. The int counters rebind to the instance on first
    # augmented assignment; the retry map is re-created per instance in
    # __init__ because a class-level dict would be shared mutable state.
    spider_request_count = 0
    spider_success_count = 0
    spider_fail_reset = {}

    def __init__(self, *args, **kwargs):
        # Forward *args/**kwargs so spider arguments supplied via
        # `scrapy crawl lianjia -a key=value` still work; a bare
        # zero-argument signature raises TypeError for them.
        super(LianjiaSpider, self).__init__(*args, **kwargs)
        # Per-instance retry map: url -> number of re-dispatch attempts.
        self.spider_fail_reset = {}
        self.configLogger()

    def configLogger(self):
        """Attach a console handler with a file/line-aware log format."""
        ch = logging.StreamHandler()
        ch.setLevel(LOG_LEVEL)
        formatterch = logging.Formatter(
            "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s")
        ch.setFormatter(formatterch)
        # self.logger is a LoggerAdapter; handlers must be registered on the
        # wrapped logging.Logger, hence the extra .logger hop.
        self.logger.logger.addHandler(ch)

    def parse(self, response):
        """Walk the area navigation.

        The nav block holds one <div> on the city page (district links:
        recurse into parse) and two <div>s on a district page (the second
        carries sub-area links: hand off to parse_area).
        """
        areas = response.xpath('//*[@data-role="ershoufang"]/div')
        for index, area in enumerate(areas):
            hrefs = area.xpath('./a/@href').extract()
            for href in hrefs:
                url = self.base_url + href
                if len(areas) == 1:
                    yield scrapy.Request(url=url, callback=self.parse)
                elif len(areas) == 2 and index == 1:
                    self.logger.info(url)
                    yield scrapy.Request(url=url, callback=self.parse_area)

    def parse_area(self, response):
        """Request every listing on this page, then follow pagination."""
        urls = response.xpath('//*[@class="title"]/a/@href').extract()
        for url in urls:
            self.spider_request_count += 1
            # Small jitter between detail requests to be gentler on the site.
            time.sleep(random.uniform(0, 0.1))
            self.logger.info('ready request No%d url: %s' % (self.spider_request_count, url))
            yield scrapy.Request(url=url, callback=self.parse_house)

        page_info = response.xpath('//*[@class="page-box house-lst-page-box"]/@page-data').extract_first("")
        # Truthiness test; the original `is not ""` compared identity, which
        # is unreliable and a SyntaxWarning on modern Python.
        if page_info:
            res = json.loads(page_info)
            next_page = res['curPage'] + 1
            if next_page <= res['totalPage']:
                pg_url = str(response.url).split('pg')
                url = pg_url[0] + "pg" + str(next_page)
                self.logger.info('next page url :' + url)
                yield scrapy.Request(url=url, callback=self.parse_area)

    def parse_house(self, response):
        """Extract one listing into a LianjiaItem.

        On any parse failure the current proxy is invalidated and the URL is
        re-dispatched up to 3 times before being recorded in fail.log.
        """
        self.logger.info('parse_house url: %s for proxy ip: %s start' % (
            str(response.url),
            response.meta['proxy'] if 'proxy' in response.meta.keys() else '本机'))
        try:
            item = LianjiaItem()
            item['url'] = response.url
            title = response.xpath('//h1[@class="main"]/@title').extract_first("")
            if title == '':
                # An empty title usually means an anti-bot/verification page;
                # log the body and let the retry logic re-dispatch the URL.
                self.logger.error(response.text)
                raise Exception('parser_house title error')

            item['totle_price'] = response.xpath('//span[@class="total"]/text()').extract_first("") + \
                                  response.xpath('//span[@class="unit"]/span/text()').extract_first("")
            item['price'] = response.xpath('//*[@class="unitPriceValue"]/text()').extract_first("") + \
                            response.xpath('//*[@class="unitPriceValue"]/i/text()').extract_first("")

            # Mortgage-calculator JSON payload; a missing attribute makes
            # json.loads raise and falls into the retry handler below.
            shoufu = response.xpath('//div[@id="calculator"]/@data-shoufu').extract_first()
            res = json.loads(shoufu)
            item['down_paymen'] = res['totalShoufuDesc']
            item['monthly_house'] = res['monthPayWithInterest']

            item['residential'] = response.xpath('//*[@class="info "]/text()').extract_first("")

            area = response.xpath('//div[@class="areaName"]/span/a/text()').extract()
            item['area'] = ''.join(area)

            # Map the Chinese label of each base-info row to an item field.
            info_dict = {
                '房屋户型': 'house_type',
                '所在楼层': 'floor',
                '建筑面积': 'floor_area',
                '套内面积': 'carpet_area',
                '装修情况': 'renovation',
                '配备电梯': 'elevator',
            }
            house_infos = response.xpath('//div[@class="base"]/div[@class="content"]/ul/li')
            for house_info in house_infos:
                # `label` instead of `type` to avoid shadowing the builtin.
                label = house_info.xpath('./span/text()').extract_first("")
                info = house_info.xpath('./text()').extract_first("")
                if label in info_dict:
                    item[info_dict[label]] = info
            item['construction_time'] = response.xpath('//*[@class="area"]/div[@class="subInfo"]/text()').extract_first()

            self.spider_success_count += 1
            self.logger.info("parse suc url count:" + str(self.spider_success_count))
            yield item
        except Exception:
            self.logger.error("paser_house error " + str(response.url + "\n" + traceback.format_exc()))
            # Reset the proxy pool timestamp so the next request rotates IPs.
            ipproxy.last_time = 0
            if response.url in self.spider_fail_reset:
                reset = self.spider_fail_reset[response.url]
                if reset < 3:
                    self.spider_fail_reset[response.url] = reset + 1
                    yield scrapy.Request(url=response.url, callback=self.parse_house, dont_filter=True)
                else:
                    self.logger.error("paser_house fail " + str(response.url))
                    # Append mode: the original 'w+' truncated the file, so
                    # only the most recent failure was ever kept.
                    with open('fail.log', 'a', encoding='utf-8') as f:
                        f.write(str(response.url + "," + traceback.format_exc()))
            else:
                self.spider_fail_reset[response.url] = 1
                yield scrapy.Request(url=response.url, callback=self.parse_house, dont_filter=True)
