import datetime
import logging
import scrapy

from qfang.items import SellingAPTItem
from qfang.pipelines.Sql import Sql


class SellingAPTContentSpider(scrapy.Spider):
    """Crawl m.qfang.com second-hand apartment ("selling APT") detail pages.

    URLs pending for today are read from the ``selling_apt`` crawl table via
    ``Sql.select_crawl_url``; each detail page is parsed into a
    :class:`SellingAPTItem` which is handed to ``SellingAPTPipeLine``
    (wired up through ``custom_settings``).
    """

    name = "SellingAPTContentSpider"
    custom_settings = {
        'ITEM_PIPELINES': {
            'qfang.pipelines.pipelines.SellingAPTPipeLine': 1
        }
    }
    base_url = "https://m.qfang.com"
    source = "qfang"
    # Headers mimicking a mobile (iPhone) browser for the HTML detail pages.
    headers_list = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "Host": "m.qfang.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1"
    }
    # Headers for the site's XHR/JSON endpoints (kept for reuse; not used by
    # the requests issued in this spider).
    headers_ajax = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Content-Length": "0",
        "Host": "m.qfang.com",
        "Referer": "https://m.qfang.com/guangzhou/sale/",
        "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1",
        "X-Requested-With": "XMLHttpRequest"
    }

    def start_requests(self):
        """Yield one detail-page request per URL pending in today's batch.

        Each row returned by ``Sql.select_crawl_url`` is assumed to carry the
        detail-page URL in column 0 -- TODO confirm against the Sql schema.
        """
        today = datetime.datetime.now().strftime('%Y-%m-%d')
        url_list = Sql.select_crawl_url('selling_apt', self.source, today)
        # Message reads: "there are %s pages waiting to be crawled".
        # Lazy %s args avoid formatting when the log level is disabled.
        logging.info("待爬取网页有【%s】条", len(url_list))
        for row in url_list:
            yield scrapy.Request(url=row[0],
                                 headers=self.headers_list, callback=self.parse)

    def parse(self, response):
        """Extract a SellingAPTItem from one listing detail page.

        Returns the populated item, or ``None`` (after logging) when any
        expected node is missing, so a malformed page is skipped instead of
        aborting the whole crawl.
        """
        item = SellingAPTItem()
        try:
            # Unit price text ends in a 3-char unit suffix (e.g. "元/㎡"),
            # stripped here so the value parses as an int below.
            one_price = response.xpath('//div[@class="other-house-info"]/div[2]/span[2]/text()').extract()[0][:-3]
            # The community-average-price section index varies between page
            # layouts: try section[6] first, then fall back to section[7].
            community_price = response.xpath('/html/body/section[6]/div/div[1]/span[2]/em/text()').extract()
            if not community_price:
                community_price = response.xpath('/html/body/section[7]/div/div[1]/span[2]/em/text()').extract()
            # Ratio of this listing's unit price to the community average.
            loss = int(one_price) / int(community_price[0])
            item['website'] = 'Q房'
            item['times'] = datetime.datetime.now().strftime('%Y-%m-%d')
            item['community_name'] = response.xpath('//section[@class="relevant-garden"]/a[1]/span/text()').extract()[0]
            item['region'] = \
            response.xpath('//section[@class="relevant-garden"]/div/div[4]/span[@class="txt ellips"]/text()').extract()[
                0].split()[0]
            item['title_name'] = response.xpath('/html/body/section[2]/h1/text()').extract()[0]
            item['area'] = response.xpath('/html/body/section[2]/div[1]/div[3]/span[2]/text()').extract()[0][:-1]
            item['all_price'] = response.xpath('/html/body/section[2]/div[1]/div[1]/span[2]/em/text()').extract()[0]
            # Reuse the value extracted above instead of repeating the XPath.
            item['one_price'] = one_price
            # Bugfix: store the scalar price string, not the raw extract()
            # list -- every other field is a scalar and `loss` already uses
            # community_price[0].
            item['community_price'] = community_price[0]
            item['loss'] = loss
            return item
        except Exception as e:
            # Log-and-skip: a single broken page must not kill the crawl.
            # logging.exception keeps the traceback that logging.error drops.
            print(e)
            print(response.url)
            print(item.values())
            logging.exception(e)
            logging.error(response.url)
            logging.error(item.values())
