import scrapy
from scrapy.http import Request
from urllib.parse import urljoin
from selenium import webdriver
from tinyspider.items import Lianjia_ershou_item, Lianjia_zufang_item
import re


class LianJiaErShouSpider(scrapy.Spider):
    """Crawl second-hand ("ershou") house listings on bj.lianjia.com.

    Walks the paginated listing index, follows every house detail page and
    yields one populated ``Lianjia_ershou_item`` per house.
    """
    name = 'lianjia_ershou'
    # Bare hostname only: Scrapy's offsite middleware matches domains, not
    # URL paths ('bj.lianjia.com/ershoufang/' would never match).
    allowed_domains = ['bj.lianjia.com']
    start_urls = ['https://bj.lianjia.com/ershoufang/']

    custom_settings = {
        # Persist crawl state so the job can be paused/resumed.
        "JOBDIR": "/home/demeen/Desktop/data/dataweb/lianjia/job/ershou"
    }

    def __init__(self):
        firefox_profile = webdriver.FirefoxProfile()
        # 2 == block all images: speeds up Selenium page loads.
        firefox_profile.set_preference('permissions.default.image', 2)

        self.driver = webdriver.Firefox(firefox_profile=firefox_profile,
                                        executable_path="/home/demeen/Desktop/source/geckodriver")
        super(LianJiaErShouSpider, self).__init__()

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed into the crawler's signal manager.

        Replaces the ``scrapy.xlib.pydispatch`` dispatcher, which was
        removed from Scrapy; ``crawler.signals.connect`` is the supported
        way to receive the ``spider_closed`` signal.
        """
        from scrapy import signals
        spider = super(LianJiaErShouSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Shut down the Selenium browser when the spider stops."""
        print("spider close")
        self.driver.quit()

    def parse(self, response):
        """Yield a request per house detail page, then follow pagination."""
        house_list = response.css(".sellListContent li.clear .title a::attr(href)").extract()
        for house in house_list:
            yield Request(url=house, callback=self.parse_house, dont_filter=True)
        # The last pager link points to the next page (when one exists).
        next_page = response.css(".contentBottom .house-lst-page-box a:last-child")
        next_page = next_page.css("a::attr(href)").extract_first()
        if next_page:
            next_url = urljoin(response.url, next_page)
            yield Request(url=next_url, callback=self.parse, dont_filter=True)

    def parse_house(self, response):
        """Extract one ``Lianjia_ershou_item`` from a house detail page.

        Missing page fragments yield ``None``/empty values instead of
        raising, so a single sparse page cannot kill the crawl.
        """
        item = Lianjia_ershou_item()
        title = response.css(".sellDetailHeader .content .main::text").extract_first()
        # Use the last URL path segment as a stable per-house key.
        item["url"] = str(response.url).split('/')[-1]
        item["title"] = title
        top_plane = response.css(".overview .content")
        # Guard against missing nodes: extract_first() returns None and
        # plain "+" on None would raise TypeError.
        price_main = top_plane.css(".price span::text").extract_first() or ""
        price_unit = top_plane.css(".unit span::text").extract_first() or ""
        item["price"] = price_main + price_unit
        item["unit_price"] = top_plane.css(".unitPrice span::text").extract_first()
        item["tax_pay"] = top_plane.css("#tax-text span::attr(title)").extract_first()
        item["room1"] = top_plane.css(".houseInfo .room .mainInfo::text").extract_first()
        item['room2'] = top_plane.css(".houseInfo .room .subInfo::text").extract_first()
        item['type1'] = top_plane.css(".houseInfo .type .mainInfo::text").extract_first()
        item['type2'] = top_plane.css(".houseInfo .type .subInfo::text").extract_first()
        item['area1'] = top_plane.css(".houseInfo .area .mainInfo::text").extract_first()
        item['area2'] = top_plane.css(".houseInfo .area .subInfo::text").extract_first()
        # community
        item['community_name'] = top_plane.css(
            ".aroundInfo .communityName .info").css("a::text").extract_first()
        house_community_area = top_plane.css(".aroundInfo .areaName .info a").css("span::text").extract()
        item['community_area'] = "".join(house_community_area)
        # details: the page exposes up to 20 <li> attribute lines; pad with
        # None instead of indexing past the end (IndexError on sparse pages).
        base_plane = response.css("#introduction .introContent li::text").extract()
        for i in range(20):
            item['detail%d' % i] = base_plane[i] if i < len(base_plane) else None
        special = response.css(".introContent.showbasemore")  # can't know length
        house_tag_list = special.css(".tags.clear .content a::text").extract()
        # Comma-terminated tag string, e.g. "tagA,tagB," (matches stored format).
        item['tag'] = "".join(str(house_tag).strip() + "," for house_tag in house_tag_list)
        # NOTE(review): the long feature description (.baseattribute .content)
        # was collected but never stored (item['long_detail'] is disabled),
        # so that dead computation was dropped.
        # see time
        see_plane = response.css("#record")
        item['visit_in_seven'] = see_plane.css(".panel .count::text").extract_first()
        item['visit_in_thirty'] = see_plane.css(".panel .totalCount span::text").extract_first()
        print("--------------FINISH ITEM ANALYZE FROM : ES--------------")
        yield item


class ZufangLianjiaSpider(scrapy.Spider):
    """Crawl rental ("zufang") house listings on bj.lianjia.com.

    Walks the paginated listing index, follows every house detail page and
    yields one populated ``Lianjia_zufang_item`` per house.
    """
    name = 'lianjia_zufang'
    # Bare hostname only: Scrapy's offsite middleware matches domains, not
    # URL paths ('bj.lianjia.com/zufang/' would never match).
    allowed_domains = ['bj.lianjia.com']
    start_urls = ['https://bj.lianjia.com/zufang/']
    custom_settings = {
        # Persist crawl state so the job can be paused/resumed.
        "JOBDIR": "/home/demeen/Desktop/data/dataweb/lianjia/job/zufang"
    }

    # Strips HTML tags from extracted fragments; compiled once instead of
    # being rebuilt by re.compile on every loop iteration.
    _TAG_RE = re.compile(r'<[^>]+>', re.S)

    def __init__(self):
        firefox_profile = webdriver.FirefoxProfile()
        # 2 == block all images: speeds up Selenium page loads.
        firefox_profile.set_preference('permissions.default.image', 2)

        self.driver = webdriver.Firefox(firefox_profile=firefox_profile,
                                        executable_path="/home/demeen/Desktop/source/geckodriver")
        super(ZufangLianjiaSpider, self).__init__()

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed into the crawler's signal manager.

        Replaces the ``scrapy.xlib.pydispatch`` dispatcher, which was
        removed from Scrapy; ``crawler.signals.connect`` is the supported
        way to receive the ``spider_closed`` signal.
        """
        from scrapy import signals
        spider = super(ZufangLianjiaSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Shut down the Selenium browser when the spider stops."""
        print("spider close")
        self.driver.quit()

    def parse(self, response):
        """Yield a request per house detail page, then follow pagination."""
        house_links = response.css("#house-lst .info-panel h2 a::attr(href)").extract()
        for house_url in house_links:
            yield Request(url=urljoin(response.url, house_url), callback=self.parse_house, dont_filter=True)
        # The last pager link points to the next page (when one exists).
        next_page = response.css(".page-box.house-lst-page-box a:last-child")
        next_page = next_page.css("a::attr(href)").extract_first()
        if next_page:
            next_url = urljoin(response.url, next_page)
            yield Request(url=next_url, callback=self.parse, dont_filter=True)

    def parse_house(self, response):
        """Extract one ``Lianjia_zufang_item`` from a rental detail page.

        Missing page fragments yield ``None`` values instead of raising,
        so a single sparse page cannot kill the crawl.
        """
        item = Lianjia_zufang_item()
        # Use the last URL path segment as a stable per-house key.
        item['url'] = str(response.url).split('/')[-1]
        item['title'] = response.css(".content-wrapper .title-wrapper .title .main::text").extract_first()
        # money and base
        base_plane = response.css(".content.zf-content")
        item['price'] = base_plane.css("span.total::text").extract_first()
        item['fitment'] = base_plane.css("span.tips::text").extract_first()
        # We only need the first five entries; pad with None instead of
        # indexing past the end (IndexError on sparse pages).
        zf_room = base_plane.css(".zf-room p::text").extract()
        for i, key in enumerate(('squire', 'room_status', 'floor', 'face_to', 'subway')):
            item[key] = zf_room[i] if i < len(zf_room) else None
        # detail direction: keep only non-blank lines, then map positionally.
        base_direction = response.css("#introduction .content li::text").extract()
        real_direction = [d for d in (str(x).strip() for x in base_direction) if d]
        for i, key in enumerate(('rant_way', 'pay_way', 'now_status', 'hot_way')):
            item[key] = real_direction[i] if i < len(real_direction) else None
        # what we have in the room: strip HTML, join as "a,b,c," and trim
        # surrounding whitespace (trailing comma is the stored format).
        departments = response.css(".zf-tag li.tags").extract()
        item['tag'] = "".join(
            self._TAG_RE.sub('', depart).strip() + "," for depart in departments
        ).strip()
        # visit counters
        panel = response.css("#record .panel")
        visit_in_seven = panel.css(".count").extract_first()
        if visit_in_seven is not None:
            # extract_first() may return None; re.sub on None raises TypeError.
            visit_in_seven = self._TAG_RE.sub('', visit_in_seven).strip()
        item['visit_in_seven'] = visit_in_seven
        item['visit_in_thirty'] = panel.css("span::text").extract_first()
        # NOTE(review): the long description (.featureContent .text) was
        # collected but never stored (item['long_description'] is disabled),
        # so that dead computation was dropped.
        print("--------------FINISH ITEM ANALYZE FROM : ZF--------------")
        yield item
