# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
import re
from bs4 import BeautifulSoup
from HunterHouse.items import HunterSellHouseItem

class lianJiaDealHouseSpider(scrapy.Spider):
    """Collect Nanjing second-hand-house ("ershoufang") listings from
    lianjia.com and scrape each listing into a ``HunterSellHouseItem``.

    Crawl flow — each stage feeds the next through class-level URL queues:
      1. ``head_url_callback``   – district URLs from the start page
      2. ``second_url_callback`` – sub-district URLs for one district
      3. ``all_url_callback``    – every paginated result page; records each
                                   listing's detail-page URL to disk and queue
      4. ``parse``               – one listing detail page -> item

    NOTE(review): scrapy creates a single spider instance per crawl, so the
    shared class-level lists effectively act as instance state here.
    """
    name = "lianjia_nj_deal_house_url"
    # Nanjing listing index; every crawled URL is built on top of this.
    baseUrl = "https://nj.lianjia.com/ershoufang/"
    headUrlList = []     # all district URLs seen on the start page
    firstUrlList = []    # district URLs still waiting to be expanded
    secondUrlList = []   # sub-district "/pg1" URLs still waiting to be paged
    allUrlList = []      # listing detail URLs still waiting to be parsed
    sectionUrlList = []  # pagination URLs already written to disk

    allowed_domains = ["lianjia.com"]

    def start_requests(self):
        """Issue a plain GET for the index page.

        The original used ``FormRequest``, which is meant for form
        submissions; without ``formdata`` it is just a GET, so a plain
        ``Request`` expresses the intent correctly.
        """
        return [scrapy.Request(self.baseUrl, callback=self.head_url_callback)]

    def _extract_district_urls(self, response):
        """Return the district URLs linked from <div data-role="ershoufang">.

        Shared by stage 1 and stage 2, which previously duplicated this
        BeautifulSoup parsing inline.
        """
        soup = BeautifulSoup(response.body, "html5lib")
        container = soup.find_all("div", attrs={"data-role": "ershoufang"})[0]
        # href looks like "/ershoufang/<district>/"; splice the district slug
        # onto the absolute base URL.
        return [self.baseUrl + a["href"].split("/")[2]
                for a in container.find_all("a")]

    def _record_url(self, path, url, seen):
        """Append *url* to the text file at *path* and to *seen*, once."""
        if url not in seen:
            with open(path, "a+", encoding="utf-8") as f:
                # Original used f.writelines(s), which writes a string
                # char-by-char; f.write is the correct call for one string.
                f.write(url + "\n")
            seen.append(url)

    # Stage 1: collect the head (district) URLs.
    def head_url_callback(self, response):
        """Queue every district URL, then start expanding the first one."""
        self.logger.info("========== head_url_callback ================")

        for url in self._extract_district_urls(response):
            self.firstUrlList.append(url)
            self.headUrlList.append(url)
        url = self.firstUrlList.pop(0)
        yield Request(url, callback=self.second_url_callback, dont_filter=True)

    # Stage 2: collect the non-head (sub-district) URLs.
    def second_url_callback(self, response):
        """Queue this district's sub-district URLs, skipping the district-level
        URLs already captured in stage 1, then start paging the first one."""
        self.logger.info("========== second_url_callback ================")

        for url in self._extract_district_urls(response):
            if url not in self.headUrlList:
                self.secondUrlList.append(url + "/pg1")

        if self.secondUrlList:
            url = self.secondUrlList.pop(0)
            yield Request(url, callback=self.all_url_callback, dont_filter=True)
        elif self.firstUrlList:
            # No sub-districts found here (original crashed on pop(0));
            # move on to the next district instead.
            yield Request(self.firstUrlList.pop(0),
                          callback=self.second_url_callback, dont_filter=True)

    def all_url_callback(self, response):
        """Stage 3: record every listing URL on this result page, follow the
        next page when there is one, and keep the crawl chain moving."""
        self.logger.info("========== all_url_callback ================")

        for house in response.xpath(".//ul[@class='sellListContent']/li"):
            links = house.xpath(".//div[@class='title']/a/@href").extract()
            if not links:
                # Listing card without a title link. The original swallowed
                # this with a bare except + print; log it instead.
                self.logger.warning("extract error!")
                continue
            self._record_url(u"南京在售二手房链接.txt", links[0], self.allUrlList)

        # page-data digits are [totalPage, curPage]; when the URL itself ends
        # in digits a third number leaks in first — drop it.
        page = response.xpath(
            "//div[@class='page-box house-lst-page-box'][@page-data]").re(r"\d+")
        if len(page) == 3:
            page.pop(0)

        # Not every district has a page 100, so follow "next" only while
        # totalPage != curPage rather than looping a fixed range.
        if len(page) > 1 and page[0] != page[1]:
            pg_prefix = re.compile(r'.*/pg').match(response.url).group()
            next_page = response.urljoin(pg_prefix + str(int(page[1]) + 1))
            self._record_url(u"南京在售二手房区域链接.txt", next_page,
                             self.sectionUrlList)
            yield Request(next_page, callback=self.all_url_callback,
                          dont_filter=True)

        if not self.firstUrlList and not self.secondUrlList:
            # Every index page has been queued: move on to detail pages.
            if self.allUrlList:
                yield Request(self.allUrlList.pop(0), callback=self.parse,
                              dont_filter=True)
        elif not self.secondUrlList:
            # More districts to expand.
            yield Request(self.firstUrlList.pop(0),
                          callback=self.second_url_callback, dont_filter=True)
        else:
            # Keep chaining through sub-district pages until all are visited.
            yield Request(self.secondUrlList.pop(0),
                          callback=self.all_url_callback, dont_filter=True)

    def parse(self, response):
        """Stage 4: scrape one listing detail page into a HunterSellHouseItem.

        Every field keeps the raw ``extract()`` list (possibly empty) so
        downstream pipelines decide how to flatten it.
        """
        self.logger.info("==========================")
        item = HunterSellHouseItem()

        def xt(xpath):
            # Shorthand for "all text matches of *xpath* on this page".
            return response.xpath(xpath).extract()

        item['url'] = response.url
        self.logger.info(response.url)

        # Follower count vs. showing count: the page renders them in two divs
        # whose class differs only by a trailing space ('action' / 'action ').
        item['followcnt'] = xt(".//div[@class='action']/span[@class='count']/text()")
        item['cartcnt'] = xt(".//div[@class='action ']/span[@class='count']/text()")

        # Breadcrumb location chain.
        item['locationlst'] = xt(".//div[@class='fl l-txt']/a/text()")

        # Total price (note the trailing space in class 'price ').
        item['totalprice_num'] = xt(".//div[@class='price ']/span[@class='total']/text()")
        item['totalprice_unit'] = xt(".//div[@class='price ']/span[@class='unit']/span/text()")

        # Price per square meter.
        item['unitprice_num'] = xt(".//div[@class='unitPrice']/span[@class='unitPriceValue']/text()")
        item['unitprice_unit'] = xt(".//div[@class='unitPrice']/span[@class='unitPriceValue']/i/text()")

        # Room layout and floor / orientation summary blocks.
        item['houseinfo_room_maininfo'] = xt(".//div[@class='room']/div[@class='mainInfo']/text()")
        item['houseinfo_room_subinfo'] = xt(".//div[@class='room']/div[@class='subInfo']/text()")
        item['houseinfo_type_maininfo'] = xt(".//div[@class='type']/div[@class='mainInfo']/text()")
        item['houseinfo_type_subinfo'] = xt(".//div[@class='type']/div[@class='subInfo']/text()")

        # Area name (linked parts) and its surrounding plain text.
        item['houseinfo_areainfo'] = xt(".//div[@class='areaName']/span[@class='info']/a/text()")
        item['houseinfo_loctinfo'] = xt(".//div[@class='areaName']/span[@class='info']/text()")

        item['communityname'] = xt(".//div[@class='communityName']/a/text()")
        item['visittime'] = xt(".//div[@class='visitTime']/span[@class='info']/text()")
        item['houseID'] = xt(".//div[@class='houseRecord']/span[@class='info']/text()")

        # "Base" and "transaction" attribute lists, plus tag labels.
        item['baseinfo'] = xt(".//div[@class='introContent']/div[@class='base']/div[@class='content']/ul/li/text()")
        item['tradeinfo'] = xt(".//div[@class='introContent']/div[@class='transaction']/div[@class='content']/ul/li/span/text()")
        item['houseinfo_lable'] = xt(".//div[@class='tags clear']/div[@class='content']/a/text()")

        # Free-form "features" section; its layout varies between listings,
        # hence the single catch-all text() extraction.
        item['houseinfo_feature'] = xt(".//div[@class='baseattribute clear']/div[@class='content']/text()")

        yield item

        # Self-chaining: keep requesting detail pages until the queue drains
        # (queue now holds plain URL strings, not single-element lists).
        if self.allUrlList:
            yield Request(self.allUrlList.pop(0), callback=self.parse,
                          dont_filter=True)