# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
import re
from bs4 import BeautifulSoup
from HunterHouse.items import HunterRentHouseItem

class lianJiaSellHouseSpider(scrapy.Spider):
    """Collect rental-listing detail URLs from Lianjia Nanjing and scrape each listing.

    Crawl flow:
        start page -> district URLs (head_url_callback)
                   -> bizcircle URLs (second_url_callback)
                   -> paginated listing pages (all_url_callback)
                   -> listing detail pages (parse).

    Collected URLs are also appended to local text files as a side effect.
    """

    # Only whole-apartment rentals for now: URL suffix "/rt200600000001"
    # (shared rentals would be "/rt200600000002").
    name = "lianjia_nj_rent_house_url"
    # Nanjing start URL below.
    #start_urls = ["https://nj.lianjia.com/zufang/NJ2168555108776804352.html"]
    baseUrl = "https://nj.lianjia.com/zufang/"
    # NOTE: these are class-level mutable lists shared across instances; fine
    # for Scrapy's one-spider-per-crawl usage, but not instance-safe.
    headUrlList = []      # district-level URLs, kept to filter them out later
    firstUrlList = []     # pending district URLs
    secondUrlList = []    # pending bizcircle URLs (each ends with "/pg1")
    allUrlList = []       # collected listing detail URLs
    sectionUrlList = []   # already-seen paginated section URLs (dedup)

    allowed_domains = ["lianjia.com"]

    def start_requests(self):
        """Kick off the crawl at the rental listing root."""
        # Plain GET; the original used FormRequest, which without formdata
        # behaves as a GET anyway — Request states the intent correctly.
        return [scrapy.Request(self.baseUrl, callback=self.head_url_callback)]

    def head_url_callback(self, response):
        """Extract district URLs from the landing page, then crawl the first one."""
        self.logger.info("========== head_url_callback ================")

        soup = BeautifulSoup(response.body, "html5lib")
        # <li data-type="district"> elements hold the per-district links.
        for li in soup.find_all("li", attrs={"data-type": "district"}):
            for my_a in li.find_all("a"):
                url = self.baseUrl + my_a["href"].split("/")[2]
                self.firstUrlList.append(url)
                self.headUrlList.append(url)

        # Drop the leading "no limit" (all-districts) entry.
        if self.firstUrlList:
            self.firstUrlList.pop(0)
        # Take the first district URL and fetch its second-level (bizcircle) URLs.
        if self.firstUrlList:
            url = self.firstUrlList.pop(0)
            yield Request(url, callback=self.second_url_callback, dont_filter=True)

    def second_url_callback(self, response):
        """Extract bizcircle URLs for one district, then start paging through the first."""
        self.logger.info("========== second_url_callback ================")

        soup = BeautifulSoup(response.body, "html5lib")
        # <li data-type="bizcircle"> elements hold the sub-area links.
        for li in soup.find_all("li", attrs={"data-type": "bizcircle"}):
            for my_a in li.find_all("a"):
                url = self.baseUrl + my_a["href"].split("/")[2]
                # Skip district-level URLs that would duplicate head_url_callback's work.
                if url not in self.headUrlList:
                    self.secondUrlList.append(url + "/pg1")

        if self.secondUrlList:
            # "rt200600000001" restricts results to whole-apartment rentals.
            url = self.secondUrlList.pop(0) + "rt200600000001"
            yield Request(url, callback=self.all_url_callback, dont_filter=True)

    def all_url_callback(self, response):
        """Collect detail URLs from one result page, follow pagination, and
        chain on to the remaining district/bizcircle/detail work."""
        self.logger.info("========== all_url_callback ================")

        houses = response.xpath(".//div[@class='content__list']")
        try:
            houseurls = houses.xpath(".//div[@class='content__list--item']/a/@href").extract()
            for url in houseurls:
                # Only listing links ("/zufang/NJxxxx.html") are of interest.
                if "zufang" not in url:
                    continue
                detailurl = self.baseUrl + url.split("/")[2]
                if detailurl not in self.allUrlList:
                    with open(u"南京在租房源链接.txt", 'a+') as f:
                        f.writelines(detailurl + "\n")
                        self.allUrlList.append(detailurl)
        except Exception:
            # Page layout may not match the xpaths; log with traceback and move on
            # (the original bare `except:` printed and hid the actual error).
            self.logger.exception("extract error on %s", response.url)

        # Pagination: the digits inside the content__pg div; the last two
        # matches are (total page, current page).  The original read
        # totalpage/curpage under a weaker `len(page) > 1` guard, which raised
        # NameError when exactly two numbers matched — require > 2 throughout.
        page = response.xpath("//div[@class='content__pg'][@data-totalpage]").re(r"\d+")
        if len(page) > 2:
            totalpage, curpage = page[-2], page[-1]
            # Not every area has pages up to 100, so follow "next" only while
            # the current page is not the last one.
            if totalpage != curpage:
                section_next_page = re.match(r'.*/pg', response.url).group() + str(int(curpage) + 1)
                next_page = section_next_page + "rt200600000001"
                if section_next_page not in self.sectionUrlList:
                    with open(u"南京在租区域链接.txt", 'a+') as f:
                        f.writelines(section_next_page + "\n")
                        # Store the same value we test membership on; the
                        # original appended next_page, so dedup never matched.
                        self.sectionUrlList.append(section_next_page)
                yield Request(next_page, callback=self.all_url_callback, dont_filter=True)

        if not self.firstUrlList and not self.secondUrlList:
            # All section URLs exhausted: start scraping the detail pages.
            if self.allUrlList:
                url = self.allUrlList.pop(0)
                yield Request(url, callback=self.parse, dont_filter=True)
        elif not self.secondUrlList:
            # Move on to the next district.
            url = self.firstUrlList.pop(0)
            yield Request(url, callback=self.second_url_callback, dont_filter=True)
        else:
            # Keep re-entering this callback until every bizcircle is paged through.
            url = self.secondUrlList.pop(0) + "rt200600000001"
            yield Request(url, callback=self.all_url_callback, dont_filter=True)

    def parse(self, response):
        """Extract one listing's fields into a HunterRentHouseItem, then
        schedule the next collected detail URL."""
        self.logger.info("==========================")
        item = HunterRentHouseItem()

        # Listing URL.
        item['url'] = response.url

        # Title block: title text, publish time, house code.
        item["title"] = response.xpath(
            ".//div[@class='content clear w1150']/p[@class='content__title']/text()").extract()
        item["publish_time"] = response.xpath(
            ".//div[@class='content__subtitle']/text()").extract()
        item["houseid"] = response.xpath(
            ".//div[@class='content__subtitle']/i[@class='house_code']/text()").extract()

        # Price and its unit.
        item["price"] = response.xpath(
            ".//div[@class='content__aside fr']/p[@class='content__aside--title']/span/text()").extract()
        item["price_unit"] = response.xpath(
            ".//div[@class='content__aside fr']/p[@class='content__aside--title']/text()").extract()

        # Basic info table (rooms, area, orientation...).
        item['house_basic_info'] = response.xpath(
            ".//ul[@class='content__aside__list']/p[@class='content__article__table']/span/text()").extract()

        # Detailed info list (floor, elevator, parking...).
        item['house_detail_info'] = response.xpath(
            ".//div[@class='content__article__info']/ul/li/text()").extract()

        yield item

        # Chain to the next collected detail URL, one at a time.
        if self.allUrlList:
            url = self.allUrlList.pop(0)
            yield Request(url, callback=self.parse, dont_filter=True)