# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
import re
from bs4 import BeautifulSoup
from HunterHouse.items import HunterCommunityHouseItem

class lianJiaCommunitySpider(scrapy.Spider):
    """Crawl Lianjia Nanjing residential-community ("xiaoqu") pages.

    Flow: start page -> district urls (head_url_callback) -> sub-district
    urls (second_url_callback) -> paginated listing pages collecting every
    community detail url (all_url_callback) -> per-community detail parse.
    URL work queues live in class-level lists and are drained one request
    at a time via chained callbacks.
    """
    # Only whole-rent is considered for now: append "/rt200600000001" to the
    # url; shared rent would be "/rt200600000002".
    name = "lianjia_nj_community_url"
    # Nanjing start url below.
    # start_urls = ["https://nj.lianjia.com/xiaoqu/1411046463725/"]
    baseUrl = "https://nj.lianjia.com/xiaoqu/"
    headUrlList = []     # district-level urls, kept to filter duplicates
    firstUrlList = []    # district urls still to crawl
    secondUrlList = []   # sub-district urls still to crawl
    allUrlList = []      # community detail-page urls still to parse
    sectionUrlList = []  # paginated section urls already recorded

    allowed_domains = ["lianjia.com"]

    def start_requests(self):
        # Plain GET request: no form data is sent, so a FormRequest is
        # unnecessary — use an ordinary Request.
        yield scrapy.Request(self.baseUrl, callback=self.head_url_callback)

    # First collect the district-level ("head") urls.
    def head_url_callback(self, response):
        self.logger.info("========== head_url_callback ================")

        soup = BeautifulSoup(response.body, "html5lib")
        # The div carrying the per-district url links.
        dl = soup.find_all("div", attrs={"data-role": "ershoufang"})
        my_as = dl[0].find_all("a")  # every district anchor inside it
        for my_a in my_as:
            district = my_a["href"].split("/")[2]
            self.firstUrlList.append(self.baseUrl + district)
            self.headUrlList.append(self.baseUrl + district)

        print(self.firstUrlList)
        # Take the first district url and go fetch its sub-district urls.
        url = self.firstUrlList.pop(0)

        yield Request(url, callback=self.second_url_callback, dont_filter=True)

    # Collect the non-head (sub-district) urls.
    def second_url_callback(self, response):
        """Parse a district page and queue its sub-district listing urls."""
        self.logger.info("========== second_url_callback ================")

        soup = BeautifulSoup(response.body, "html5lib")
        div = soup.find_all("div", attrs={"data-role": "ershoufang"})
        my_as = div[0].find_all("a")  # all anchors in the area selector

        for my_a in my_as:
            url = self.baseUrl + my_a["href"].split("/")[2]
            # Skip district-level urls already collected as head urls.
            if url not in self.headUrlList:
                self.secondUrlList.append(url + "/pg1")
        print(self.secondUrlList)
        url = self.secondUrlList.pop(0)

        yield Request(url, callback=self.all_url_callback, dont_filter=True)

    def all_url_callback(self, response):
        """Collect community detail urls from a listing page and paginate."""
        self.logger.info("========== all_url_callback ================")

        communities = response.xpath(".//ul[@class='listContent']/li")
        for community in communities:
            try:
                communityurl = community.xpath(".//div[@class='title']/a/@href").extract()
                url0 = communityurl[0]
                if url0 not in self.allUrlList:
                    with open(u"南京详细小区链接.txt", 'a+', encoding="utf-8") as f:
                        f.write(url0 + "\n")
                        self.allUrlList.append(url0)
            except (IndexError, KeyError):
                # Listing item without a title link — skip it.
                print("extract error!")

        # page-data digits: [totalPage, curPage]; raw string avoids the
        # invalid-escape warning for \d.
        page = response.xpath("//div[@class='page-box house-lst-page-box'][@page-data]").re(r"\d+")
        p = re.compile(r'.*/pg')
        # When the url itself ends in a number the regex picks up one extra
        # leading digit group; drop it.
        if len(page) == 3:
            page.pop(0)

        # Only follow the next page while one exists (total != current) —
        # not every district has e.g. a page 100, so no fixed for-loop.
        if len(page) > 1 and page[0] != page[1]:
            next_page = p.match(response.url).group() + str(int(page[1]) + 1)
            next_page = response.urljoin(next_page)
            if next_page not in self.sectionUrlList:
                with open(u"南京在售小区区域链接.txt", 'a+', encoding="utf-8") as f:
                    f.write(next_page + "\n")
                    self.sectionUrlList.append(next_page)

            yield Request(next_page, callback=self.all_url_callback, dont_filter=True)

        if not self.firstUrlList and not self.secondUrlList:
            # Every area url exhausted: start parsing community detail pages.
            url = self.allUrlList.pop(0)
            yield Request(url, callback=self.parse, dont_filter=True)
        elif self.firstUrlList and not self.secondUrlList:
            # Next district still pending: fetch its sub-districts.
            url = self.firstUrlList.pop(0)
            yield Request(url, callback=self.second_url_callback, dont_filter=True)
        else:
            # Keep chaining through the remaining sub-district urls.
            url = self.secondUrlList.pop(0)
            yield Request(url, callback=self.all_url_callback, dont_filter=True)

    def parse(self, response):
        """Extract one community's detail fields into an item, then chain on."""
        self.logger.info("==========================")
        item = HunterCommunityHouseItem()

        # Detail-page url of this community.
        item['url'] = response.url

        # Title block: community name and address.
        item["name"] = response.xpath(".//div[@class='detailHeader fl']/h1/text()").extract()
        item["address"] = response.xpath(".//div[@class='detailHeader fl']/div/text()").extract()

        # Breadcrumb location and average listing price.
        item["location"] = response.xpath(".//div[@class='fl l-txt']/a/text()").extract()
        item["avg_price"] = response.xpath(".//div[@class='xiaoquPrice clear']/div[@class='fl']/span/text()").extract()

        # Building information table.
        item["community_info"] = response.xpath(".//div[@class='xiaoquInfoItem']/span[@class='xiaoquInfoContent']/text()").extract()

        # Number of followers.
        item['follow_num'] = response.xpath(".//div[@class='detailFollowedNum']/span/text()").extract()

        yield item

        # Chain to the next community detail url, if any remain.
        if self.allUrlList:
            url = self.allUrlList.pop(0)
            yield Request(url, callback=self.parse, dont_filter=True)