# -*- coding: utf-8 -*-

import scrapy
import re
import json
from bs4 import BeautifulSoup
from scrapy.http import Request
from HouseData.items import HousedataItem


class MaxSpider(scrapy.Spider):
    """Crawl second-hand housing listings for Shenzhen from lianjia.com.

    Flow: ``start_requests`` yields one request per district page;
    ``end_requests`` reads the pagination metadata from a district's first
    page and fans out one request per result page; ``parse`` extracts one
    ``HousedataItem`` per listing.
    """

    name = "lianjia"
    allowed_domains = ["lianjia.com"]
    # NOTE: attribute name kept as-is ("bash" was presumably meant to be
    # "base") so any external references keep working.
    bash_url = "https://sz.lianjia.com/ershoufang/"

    def start_requests(self):
        """Yield one request per Shenzhen district listing page."""
        districts = ['luohuqu', 'futianqu', 'nanshanqu', 'yantianqu', 'baoanqu',
                     'longgangqu', 'longhuaqu', 'guangmingxinqu', 'pingshanqu',
                     'dapengxinqu']
        for district in districts:
            # e.g. https://sz.lianjia.com/ershoufang/luohuqu/
            # The district is re-derived from response.url in parse();
            # stashing it on ``self`` here would be a bug, because this loop
            # finishes before any response callback runs, so the attribute
            # would always hold the last district.
            yield Request(self.bash_url + district + '/',
                          callback=self.end_requests)

    def end_requests(self, response):
        """Discover a district's total page count and request every page.

        The count lives in the JSON blob stored in the ``page-data``
        attribute of the pagination div (e.g. ``{"totalPage": N, ...}``).
        """
        quyu_url = response.url
        soup = BeautifulSoup(response.body, "html.parser")
        for node in soup.find_all("div", class_="house-lst-page-box"):
            max_number = json.loads(node['page-data'])['totalPage']
            for page in range(1, max_number + 1):
                # e.g. https://sz.lianjia.com/ershoufang/luohuqu/pg3/
                yield Request(quyu_url + 'pg' + str(page) + '/', self.parse)

    def parse(self, response):
        """Extract one HousedataItem per listing on a result page."""
        soup = BeautifulSoup(response.body, "html.parser")

        # District slug is the third-from-last path segment of e.g.
        # https://sz.lianjia.com/ershoufang/luohuqu/pg3/
        region = response.url.split('/')[-3]

        for info in soup.find_all("li", class_="clear"):
            anchor = info.a
            if anchor is None or not anchor.has_attr('data-housecode'):
                # Not a listing row (some "clear" <li> nodes are filler).
                continue

            # Fresh item per listing: reusing one mutable item instance
            # across yields would let later listings clobber earlier ones
            # in pipelines that hold references.
            item = HousedataItem()
            item['Region'] = region
            item['Id'] = anchor['data-housecode']

            for house_info in info.find_all("div", class_="houseInfo"):
                # Text shape (after stripping spaces):
                # "Garden|Layout|89.5平米|Direction|Renovation[|Elevator]"
                fields = house_info.get_text().replace(' ', '').split('|')
                try:
                    item['Garden'] = fields[0]
                    item['Layout'] = fields[1]
                    item['Size'] = fields[2][:-2]  # strip trailing "平米" (sq. meters)
                    item['Direction'] = fields[3]
                    item['Renovation'] = fields[4]
                    item['Elevator'] = fields[5] if len(fields) > 5 else ''
                except (IndexError, KeyError):
                    # Listing with fewer fields than expected, or an item
                    # field missing from HousedataItem; log and keep going
                    # instead of silently swallowing everything.
                    self.logger.warning('unexpected houseInfo shape: %r', fields)

            for price in info.find_all("div", class_="totalPrice"):
                item['Price'] = price.span.get_text()

            yield item