# -*- coding: utf-8 -*-

import scrapy
import re
import json
from bs4 import BeautifulSoup
from scrapy.http import Request
from HouseData.items import HousedataItem


class MaxSpider(scrapy.Spider):
    """Crawl second-hand ("ershoufang") housing listings from lianjia.com.

    Crawl flow:
        homepage -> per-city sites -> per-district listing indexes ->
        paginated result pages -> one HousedataItem per listing.
    """

    name = "lianjia_v2"
    allowed_domains = ["lianjia.com"]
    # NOTE: attribute name kept for backward compatibility ("base" misspelled).
    bash_url = "https://www.lianjia.com"

    def start_requests(self):
        # Entry point: fetch the national homepage to discover city sites.
        yield Request(self.bash_url, callback=self.find_url)

    def find_url(self, response):
        """Collect per-city site URLs (e.g. https://bj.lianjia.com/) and
        follow each city's ershoufang index."""
        soup = BeautifulSoup(response.body, "html.parser")
        # Dots escaped so e.g. "https://xalianjiaXcom/" cannot match; the
        # subdomain accepts 2+ letters (some city codes are longer than two).
        city_re = re.compile(r"^https://[a-zA-Z]{2,}\.lianjia\.com/$")
        seen = set()
        for node in soup.find_all("a"):
            href = node.get('href')
            if href and city_re.match(href) and href not in seen:
                seen.add(href)
                yield Request(href + 'ershoufang/', callback=self.ershoufang_url)

    def ershoufang_url(self, response):
        """From a city's ershoufang index, follow every district page.

        District links are site-relative, e.g. "/ershoufang/dongchengqu/",
        so they are joined onto the city origin.
        """
        soup = BeautifulSoup(response.body, "html.parser")
        # e.g. "https://xa.lianjia.com/ershoufang/" -> "https://xa.lianjia.com"
        home_url = response.url[:-len('/ershoufang/')]
        district_re = re.compile(r"^/ershoufang/[0-9a-zA-Z]*/$")
        for node in soup.find_all("a"):
            href = node.get('href')
            if href and district_re.match(href):
                # Target format: "https://bj.lianjia.com/ershoufang/<district>/"
                yield Request(home_url + href, callback=self.page_url)

    def page_url(self, response):
        """Read the pagination widget and request every result page."""
        soup = BeautifulSoup(response.body, "html.parser")
        base = response.url
        for node in soup.find_all('div', attrs={'comp-module': 'page'}):
            # "page-data" is a JSON blob such as {"totalPage":100,"curPage":1}.
            # json.loads replaces eval(): never eval scraped page content.
            try:
                total = json.loads(node.get("page-data"))['totalPage']
            except (TypeError, ValueError, KeyError):
                continue  # malformed/missing pagination data: skip this node
            # Pages are 1-based (pg1..pgN); range(total) would hit the
            # nonexistent pg0 and miss the last page.
            for page in range(1, total + 1):
                yield Request('{}pg{}/'.format(base, page), callback=self.end_url)

    def end_url(self, response):
        """Parse one paginated result page and yield an item per listing."""
        soup = BeautifulSoup(response.body, "html.parser")

        # Every listing row carries class "clear".
        house_nodes = soup.find_all("li", class_="clear")
        if not house_nodes:
            return

        item = HousedataItem()

        # City code from the subdomain, e.g. "xa" in https://xa.lianjia.com/...
        # (robust for 3+ letter city codes, unlike a fixed url[8:10] slice).
        item['Region'] = response.url.split('/')[2].split('.')[0]

        for info in house_nodes:
            # Unique listing id stored on the row's <a> tag.
            item['Id'] = info.a['data-housecode']

            # "houseInfo" text is pipe-separated:
            # garden | layout | size | direction | renovation [| elevator]
            for house_info in info.find_all("div", class_="houseInfo"):
                fields = house_info.get_text().replace(' ', '').split('|')
                try:
                    item['Garden'] = fields[0]
                    item['Layout'] = fields[1]
                    item['Size'] = fields[2][:-2]  # strip the trailing unit
                    item['Direction'] = fields[3]
                    item['Renovation'] = fields[4]
                    item['Elevator'] = fields[5] if len(fields) > 5 else ''
                except IndexError:
                    # Listing has fewer fields than expected; keep partial data.
                    print('保存发生错误')

            for price in info.find_all("div", class_="totalPrice"):
                item['Price'] = price.span.get_text()

            yield item