# -*- coding: utf-8 -*-

import scrapy
import re
import json
from bs4 import BeautifulSoup
from scrapy.http import Request
from HouseData.items import lianjia_v3Item
from scrapy.selector import Selector

class MaxSpider(scrapy.Spider):
    """Spider for Lianjia Shenzhen second-hand housing (二手房) listings.

    Crawl flow:
        index page        -> district links           (qu_page)
        district page     -> sub-district links       (diqu)
        sub-district page -> paginated listing pages  (page_num)
        listing page      -> per-house detail URLs    (get_id)
        detail page       -> populated lianjia_v3Item (id_page)
    """

    name = "lianjia_v4"
    allowed_domains = ["lianjia.com"]
    # NOTE: attribute name is a historical typo for "base_url"; kept
    # unchanged in case anything outside this file references it.
    bash_url = "https://sz.lianjia.com/ershoufang/"

    def start_requests(self):
        """Start the crawl at the ershoufang index page."""
        yield Request(self.bash_url, callback=self.qu_page)

    def qu_page(self, response):
        """Yield a request for every district link on the index page."""
        soup = BeautifulSoup(response.body, "html.parser")
        for nav in soup.find_all("div", attrs={'data-role': 'ershoufang'}):
            for anchor in nav.find_all('a'):
                href = anchor.get('href')
                yield Request('https://sz.lianjia.com' + str(href),
                              callback=self.diqu)

    def diqu(self, response):
        """Yield a request for every sub-district link on a district page.

        District-level anchors (whose label contains '区') are skipped so
        that only the finer-grained sub-district pages are crawled.
        """
        soup = BeautifulSoup(response.body, "html.parser")
        for nav in soup.find_all("div", attrs={'data-role': 'ershoufang'}):
            for anchor in nav.find_all('a'):
                if '区' not in str(anchor.string):
                    href = anchor.get('href')
                    yield Request('https://sz.lianjia.com' + str(href),
                                  callback=self.page_num)

    def page_num(self, response):
        """Read the pagination widget and request every listing page."""
        soup = BeautifulSoup(response.body, "html.parser")
        max_page = 0
        for node in soup.find_all('div', attrs={'comp-module': 'page'}):
            # "page-data" holds a JSON blob such as
            # {"totalPage":42,"curPage":1}.  Parse it with json.loads
            # instead of the original eval(), which executed untrusted
            # remote page content.
            max_page = json.loads(node.get("page-data"))['totalPage']
        # Listing pages are numbered pg1..pgN.  The original
        # range(max_page) requested pg0 (an alias of page 1) and never
        # reached the last page.
        for page in range(1, max_page + 1):
            yield Request(str(response.url) + 'pg' + str(page) + '/',
                          callback=self.get_id)

    def get_id(self, response):
        """Extract each listing's house code and request its detail page."""
        soup = BeautifulSoup(response.body, "html.parser")
        for entry in soup.find_all("li", class_="clear"):
            # The <a> tag carries the listing id in data-housecode;
            # detail pages look like
            # https://sz.lianjia.com/ershoufang/105100720599.html
            house_id = entry.a['data-housecode']
            yield Request('https://sz.lianjia.com/ershoufang/' + house_id + '.html',
                          callback=self.id_page)

    def id_page(self, response):
        """Parse one listing detail page into a lianjia_v3Item."""
        print('开始分析')
        soup = BeautifulSoup(response.body, 'html.parser')
        item = lianjia_v3Item()

        # The 12-digit listing id is embedded in the URL:
        # .../ershoufang/<id>.html
        item['ljid'] = response.url[-17:-5]

        # Total price.
        for node in soup.find_all("span", class_="total"):
            item['jg'] = node.string

        # Community (小区) name.
        for block in soup.find_all("div", class_="communityName"):
            for link in block.find_all('a', class_="info"):
                item['xqmc'] = link.string

        # XPath (without the trailing /text()) of every remaining field,
        # keyed by the item field it fills.  Insertion order mirrors the
        # original extraction order.  The first <ul> of the info panel
        # holds the basic attributes, the second the transaction ones.
        base = '/html/body/div[7]/div[1]/div[1]/div/div/div[%d]/div[2]/ul/li[%d]'
        field_xpaths = {
            # Location: district and street.
            'szqy': '/html/body/div[5]/div[2]/div[4]/div[2]/span[2]/a[1]',
            'jd': '/html/body/div[5]/div[2]/div[4]/div[2]/span[2]/a[2]',
            # Basic info: layout, built area, inner area, orientation,
            # building structure, layout structure, decoration,
            # stair/household ratio, elevator, property-right term.
            'hx': base % (1, 1),
            'jzmj': base % (1, 3),
            'tnmj': base % (1, 5),
            'fwcx': base % (1, 7),
            'jzjg': base % (1, 8),
            'hxjg': base % (1, 4),
            'zxqk': base % (1, 9),
            'thbl': base % (1, 10),
            'dt': base % (1, 11),
            'cqnx': base % (1, 12),
            # Transaction info: listing date, ownership type, last trade
            # date, usage, tenure, ownership, mortgage.
            'gpsj': base % (2, 1),
            'jyqs': base % (2, 2),
            'scjysj': base % (2, 3),
            'fwyt': base % (2, 4),
            'fwnx': base % (2, 5),
            'cqss': base % (2, 6),
            'dyxx': base % (2, 7),
        }
        for field, xpath in field_xpaths.items():
            text = response.selector.xpath(xpath + '/text()').extract()
            if text:
                item[field] = text[0]
            else:
                # Some listings legitimately lack a row (e.g. no mortgage
                # info); store '' so downstream pipelines still see the key.
                print('没有找到xpath:' + field)
                item[field] = ''

        yield item