# -*- coding: utf-8 -*-
import ast
import json
import random
import re
import time
from urllib import parse

import scrapy
from lxml import etree

from house_spider.utils import enc


class A58tongchengSpider(scrapy.Spider):
    """Crawl Beijing rental listings from 58.com (gongyu.58.com).

    Pipeline:
      start_requests   -> per-district entry page
      parse            -> extract city/area/category ids, call the listing API
      parse_house_info -> walk the listing JSON, request each detail page
      parse_detail     -> scrape the detail page, request the phone API
      parse_phone      -> attach the phone number and yield the item
    """

    name = '58tongcheng'
    # allowed_domains = ['58.com']
    # Listing API endpoint; returns at most 15 entries per call.
    BaseUrl = 'https://gongyu.58.com/guide/api_for_renting?displayLimitNum=15&'
    # Beijing district slugs used to build per-district entry URLs.
    locals = ['chaoyang', 'haidian', 'dongcheng', 'xicheng', 'fengtai',
              'tongzhouqu', 'shijingshan', 'fangshan', 'changping', 'daxing',
              'shunyi', 'miyun', 'huairou', 'yanqing', 'pinggu', 'mentougou']
    # Entry page for one district; scraped for the ids the listing API needs.
    local_url = "https://bj.58.com/%s/chuzu/"
    # Template: cityId | areaId | cateId.
    basequery = "basequery=cityId:%s|areaId:%s|cateId:%s"
    page = 20

    # API that resolves (adtype, tid, infoId, sign) into the lister's phone number.
    phone_base_url = "https://gongyu.58.com/webphone/api_get_phone?"

    def start_requests(self):
        """Kick off one request per district entry page."""
        # NOTE(review): [:1] limits the crawl to the first district only —
        # looks like a debug leftover; confirm before widening.
        for local in self.locals[:1]:
            url = self.local_url % (local,)
            time.sleep(random.randint(1, 3))  # crude politeness delay
            yield scrapy.Request(url=url, meta={'local': local})

    def parse(self, response):
        """Pull city/area/category ids from the entry page, then hit the listing API."""
        local = response.meta['local']
        # The page embeds a JS object literal with the ids, e.g.
        #   _trackURL = "{'cate':'1,37031','area':'1,6809',...,'is_real':false,...}"
        match = re.search(r'_trackURL = "({.+?})"', response.body.decode('utf8'), re.I)
        track_url_info = match.group(1)
        # Normalise JS booleans so the literal parses as Python, then use
        # ast.literal_eval instead of eval(): the string comes from an
        # untrusted remote page and must not be able to execute code.
        track_url_info = track_url_info.replace("true", "True")
        track_url_info = track_url_info.replace("false", "False")
        info = ast.literal_eval(track_url_info)
        city_id, cate_id = info['cate'].split(',')  # city id, category id
        local_id = info['area'].split(',')[1]       # district (area) id
        # Build the listing-API url. Fixes two bugs: cateId previously used
        # city_id instead of cate_id, and the '&' between the basequery and
        # pageNum parameters was missing.
        page_query = "pageNum={}&".format(1)
        t_now = "_={}".format(int(time.time() * 1000))
        base = self.basequery % (city_id, local_id, cate_id)
        url = self.BaseUrl + base + "&" + page_query + t_now
        time.sleep(random.randint(1, 3))
        yield scrapy.Request(url=url, callback=self.parse_house_info, meta={'local': local})

    def parse_house_info(self, response):
        """Walk the listing-API JSON and request every house detail page."""
        local = response.meta['local']
        payload = json.loads(response.body)
        blocks = payload.get('data', {})
        # The API splits listings across three page positions (top, middle,
        # bottom); a missing position now yields an empty list instead of a
        # KeyError.
        house_list = (blocks.get('position1', {}).get('list', [])
                      + blocks.get('position2', {}).get('list', [])
                      + blocks.get('position4', {}).get('list', []))

        for house in house_list:
            # Build the item dict (renamed from `data`, which shadowed the
            # parsed response above).
            item = {
                'title': house.get('title'),
                'area': house.get('rentRoomArea'),
                'rooms': house.get('layout'),
                'region': local,
                'address': house.get('dispLocal'),
                'traffic': house.get('substationDesc'),
                'price': house.get('price'),
                'publish_time': house.get('postDate'),
            }
            url = house.get('url')
            if url is None:
                # No detail url means the record is unusable; drop it.
                continue
            # Detail urls appear to be scheme-relative ('//...'); only
            # prepend the scheme when it is actually missing.
            if not url.startswith('http'):
                url = 'http:' + url
            time.sleep(random.randint(1, 6))
            yield scrapy.Request(url=url, callback=self.parse_detail, meta=item)

    def parse_detail(self, response):
        """Scrape the detail page, then request the phone-number API."""
        data = response.meta
        url = response.url
        data['url'] = url
        # Query params needed later by the phone API.
        queries = parse.parse_qs(parse.urlparse(url).query)
        adtype = queries['adtype'][0]
        tid = queries['tid'][0]
        selector = etree.HTML(response.body)

        # Selling-points / description paragraph.
        desc = selector.xpath("//p[@*='desc']/text()")
        data['liangdian'] = str(desc[0]) if desc else ""

        # Facility list (appliances, furniture, ...).
        data['sheshi'] = selector.xpath("//ul[@class='icon-list']/li/text()")

        # The detail page embeds the listing's infoid in inline JSON; without
        # it the phone API cannot be called, so yield the item as-is.
        content = response.body.decode('utf8')
        infoid_match = re.search(r'\"infoid\":\"(.+?)\"', content, re.I)
        if infoid_match is None:
            yield data
        else:
            infoid = infoid_match[1]
            sign = enc.get_sign(tid, infoid)
            t_now = int(time.time() * 1000)
            phone_url = (self.phone_base_url + "adtype=" + adtype + "&tid=" + tid
                         + "&sign=" + sign + "&_=" + str(t_now) + "&infoId=" + infoid)
            time.sleep(random.randint(1, 6))
            yield scrapy.Request(url=phone_url, callback=self.parse_phone, meta=data)

    def parse_phone(self, response):
        """Attach the phone number (if any) to the item and emit it."""
        data = response.meta
        decdata = json.loads(response.body)
        try:
            phone = decdata['data']["phone"]
        except (KeyError, TypeError):
            # A missing/failed lookup is expected for some listings; the
            # item is still worth keeping without a number.
            phone = None
        data['phone_num'] = phone
        yield data
