#!/usr/bin/env python
# coding=utf-8
# author: wanghaibo
# 此代码仅供学习与交流，请勿用于商业用途。
# 爬取二手房数据的爬虫派生类
import json
import re
import threadpool
from bs4 import BeautifulSoup
from lib.item.ershou import *
from lib.item.xiaoqu import *
from lib.zone.city import get_city
from spider.base_spider import *
from lib.utility.date import *
from lib.utility.path import *
from lib.zone.area import *
from conf import NEED_DISTTRICT
from store.XiaoquStore import XiaoquStore
from store.HouseStore import  HouseStore
from store.XuequStore import XuequStore
from baseclass.send_email import SendMail
import traceback
from lib.utility.log import *


class ErShouSpider(BaseSpider):
    """Spider for Lianjia (Beijing) second-hand housing data.

    Crawls three levels: xiaoqu (residential community) lists per area,
    the for-sale house list of each xiaoqu, and individual house detail
    pages. Houses priced outside [min_price, max_price] (unit: 万元) are
    skipped. Parsed records are persisted through XiaoquStore/HouseStore
    when ``store`` is True.
    """

    def __init__(self, channel="lianjia", min_price=0, max_price=600):
        # Price window (万元) applied both on list pages and detail pages.
        self.min_price, self.max_price = min_price, max_price
        BaseSpider.__init__(self, channel)
        # Reused for failure notifications during crawls.
        self.mail = SendMail()

    @classmethod
    def xiaoqu_list_path(cls, area_name, page_num=1):
        """Return the URL of one page of the xiaoqu list for an area.

        A falsy page_num (None/0) falls back to page 1.
        """
        if not page_num:
            page_num = 1
        # "rty2rty3" is a Lianjia filter slug baked into the path.
        return "https://bj.lianjia.com/xiaoqu/{0}/pg{1}rty2rty3/".format(area_name, page_num)

    @classmethod
    def xiaoqu_detail_house_list_path(cls, xiaoqu_id, page_num=None):
        """Return the URL of one page of a xiaoqu's for-sale house list."""
        if not page_num:
            page_num = 1
        # "bt2bt3" is a Lianjia filter slug; "c{id}" scopes to one xiaoqu.
        return "https://bj.lianjia.com/ershoufang/pg{0}bt2bt3c{1}/".format(page_num, xiaoqu_id)

    @classmethod
    def house_detail_path(cls, house_id):
        """Return the detail-page URL for a single house."""
        return "https://bj.lianjia.com/ershoufang/{0}.html".format(house_id)

    def get_area_list(self, district, city="bj"):
        """Return the list of areas (商圈) belonging to a district."""
        areas_of_district = get_areas(city, district)
        print('{0}: Area list:  {1}'.format(district, areas_of_district))
        return areas_of_district

    def get_district_list(self, city="bj"):
        """Return all districts of a city."""
        districts = get_districts(city)
        print('Districts: {0}'.format(districts))
        return districts

    def start_crawl(self, city="bj", districts=None):
        """Crawl every on-sale xiaoqu and its houses for the given districts.

        districts: optional list of district slugs. Empty/None falls back to
        NEED_DISTTRICT for Beijing, or every district of the city otherwise.
        """
        # BUGFIX: avoid the mutable default argument `districts=list()`.
        if not districts:
            districts = NEED_DISTTRICT if city == "bj" else get_districts(city)
        for district in districts:
            BaseSpider.random_delay()
            area_list = self.get_area_list(district)
            for area in area_list:
                try:
                    xiaoqu_list = self.crawl_xiaoqu_list(district, area)
                except Exception:
                    # BUGFIX: was a silent `continue`; log before moving on.
                    logger.error("爬取区域小区信息出错。{0}.{1}".format(district, area), exc_info=True)
                    continue
                for xiaoqu in xiaoqu_list:
                    # Skip communities with nothing currently on sale.
                    if int(xiaoqu.on_sale) == 0:
                        continue
                    try:
                        self.crawl_house_list(district, area, xiaoqu.xiaoqu_id)
                    except Exception:
                        logger.error("爬取小区房源出错。{0}".format(xiaoqu.xiaoqu_id), exc_info=True)
                        continue
        # NOTE: thread-pool concurrency was considered and deliberately
        # omitted for now (sequential crawl with random delays is gentler
        # on the target site).

    def crawl_xiaoqu_list_all_district(self, district, area_id=None, store=True):
        """Crawl the xiaoqu lists of every area in a district (or one area)."""
        area_list = self.get_area_list(district) if area_id is None else [area_id]
        for area in area_list:
            logger.info("开始爬area：{0}的小区列表".format(area))
            try:
                # BUGFIX: pass `store` by keyword — the third positional
                # parameter of crawl_xiaoqu_list is xiaoqu_id, not store.
                self.crawl_xiaoqu_list(district, area, store=store)
            except Exception as e:
                # BUGFIX: logger uses %-style lazy args, so pre-format the
                # message and let exc_info=True attach the traceback.
                logger.error("爬取区域小区信息出错。{0}.{1} {2}".format(district, area, e), exc_info=True)
            logger.info("成功爬取area：{0}的小区列表".format(area))

    def crawl_xiaoqu_list(self, district, area_id, xiaoqu_id=None, store=True):
        """Crawl every xiaoqu of one area: on-sale counts plus basic info.

        Returns the list of successfully parsed XiaoQu objects; per-element
        parse failures are collected into an error log instead of raised.
        """
        total_page = 1
        headers = create_headers()
        page = self.xiaoqu_list_path(area_id, 1)
        response = requests.get(page, timeout=10, headers=headers)
        soup = BeautifulSoup(response.content, "lxml")
        # The total page count is embedded as JSON in the pagination element.
        try:
            page_box = soup.find_all('div', class_='page-box')[0]
            matches = re.search(r'.*"totalPage":(\d+),.*', str(page_box))
            total_page = int(matches.group(1))
        except Exception:
            print("\tWarning: only find one page for {0}".format(area_id))
            traceback.print_exc()
        xiaoqu_list = []
        err_list = []
        # Walk every result page of the area, first to last.
        for num in range(1, total_page + 1):
            # Consistency fix: reuse the URL builder instead of a second
            # hard-coded copy of the same format string.
            page = self.xiaoqu_list_path(area_id, num)
            logger.info("开始防爬小区列表:{0}.{1}".format(area_id, page))
            headers = create_headers()
            BaseSpider.random_delay()
            response = requests.get(page, timeout=10, headers=headers)
            soup = BeautifulSoup(response.content, "lxml")
            # Each xiaoqu is rendered as an <li class="clear"> panel.
            house_elements = soup.find_all('li', class_="clear")
            for xiaoqu_elem in house_elements:
                try:
                    xiaoqu = self.crawl_single_xiaoqu(district, area_id, xiaoqu_elem, store)
                    # BUGFIX: check for None before touching xiaoqu_id.
                    if xiaoqu is not None:
                        logger.info("小区信息爬取成功:{0}".format(xiaoqu.xiaoqu_id))
                        xiaoqu_list.append(xiaoqu)
                except Exception as e:
                    logger.error("发生异常。exception:{0}".format(e), exc_info=True)
                    err_list.append(xiaoqu_elem["data-id"])
        if err_list:
            logger.error("小区列表存在爬取异常:{0}".format(json.dumps(err_list)))
        return xiaoqu_list

    def crawl_single_xiaoqu(self, district, area_id, xiaoqu_elem, store=True):
        """Parse one xiaoqu <li> element into a XiaoQu and optionally store it."""
        price = xiaoqu_elem.find('div', class_="totalPrice")
        name = xiaoqu_elem.find('div', class_='title')
        xiaoqu_id = xiaoqu_elem["data-id"]
        # Defaults survive a partial parse failure below.
        desc = ""
        sell_count = "-1"
        pic = ""
        try:
            desc = xiaoqu_elem.find('div', class_="houseInfo")
            sell_count = xiaoqu_elem.find('div', class_="xiaoquListItemSellCount")
            sell_count = re.findall(r"\d+", sell_count.text.replace("\n", ""))[0]
            pic = xiaoqu_elem.find('a', class_="img").find('img', class_="lj-lazy")
            pic = pic.get('data-original').strip()
        except Exception:
            print(xiaoqu_elem)
            logger.error("爬取单个小区信息出现异常:{0}.".format(xiaoqu_elem), exc_info=True)
        # Clean a "12345元/㎡" style price into 万元, tolerating non-numeric text.
        price = price.text.strip().split("元")[0]
        if not price.isdigit():
            price = 0
        price = round(int(price) / 10000, 2)
        name = name.text.replace("\n", "")
        # BUGFIX: desc may be a Tag (success), None (find miss) or the ""
        # default; the original unconditional desc.text crashed on None.
        desc = desc.text.replace("\n", "").strip() if hasattr(desc, "text") else (desc or "")
        xiaoqu = XiaoQu(district, area_id, xiaoqu_id, name, price, sell_count, desc, pic)
        print(xiaoqu.text())
        if store:
            XiaoquStore().insertXiaoqu(xiaoqu)
        return xiaoqu

    def crawl_house_list(self, district, area_id, xiaoqu_id, city_name="bj", store=True):
        """Crawl the for-sale house list of one xiaoqu and each house's detail.

        Houses priced outside [min_price, max_price] are skipped. Returns the
        list of ErShou objects crawled; per-house failures are emailed and
        collected rather than aborting the whole list.
        """
        logger.info("开始爬取小区的房源列表 district:{0}.area:{1}.xiaoqu:{2}".format(district, area_id, xiaoqu_id))
        headers = create_headers()
        page = self.xiaoqu_detail_house_list_path(xiaoqu_id, 1)
        response = requests.get(page, timeout=10, headers=headers)
        soup = BeautifulSoup(response.content, "lxml")
        total_page = 1
        # The total page count is embedded as JSON in the pagination element.
        try:
            page_box = soup.find_all('div', class_='page-box')[0]
            matches = re.search(r'.*"totalPage":(\d+),.*', str(page_box))
            total_page = int(matches.group(1))
        except Exception:
            print("\tWarning: only find one page for {0}".format(xiaoqu_id))
            traceback.print_exc()
        ershou_list = []
        err_list = []
        # Walk every result page, first to last.
        for num in range(1, total_page + 1):
            page = self.xiaoqu_detail_house_list_path(xiaoqu_id, num)
            print(page)  # echo each page URL
            headers = create_headers()
            BaseSpider.random_delay()
            response = requests.get(page, timeout=10, headers=headers)
            soup = BeautifulSoup(response.content, "lxml")
            # Each house is rendered as an <li class="clear"> panel.
            house_elements = soup.find_all('li', class_="clear")
            for house_elem in house_elements:
                price = house_elem.find('div', class_="totalPrice")
                name = house_elem.find('div', class_='title')
                house_id = house_elem["data-lj_action_housedel_id"]
                price = price.text.strip()
                name = name.text.replace("\n", "")
                print("开始爬取房源明细{0}.{1}：\n".format(house_id, name))
                digits = re.findall(r"\d+", price)
                if not digits:
                    # Malformed price text (no digits): skip this listing
                    # instead of raising IndexError.
                    continue
                price = int(float(digits[0]))
                if price > self.max_price or price < self.min_price:
                    continue
                must_look = self.get_must_look_flag(house_id, house_elem)
                try:
                    ershou = self.crawl_house_detail(district, area_id, xiaoqu_id, house_id, store, must_look)
                    # BUGFIX: crawl_house_detail returns None for out-of-range
                    # prices; the original appended None into the result list
                    # and later read an unbound `ershou` after an exception.
                    if ershou is not None:
                        ershou_list.append(ershou)
                except Exception as e:
                    logger.error("发生异常。house:{0}".format(house_id), exc_info=True)
                    self.mail.send_email(subject="房源详情页出现异常", msghtml="house_id:{0}.异常:{1}".format(house_id, e))
                    err_list.append(house_id)
        if err_list:
            self.mail.send_email("爬取房源异常", msghtml="未正常爬取房源列表：{0}".format(err_list))
            print("未正常爬取房源列表：{0}".format(err_list))
        logger.info("爬取小区的房源列表成功。 district:{0}.area:{1}.xiaoqu:{2}.未正常爬取的列表:{3}".format(district, area_id, xiaoqu_id, json.dumps(err_list)))
        return ershou_list

    def get_must_look_flag(self, house_id, house_elem):
        """Return 1 when the listing carries Lianjia's "必看好房" tag, else 0."""
        # BUGFIX: initialise before the try so the return can never hit an
        # unbound local if find() raises.
        must_look = 0
        try:
            tag = house_elem.find('span', class_="goodhouse_tag")
            if tag is not None and tag:
                must_look = 1
                logger.info("房子：{0}.是必看好房".format(house_id))
        except Exception:
            traceback.print_exc()
        return must_look

    def crawl_house_detail(self, district, area_id, xiaoqu_id, house_id, store=True, must_look=0):
        """Crawl one house's detail page into an ErShou record.

        Returns None when the detail-page price falls outside the configured
        [min_price, max_price] window; otherwise the ErShou object (stored
        via HouseStore when ``store`` is True).
        """
        logger.info("开始爬取房源详情：" + house_id)
        page = self.house_detail_path(house_id)
        headers = create_headers()
        BaseSpider.random_delay()
        response = requests.get(page, timeout=10, headers=headers)
        soup = BeautifulSoup(response.content, "lxml")
        price = soup.find('div', class_="price").find("span", class_="total")
        price = int(float(price.text.strip()))
        if price > self.max_price or price < self.min_price:
            return None
        name = soup.find('div', class_='title')
        img = soup.find('div', class_='thumbnail')
        pic_list = []
        # BUGFIX: the original `if not img or img is not None:` was always
        # true and crashed when the thumbnail block was missing.
        if img is not None:
            for image in img.find_all("li"):
                pic_list.append(image.find('img')["src"])
        age = soup.find('div', class_='area').find("div", class_="subInfo").text.strip()
        name = name.text.replace("\n", "")
        introduction = soup.find(class_="introContent")
        info_list = introduction.find_all('li')
        # Each intro <li> yields ("label", "value"); keep only the value.
        _, bed_num = info_list[0].stripped_strings
        _, floor = info_list[1].stripped_strings
        _, size = info_list[2].stripped_strings
        _, direction = info_list[6].stripped_strings
        _, arc_type = info_list[7].stripped_strings
        trade = soup.find(class_="transaction")
        desc_intro = soup.find(class_="baseattribute")
        desc_intro = "" if desc_intro is None else desc_intro.text.strip()
        trade_list = trade.find_all('li')
        online_day = trade_list[0].find_all("span")[1].text.strip()
        house_type = trade_list[1].find_all("span")[1].text.strip()
        trade_detail = trade_list[4].find_all("span")[1].text.strip()
        last_trade_day = trade_list[2].find_all("span")[1].text.strip()
        size = size.split("㎡")[0]
        # Recover the xiaoqu id from the page when the caller didn't supply one.
        if not xiaoqu_id:
            xiaoqu_id = soup.find('div', class_='communityName').find("a", class_="info")["href"].split("/")[2]
        ershou = ErShou(district, area_id, xiaoqu_id, house_id, name, price, age,
                        desc_intro, pic_list, online_day, last_trade_day, bed_num,
                        floor, size, direction, arc_type, house_type, trade_detail, must_look)
        print(ershou.text())
        if store:
            HouseStore().insert_house(ershou)
        logger.info("成功爬取房源详情：" + house_id)
        return ershou

    def crawl_xiaoqu_detail(self, xiaoqu_id):
        # TODO: xiaoqu detail-page crawling is not implemented yet.
        pass





if __name__ == '__main__':
    # Ad-hoc entry point: crawl the for-sale listings of one Haidian xiaoqu,
    # keeping only houses priced between 200 and 650 万元.
    spider = ErShouSpider("lianjia", min_price=200, max_price=650)
    spider.crawl_house_list("haidian", "junbo1", "1111027380073")