# -*- coding: utf-8 -*-
# @Time    : 2021/2/20 13:27
# @Author  : AkromA
# @File    : ajk_esf_crawler.py
# @Software: PyCharm
# @Info    : 安居客二手房数据爬虫

import re
import time
import random
import requests
import traceback
from bs4 import BeautifulSoup
from urllib.request import quote
from db.db_conn import DBHandle
from oss.oss_upload_img import OssUpload

headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36",
}


# Map a fitment (decoration) label to its internal id.
def zx_handle(param):
    """Return the decoration-type id for a fitment label.

    Unknown labels fall back to 0.
    NOTE(review): the old inline comment claimed the default was
    "简单装修"/id 53, but the code has always returned 0 — confirm intent.
    """
    decoration_ids = {
        "毛坯": 146,
        "简单装修": 53,
        "精装修": 52,
        "豪华装修": 54,
    }
    return decoration_ids.get(param, 0)


# Map a compass-orientation label to its internal id.
def esf_cx_handle(param):
    """Return the orientation id for a compass label.

    Unknown labels fall back to 144 (南北, north-south).
    """
    orientation_ids = {
        "东": 55,
        "南": 56,
        "西": 57,
        "北": 58,
        "东南": 59,
        "东北": 60,
        "西南": 61,
        "西北": 62,
        "南北": 144,
        "东西": 145,
    }
    return orientation_ids.get(param, 144)


# Collect the text of several parsed tag elements into one string.
def muti_tags(t_list):
    """Join the stripped text of each element in *t_list* with commas.

    Returns the single text for a one-element list and "" for an empty
    list — exactly what ",".join already does for those cases.
    """
    return ",".join(tag.text.strip() for tag in t_list)


# Translate a comma-separated tag string into a string of feature ids.
def features_handle(param):
    """Map each tag in *param* (comma-separated) to a feature id.

    The first matching keyword wins per tag (same order as the original
    elif chain); matched ids are comma-joined. Returns "0" when nothing
    matches.
    """
    keyword_to_id = (
        ("近地铁", "48"),
        ("近学校", "49"),
        ("交通便利", "50"),
        ("近商业区", "51"),
        ("近医院", "108"),
        ("近公交站台", "112"),
        ("近商超", "113"),
    )
    matched_ids = []
    for tag in param.split(","):
        for keyword, feature_id in keyword_to_id:
            if keyword in tag:
                matched_ids.append(feature_id)
                break
    return ",".join(matched_ids) if matched_ids else "0"


# Map a building-usage description to its internal id.
def house_type_handle(param):
    """Return the building-type id whose keyword occurs in *param*.

    Keywords are tested in the original priority order; 136 is the
    fallback for unrecognized descriptions.
    """
    ordered_types = (
        ("普通住宅", 132),
        ("公寓", 133),
        ("别墅", 134),
        ("平房", 169),
        ("四合院", 170),
    )
    for keyword, type_id in ordered_types:
        if keyword in param:
            return type_id
    return 136


# Map a transaction-ownership description to its internal id.
def property_right_handle(param):
    """Return the property-right id whose keyword occurs in *param*.

    143 is the fallback for unrecognized descriptions.
    """
    ordered_rights = (
        ("商品房", 140),
        ("经济适用房", 141),
        ("公房", 142),
    )
    for keyword, right_id in ordered_rights:
        if keyword in param:
            return right_id
    return 143


# Execute an INSERT statement and return the new row's id.
def insert_data(sql):
    """Open a DB handle, run *sql* via insert_one, close, return the id."""
    conn = DBHandle()
    new_id = conn.insert_one(sql)
    conn.close_db()
    return new_id


# Execute a SELECT statement and return the result rows.
def select_data(sql):
    """Open a DB handle, run *sql* via select_sql, close, return the rows."""
    conn = DBHandle()
    rows = conn.select_sql(sql)
    conn.close_db()
    return rows


# Re-host a remote image (or video) on Aliyun OSS and return its new URL.
def get_img_url(img_url):
    """Download *img_url* and upload the bytes to Aliyun OSS.

    The source URL is normalized first: a ".640x480"-style size suffix or a
    "!"-style suffix is stripped so the original file is fetched.

    Returns the OSS URL on success, or False on any failure — best effort,
    callers simply skip the image.
    """
    # Initialize the Aliyun OSS upload client.
    oss_upload = OssUpload()
    # Matches a trailing ".WxH" size suffix in the URL.
    pattern = r'(.*?)\.\d+x\d+'

    try:
        # Normalize the URL (findall evaluated once, not twice as before).
        size_match = re.findall(pattern, img_url)
        if size_match:
            img_link = size_match[0]
        elif "!" in img_url:
            img_link = img_url.split('!')[0]
        else:
            img_link = img_url

        bytes_con = requests.get(img_link).content
        bytes_name = img_link.split('/')[-1]

        return oss_upload.upload_byte(bytes_name, bytes_con)
    except Exception as error:
        # Best-effort: log the failure instead of swallowing it silently.
        print("图片上传失败 %s %s" % (img_url, error))
        return False


# Fetch a page and return its HTML text.
def get_html(url_):
    """GET *url_* with the crawler headers; redirects are NOT followed.

    Returns the response body as text, or False when the status code is
    not 200 (a redirect/non-200 may indicate the IP was blocked).
    """
    resp = requests.get(url_, headers=headers, allow_redirects=False)
    if resp.status_code == 200:
        return resp.text
    print("获取页面html时出错")
    return False


# Fetch a page and return its raw byte content.
def get_byte(url_):
    """GET *url_* with the crawler headers (redirects followed here).

    Returns the raw response bytes, or False when the status code is
    not 200.
    """
    resp = requests.get(url_, headers=headers)
    if resp.status_code == 200:
        return resp.content
    print("获取页面html时出错")
    return False


# Parse a detail page and store the listing (08_house + 08_house_info rows).
def con_handle(html_, city_code, now_url, house_ajk_id, crawler_type):
    """Extract every field of one second-hand listing and insert it.

    Parameters:
        html_         -- raw HTML of the detail page
        city_code     -- first 4 digits of the city area code (community lookup)
        now_url       -- URL of the detail page, stored as the source url
        house_ajk_id  -- listing id from the list page, stored as `fid`
        crawler_type  -- source-website id (1 = Anjuke)

    Returns None; any extraction or lookup failure logs and skips the listing.
    """

    # Most structured fields live in the JS "viewData" object near the page
    # bottom; grab that region as raw text for regex extraction.
    pattern = r"(viewData[\s\S]*?)government_verification"
    con_json_data = re.findall(pattern, html_)[0]
    # Rough cleanup: unescape newlines and forward slashes.
    con_json_data = con_json_data.replace("\\n", " ")
    con_json_data = con_json_data.replace("\\u002F", "/")
    con_soup_ = BeautifulSoup(html_, 'lxml')

    # ################
    # house table fields

    # Listing category: 1 = rental, 2 = second-hand, 3 = new housing.
    type_ = "2"

    try:
        # Rooms/halls, e.g. "3室2厅1卫".
        rhval_con = con_soup_.find_all('div', class_="maininfo-model-strong")[0].find_all("i")
        rhval = muti_tags(rhval_con)

        # Floor area.
        area = con_soup_.find_all('div', class_="maininfo-model-strong")[1].find_all("i")[0].text

        # Tags, orientation, fitment and long title.
        tags_con = con_soup_.find_all('span', class_="maininfo-tags-item")
        tags = muti_tags(tags_con)
        orient_pattern = r'orient:"(.*?)"'
        house_orient_name = re.findall(orient_pattern, con_json_data)[0]
        fitment_pattern = r'fitment_name:"(.*?)"'
        fitment_value = re.findall(fitment_pattern, con_json_data)[0]
        title_pattern = r'title:"(.*?)"'
        long_title = re.findall(title_pattern, con_json_data)[0]

        # District name, business-block name, price and floors.
        blocks_con = con_soup_.find_all('span', class_="maininfo-community-item-name")[0].find_all("a")
        region_name = blocks_con[0].text
        block_name = blocks_con[1].text
        price = con_soup_.find_all('span', class_="maininfo-price-num")[0].text

        current_floor_pattern = r'floor:"(.*?)"'
        current_floor = re.findall(current_floor_pattern, con_json_data)[0] if re.findall(current_floor_pattern, con_json_data) else 0
        floor_tag_pattern = r'floor_level:"(.*?)"'
        # BUGFIX: the guard previously indexed [0] on a possibly-empty findall
        # result, raising IndexError when floor_level was missing; test the
        # list itself instead.
        floor_tag = re.findall(floor_tag_pattern, con_json_data)[0].split("(共")[0] if re.findall(floor_tag_pattern, con_json_data) else 0

        # Total floors: prefer total_floor, fall back to the "(共N层)" suffix
        # of floor_level, else 0.
        floor_pattern = r'total_floor:"(.*?)"'
        if re.findall(floor_pattern, con_json_data):
            floor = re.findall(floor_pattern, con_json_data)[0]
        elif re.findall(floor_tag_pattern, con_json_data):
            floor = re.findall(floor_tag_pattern, con_json_data)[0].split("共")[1]
            floor = floor.replace("层", "").replace(")", "")
        else:
            floor = 0

        # 2021-03-02: publish time replaced by the insert (crawl) time.
        time_ = int(time.time())
        update_time = int(time.time())  # last-modified (crawl) time

        address = ""  # the detail page carries no street address
        description = ""  # conflicts with the house_info text; not collected

        # Photos (watermarks not removed yet): indoor photos and floor plans.
        room_photos_list = []
        model_photos_list = []
        photo_pattern = r'(indoor_photos[\s\S]*?),(model_photos[\s\S]*?),outdoor_photos'
        room_pattern = r'{url:"(.*?)"'      # indoor-photo urls
        model_pattern = r'original_url:"(.*?)"'     # floor-plan urls
        photo_links = re.findall(photo_pattern, con_json_data)
        if photo_links:
            # Indoor photos, re-hosted on OSS one by one.
            indoor_ = photo_links[0][0]
            indoor_list = re.findall(room_pattern, indoor_)
            for indoor_ in indoor_list:
                indoor_link = get_img_url(indoor_)
                if indoor_link:
                    room_photos_list.append(indoor_link)

            # Floor-plan photos.
            model_ = photo_links[0][1]
            model_list = re.findall(model_pattern, model_)
            for model_ in model_list:
                model_link = get_img_url(model_)
                if model_link:
                    model_photos_list.append(model_link)

        # ",".join handles the 0/1/many cases uniformly.
        room_photos = ','.join(room_photos_list)
        model_photos = ','.join(model_photos_list)

        # List thumbnail = first indoor photo.
        # NOTE(review): raises IndexError (caught below, listing skipped) when
        # there are no indoor photos — confirm that skipping is intended.
        img = room_photos_list[0]

        # Video (slow: the file is downloaded and re-uploaded to OSS).
        video_pattern = r'video_url:(.*?),'        # not always present
        video_links = re.findall(video_pattern, con_json_data)
        if video_links:
            has_video = "1"
            video_src = get_img_url(video_links[0].replace('"', ''))
        else:
            has_video = "0"
            video_src = ""

        # Labels converted to internal ids.
        features = features_handle(tags)
        towards = esf_cx_handle(house_orient_name)
        decoration = zx_handle(fitment_value)
        # Elevator flag is inferred from the tag string.
        elevator = "1" if "电梯" in tags else "0"

        # 2021-02-26: house_type/heating moved from house_info to house.
        house_type = ""
        house_type_pattern = r'use_type:(.*?),'
        house_type_con = re.findall(house_type_pattern, con_json_data)
        if house_type_con:
            house_type = house_type_handle(house_type_con[0])

        heating = 0  # heating method is not shown on the page

    except Exception as error:
        print("获取house参数出错 %s" % error)
        print(con_soup_)
        traceback.print_exc()
        return

    # Community id / coordinates come from the local 06_comm table, matched by
    # community name and city-code prefix.
    # SECURITY(review): SQL here (and below) is built by string interpolation
    # from scraped text; switch to parameterized queries if DBHandle allows.
    comm_name = con_soup_.find('div', class_="community-title").find('h3').text
    sel_sql = "SELECT comm_id, areaCode, block_id, lat, lng FROM 06_comm WHERE state = 1 AND comm_name = \"%s\" AND areaCode LIKE \"%s%%\";" % (comm_name, city_code)
    flag = select_data(sel_sql)
    if len(flag) > 0:
        comm_id = flag[0]['comm_id']  # community id
        areaCode = flag[0]['areaCode']  # district code
        block_id = flag[0]['block_id']  # business-block id
        lat = flag[0]['lat']  # latitude
        lng = flag[0]['lng']  # longitude
    else:
        # The community may need to be created first; not implemented yet.
        print("%s 小区目前还不存在，待添加" % comm_name)
        return

    # Insert the house row and get its id.
    insert_sql = "INSERT INTO 08_house (`areaCode`, `rhval`, `area`, `comm_id`, `tags`, `house_orient_name`, `fitment_value`, `block_id`, `type`, `long_title`, `region_name`, `price`, `img`, `floor_tag`, `state`, `block_name`, `floor`, `towards`, `decoration`, `time`, `fid`, `address`, `description`, `room_photos`, `model_photos`, `has_video`, `video_src`, `features`, `lat`, `lng`, `update_time`, `elevator`, `current_floor`, `house_type`, `heating`, `crawler_type`) VALUES ("
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\", " % (areaCode, rhval, area, comm_id)
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (tags, house_orient_name, fitment_value, block_id, type_, long_title)
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (region_name, price, img, floor_tag, '1', block_name)
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (floor, towards, decoration, time_, house_ajk_id)
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (address, description, room_photos, model_photos, has_video, video_src)
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (features, lat, lng, update_time, elevator)
    insert_sql += "\"%s\", \"%s\", \"%s\", \"%s\");" % (current_floor, house_type, heating, crawler_type)

    house_id = insert_data(insert_sql)
    print(house_id)

    # ################
    # house_info table fields

    try:
        hid = house_id  # foreign key to the 08_house row just inserted
        # `features` and `elevator` computed above are reused as-is.

        unit_price_pattern = r'avg_price:"(.*?)",'
        unit_price = re.findall(unit_price_pattern, con_json_data)[0].split(".")[0]  # price per m2, integer part
        total_price = price  # total price

        property_right_pattern = r'right_type:(.*?),'
        property_right_con = re.findall(property_right_pattern, con_json_data)[0]  # ownership description
        property_right = property_right_handle(property_right_con)

        floorarea = area  # inner floor area (same value as `area` above)

        # Free-text paragraphs: selling point, owner mentality, service intro.
        house_intro = con_soup_.find_all('div', class_="houseIntro-content-p-text")
        selling_point = house_intro[0].text.replace("\n", "").replace("\"", "'").strip()
        owner_mentality = house_intro[1].text.replace("\n", "").replace("\"", "'").strip()
        service_Introduction = house_intro[2].text.replace("\n", "").replace("\"", "'").strip()

        architectural_form = ""  # ambiguous on the page; not collected yet

        down_payments_pattern = r'<span class="houseInfo-main-item-name"[\s\S]*?>首付(\d+)万'
        down_payments = re.findall(down_payments_pattern, html_)[0]    # down payment (万)

        # Housing use -> id (default 155 = residential).
        housing_use_pattern = r'<span class="houseInfo-main-item-label"[\s\S]*?>房屋类型</span><span[\s\S]*?>(.*?)</span>'
        housing_use = ""
        if re.findall(housing_use_pattern, html_):
            housing_use_con = re.findall(housing_use_pattern, html_)[0]
            if "住宅" in housing_use_con:
                housing_use = 155
            elif "车位" in housing_use_con:
                housing_use = 156
            elif "商业" in housing_use_con:
                housing_use = 157
            elif "办公" in housing_use_con:
                housing_use = 158
            else:
                housing_use = 155

        # Deed-age ("满二"/"满五") -> id.
        # BUGFIX: check "不满二" before "满二" — "满二" is a substring of
        # "不满二", so the old order made the 168 branch unreachable.
        housing_years_pattern = r'<span class="houseInfo-main-item-label"[\s\S]*?>房本年限</span><span[\s\S]*?>(.*?)</span>'
        housing_years = ""
        if re.findall(housing_years_pattern, html_):
            housing_years_con = re.findall(housing_years_pattern, html_)[0]
            if "不满二" in housing_years_con:
                housing_years = 168
            elif "满二" in housing_years_con:
                housing_years = 166
            elif "满五" in housing_years_con:
                housing_years = 167
            else:
                housing_years = 166

        only_house_pattern = r'<span class="houseInfo-main-item-label"[\s\S]*?>唯一住房</span><span[\s\S]*?>(.*?)</span>'
        only_house = "0"       # "1" when flagged as the owner's only home
        if re.findall(only_house_pattern, html_):
            only_house_con = re.findall(only_house_pattern, html_)[0]
            if "是" in only_house_con: only_house = "1"

        # 2021-02-26: fields moved here from the comm table; source url added.
        # Construction year of the community.
        comm_build_date = ""
        build_date_pattern = r'house_age:"(.*?)",'
        if re.findall(build_date_pattern, html_):
            comm_build_date = re.findall(build_date_pattern, html_)[0]
        # Property-right duration of the community (years).
        build_years = ""
        build_years_pattern = r'<span class="houseInfo-main-item-label"[\s\S]*?>产权年限</span><span[\s\S]*?>(.*?)</span>'
        if re.findall(build_years_pattern, html_):
            build_years = re.findall(build_years_pattern, html_)[0].replace("年产权", "")

        # Insert the house_info row.
        insert_info_sql = "INSERT INTO 08_house_info (`unit_price`, `total_price`, `hid`, `features`, `property_right`, `floorarea`, `elevator`, `selling_point`, `housing_use`, `owner_mentality`, `comm_build_date`, `build_years`, `url`, `service_Introduction`, `architectural_form`, `down_payments`, `housing_years`, `only_house`) VALUES ("
        insert_info_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (unit_price, total_price, hid, features, property_right)
        insert_info_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", " % (floorarea, elevator, selling_point, housing_use, owner_mentality)
        insert_info_sql += "\"%s\", \"%s\", \"%s\", " % (comm_build_date, build_years, now_url)
        insert_info_sql += "\"%s\", \"%s\", \"%s\", \"%s\", \"%s\");" % (service_Introduction, architectural_form, down_payments, housing_years, only_house)

        house_info_id = insert_data(insert_info_sql)
        print(house_info_id)
    except Exception as error:
        print("获取house_info参数出错 %s" % error)
        print(con_soup_)
        traceback.print_exc()
        return


# Walk one search-result page and yield the detail page of each new listing.
def get_html_list(city, page, param, crawler_type):
    """Yield (detail_html, detail_url, listing_id) for unseen listings.

    Fetches list page *page* for *city* filtered by keyword *param*, then
    for each property card (only cards 7-9 are taken) fetches the detail
    page, extracts the listing id from the card's data-lego JSON, skips
    ids already stored for this *crawler_type*, and sleeps a random 7-9 s
    between yields to avoid an IP ban.
    """
    list_url = "https://%s.anjuke.com/sale/p%s/?q=%s" % (city, page, quote(param))

    list_html = get_html(list_url)
    if not list_html:
        return False
    page_soup = BeautifulSoup(list_html, 'lxml')
    property_cards = page_soup.find_all('div', class_="property")

    # NOTE(review): only cards 7-9 are processed — confirm this slice is
    # intentional (e.g. skipping promoted results) and not a leftover.
    for card in property_cards[7:10]:
        detail_url = card.a.get('href')
        detail_html = get_html(detail_url)

        # 2021-03-02: the listing id is read from the card's data-lego
        # attribute and used for de-duplication.
        lego_attr = card.a.get('data-lego')
        id_pattern = r'"entity_id":"(.*?)",'
        listing_id = re.findall(id_pattern, lego_attr)[0]

        # Skip listings already stored for this source site.
        dup_sql = "SELECT id FROM 08_house WHERE fid = '%s' AND crawler_type = '%s' AND state = '1' AND type = '2';" % (listing_id, crawler_type)
        if len(select_data(dup_sql)) > 0:
            continue

        # Random delay to reduce the chance of an IP ban.
        time.sleep(random.randrange(7, 10))
        yield detail_html, detail_url, listing_id


# Entry point: crawl one result page of second-hand listings.
def ajk_esf_main(city_short, district_param, city_code, data_source):
    """Crawl page 1 of Anjuke second-hand results and store each listing.

    Args:
        city_short: city abbreviation used in the subdomain (e.g. "bj").
        district_param: search keyword (district, business area, ...).
        city_code: first 4 digits of the city code, for community lookup.
        data_source: id of the target website (1 = Anjuke).

    Returns True after the page has been processed (only 1 page is crawled).
    """
    page_num = 1

    # get_html_list is a generator function, so this call always returns a
    # (truthy) generator object; a failed list-page fetch simply yields
    # nothing. The old `if not con_list: return False` guard could never
    # fire and has been removed as dead code.
    con_list = get_html_list(city_short, page_num, district_param, data_source)

    for content_html, content_url, h_ajk_id in con_list:
        con_handle(content_html, city_code, content_url, h_ajk_id, data_source)

    return True


if __name__ == '__main__':
    # Sample run: Shunyi district of Beijing, site id 1 (Anjuke).
    city_abbr = 'bj'
    area_code_prefix = '1101'
    keyword = "顺义"
    site_id = '1'   # id of the crawled website (1 = Anjuke)

    flag = ajk_esf_main(city_abbr, keyword, area_code_prefix, site_id)
