'''
Author: xubing
Date: 2024-01-16 21:56:50
LastEditors: xubing
LastEditTime: 2024-01-23 10:35:01
Description: 爬取所关注的二手房信息
'''

import random
import sys
import time

import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

from utils import current_date, current_time, logger, send_msg_to_feishu,write_to_mysql


def crawler(url):
    """Scrape all second-hand housing listings from a Lianjia search URL.

    Walks the paginated results (``pg1/``, ``pg2/`` ...) until a page comes
    back empty or with a non-200 status, parsing each listing card into a
    dict of Chinese-keyed fields.

    Args:
        url: Base Lianjia search URL ending with "/"; the page suffix
            ``pg{n}/`` is appended per request.

    Returns:
        tuple[list[dict], int]: (parsed listing dicts, total listing count).
    """
    # Spoof a desktop-browser User-Agent so the site serves the normal HTML.
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
    }
    data = []   # accumulated listing dicts
    page = 1    # current result page
    count = 0   # total listings seen across pages
    while True:
        # Random 1-5s sleep between pages to avoid bot detection.
        time.sleep(random.randint(1, 5))
        logger.info(f"正在爬取第{page}页...")
        full_url = url + f"pg{page}/"
        # BUGFIX: add a timeout so a stalled connection cannot hang the
        # scraper forever (requests has no default timeout).
        response = requests.get(full_url, headers=headers, timeout=30)
        # Non-200 response: stop crawling.
        if response.status_code != 200:
            break
        soup = BeautifulSoup(response.text, "html.parser")
        # Each listing card is a <div class="info clear">.
        divs = soup.find_all("div", class_="info clear")
        # Empty page: we ran past the last page of results.
        if not divs:
            break
        count += len(divs)
        for div in tqdm(divs):
            item = {}
            title_anchor = div.find("div", class_="title").find("a")
            # BUGFIX: catch only the missing-attribute KeyError instead of a
            # bare except that would also swallow unrelated failures
            # (including KeyboardInterrupt).
            try:
                house_code = title_anchor['data-housecode']
            except KeyError:
                # Some cards carry the id under a different data attribute.
                house_code = title_anchor['data-lj_action_housedel_id']
            title = div.find("div", class_="title").text.strip()
            # Location, e.g. "小区名 - 区域".
            position = div.find("div", class_="positionInfo").text.strip()
            # Pipe-separated house attributes (rooms | area | orientation ...).
            house_info = div.find("div", class_="houseInfo").text.strip()
            # Follower/publish info, e.g. "N人关注 / 时间".
            follow_info = div.find("div", class_="followInfo").text.strip()
            unit_price = div.find("div", class_="unitPrice").text.strip()
            total_price = div.find("div", class_="totalPrice").text.strip()
            tag = div.find("div", class_="tag").text.strip()
            item["房屋代码"] = house_code
            item["房屋标题"] = title
            item["小区名称"] = position.split('-')[0].strip()
            item["所在区域"] = position.split('-')[1].strip()
            # Split the attribute string once instead of six times.
            house_parts = house_info.split('|')
            item["房间数"] = house_parts[0].strip()
            item["房屋面积"] = house_parts[1].strip()
            item["朝向"] = house_parts[2].strip()
            item["装修"] = house_parts[3].strip()
            item["层数"] = house_parts[4].strip()
            item["楼房类型"] = house_parts[5].strip()
            item["关注信息"] = follow_info
            item["发布时间"] = follow_info.split('/')[1].strip()
            item["均价"] = unit_price
            item["总价"] = total_price
            item["标签"] = tag
            data.append(item)
        page += 1
        logger.info("当前爬取了%d个房子的信息" % (count))

    logger.info(f"共爬取了{len(data)}条数据")
    return data, count

def run_ershoufang():
    """Crawl listings for each configured city, back them up, and notify.

    For every city: run the crawler, save results to a per-day Excel file,
    append them to the ``ershoufang`` MySQL table, then send a summary to
    Feishu. A crawler failure aborts the whole run with exit code 1; a
    MySQL write failure is logged but does not stop remaining cities.
    """
    url_map = {
        "杭州":
        "https://hz.lianjia.com/ershoufang/ty1ie2y1f5dp1sf1l2l3l4a4bp100ep400/",
        "武汉":
        "https://wh.lianjia.com/ershoufang/ty1ie2y1f5dp1sf1l2l3l4a4bp100ep400/",
        "苏州":
        "https://su.lianjia.com/ershoufang/ty1ie2y1f5dp1sf1l2l3l4a4bp100ep400/",
    }
    citys = ["杭州", "武汉", "苏州"]
    summary_info = {}  # city -> number of matching listings
    for city in citys:
        logger.info(f"======开始爬取【{city}】符合条件的二手房信息======")
        url = url_map.get(city)
        # BUGFIX: catch Exception instead of a bare except (a bare except
        # would also trap KeyboardInterrupt/SystemExit), and log the cause.
        try:
            data, count = crawler(url)
            msg = "爬取成功！"
        except Exception:
            msg = city + '爬虫程序出错，请检查'
            logger.exception(msg)
            send_msg_to_feishu(msg)
            sys.exit(1)
        summary_info[city] = count
        df = pd.DataFrame(data)
        # Prepend run metadata columns so every row is self-describing.
        df.insert(0, '爬取时间', current_time)
        df.insert(1, '城市', city)
        df.insert(2, "符合条件的二手房数", count)
        # Local Excel backup, one file per day and city.
        df.to_excel("data/%s-%s-lianjia.xlsx" % (current_date, city), index=False)
        logger.info("数据已保存为Excel文件成功!")
        logger.info(msg)
        # BUGFIX: the original logged the stale success message ("爬取成功！")
        # when the MySQL write failed and discarded the exception entirely.
        # Log the real error instead; the Excel backup already succeeded, so
        # continue with the remaining cities (best-effort write preserved).
        try:
            write_to_mysql("ershoufang", "append", df)
        except Exception:
            logger.exception(city + "写入MySQL失败")

    msg = f"======【{current_date}】======\n爬虫任务完成！统计信息如下:\n"
    msg += str(summary_info)

    logger.info(msg)
    logger.info('==' * 10)
    # Push the run summary to Feishu.
    send_msg_to_feishu(msg)
