"""
@file:lianjie_spider.py
@time:2023/09/18 20:59:35
@author:Jiajia Zhan

"""
import json
import time
# Library for sending HTTP requests
import requests
#  Library for parsing HTML pages
from bs4 import BeautifulSoup


# Scraper for Lianjia second-hand home transaction (sold) listings

class LianjiaSpider():
    """Crawl sold-home ("chengjiao") listing pages from xm.lianjia.com and
    append each parsed record as one JSON line to ./lianjia.txt."""

    def __init__(self):
        """Set up the page-URL template and browser-like request headers."""
        # {0} is the 1-based page number of the sold-listings index.
        self.url = 'https://xm.lianjia.com/chengjiao/pg{0}/'
        # Headers copied from a real Chrome session (including a login cookie)
        # so requests pass Lianjia's anti-scraping checks.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            # NOTE(fix): the cookie value was originally one quoted literal
            # split across two physical lines — a SyntaxError. It is rejoined
            # here via implicit adjacent-string concatenation.
            'Cookie': 'SECKEY_ABVK=rcUmighuGBqBQ8JJC0pqGFFbgBS0hRqoLO4EM11uc8w%3D; BMAP_SECKEY=-ipRUsCuBJnxJWLz5h1DbKqSFpH1iIyCHbnVgqyAy5EKwEFbQeCiCLKfVBjUuHUcQ8uDCuQbYdzQAyoLniwZQghbM_02tBcOBFFm7MnE-pzY19RgkdBwXojrBZqtBz-CsBiGTcnf0ktYL4tvu4wOgzfeG7cxtd_8iaSBVtNWBbCSqZsyNaglD0kVmFKnCbpR; lianjia_uuid=a80696d8-2e32-45af-94e1-ca4089e29dd0; crosSdkDT2019DeviceId=ngubv2--sua0yd-s9bstlhv8iswhzr-73es5pof6; _smt_uid=65045ba4.2dadd004; _jzqc=1; _qzjc=1; _ga=GA1.2.118083682.1694784423; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1694784426; _jzqy=1.1694784421.1694787229.1.jzqsr=baidu.-; lfrc_=c0e336c3-300d-4a51-8436-bc729a4a1e71; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2218a9905fd60371-0700492a4b7ad1-26031f51-2073600-18a9905fd61c8e%22%2C%22%24device_id%22%3A%2218a9905fd60371-0700492a4b7ad1-26031f51-2073600-18a9905fd61c8e%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; Hm_lvt_efa595b768cc9dc7d7f9823368e795f1=1694842595; Hm_lpvt_efa595b768cc9dc7d7f9823368e795f1=1694842595; login_ucid=2000000347648218; lianjia_token=2.0015df2d3b788cf00d0472040a4a8d75e1; lianjia_token_secure=2.0015df2d3b788cf00d0472040a4a8d75e1; security_ticket=YJTW2FGvHaV2IsOvBXKnqYfJSLrjhFaKw1KBKzQDksnxx4S6+e4iZJ0209mhZdSbIle2H4DNbLA296aneCqHLq5+x0fvsYWc9YyTxKvYS8LVFOv1rm7WjmFSm2PCaiyv5yU7TGsTCrBTboBJYrHvba1kgdH1rnwnpKv3cEhhZF4=; ftkrc_=7d1cbb8a-9a23-42e1-86bc-84ff24104cbe; _ga_QJN1VP0CMS=GS1.2.1694842712.1.1.1694842732.0.0.0; _ga_KJTRWRHDL1=GS1.2.1694842712.1.1.1694842732.0.0.0; _ga_WLZSQZX7DE=GS1.2.1694842707.1.1.1694842736.0.0.0; _ga_TJZVFLS7KV=GS1.2.1694842707.1.1.1694842736.0.0.0; select_city=350200; lianjia_ssid=7d120c73-4386-4e19-bb55-76b4072ef013; _jzqa=1.4084513941691156500.1694784421.1694842272.1695042468.4; _jzqckmp=1; '
                      '_gid=GA1.2.1768623119.1695042470; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1695042943; _qzja=1.1724760908.1694784421136.1694842271702.1695042467877.1695042561382.1695042943062.0.0.0.58.4; _qzjb=1.1695042467877.3.0.0.0; _qzjto=3.1.0; _jzqb=1.3.10.1695042468.1; srcid=eyJ0Ijoie1wiZGF0YVwiOlwiYmNkY2U1N2YwYzdiMzM1MmMyOTlkODFlZDIwODIyYzgwMWIxYjI2YTU0ODM3NGIxZjY5OGQ5YzhkOWY3ZjQxNTdmZjYxMDlkODc1OWZmZDExMTBlYzg1MjM3ZmMxYTg5ZmVjY2U4ZjE2NjMwZTQ0ZWNmMjc4MzFiODQ1MzllNDI3OTk4YzU1MmExODQwMzU0ZmFkYTRiZTIzYmNhNzVlZDY3MjZmYzNiMWVjYjUxZDViNjQ2ZWQ4OGZhYTRmMjE1Y2Y0MzlkNjNjNWE5Y2E1ZmQ2ODUxZjYzOGYxMjE5Yjg0NWY4ZTEyODFiNWYxNTA2MWFhZjNmYjQyY2RlMmFlOVwiLFwia2V5X2lkXCI6XCIxXCIsXCJzaWduXCI6XCI5NjZiNjEzNVwifSIsInIiOiJodHRwczovL3htLmxpYW5qaWEuY29tL2NoZW5namlhby8iLCJvcyI6IndlYiIsInYiOiIwLjEifQ==; _ga_BKB2RJCQXZ=GS1.2.1695042469.4.1.1695042945.0.0.0',
            'Host': 'xm.lianjia.com',
            'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
        }

    def send_request(self, url):
        """GET *url* with the prepared headers.

        Returns the `requests.Response` on HTTP 200; returns None on any
        other status code or on a network error (timeout, connection reset).
        """
        try:
            # Redirects typically mean a captcha/login wall, so don't follow
            # them; the timeout keeps the crawl from hanging indefinitely.
            response = requests.get(url, headers=self.headers,
                                    allow_redirects=False, timeout=10)
        except requests.RequestException:
            return None
        if response.status_code == 200:
            return response
        return None

    @staticmethod
    def _field_text(item, tag, class_name):
        """Return the text of the first *tag* with class *class_name* under
        *item*, or '' when the element is absent (some listings lack fields
        such as the agent name)."""
        node = item.find(tag, class_=class_name)
        return node.text if node is not None else ''

    def parse_html(self, response):
        """Parse one listing page; yield a dict of fields per <li> entry.

        Yields nothing when the result list is missing (e.g. an anti-bot or
        empty page was returned instead of listings).
        """
        bs = BeautifulSoup(response.text, 'html.parser')
        # The sold-listing entries live under <ul class="listContent">.
        ul = bs.find('ul', class_='listContent')
        if ul is None:
            return
        for item in ul.find_all('li'):
            yield {
                '小区名': self._field_text(item, 'div', 'title'),
                '小区描述': self._field_text(item, 'div', 'houseInfo'),
                '成交日期': self._field_text(item, 'div', 'dealDate'),
                '总价': self._field_text(item, 'div', 'totalPrice'),
                '楼层信息': self._field_text(item, 'div', 'positionInfo'),
                '单价': self._field_text(item, 'div', 'unitPrice'),
                '挂牌价和成交周期': self._field_text(item, 'span', 'dealCycleTxt'),
                '代理人': self._field_text(item, 'a', 'agent_name'),
            }

    def write_to_file(self, li_list):
        """Append one parsed record to ./lianjia.txt as a JSON line."""
        with open("./lianjia.txt", "a", encoding="utf-8") as f:
            f.write(json.dumps(li_list, ensure_ascii=False) + "\n")

    def run(self):
        """Crawl listing pages 1-100, writing every parsed record to disk."""
        for i in range(1, 101):
            print("正在爬取第", i, "页")
            # Throttle: wait two seconds between page requests.
            time.sleep(2)
            full_url = self.url.format(i)
            # None signals a failed/blocked request — skip the page.
            response = self.send_request(full_url)
            if response:
                for item in self.parse_html(response):
                    self.write_to_file(item)


if __name__ == '__main__':
    # Build the spider, start the crawl, and report when it finishes.
    spider = LianjiaSpider()
    spider.run()
    print("爬取结束")
