# -*- coding:utf-8 -*-
# @Author    : g1879
# @date      : 2020/12/9
# @email     : g1879@qq.com
# @File      : 爬链家2.py
"""爬取一个城市所有区二手房信息"""
import ast

from DataRecorder import Recorder
from DrissionPage import MixPage

from ListPage import ListPage, Paths, Targets

# Collect (district name, first-page URL) pairs for every district of the city
城市url = 'https://sz.lianjia.com/ershoufang/'
page = MixPage('s')
page.get(城市url)
区urls = tuple(
    (链接.text, f'{链接.link}pg1/')
    for 链接 in page.eles('xpath://div[@data-role="ershoufang"]//a')
)

# Describe the list-page layout: each <li> under the sell list is one row,
# with two named columns located by css selectors
css_paths = Paths('css')
css_paths.rows = 'ul.sellListContent>li'
for 列名, 选择器 in (('标题', 'div.title>a'), ('信息', 'div.houseInfo')):
    css_paths.set_col(列名, 选择器)

# Declare what to extract from each row: (output name, source column[, attribute])
targets = Targets(css_paths)
for 参数 in (('标题', '标题'), ('链接', '标题', 'href'), ('信息', '信息')):
    targets.add_target(*参数)

# Recorder that writes scraped rows to the CSV file; the second argument (200)
# is presumably the cache size before a flush — TODO confirm against DataRecorder docs
recorder = Recorder('深圳4.csv', 200)

# List page driven by the css paths defined above (rebinds `page` from the MixPage)
page = ListPage(css_paths)

# Crawl listings district by district.
# NOTE(review): the `[1:2]` slice, `count=1` and the trailing `break` all look
# like debug limits — only the second district's first page is fetched. Remove
# them to crawl every district, as the module docstring describes.
for 区url in 区urls[1:2]:
    print(f'\n{区url[0]}')
    page.get(区url[1])
    page.num_param = '/pg'
    # page-data attribute holds a dict literal such as {"totalPage":75,"curPage":1}.
    # Parse it with ast.literal_eval instead of eval(): eval on scraped text can
    # execute arbitrary code, literal_eval accepts the same literal safely.
    data = page('.page-box house-lst-page-box').attr('page-data')
    page.pages_count = ast.literal_eval(data)['totalPage']  # 设置总页数

    recorder.set_before({'区': 区url[0]})  # prefix every record with the district name
    page.get_list(targets, count=1, recorder=recorder, return_data=False)
    break
