"""
    地址
        北京 ·1 https://bj.lianjia.com/zufang/ab200301001000pg1rt200600000001/#contentList
        北京 ·2 https://bj.lianjia.com/zufang/ab200301001000pg2rt200600000001/#contentList
        北京 ·3 https://bj.lianjia.com/zufang/ab200301001000pg3rt200600000001/#contentList

        上海 ·1 https://sh.lianjia.com/zufang/ab200301001000pg1rt200600000001/#contentList
        上海 ·2 https://sh.lianjia.com/zufang/ab200301001000pg2rt200600000001/#contentList
        上海 ·3 https://sh.lianjia.com/zufang/ab200301001000pg3rt200600000001/#contentList
    分析：
        北京对应bj
        上海对应sh
        广州对应gz
        每一页都在变化的是pgindex
"""
import time                                                             # 时间模块
import pandas                                                           # 数据处理模块
import asyncio                                                          # 异步模块
import aiohttp                                                          # 异步请求模块
import requests                                                         # 请求模块
from fake_useragent import UserAgent                                    # 请求头模块
from lxml import etree                                                  # 解析模块

class HomeSpider():
    """Scraper for Lianjia rental listings (Beijing / Shanghai / Guangzhou).

    Fetches listing pages asynchronously, extracts title / location / area /
    floor / price fields via XPath, and appends them to a per-city CSV file.
    """

    def __init__(self):
        # Collected rows; not used by get_data_all itself but kept for
        # backward compatibility with any external caller.
        self.data = []
        # One random User-Agent per spider instance so requests look less uniform.
        self.headers = {'User-Agent': UserAgent().random}

    async def request(self, url):
        """Fetch *url* asynchronously.

        Returns the response body as text on HTTP 200, or None on any
        failure (non-200 status, timeout, connection error).
        """
        async with aiohttp.ClientSession() as session:
            try:
                async with session.get(url, headers=self.headers, timeout=3) as response:
                    if response.status == 200:
                        return await response.text()
            except Exception as e:
                # Best-effort crawl: log and return None so one failed page
                # does not abort the whole run.
                print(e.args)
        return None

    def get_city_letter(self, city_name):
        """Map a Chinese city name to its Lianjia subdomain prefix.

        '北京' -> 'bj', '上海' -> 'sh', '广州' -> 'gz'; None for unknown cities.
        """
        city_dict = {'北京': 'bj', '上海': 'sh', '广州': 'gz'}
        return city_dict.get(city_name)

    def get_page_all(self, city):
        """Return total page count + 1 for *city* (usable directly as a range() stop).

        Returns None when the listing index page cannot be fetched or parsed.
        """
        city_letter = self.get_city_letter(city)
        url = f'https://{city_letter}.lianjia.com/zufang/rt200600000001/'
        response = requests.get(url=url, headers=self.headers)
        if response.status_code == 200:
            html = etree.HTML(response.text)
            pages = html.xpath('//div[@id="content"]/div[1]/div[2]/@data-totalpage')
            if not pages:
                # Layout changed or we were served an anti-bot page:
                # fail gracefully instead of raising IndexError.
                print('获取页码未成功')
                return None
            # +1 so callers can iterate range(1, result) over every real page.
            return int(pages[0]) + 1
        else:
            print('获取页码未成功')

    async def get_data_all(self, page_all, city):
        """Crawl pages 1..page_all-1 for *city* and append rows to '<city>租房信息.csv'."""
        city_letter = self.get_city_letter(city)  # invariant per city: hoist out of the loop
        for i in range(1, page_all):
            print(f'正在获取第{i}页数据')
            url = f'https://{city_letter}.lianjia.com/zufang/ab200301001000pg{i}rt200600000001/#contentList'
            # Throttle politely WITHOUT blocking the event loop
            # (time.sleep() here would stall every coroutine).
            await asyncio.sleep(3)
            html_text = await self.request(url)
            if not html_text:
                # Fetch failed; skip this page rather than crash on etree.HTML(None).
                continue
            html = etree.HTML(html_text)
            # Listing title
            title_all = html.xpath('//div[@id="content"]/div[1]/div[1]/div/div/p[1]/a/text()')
            # Location: district and sub-district
            big_region = html.xpath('//div[@id="content"]/div[1]/div[1]/div/div/p[2]/a[1]/text()')
            small_region = html.xpath('//div[@id="content"]/div[1]/div[1]/div/div/p[2]/a[2]/text()')
            # Floor area
            square_all = html.xpath('//div[@id="content"]/div[1]/div[1]/div/div/p[2]/text()[5]')
            # Floor number
            floor_all = html.xpath('//div[@id="content"]/div[1]/div[1]/div/div/p[2]/span/text()[2]')
            # Monthly rent
            price_all = html.xpath('//div[@id="content"]/div[1]/div[1]/div/div/span/em/text()')

            title_list = self.remove_spaces(title_all)
            address_list = self.combined_region(big_region, small_region)
            square_list = self.remove_spaces(square_all)
            floor_list = self.remove_spaces(floor_all)
            price_list = self.remove_spaces(price_all)

            data_page = {
                '标题': title_list,
                '面积': square_list,
                '楼层数': floor_list,
                '价格  元/月': price_list,
                '位置': address_list
            }

            # Append this page's rows; write the column header only on the
            # first page so the appended CSV is not littered with headers.
            df = pandas.DataFrame(data_page)
            df.to_csv('{}租房信息.csv'.format(city), mode='a', encoding='utf-8-sig',
                      index=False, header=(i == 1))

    def remove_spaces(self, info):
        """Return *info* with all spaces and newlines stripped from each item."""
        return [i.replace(' ', '').replace('\n', '') for i in info]

    def combined_region(self, big_region, small_region):
        """Pairwise join district and sub-district names as 'big-small' strings.

        Truncates to the shorter of the two lists (zip semantics).
        """
        return [a + '-' + b for a, b in zip(big_region, small_region)]

    def start(self, page_all, city):
        """Synchronous entry point: run the async crawl to completion."""
        asyncio.run(self.get_data_all(page_all=page_all, city=city))

if __name__ == '__main__':
    # Entry point: ask the user for a target city, resolve its total page
    # count, then crawl every listing page into a CSV.
    target_city = input("请输入城市名称('北京', '上海', '广州'):")
    spider = HomeSpider()
    total_pages = spider.get_page_all(city=target_city)
    spider.start(page_all=total_pages, city=target_city)
