# https://cs.lianjia.com/ershoufang/
# https://cs.lianjia.com/ershoufang/pg2/  2
# https://cs.lianjia.com/ershoufang/pg3/  3
import requests

# Assemble the full set of 5 page URLs

# Build the list of 5 listing-page URLs: page 1 has no "pg" suffix,
# pages 2-5 follow the /pgN/ pattern.
l1 = ['https://cs.lianjia.com/ershoufang/']
l2 = []
for page in range(2, 6):
    l2.append('https://cs.lianjia.com/ershoufang/pg' + str(page) + '/')
l3 = l1 + l2
print(l3)

# Request headers: a browser User-Agent so the site serves the page to a
# scripted client. Hoisted out of the loop — it never changes per page.
head_data = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36'
}

# Fetch each of the 5 pages and save the raw HTML to a numbered file.
for index, url in enumerate(l3):
    # timeout prevents the script from hanging forever on a stalled connection
    html = requests.get(url, headers=head_data, timeout=30)
    print(html.text)

    # 'w' is enough (write-only); `with` guarantees the file handle is
    # closed even if the write raises.
    with open(f'数据{index + 1}.txt', 'w', encoding='utf-8') as f:
        f.write(html.text)

    # Release the underlying connection back to the pool.
    html.close()

# Task: fetch the first 5 pages of Lianjia listings and save each to a txt/html file
# https://cs.lianjia.com/ershoufang/
