# 练习一
import requests
from bs4 import BeautifulSoup

# Fetch the first URL and report its status, headers, and a body preview.
url1 = "https://bj.fang.com/quanwangso/search.html?city=bj&refer=sy_seach"
response1 = requests.get(url1, timeout=10)  # timeout so a hung server cannot stall the script
print(f"Status Code for {url1}: {response1.status_code}")
print(f"Headers for {url1}: {response1.headers}")
print(f"Body for {url1} (first 1000 characters): {response1.text[:1000]}")

# Parse the first URL's HTML content.
soup1 = BeautifulSoup(response1.text, 'html.parser')

# Look for the "首页" (home) link.  `string=` replaces the `text=` keyword,
# which has been deprecated since BeautifulSoup 4.4.
home_link1 = soup1.find('a', string='首页')

# Guard the chained lookup: the original `.find('div', ...).find('a')`
# raises AttributeError on None whenever no 's4Box' div is present.
s4_box1 = soup1.find('div', class_='s4Box')
new_house_link1 = s4_box1.find('a') if s4_box1 else None

if home_link1:
    print(f"首页链接: {home_link1['href']}")
else:
    print("未找到首页链接")

if new_house_link1:
    print(f"新房链接: {new_house_link1['href']}")
else:
    print("未找到新房链接")



# 练习二
from bs4 import BeautifulSoup
import requests
import csv
from tkinter import filedialog, Tk

# Fetch the listing-search page.
# BUG FIX: the original URL contained the HTML-escaped '&amp;', which made
# requests send a query parameter literally named 'amp;refer' instead of
# 'refer' — in Python source the separator must be a plain '&'.
url = 'https://bj.fang.com/quanwangso/search.html?city=bj&refer=sy_seach'

response = requests.get(url, timeout=10)  # timeout so a hung server cannot stall the script
html = response.text  # keep the Response object and its text under separate names

soup = BeautifulSoup(html, 'html.parser')

# Find every div with class 's4Box'.
info_compound = soup.find_all('div', class_='s4Box')

# Collect (link text, href) pairs; boxes without an <a> tag are skipped.
data = []
for info in info_compound:
    link = info.find('a')
    if link:
        text = link.text.strip()
        href = link.get('href', '无链接')
        data.append((text, href))

# Ask the user where to save the CSV via a native save dialog.
root = Tk()
root.withdraw()  # hide the empty main window
file_path = filedialog.asksaveasfilename(
    defaultextension=".csv",
    filetypes=[("CSV files", "*.csv"), ("All files", "*.*")],
    title="保存 CSV 文件"
)
root.destroy()  # release the hidden Tk instance instead of leaking it

if file_path:
    # utf-8-sig writes a BOM so Excel auto-detects the encoding;
    # newline='' lets the csv module control line endings (per csv docs).
    with open(file_path, mode='w', encoding='utf-8-sig', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["文字", "链接"])  # header row
        writer.writerows(data)  # data rows

    print(f"数据已成功保存到 {file_path}")
else:
    print("未选择保存路径，文件未保存。")
