"""
获取苹果社区帖子文章地址
https://discussionschinese.apple.com/browse
格式如下
    帖子链接
    帖子标题
    帖子发布人
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

url = "https://discussionschinese.apple.com/browse"
headers = {"User-Agent": UserAgent().random}
res = requests.get(url=url, headers=headers).content

with open('apple_community.html', 'wb') as f:
    f.write(res)

with open('apple_community.html', 'r', encoding='utf-8') as f:
    html = f.read()
soup = BeautifulSoup(html, 'html.parser')

# Each post is one <tr class="topics-table-row">; extract link, title, author.
data_rows = soup.find_all('tr', class_='topics-table-row')

base_url = "https://discussionschinese.apple.com"
records = []
for row in data_rows:
    # Look up each anchor once instead of repeating the same .find() call.
    title_link = row.find('a', class_='topic-title-link')
    author_link = row.find('a', class_='post-author-profile')
    # Skip malformed rows rather than crashing with TypeError on a None lookup.
    if title_link is None or author_link is None:
        continue
    records.append({
        'URL路径': base_url + title_link['href'],
        '帖子标题': title_link.text,
        '作者名字': author_link.text.strip(),
    })

# Build the DataFrame once from all records — pd.concat inside the loop
# copies the whole frame every iteration (quadratic time).
df = pd.DataFrame(records, columns=['URL路径', '帖子标题', '作者名字'])
df.to_excel('data.xlsx', index=False)
