import requests
from bs4 import BeautifulSoup
import pandas as pd

# URLs of the two pages to scrape
url1 = 'https://gl.ali213.net/html/2023-11/1224639.html'  # first site (ali213 game-guide page)
url2 = 'https://www.shanghairanking.cn/rankings/bcur/2023'  # second site (university ranking page)


# 定义函数来爬取一个网站的<a>标签，并返回链接和文本列表
def crawl_links(url, timeout=10):
    """Fetch *url* and return (href, text) pairs for every ``<a>`` tag.

    Parameters
    ----------
    url : str
        Address of the page to download.
    timeout : float, optional
        Seconds before the HTTP request is aborted (default 10).
        The original code passed no timeout, so a stalled connection
        could hang the script forever.

    Returns
    -------
    list[tuple[str | None, str]]
        One ``(href, stripped_text)`` tuple per anchor; ``href`` is
        ``None`` when the tag has no ``href`` attribute.  Returns an
        empty list if the request fails for any reason.
    """
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # raise on 4xx/5xx status codes
        response.encoding = 'utf-8'  # force correct decoding of the body

        # Parse the page and collect every anchor tag.
        soup = BeautifulSoup(response.text, 'html.parser')
        a_tags = soup.find_all('a')

        # .get('href') (not ['href']) avoids KeyError on href-less anchors.
        return [(a_tag.get('href'), a_tag.get_text(strip=True)) for a_tag in a_tags]
    except requests.RequestException as e:
        print(f"请求{url}时出错: {e}")
        return []


# Scrape each site in turn.
links_and_texts_1 = crawl_links(url1)
links_and_texts_2 = crawl_links(url2)

# One DataFrame per site, both sharing the same two columns
# ('链接' = link, '文本' = text — these names appear in the output file).
columns = ['链接', '文本']
df1 = pd.DataFrame(links_and_texts_1, columns=columns)
df2 = pd.DataFrame(links_and_texts_2, columns=columns)

# Write each DataFrame to its own sheet of a single workbook; the
# context manager saves and closes the file on exit.
with pd.ExcelWriter('links.xlsx', engine='openpyxl') as writer:
    for sheet_name, frame in (('Sheet1', df1), ('Sheet2', df2)):
        frame.to_excel(writer, sheet_name=sheet_name, index=False)

print("链接信息已保存到Excel文件的两个sheet中。")