
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Load the target URLs, one per line; skip blank lines so an empty
# line in urls.txt does not turn into a bogus "" request later on.
with open('urls.txt', 'r', encoding='utf-8') as f:
    urls = [line.strip() for line in f if line.strip()]

# One one-row DataFrame per successfully scraped URL, collected here
# and written to Excel at the end of the script.
dfs_list = []

# Scrape each URL and collect its metadata (title, description,
# keywords) as a one-row DataFrame.
for url in urls:
    print(f"Processing URL: {url}")

    # Fetch the page; the timeout prevents the whole script from
    # hanging indefinitely on one unresponsive server.
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()  # non-2xx status -> exception
    except requests.RequestException as e:
        print(f"Error fetching URL: {url} - {e}")
        continue

    # Parse the HTML with the stdlib-backed parser.
    soup = BeautifulSoup(response.text, 'html.parser')

    # Page <title> text, or 'N/A' when the document has none.
    title = soup.title.text if soup.title else 'N/A'

    # <meta name="description"> content. Tag.get() guards against a
    # tag that exists but lacks a 'content' attribute (the previous
    # direct subscript raised KeyError in that case).
    desc_tag = soup.find('meta', attrs={'name': 'description'})
    desc = desc_tag.get('content', 'N/A') if desc_tag else 'N/A'

    # <meta name="keywords"> content, with the same guard.
    meta_keywords = soup.find('meta', attrs={'name': 'keywords'})
    keywords = meta_keywords.get('content', 'N/A') if meta_keywords else 'N/A'

    # One row of metadata for this URL, queued for the Excel export.
    data = {'Site': [url], 'Title': [title], 'Description': [desc], 'Keywords': [keywords]}
    dfs_list.append(pd.DataFrame(data))

# Export: one worksheet per scraped URL. Guard against an empty list —
# openpyxl refuses to save a workbook that contains no sheets, so an
# all-failed run would otherwise crash here (and the success message
# would be misleading).
if dfs_list:
    with pd.ExcelWriter('output.xlsx', engine='openpyxl') as writer:
        # Sheet names like 'Sheet_1' stay well under Excel's
        # 31-character sheet-name limit.
        for idx, df in enumerate(dfs_list):
            df.to_excel(writer, sheet_name=f'Sheet_{idx+1}', index=False)
    print("Data has been successfully saved to output.xlsx file.")
else:
    print("No data collected; output.xlsx was not written.")