<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>PBaiike</title>
</head>
<body>
import pandas as pd
import requests
from bs4 import BeautifulSoup


def scrape_basic_info(url: str) -> pd.DataFrame:
    """Fetch one Baidu Baike page and extract its basic-info items.

    Each div.itemWrapper_gAtpL on the page yields one row with:
      - 'dt':    text of dt.basicInfoItem_rnDHB ("N/A" if the tag is missing)
      - 'spans': list of stripped texts from every span.text_Ygy92

    Raises requests.HTTPError if the page cannot be fetched.
    """
    # Timeout so a single unreachable URL cannot hang the whole run.
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # 检查请求是否成功

    # 使用BeautifulSoup解析网页内容
    soup = BeautifulSoup(response.text, 'html.parser')

    data = []
    # 遍历每个div class="itemWrapper_gAtpL"标签
    for item_wrapper in soup.find_all('div', class_='itemWrapper_gAtpL'):
        dt_tag = item_wrapper.find('dt', class_='basicInfoItem_rnDHB')
        span_tags = item_wrapper.find_all('span', class_='text_Ygy92')

        # 提取dt和span中的内容; missing dt becomes a placeholder row.
        dt_text = dt_tag.text.strip() if dt_tag else "N/A"
        span_texts = [span.text.strip() for span in span_tags]

        data.append({'dt': dt_text, 'spans': span_texts})

    return pd.DataFrame(data)


def main() -> None:
    """Read URLs from urls.txt and write one Excel sheet per URL.

    Output goes to 百度百科.xlsx, sheets named Sheet1, Sheet2, ...
    in the order the URLs appear in the file.
    """
    # 打开文本文件并读取URL; skip blank lines so they don't become
    # empty requests.
    with open('urls.txt', 'r', encoding='utf-8') as file:
        urls = [line for line in file.read().splitlines() if line.strip()]

    # 创建一个ExcelWriter对象; the context manager saves the workbook on exit.
    with pd.ExcelWriter('百度百科.xlsx') as writer:
        # 遍历读取到的每个URL
        for i, url in enumerate(urls):
            df = scrape_basic_info(url)

            # 将DataFrame保存到指定的Excel sheet中
            sheet_name = f'Sheet{i+1}'
            df.to_excel(writer, sheet_name=sheet_name, index=False)

            # 打印DataFrame
            print(df)
            print("-" * 50)

    print(f'数据已保存到"百度百科.xlsx"中。')


if __name__ == "__main__":
    main()
</body>
</html>