<!doctype html>
<html lang="zh-Hans">
<head>
<title>火影忍者.py</title>
<meta charset="utf-8">
<style type="text/css">
.s0 { color: #000080; font-weight: bold;}
.s1 { color: #000000;}
.s2 { color: #008000; font-weight: bold;}
.s3 { color: #808080; font-style: italic;}
.s4 { color: #0000ff;}
</style>
</head>
<body style="background-color:#ffffff">
<table style="border-collapse:collapse; width:100%; background-color:#c0c0c0"><tr>
<td style="padding:5px; text-align:center; font-family:Arial, Helvetica; color:#000000">
火影忍者.py
</td></tr></table>
<pre><span class="s0">import </span>pandas <span class="s0">as </span>pd
<span class="s0">import </span>requests
<span class="s0">from </span>bs4 <span class="s0">import </span>BeautifulSoup
<span class="s0">with </span>open(<span class="s2">r'D:\python大作业（明昂）\爬取的网址.txt'</span>,<span class="s2">'r'</span>) <span class="s0">as </span>f:<span class="s3">#read the saved target URL; raw string avoids invalid backslash escapes (\p) in the Windows path</span>
    url=f.readline().strip()<span class="s3">#drop the trailing newline so requests gets a clean URL</span>
print(url)
headers={
    <span class="s2">'User-Agent'</span>:<span class="s2">'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0'</span>
}<span class="s3">#request headers: present as a normal desktop browser</span>
response=requests.get(url,headers=headers)<span class="s3">#fetch the page</span>
response.raise_for_status()<span class="s3">#fail fast on HTTP errors instead of scraping an error page</span>
text=response.text<span class="s3">#HTML document of the response</span>
soup=BeautifulSoup(text,<span class="s2">'lxml'</span>)<span class="s3">#parse with the lxml backend</span>
result_list=soup.find_all(<span class="s2">'td'</span>,attrs={<span class="s2">'class'</span>:<span class="s2">'tableTd__dDEM'</span>,<span class="s2">'width'</span>:<span class="s2">'113'</span>})<span class="s3">#limit the scrape by class and width</span>
names=[result.text <span class="s0">for </span>result <span class="s0">in </span>result_list]<span class="s3">#collect all cell texts first</span>
<span class="s0">for </span>name <span class="s0">in </span>names:
    print(name)
df=pd.DataFrame({<span class="s2">'name'</span>:names})<span class="s3">#build the frame in one step: DataFrame._append is private pandas API (public append removed in 2.0) and per-row appending is O(n^2)</span>
print(<span class="s2">'-'</span>*<span class="s4">100</span>)
result_list2=soup.find_all(<span class="s2">'td'</span>,attrs={<span class="s2">'class'</span>:<span class="s2">'tableTd__dDEM'</span>,<span class="s2">'width'</span>:<span class="s2">'230'</span>})<span class="s3">#same scrape for the second column</span>
titles=[result2.text <span class="s0">for </span>result2 <span class="s0">in </span>result_list2]
<span class="s0">for </span>title <span class="s0">in </span>titles:
    print(title)
df2=pd.DataFrame({<span class="s2">'title'</span>:titles})
<span class="s0">with </span>pd.ExcelWriter(<span class="s2">'火影忍者.xlsx'</span>) <span class="s0">as </span>writer:<span class="s3">#write the two scraped columns to two sheets of one workbook</span>
    df.to_excel(writer, index=<span class="s0">False</span>, sheet_name=<span class="s2">'角色名'</span>)
    df2.to_excel(writer, index=<span class="s0">False</span>, sheet_name=<span class="s2">'章节名'</span>)
</pre>
</body>
</html>