<!doctype html>
<html lang="zh-Hans">
<head>
<title>爬取网页数据.py</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<style type="text/css">
.s0 { color: #000080; font-weight: bold;}
.s1 { color: #000000;}
.s2 { color: #808080; font-style: italic;}
.s3 { color: #008000; font-weight: bold;}
.s4 { color: #0000ff;}
</style>
</head>
<body bgcolor="#ffffff">
<table CELLSPACING=0 CELLPADDING=5 COLS=1 WIDTH="100%" BGCOLOR="#c0c0c0" >
<tr><td><center>
<font face="Arial, Helvetica" color="#000000">
爬取网页数据.py</font>
</center></td></tr></table>
<pre><span class="s0">import </span>pandas <span class="s0">as </span>pd
<span class="s0">import </span>requests
<span class="s0">import </span>json
<span class="s0">import </span>urllib3
<span class="s2"># Script purpose: log in to JD.com, scrape good/bad product reviews, save them to Excel.</span>
<span class="s2"># Read the target URLs from a text file: line 1 = login API,</span>
<span class="s2"># line 2 = good-review API, line 3 = bad-review API (see their usage below).</span>
<span class="s2"># NOTE(review): readline() keeps the trailing '\n' -- presumably requests</span>
<span class="s2"># tolerates it, but a .strip() would be safer; confirm the file's format.</span>
<span class="s0">with </span>open(<span class="s3">&quot;D:\python大作业（个人）\爬取的网址.txt&quot;</span>,<span class="s3">'r'</span>) <span class="s0">as </span>f:
    url1=f.readline()
    url2=f.readline()
    url3=f.readline()
<span class="s2"># Echo the three URLs that were read, as a sanity check.</span>
print(url1)
print(url2)
print(url3)
urllib3.disable_warnings()<span class="s2"># Suppress urllib3 SSL warnings (verify=False is used on every request below)</span>
session=requests.session()<span class="s2"># Create a session object so cookies/headers persist across requests</span>
<span class="s2"># Request headers; the User-Agent string was copied from the browser's F12 dev tools</span>
headers={
    <span class="s3">&quot;Content-Type&quot;</span>: <span class="s3">'application/json'</span>,
    <span class="s3">&quot;User-Agent&quot;</span>: <span class="s3">&quot;Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0&quot;</span><span class="s2"># Taken from the F12 network panel</span>
}
session.headers=headers
<span class="s2"># Login payload, posted to url1 to obtain the session cookies.</span>
<span class="s2"># NOTE(review): hard-coded credentials committed in source -- move them to</span>
<span class="s2"># environment variables or a config file, and rotate this password now that</span>
<span class="s2"># it has been exposed.</span>
data = {
    <span class="s3">'username'</span>: <span class="s3">'13176385619'</span>,
    <span class="s3">'password'</span>: <span class="s3">'longzihan123'</span>
}
<span class="s2"># NOTE(review): verify=False disables TLS certificate validation -- tolerable</span>
<span class="s2"># for a class project, unsafe for anything real.</span>
response=session.post(url1,data=json.dumps(data),headers=headers,verify=<span class="s0">False</span>)
<span class="s0">if </span>response.status_code == <span class="s4">200</span>:
    print(<span class="s3">'尊敬的用户，' </span>+ <span class="s3">'您已模拟登录 成功'</span>)<span class="s2"># Confirm the simulated login succeeded</span>
m=response.cookies<span class="s2"># Cookies returned by the login; reused for both review requests below</span>
<span class="s2"># Fetch the good (positive) reviews while logged in.</span>
response2=session.get(url2,headers=headers,cookies=m,verify=<span class="s0">False</span>)
<span class="s0">if </span>response2.status_code==<span class="s4">200</span>:
    print(<span class="s3">'成功登录到京东页面'</span>)<span class="s2"># Confirm the JD page was reached</span>
print(<span class="s3">'-'</span>*<span class="s4">100</span>)
json_data=response2.json()<span class="s2"># Parse the response body into a Python dict</span>
df=pd.DataFrame(columns=[<span class="s3">'content'</span>])<span class="s2"># Empty DataFrame to collect the review texts</span>
<span class="s0">if </span><span class="s3">'comments' </span><span class="s0">in </span>json_data:<span class="s2"># Guard against an unexpected payload shape</span>
    <span class="s2"># Iterate over the reviews, printing and storing each one.</span>
    <span class="s0">for </span>comment <span class="s0">in </span>json_data[<span class="s3">'comments'</span>]:
        print(comment[<span class="s3">'content'</span>])
        df=df._append({<span class="s3">'content'</span>: comment[<span class="s3">'content'</span>]},ignore_index=<span class="s0">True</span>)<span class="s2"># NOTE(review): _append is a private pandas API (public .append removed in 2.0); prefer pd.concat</span>
<span class="s0">else</span>:
    print(<span class="s3">&quot;JSON 数据中没有找到要爬取的数据。&quot;</span>)
<span class="s2"># Fetch the bad (negative) reviews with the same session and cookies.</span>
response3=session.get(url3,headers=headers,cookies=m,verify=<span class="s0">False</span>)
<span class="s0">if </span>response3.status_code==<span class="s4">200</span>:
    print(<span class="s3">'成功登录到京东页面'</span>)<span class="s2"># Confirm the JD page was reached</span>
print(<span class="s3">'-'</span>*<span class="s4">100</span>)
json_data2=response3.json()<span class="s2"># Parse the response body into a Python dict</span>
df2=pd.DataFrame(columns=[<span class="s3">'content'</span>])<span class="s2"># Empty DataFrame to collect the bad-review texts</span>
<span class="s0">if </span><span class="s3">'comments' </span><span class="s0">in </span>json_data2:<span class="s2"># Guard against an unexpected payload shape</span>
    <span class="s2"># Iterate over the reviews; JD's two placeholder texts for purchases with</span>
    <span class="s2"># no written review are replaced by a uniform negative label.</span>
    <span class="s0">for </span>comment <span class="s0">in </span>json_data2[<span class="s3">'comments'</span>]:
        print(comment[<span class="s3">'content'</span>])
        <span class="s0">if </span>comment[<span class="s3">'content'</span>]==<span class="s3">'此用户未填写评价内容' </span><span class="s0">or </span>comment[<span class="s3">'content'</span>]==<span class="s3">'此用户未及时填写评价内容'</span>:
            comment[<span class="s3">'content'</span>]=<span class="s3">'用户体验感差'</span>
        df2=df2._append({<span class="s3">'content'</span>: comment[<span class="s3">'content'</span>]},ignore_index=<span class="s0">True</span>)<span class="s2"># Append each bad review to df2 (private pandas API; prefer pd.concat)</span>
<span class="s0">else</span>:
    print(<span class="s3">&quot;JSON 数据中没有找到要爬取的数据。&quot;</span>)
<span class="s2"># Write both DataFrames into one workbook '评论.xlsx': sheet '好评' holds the</span>
<span class="s2"># good reviews, sheet '差评' the bad ones. ExcelWriter saves on context exit.</span>
<span class="s0">with </span>pd.ExcelWriter(<span class="s3">'评论.xlsx'</span>) <span class="s0">as </span>writer:
    df.to_excel(writer, index=<span class="s0">False</span>, sheet_name=<span class="s3">'好评'</span>)
    df2.to_excel(writer, index=<span class="s0">False</span>, sheet_name=<span class="s3">'差评'</span>)
</pre>
</body>
</html>