#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 15:16:38 2022

@author: cythnia
"""

#爬虫第5课 爬取豆瓣电影评分(奇迹笨小孩)
#关键点：
#1.观察多页网页切换时的变化规律；
#2.标签相同获取内容多且混乱时，运用切片获取相应信息
#——————————————————————————————————————————#
#导入工具包
import requests 
import time 
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
# ---------------------------------------------------------------
# Fetch the first page of comments (top 20, sorted by score)
# ---------------------------------------------------------------
# Target URL
url='https://movie.douban.com/subject/35312437/comments?limit=20&status=P&sort=new_score'
# Request headers: spoof a desktop browser User-Agent so Douban
# serves the normal page instead of blocking the script
headers={
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
    }
# Download the page.
# timeout=10 keeps the script from hanging forever on a dead connection;
# raise_for_status() surfaces HTTP errors (e.g. 403 anti-bot) immediately
# instead of silently parsing an error page.
html=requests.get(url,headers=headers,timeout=10)
html.raise_for_status()
# Raw HTML text of the response
data=html.text
# Parse with the lxml parser
soup=BeautifulSoup(data,'lxml')
#=============================================
# Scrape a single page
#=============================================
# User ids (the <a> inside each comment header)
name=soup.select('#comments > div > div.comment > h3 > span.comment-info > a')
# Rating containers: span.comment-info holds the star-rating span, if any
pingfen=soup.select('#comments > div > div.comment > h3 > span.comment-info ')
# Comment timestamps
shijian=soup.select('#comments > div > div.comment > h3 > span.comment-info > span.comment-time')
# Comment body text
pinglun=soup.select('#comments > div > div.comment > p > span')
# Combine the four parallel lists row by row with zip
lis=[]
for names,pingfens,shijians,pingluns in zip(name,pingfen,shijian,pinglun):
    # Some users comment WITHOUT rating; for those the span at index 1
    # is the comment-time span, not a rating.  Locate the rating span by
    # its 'rating' class instead of assuming a fixed position, and fall
    # back to None so unrated comments don't get bogus star data.
    xing=pingfens.find('span',class_='rating')
    lis.append([names.get_text(),
                xing['class'] if xing is not None else None,
                xing['title'] if xing is not None else None,
                shijians.get_text().strip(),
                pingluns.get_text()])
# Assemble the rows into a DataFrame
result1=pd.DataFrame(lis,columns=['用户id','星级','评价','评论时间','评论内容'])
# Keep only the date part of the timestamp (drop the time-of-day)
result1['评论时间']=result1['评论时间'].str.split().str[0]
result1.to_excel('/Users/cythnia/Desktop/奇迹笨小孩单页爬取.xlsx',index=False)
#=============================================
# Scrape multiple pages
#=============================================
# Build the URL for each page: Douban pages by the 'start' offset,
# 20 comments per page, so range(0,200,20) covers the first 10 pages
url=['https://movie.douban.com/subject/35312437/comments?start={}&limit=20&status=P&sort=new_score'.format(i) for i in range(0,200,20)]
# Accumulator list: must be created BEFORE the loop, otherwise it
# would be reset to empty on every iteration
lis2=[]
# Headers are loop-invariant, so build the dict once outside the loop
headers={
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
    }
# Repeat the single-page scrape for every page URL
for urli in url:
    # Download one page; timeout guards against hangs and
    # raise_for_status() fails fast on HTTP errors (e.g. anti-bot 403)
    html=requests.get(urli,headers=headers,timeout=10)
    html.raise_for_status()
    data=html.text
    soup=BeautifulSoup(data,'lxml')
    # Four parallel selections, same as the single-page scrape:
    # user ids, rating containers, timestamps, comment text
    name=soup.select('#comments > div > div.comment > h3 > span.comment-info > a')
    pingfen=soup.select('#comments > div > div.comment > h3 > span.comment-info ')
    shijian=soup.select('#comments > div > div.comment > h3 > span.comment-info > span.comment-time')
    pinglun=soup.select('#comments > div > div.comment > p > span')
    # Combine row by row
    for names,pingfens,shijians,pingluns in zip(name,pingfen,shijian,pinglun):
        # Locate the rating span by its 'rating' class: users who did
        # not rate have no such span, and a fixed [1] index would grab
        # the comment-time span instead, corrupting the star columns.
        xing=pingfens.find('span',class_='rating')
        lis2.append([names.get_text(),
                    xing['class'] if xing is not None else None,
                    xing['title'] if xing is not None else None,
                    shijians.get_text().strip(),
                    pingluns.get_text()])
    print('完成:',urli)
    # Random 5-9 second pause between requests to avoid anti-scraping blocks
    time.sleep(np.random.randint(5,10))
# Assemble all pages into one DataFrame and export
result2=pd.DataFrame(lis2,columns=['用户id','星级','评价','评论时间','评论内容'])
# Keep only the date part of the timestamp (drop the time-of-day)
result2['评论时间']=result2['评论时间'].str.split().str[0]
result2.to_excel('/Users/cythnia/Desktop/奇迹笨小孩多页爬取.xlsx',index=False)
