from time import sleep

import pandas as pd
import requests as rq
from bs4 import BeautifulSoup as bs
header = {'User-Agent':'AppleWebKit/537.36'}  # minimal User-Agent so the site does not reject the bare request
data = []  # accumulates one [rank, name, star, release time, score] row per movie
# Scrape the Maoyan top-100 board: 10 pages of 10 movies each,
# paged via the `offset` query parameter (0, 10, ..., 90).
for offset in range(0, 100, 10):
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    sources = rq.get(url, headers=header).text  # fetch the page HTML
    soup = bs(sources, 'html.parser')  # html.parser is the parser

    # Five parallel lists, one entry per movie on the page.
    ranks = soup.find_all('i', class_="board-index")
    names = soup.find_all('p', class_="name")
    stars = soup.find_all('p', class_="star")
    # NOTE: renamed from `time` — the old name was re-bound to a string
    # inside the inner loop and shadowed the stdlib module name.
    times = soup.find_all('p', class_="releasetime")
    scores = soup.find_all('p', class_="score")

    for r, n, s1, t, s2 in zip(ranks, names, stars, times, scores):
        rank = r.get_text()
        name = n.get_text().strip()
        star = s1.get_text().strip()[3:]  # drop the leading "主演:" label
        release = t.get_text()[5:]        # drop the leading "上映时间:" label
        score = s2.get_text()

        data.append([rank, name, star, release, score])

    # Throttle between PAGE requests. The original slept inside the
    # per-movie loop (20 s x 100 rows ~= 33 min of pure sleeping);
    # one delay per HTTP request is what the comment intended.
    sleep(20)

# Save the scraped rows to Excel with labeled columns.
# The original produced unlabeled integer column headers (0..4) plus a
# meaningless index column; name the columns and drop the index.
df = pd.DataFrame(data, columns=['rank', 'name', 'star', 'release_time', 'score'])
df.to_excel('data.xlsx', index=False)



'''部分代码说明：
url = 'https://maoyan.com/board/4'
sources = rq.get(url,headers = header) #获取网页源代码
soup = bs(sources.content,'html.parser') #html.parser是解析器

names = soup.find_all('p',class_="name") #class要加一个下划线
names[0].text #发现需要内容是文本内容，直接使用text

star = soup.find_all('p',class_="star")
star[0].text #获取的文本内容含有空格
star[0].get_text().strip() #去掉空格 如果strip前面加一个r，则空格没有去除，和上一步一样
star[0].get_text().strip()[3:] #去掉主演两个字
