import requests
import re
from bs4 import BeautifulSoup
import pandas as pd


'''
demo: fetch a single image
url = 'https://p0.meituan.net/movie/ce4da3e03e655b5b88ed31b5cd7896cf62472.jpg@464w_644h_1e_1c'
r = requests.get(url, headers=header)

# Open 1.jpg and write r.content in binary-write mode.
# If the file already exists it is truncated first.
with open('1.jpg', 'wb') as f:
    f.write(r.content)
'''

'''
demo: grab movie titles only
# Locate the target position in the HTML.
## re is the regular-expression module.
## find_all() returns every matching tag; find() returns only the first.

div_list = soup.find_all(
    name='a', href=re.compile(r'/detail/.*'), class_='name')
print(div_list)
# Then pull the text of each movie title out.
for name in div_list:
    print(name.text)
    print('\n')
'''

# Request headers are loop-invariant, so build them once up front.
headers = {
    'referer': 'https://ssr1.scrape.center/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
}

# Column -> list-of-values accumulator; turned into a DataFrame at the end.
# NOTE: key fixed from the original typo 'catogries' (this is the Excel
# column header, so the typo was user-visible).
movies_df = {
    "name": [],
    "categories": [],
    "country": [],
    "score": [],
    "year": [],
    "time": [],
}

# Scrape listing pages 1..10. The original built the URL without an
# f-string prefix (every request hit the literal '.../page/{page}') and
# left all the extraction code *outside* the while loop with broken
# indentation; everything now runs once per page.
for page in range(1, 11):
    url = f'https://ssr1.scrape.center/page/{page}'
    r = requests.get(url, headers=headers)

    # Parse the response body with the stdlib html.parser backend.
    soup = BeautifulSoup(r.text, 'html.parser')

    # demo2: grab movie title and category tags.
    # One <div> per movie card on the listing page.
    div_list = soup.find_all(
        name='div', class_='p-h el-col el-col-24 el-col-xs-9 el-col-sm-13 el-col-md-16')

    # Scores live in a parallel list of elements, aligned by index with
    # the movie cards above.
    score_list = soup.find_all(class_="score m-t-md m-b-n-sm")

    for mvidx, movie in enumerate(div_list):
        # Movie title.
        movies_df['name'].append(movie.h2.text)

        # Category tags are rendered as <button><span>tag</span></button>;
        # join them into a single comma-separated string.
        cates = [bton.span.text for bton in movie.find_all(type='button')]
        movies_df['categories'].append(','.join(cates))

        # First info row: country / (separator) / running time.
        mvinfo_list = movie.find_all(class_="m-v-sm info")
        span_list = mvinfo_list[0].find_all(name="span")
        movies_df['country'].append(span_list[0].string)
        movies_df['time'].append(span_list[2].string)

        # Second info row: release year — may be absent for some movies.
        span_list = mvinfo_list[1].find_all(name="span")
        movies_df['year'].append(span_list[0].string if span_list else '')

        # strip() removes surrounding whitespace from the score text.
        movies_df['score'].append(score_list[mvidx].string.strip())


'''
Export the collected data to Excel.
A DataFrame can be thought of as a table.
'''
data = pd.DataFrame(movies_df)
data.to_excel('movies.xlsx', sheet_name='movies', index_label='index')
