# -*- coding:utf-8 -*-
# 爬虫：
# requests请求的练习

# 题目：
# 目标网站：https://movie.douban.com/top250?start=0&filter=
# 要求：抓取前三页的html数据并保存为html文件

"""
前三页的url:
https://movie.douban.com/top250?start=0&filter=
https://movie.douban.com/top250?start=25&filter=
https://movie.douban.com/top250?start=50&filter=
"""

import os

import requests


class DoubanMovie(object):
    """Scraper for the first pages of Douban Movie Top 250.

    Downloads each result page's raw HTML and saves it to a file under
    the ``./movies/`` directory (created on demand).
    """

    def __init__(self):
        # {} is filled with the page offset: 0, 25, 50, ... (25 movies per page).
        self.url = "https://movie.douban.com/top250?start={}&filter="
        # Browser-like User-Agent so the request is not rejected as a bot.
        self.headers = {
            "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
        }

    # Build the URLs of the first result pages.
    def get_url_data(self, pages=3):
        """Return a list of URLs for the first *pages* result pages.

        ``pages`` defaults to 3 for backward compatibility; each page shows
        25 movies, so page i starts at offset ``i * 25``.
        """
        return [self.url.format(page * 25) for page in range(pages)]

    # Send the request and return the response body.
    def get_page_index(self, url):
        """Fetch *url* and return its HTML text.

        Returns ``None`` on a non-200 status code or a network error, so the
        caller can skip the page instead of crashing.
        """
        try:
            # Timeout prevents the script from hanging forever on a dead connection.
            response = requests.get(url, headers=self.headers, timeout=10)
        except requests.RequestException:
            return None
        response.encoding = "utf-8"
        if response.status_code == 200:   # only accept a successful response
            return response.text
        return None

    # Save one page of scraped movie data to an HTML file.
    def write_movie_data(self, response, index):
        """Write *response* (HTML text) to ``./movies/第{index}页的电影.html``.

        Fixes vs. the original: the target directory is created if missing,
        and the file is opened in "w" mode so re-running the script replaces
        the file instead of appending duplicate HTML.
        """
        os.makedirs("./movies", exist_ok=True)
        file_name = f"第{index}页的电影.html"
        with open(os.path.join("./movies", file_name), "w", encoding="utf-8") as f:
            f.write(response)
        print(f"第{index}页的电影保存成功")

    # Top-level workflow: fetch every page and save it.
    def main(self):
        """Fetch each page URL and save its HTML; skip failed downloads."""
        for index, url in enumerate(self.get_url_data(), start=1):
            response = self.get_page_index(url)
            if response is None:
                # A failed request would otherwise crash f.write(None).
                continue
            self.write_movie_data(response, index)


if __name__ == '__main__':
    # Run the scraper only when executed directly as a script.
    DoubanMovie().main()
