# -*- coding: utf-8 -*-
# @Time : 2022/1/13 0013 16:40
# @Author : 小猿圈爬虫
# @Project : 1.达内_正则_猫眼爬虫案例
import re

import requests
import os
import random
import time

class MaoyanSpider:
    """Scrape the Maoyan Top-100 movie board and print name/cast/release date."""

    def __init__(self):
        """Set up the target URL, request headers, and a scraped-item counter."""
        # Board URL; the page offset is supplied via query params in run().
        self.url = 'https://www.maoyan.com/board/4'
        # Spoof a desktop-browser User-Agent so the site serves the normal page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'
        }
        # Running count of movies extracted across all pages.
        self.i = 0

    def get_html(self, url, params):
        """Fetch one board page and hand its HTML text to the parser.

        :param url: page URL to request
        :param params: query-string dict, e.g. {'offset': 10}
        """
        html = requests.get(url=url, params=params, headers=self.headers).text
        self.parse_html(html)

    def parse_html(self, html):
        """Extract (name, star, releasetime) tuples from the page HTML."""
        # re.S lets '.' span newlines so one pattern covers a whole movie card.
        # Fix: the original pattern required a literal space after the final
        # </p>, which silently dropped entries when the HTML lacked one.
        regex = (
            r'<div class="movie-item-info">.*?title="(.*?)"'
            r'.*?<p class="star">(.*?)</p>'
            r'.*?<p class="releasetime">(.*?)</p>'
        )
        pattern = re.compile(regex, re.S)
        r_list = pattern.findall(html)
        self.save_html(r_list)

    def save_html(self, r_list):
        """Print each extracted record and advance the item counter.

        :param r_list: list of (name, star, releasetime) string tuples
        """
        for name, star, release in r_list:
            # Build a fresh dict per record so rows never alias each other
            # (the original reused one dict across the whole loop).
            item = {
                'name': name.strip(),
                'star': star.strip(),
                'time': release.strip(),
            }
            print(item)
            self.i += 1

    def run(self):
        """Program entry point: crawl all ten board pages with a polite delay."""
        # Offsets 0, 10, ..., 90 cover the full top-100 board.
        for offset in range(0, 91, 10):
            params = {
                'offset': offset
            }
            self.get_html(self.url, params)
            # Throttle requests to avoid hammering the server / anti-bot bans.
            time.sleep(random.randint(1, 2))

def main():
    """Script entry point: run the spider and report how many movies it found."""
    spider = MaoyanSpider()
    spider.run()
    print('电影数量：', spider.i)


if __name__ == "__main__":
    main()