''' 
@文件：3.使用进程爬取芒果电影信息.py
@作者：Miss丶念
@时间：2025/3/22：20:14
'''

import requests
from multiprocessing import Process, JoinableQueue as Queue
import redis
import pymongo
import hashlib



class MangoSpider:
    """Multiprocess spider for the Mango TV (mgtv.com) movie listing API.

    Pipeline: put_params -> get_data -> parse_data -> save_data, with
    JoinableQueue instances handing work between daemon worker processes.
    Items are de-duplicated via an MD5 fingerprint kept in a Redis set;
    unseen items are persisted to MongoDB (pyspider.process_mango_movie).
    """

    # NOTE(review): these clients are created in the parent process and
    # inherited by the forked workers. pymongo documents MongoClient as NOT
    # fork-safe — consider constructing the clients inside the worker
    # process (e.g. at the top of save_data) instead. Left in place pending
    # confirmation that this has not caused issues in practice.
    redis_cli = redis.Redis()
    mongo_client = pymongo.MongoClient()
    mongo_db = mongo_client['pyspider']['process_mango_movie']

    def __init__(self):
        # Listing endpoint; the query string is supplied per request.
        self.url = 'https://pianku.api.mgtv.com/rider/list/pcweb/v3'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
            'Referer': 'https://www.mgtv.com/'
        }

        # Work queues: page params -> raw JSON pages -> parsed movie dicts.
        self.params_queue = Queue()
        self.json_queue = Queue()
        self.content_queue = Queue()

    def put_params(self, pages=5):
        """Enqueue one query-parameter dict per listing page.

        :param pages: number of pages to crawl. Defaults to 5, matching the
            original hard-coded ``range(1, 6)``.
        """
        # Only "pn" (page number) changes between pages; build the
        # invariant part once instead of rebuilding it every iteration.
        base_params = {
            "allowedRC": "1",
            "platform": "pcweb",
            "channelId": "2",
            "pc": "80",
            "hudong": "1",
            "_support": "10000000",
            "kind": "19",
            "area": "10",
            "year": "all",
            "chargeInfo": "a1",
            "sort": "c2",
            "feature": "all"
        }
        for page in range(1, pages + 1):
            self.params_queue.put({**base_params, "pn": page})

    def get_data(self):
        """Worker loop: fetch the listing JSON for each params dict.

        ``task_done()`` runs in a ``finally`` block: if the request or JSON
        decoding raised with no handler, the daemon worker would die before
        marking the task, and ``params_queue.join()`` in ``main()`` would
        block forever.
        """
        while True:
            params = self.params_queue.get()
            try:
                response = requests.get(self.url, headers=self.headers, params=params).json()
                self.json_queue.put(response)
            except (requests.RequestException, ValueError) as err:
                # A failed page is skipped; the rest of the pipeline
                # simply receives fewer items.
                print(f'请求失败: {err}')
            finally:
                # Mark the page-params task as finished (success or not).
                self.params_queue.task_done()

    def parse_data(self):
        """Worker loop: extract title/subtitle/story from each JSON page."""
        while True:
            json_data = self.json_queue.get()
            try:
                for movie in json_data['data']['hitDocs']:
                    item = {
                        'title': movie['title'],
                        'subtitle': movie['subtitle'],
                        'story': movie['story'],
                    }
                    # Hand the finished item to the save worker.
                    self.content_queue.put(item)
            except (KeyError, TypeError) as err:
                # Unexpected payload shape: skip this page rather than
                # killing the worker and deadlocking json_queue.join().
                print(f'解析失败: {err}')
            finally:
                # Mark the JSON-page task as finished (this is the JSON
                # queue, not the page-params queue).
                self.json_queue.task_done()

    @staticmethod
    def get_md5(content):
        """Return the hex MD5 fingerprint of ``content`` for deduplication.

        :param content: any object; its ``str()`` form is hashed.
        :return: 32-character hexadecimal digest string.
        """
        return hashlib.md5(str(content).encode("utf-8")).hexdigest()

    def save_data(self):
        """Worker loop: persist unseen items to MongoDB.

        ``redis.sadd`` returns 1 when the fingerprint is new (insert the
        item) and 0 when it was already in the set (skip as duplicate).
        """
        while True:
            content = self.content_queue.get()
            try:
                md5 = self.get_md5(content)
                if self.redis_cli.sadd("mongo_movie:filter", md5):
                    self.mongo_db.insert_one(content)
                    print(f"{content['title']} 保存成功")
                else:
                    print(f"{content['title']} 已存在")
            finally:
                # Always mark the task, or content_queue.join() deadlocks
                # after a storage error kills this daemon worker.
                self.content_queue.task_done()

    def close_spider(self):
        """Close the MongoDB and Redis connections before exiting."""
        print('爬虫即将退出, 准备关闭数据库链接...')
        self.mongo_client.close()
        self.redis_cli.close()

    def main(self):
        """Start all workers, wait for the pipeline to drain, then clean up."""
        process_list = list()
        # Finite producer: plain (non-daemon) process, joined explicitly.
        put_params_process = Process(target=self.put_params)
        put_params_process.start()

        # Three fetch workers share the params queue.
        for _ in range(3):
            process_list.append(Process(target=self.get_data))
        # One parser and one saver complete the pipeline.
        process_list.append(Process(target=self.parse_data))
        process_list.append(Process(target=self.save_data))

        # Infinite worker loops run as daemons so they die with the parent.
        for process_obj in process_list:
            process_obj.daemon = True
            process_obj.start()

        # Wait until every page's params have been produced...
        put_params_process.join()

        # ...then block until every queue reports all tasks done.
        for queue in [self.params_queue, self.json_queue, self.content_queue]:
            queue.join()

        self.close_spider()
        print('爬虫任务完成...')


if __name__ == '__main__':
    # Script entry point: build the spider and run the full pipeline.
    MangoSpider().main()
