# -*- coding: utf-8 -*-
# @Time : 2021/09/30 23:00
'''
Crawl the Maoyan movies top-100 board (https://maoyan.com/board/4).
'''

import requests
import re
import json
from multiprocessing import Pool
from requests.exceptions import RequestException

def get_one_page(url, proxies, headers):
    """Fetch one page and return its HTML text, or None on any failure.

    Args:
        url: Page URL to fetch.
        proxies: Proxy mapping passed to requests (None values disable proxies).
        headers: HTTP headers (Cookie / Referer / User-Agent expected by Maoyan).

    Returns:
        The response body as text on HTTP 200, otherwise None (including
        on any network error).
    """
    try:
        # requests has no default timeout; without one a stalled connection
        # would hang the whole crawl forever. A timeout raises a
        # RequestException subclass, so the existing handler covers it.
        response = requests.get(url, proxies=proxies, headers=headers,
                                timeout=10)
        if response.status_code == requests.codes.ok:
            return response.text
        return None
    except RequestException:
        return None

def parse_one_page(html):
    """Extract movie records from one Maoyan board page.

    Scans *html* with a single regex and yields one dict per movie with
    the keys: ranking, title, actor, time, score, image.
    """
    movie_re = re.compile(
        '<dd>.*?>(.*?)</i>.*?<img data-src="(.*?)".*?title="'
        '(.*?)".*?<p class="star">(.*?)</p>.*?>(.*?)</p>.*?'
        'integer">(.*?)</i>.*?fraction">(.*?)</i></p>',
        re.S,
    )
    # Unpack each 7-tuple by name instead of indexing positionally.
    for ranking, image, title, actor, show_time, integer, fraction in \
            movie_re.findall(html):
        yield {
            'ranking': ranking,
            'title': title,
            'actor': actor.strip(),
            'time': show_time.strip(),
            # The score is rendered as two <i> elements, e.g. "9." + "5".
            'score': integer + fraction,
            'image': image,
        }

def write_to_file(content):
    """Append *content* as one JSON line to result.txt.

    Non-ASCII characters (Chinese titles/actors) are written verbatim
    rather than as \\uXXXX escapes.
    """
    line = json.dumps(content, ensure_ascii=False)
    with open('result.txt', 'a', encoding='utf-8') as out:
        out.write(line + '\n')

def main(offset):
    """Crawl one page of the Maoyan top-100 board and append it to result.txt.

    Args:
        offset: Pagination offset (0, 10, ..., 90). Page 1 uses the bare
            board URL; later pages use the ?offset= query parameter.
    """
    headers = {
        'Cookie': '__mta=219111078.1633014629951.1633015166209.1633015171679.8; uuid_n_v=v1; uuid=8C681AE0220011ECBC107F1275B25E5714089A8E57504D8AA4B77588EBFAE204; _csrf=3aa00ec8e81f35194eade88aed6d92f9509d3af32308bfe0afc23a5577a682f7; _lxsdk_cuid=17c3741c5ce83-02bf972df70787-b7a1a38-144000-17c3741c5cf7f; _lxsdk=8C681AE0220011ECBC107F1275B25E5714089A8E57504D8AA4B77588EBFAE204; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; lt=cDRGAiD590HEuLB0KWJvCtlw2UIAAAAAvQ4AANUtyezErB7AolyB3XAj7kY_acuBtvtsDYxGq3lVTifz8VdI8FTNYqjsMak93ihsEA; lt.sig=zSQNtx5bLYRxVa79xzZ3lxouc5o; uid=1763233796; uid.sig=rLBuq9Rh_YvdKvWUx40_GKAmgOU; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1633014743,1633014754,1633014766,1633014911; __mta=219111078.1633014629951.1633014910965.1633015166209.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1633015172; _lxsdk_s=17c3741c5d1-2f1-72d-c89%7C%7C26',
        'Host': 'maoyan.com',
        'Referer': 'https://maoyan.com/board',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'
        }
    # Explicit None entries disable any system-level proxy configuration.
    proxies = {"http": None, "https": None}
    # Page 1 has no ?offset= parameter. The original code fell through
    # after handling offset == 0 and fetched ?offset=0 as well, writing
    # page 1 to result.txt twice; the else branch fixes that.
    if offset == 0:
        url = 'https://maoyan.com/board/4'
    else:
        url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url, proxies, headers)
    # get_one_page returns None on network errors / non-200 responses;
    # the regex parser would raise TypeError on None, so skip this page.
    if html is None:
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)

if __name__ == '__main__':
    # Fetch the 10 board pages sequentially (offsets 0, 10, ..., 90).
    for i in range(10):
        main(i*10)
    # Multiprocessing variant: unlike the sequential loop above, pages are
    # written to the file in completion order, not page order — whichever
    # worker finishes first writes first.
    # pool = Pool()
    # pool.map(main, [i*10 for i in range(10)])

