"""
@file:maoyan_spider.py
@time:2023/09/17 15:30
@author:Jiajia Zhan
"""

import re
import time
import requests
import json


# 网页的爬取

def getPage(url):
    """Download one Maoyan board page and return its HTML text.

    :param url: full URL of the Maoyan board page to fetch
    :return: decoded HTML string on HTTP 200, otherwise None
    """
    # Full browser-like header set (including Cookie) to get past Maoyan's
    # anti-scraping checks; without these the site serves a verification page.
    headers = {

        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': '__mta=121355319.1694843167294.1694963354899.1694963560280.41; uuid_n_v=v1; uuid=54D0A510545411EE92649F22845F14990EB80D427623499F9DE8121F8EEBDCBF; _csrf=b743da5adf5a54cc9ee18201e2e01da03ce7b6703ecc19b82f979e4b2f181858; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_cuid=18a9c866130c8-0943c575b3fe99-26031f51-1fa400-18a9c866130c8; _lxsdk=54D0A510545411EE92649F22845F14990EB80D427623499F9DE8121F8EEBDCBF; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1694843167; __mta=121355319.1694843167294.1694892056175.1694892059597.14; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1694963560; _lxsdk_s=18aa37fc765-7d4-6b5-432%7C%7C89',
        'Host': 'www.maoyan.com',
        'Referer': 'https://tfz.maoyan.com/',
        'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
        'Sec-Ch-Ua-Mobile': '?0',
        'Sec-Ch-Ua-Platform': '"Windows"',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-site',
        'Sec-Fetch-User': '?1',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'

    }

    try:
        # Keep the try body minimal: only the request itself can raise.
        # timeout prevents a stalled connection from hanging the crawler.
        res = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Network error / timeout — caller treats None as "fetch failed".
        return None
    if res.status_code == 200:
        res.encoding = "utf-8"  # force UTF-8 so Chinese text decodes correctly
        return res.text
    return None


# 网页的解析
def parsePage(html):
    """Parse one Maoyan Top-100 board page and yield a dict per movie.

    Each entry in the page looks like (abridged):

        <i class="board-index board-index-3">3</i>
        <img data-src="https://...b722b.jpg" alt="海上钢琴师" class="board-img" />
        <p class="star">主演：蒂姆·罗斯,...</p>
        <p class="releasetime">上映时间：2019-11-15</p>
        <p class="score"><i class="integer">9.</i><i class="fraction">3</i></p>

    :param html: full HTML text of one board page
    :return: generator of dicts keyed 排名/图片/电影名/主演/上映时间/评分
             (rank / poster URL / title / cast / release date / score)
    """
    pattern = (
        '<i class="board-index board-index-[0-9]+">([0-9]+)</i>.*?'
        '<img data-src="(.*?)" alt="(.*?)" class="board-img" />.*?'
        '<p class="star">(.*?)</p>.*?'
        '<p class="releasetime">(.*?)</p>.*?'
        '<p class="score"><i class="integer">([0-9.]+)</i>'
        '<i class="fraction">([0-9])</i></p>'
    )
    # re.S lets '.' match newlines, since each entry spans several lines.
    items = re.findall(pattern, html, re.S)

    # An empty match usually means Maoyan served its anti-bot verify page
    # instead of the real board, so tell the user to refresh.
    if len(items) > 0:
        print("爬取完成！")
    else:
        print("请刷新一下猫眼页面！")

    for item in items:
        # Yielding one record at a time keeps memory flat; the caller
        # writes each record out immediately.
        yield {

            '排名': item[0],
            '图片': item[1],
            '电影名': item[2],
            # slice off the "主演：" (3 chars) / "上映时间：" (5 chars) prefixes
            '主演': item[3].strip()[3:],
            '上映时间': item[4].strip()[5:],
            # score is split into integer and fraction <i> tags; rejoin them
            '评分': item[5] + item[6],

        }


# 爬取的数据存储在txt中
def writeFile(content):
    """Append one scraped record to ./maoyan.txt as a UTF-8 JSON line.

    :param content: dict describing a single movie
    """
    record = json.dumps(content, ensure_ascii=False)
    with open("./maoyan.txt", mode="a", encoding="utf-8") as out:
        out.write(record + "\n")



# Scheduler (crawler driver)

"""
    Crawler driver: fetch, parse and persist one board page.
    offset: paging offset into the Top-100 board
"""


def main(offset):
    """Crawl one board page at the given offset and persist every movie.

    :param offset: paging offset (0, 10, 20, ...) into the Top-100 board
    """
    url = "https://maoyan.com/board/4?offset=" + str(offset)

    # Fetch the page; a falsy result means the request failed or was blocked.
    html = getPage(url)
    if not html:
        return

    # Parse lazily and write each record as soon as it is produced.
    for film in parsePage(html):
        writeFile(film)


# 程序主入口
# Script entry point: crawl all 10 pages (100 movies) of the board.
if __name__ == '__main__':
    for page in range(10):
        print("正在爬取第", page + 1, "页")

        main(offset=page * 10)
        # Pause between pages to stay under the site's rate limits.
        time.sleep(2)
    print("爬取完成")



