# Extract the target content from the page and format it
import re

import requests


def parse_maoyan(html):
    """Extract movie entries from a Maoyan "Top 100" board page.

    Args:
        html: Page source as a string, or ``None``/empty (e.g. when the
            download failed), in which case nothing is yielded.

    Yields:
        dict: One movie per ``<dd>`` entry with keys ``index``, ``image``,
        ``title``, ``actor``, ``time`` and ``score``.
    """
    # Guard: request_maoyan returns None on failure; re.findall would
    # raise TypeError on a non-string, so bail out early instead.
    if not html:
        return
    # re.S lets '.' match newlines so one pattern spans a whole <dd> block.
    pattern = re.compile(
        '<dd>.*?index.*?>(.*?)</i>.*?<img data-src="(.*?)".*?</a>.*?name"><a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>.*?integer">(.*?)</i>.*?fraction">(.*?)</i>',
        re.S)
    for index, image, title, star, release, integer, fraction in re.findall(pattern, html):
        star = star.strip()
        release = release.strip()
        yield {
            'index': index,
            'image': image,
            'title': title.strip(),
            # Drop the leading labels scraped with the values:
            # "主演:" (3 chars) before the actors, "上映时间:" (5 chars)
            # before the release date. Length is checked on the stripped
            # string — the same string the slice is taken from.
            'actor': star[3:] if len(star) > 3 else '',
            'time': release[5:] if len(release) > 5 else '',
            # The score is split across two <i> tags (integer + fraction).
            'score': integer.strip() + fraction.strip()
        }


# Fetch a single page
def request_maoyan(url):
    """Download *url* and return the response body, or None on failure.

    A desktop-browser User-Agent is sent because the site rejects the
    default python-requests client. Any non-200 status code or network
    error is treated as failure.

    Args:
        url: Absolute URL of the board page to fetch.

    Returns:
        str | None: Page text on HTTP 200, otherwise None.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/79.0.3945.88 Safari/537.36'}
    try:
        # timeout keeps the call from hanging forever on a dead server;
        # without it requests.get may block indefinitely.
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response.text
    # Explicit None for non-200 responses (was an implicit fall-through).
    return None


def main(offset):
    """Crawl one board page at *offset* and print each parsed movie.

    Args:
        offset: Paging offset appended to the board URL query string.
    """
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    html = request_maoyan(url)
    # request_maoyan returns None on any failure; skip parsing in that case
    # instead of crashing inside the regex parser.
    if html is None:
        return
    # parse_maoyan is a generator, so movies stream one at a time.
    for movie in parse_maoyan(html):
        print(movie)
    # TODO: persist results to disk (write_to_file) instead of printing.


def square():
    """Yield the squares of 0 through 3 (small generator demo)."""
    for value in range(4):
        yield value * value


if __name__ == '__main__':
    # Script entry point: crawl the board page at offset=1 and print results.
    main(1)
    # for x in square():
    #     print(x)
