# -*- coding: utf-8 -*-
# Author: xiantingDeng
# File: 09_猫眼电影爬取实战.py
# Time: 13:49

from urllib import request,parse,error
import re,json,time

def GetPage(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Returns None when the HTTP status is not 200 or the request fails
    with a URLError (best-effort: callers check for a falsy result).
    """
    try:
        # BUGFIX: the original header value was garbled — it contained the
        # literal text "User-Agent':'User-Agent:..." pasted into the value.
        # Send a clean, well-formed browser User-Agent instead.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"
        }
        req = request.Request(url, headers=headers)
        # Use a context manager so the connection is always closed.
        with request.urlopen(req) as res:
            if res.getcode() == 200:
                return res.read().decode("utf-8")
            return None
    except error.URLError:
        # Network failure / bad URL: signal "no page" rather than crash.
        return None

def ParsePage(html):
    """Parse one Maoyan board page and yield one dict per movie entry.

    Each dict carries: index (rank), image (poster URL), title,
    actor (text after the 3-char '主演：' prefix), time (text after the
    5-char '上映时间：' prefix) and score (integer + fraction parts joined).
    """
    pattern = re.compile(
        r'<i class="board-index board-index-[0-9]+">([0-9]+)</i>.*?'
        r'<img data-src="(.*?)" alt="(.*?)" class="board-img" />.*?'
        r'<p class="star">(.*?)</p>.*?'
        r'<p class="releasetime">(.*?)</p>.*?'
        r'<i class="integer">([0-9\.]+)</i><i class="fraction">([0-9]+)</i>',
        re.S,
    )
    for rank, poster, name, star, release, whole, frac in pattern.findall(html):
        yield {
            'index': rank,
            'image': poster,
            'title': name,
            'actor': star.strip()[3:],   # drop the '主演：' label
            'time': release.strip()[5:],  # drop the '上映时间：' label
            'score': whole + frac,        # e.g. '9.' + '5' -> '9.5'
        }

def WriteFile(conten, path="./result.txt"):
    """Append *conten* to *path* as one JSON line (JSON Lines format).

    ``ensure_ascii=False`` keeps Chinese text readable in the output file.
    *path* defaults to the original hard-coded "./result.txt" so existing
    callers are unaffected.  (NOTE: parameter name "conten" kept as-is to
    preserve the public interface; it is a typo for "content".)
    """
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(conten, ensure_ascii=False) + "\n")


def main(offset):
    """Crawl one board page at the given *offset* and persist each record."""
    page = GetPage(f"http://maoyan.com/board/4?offset={offset}")
    if not page:
        # Fetch failed or non-200 response: nothing to parse.
        return
    for record in ParsePage(page):
        WriteFile(record)

if __name__ == '__main__':
    # BUGFIX: "import random" was inside the loop body, re-executed on
    # every iteration; hoist it out (imports are idempotent but this was
    # needless work and poor style).
    import random

    # Board has 10 pages of 10 entries each; offset advances 10 per page.
    for i in range(10):
        main(offset=i * 10)
        # Random 0-1s pause between requests to avoid hammering the site.
        time.sleep(random.randint(0, 1))