import requests
import re,time,json
import os

def spride(url):
    """Fetch one Maoyan top-100 board page and yield a dict per movie.

    Args:
        url: Board page URL, e.g. ``http://maoyan.com/board/4?offset=0``.

    Yields:
        dict with keys ``index``, ``image``, ``title``, ``actor``,
        ``time``, ``score`` (all values are strings).

    Raises:
        requests.HTTPError: if the server answers with a 4xx/5xx status.
    """
    # Fix: the original put the literal text "User-Agent:" inside the header
    # VALUE and dropped all spaces, producing a malformed User-Agent string.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
    }

    res = requests.get(url, headers=headers)
    # Fail fast on an error response instead of silently scraping an error page.
    res.raise_for_status()

    html = res.content.decode("utf-8")

    # One match per movie entry; re.S lets '.' cross newlines in the markup.
    # Compiled once and split across lines for readability.
    pat = re.compile(
        r'<i class="board-index board-index-[0-9]+">([0-9]+)</i>.*?'
        r'<img data-src="(.*?)" alt="(.*?)" class="board-img" />.*?'
        r'<p class="star">(.*?)</p>.*?'
        r'<p class="releasetime">(.*?)</p>.*?'
        r'<i class="integer">([0-9\.]+)</i><i class="fraction">([0-9]+)</i>',
        re.S,
    )

    for v in pat.findall(html):
        yield {
            'index': v[0],
            'image': v[1],
            'title': v[2],
            # Drop the leading "主演：" (3-char) label from the actor text.
            'actor': v[3].strip()[3:],
            # Drop the leading "上映时间：" (5-char) label from the date text.
            'time': v[4].strip()[5:],
            # Integer and fractional score parts are split in the HTML; rejoin.
            'score': v[5] + v[6],
        }

# Append (not truncate) so every record survives a multi-page crawl;
# opening with 'w' would leave only the final record in the file.
def writeFile(content):
    """Append *content* to ./result.txt as one JSON line (UTF-8, non-ASCII kept)."""
    record = json.dumps(content, ensure_ascii=False) + "\n"
    with open("./result.txt", mode='a', encoding='utf-8') as out:
        out.write(record)

def main(offset):
    """Crawl one page of the board (10 movies) and persist every record."""
    page_url = f'http://maoyan.com/board/4?offset={offset}'
    for record in spride(page_url):
        writeFile(record)


# Script entry point: crawl all 10 pages of the top-100 board.
if __name__ == '__main__':

    # The writer appends on every run, so clear any previous output first.
    result_path = './result.txt'
    if os.path.isfile(result_path):
        os.remove(result_path)

    # 10 movies per page -> offsets 0, 10, ..., 90; be polite between requests.
    for offset in range(0, 100, 10):
        main(offset=offset)
        time.sleep(1)

    print("猫眼电影数据，已写入result.txt文件！")

