# Crawl the Maoyan TOP100 movie board (https://maoyan.com/board/4) with requests

import requests
import re,time,json


def getPage(offset):
    '''Fetch one page of the Maoyan TOP100 board.

    offset: pagination offset sent as the "offset" query parameter
    (the board shows 10 movies per page, so callers pass 0, 10, ..., 90).
    Returns the UTF-8 decoded HTML on HTTP 200, otherwise None.
    '''
    url = 'https://maoyan.com/board/4'
    data = {}
    data['offset'] = str(offset)
    # A browser-like User-Agent is needed; the site rejects the default
    # python-requests one.
    headers = {
        'User-Agent':'User-Agent:Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1'
    }
    try:
        res = requests.get(url, params=data, headers=headers)
    except requests.RequestException:
        # BUG FIX: the original caught a bare `RequestException`, a name
        # never imported, so any network error raised NameError instead
        # of falling back to None.
        return None
    if res.status_code == 200:
        return res.content.decode('utf-8')
    return None
    
def parsePage(html):
    '''Parse one board page and yield one dict per movie.

    Each dict carries: ranking index, poster image URL, title, actor
    list (leading "主演:" label removed), release time (leading
    "上映时间:" label removed) and the score reassembled from its
    integer and fraction halves.
    '''
    # Compiled once per call and split across lines for readability;
    # the concatenated pattern is identical to the original one-liner.
    pat = re.compile(
        '<i class="board-index board-index-[0-9]+">(.*?)</i>.*?'
        '<img data-src="(.*?)" alt="(.*?)" class="board-img" />.*?'
        '<p class="star">(.*?)</p>.*?'
        '<p class="releasetime">(.*?)</p>.*?'
        '<p class="score"><i class="integer">(.*?)</i>'
        '<i class="fraction">(.*?)</i></p>',
        re.S,  # DOTALL: each movie entry spans multiple lines of HTML
    )
    for index, image, title, star, rtime, integer, fraction in pat.findall(html):
        yield {
            'index': index,
            'image': image,
            'title': title,
            # drop the 3-char "主演:" label after trimming whitespace
            'actor': star.strip()[3:],
            # drop the 5-char "上映时间:" label
            'time': rtime.strip()[5:],
            'score': integer + fraction,
        }

def writeFile(content):
    '''Append one record to ./result.txt as a single line of JSON.'''
    # ensure_ascii=False keeps Chinese text readable in the output file
    # instead of \uXXXX escape sequences.
    serialized = json.dumps(content, ensure_ascii=False)
    with open('./result.txt', 'a', encoding='utf-8') as fp:
        fp.write(serialized + '\n')


def main(offset):
    '''Fetch one board page at the given offset and persist every parsed record.'''
    page = getPage(offset)
    if not page:
        return  # fetch failed or empty body — nothing to parse
    for record in parsePage(page):
        writeFile(record)


if __name__ == '__main__':
    # The board paginates 10 movies per page: offsets 0, 10, ..., 90.
    for page_no in range(10):
        main(offset=page_no * 10)
        time.sleep(1)  # throttle requests so the site doesn't block us



