import requests,re,time,json

def qingqiu(url, timeout=10):
    """Download *url* and return the response body as text.

    Args:
        url: Page URL to fetch.
        timeout: Seconds to wait for the server before giving up.
            New parameter with a default, so existing callers are
            unaffected; previously the request could hang forever.

    Returns:
        The decoded HTML of the response as a str.
    """
    # Browser-like User-Agent header — presumably needed so the site
    # serves the real page instead of blocking the default requests UA
    # (TODO: confirm against the target site's behavior).
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3452.0 Safari/537.36',
    }
    # Fix: pass an explicit timeout; requests waits indefinitely by default.
    response = requests.get(url, headers=headers, timeout=timeout)
    return response.text

def zhengze(html):
    """Parse a Maoyan board page and yield one dict per movie entry.

    Args:
        html: Raw HTML of one board page.

    Yields:
        dict with str values under the keys 序号 (rank), 图片 (image URL),
        电影名称 (title), 主演 (cast), 时间 (release date), 评分 (score).
    """
    # Fix: the pattern must be a raw string — '\d' inside a plain string
    # literal is an invalid escape sequence (SyntaxWarning on modern
    # Python, a future error). Pre-compile once per call for clarity.
    pat = re.compile(
        r'.*? board-index-\d+">(.*?)</i>.*?<img data-src="(.*?)" alt="(.*?)"'
        r'.*?<p class="star">(.*?)</p>.*?<p class="releasetime">(.*?)</p>'
        r'.*?<i class="integer">(.*?)</i><i class="fraction">(.*?)</i>',
        re.S,  # DOTALL: each movie entry spans multiple lines in the page
    )
    for item in pat.findall(html):
        yield {
            '序号': item[0],
            '图片': item[1],
            '电影名称': item[2],
            '主演': item[3].strip()[3:],   # drop the '主演：' prefix (3 chars)
            '时间': item[4].strip()[5:],   # drop the '上映时间：' prefix (5 chars)
            '评分': item[5] + item[6],     # join integer and fraction parts
        }

def writeFile(content):
    """Append one record to ./res.txt as a single JSON line.

    Args:
        content: Any JSON-serializable object (one movie dict here).
    """
    # Serialize first, then append; ensure_ascii=False keeps Chinese
    # characters readable in the output file.
    line = json.dumps(content, ensure_ascii=False)
    with open("./res.txt", 'a', encoding='utf-8') as out:
        out.write(line + "\n")

def main(offset):
    """Crawl one board page at the given offset and persist every movie.

    Args:
        offset: Pagination offset passed through to the board URL.
    """
    page_url = 'http://maoyan.com/board/4?offset=' + str(offset)
    page_html = qingqiu(page_url)
    # Parse the downloaded page and append each movie record to disk.
    for movie in zhengze(page_html):
        writeFile(movie)


# Script entry point: crawl the ten board pages (offsets 0, 10, ..., 90).
if __name__ == '__main__':
    for page in range(1, 11):
        print('正在写入本地文件res.txt    第%d页' % page)
        main(offset=(page - 1) * 10)
        time.sleep(1)  # pause between pages to avoid hammering the server
    print('完成写入本地。')