import requests
import re
import json
from requests.exceptions import RequestException


# Fetch the HTML source of one board page.
def getPage(url):
    """Request *url* and return the page body decoded as UTF-8.

    Returns None on a non-200 status code or any network error, so the
    caller can skip a failed page instead of crashing.
    """
    try:
        # Well-formed User-Agent value: the header name must not be
        # repeated inside the value, and tokens need their spaces.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) '
                          'Gecko/20100101 Firefox/4.0.1'
        }
        # timeout keeps the crawler from hanging forever on a dead host;
        # requests.Timeout is a RequestException subclass, caught below.
        res = requests.get(url, headers=headers, timeout=10)
        if res.status_code == 200:
            return res.content.decode('utf-8')
        return None
    except RequestException:
        return None


# Parse one page's HTML and persist the extracted movie records.
def analysPage(html):
    """Extract movie fields from a Maoyan board page and append them to
    ./result.txt, one JSON-encoded line per movie.

    html: the page source as a str.  Each record is also printed.
    """
    # Rank, e.g. <i class="board-index board-index-1">1</i>
    indexpat = ' <i class="board-index .*>(.*?)</i>'
    # Poster image URL
    picpat = '<img data-src="(.*?)" alt=".*" class="board-img" />'
    # Movie title
    titlepat = '<p class="name"><a .*>(.*?)</a></p>'
    # Cast line -- the text spans newlines, hence re.S below
    starpat = '<p class="star">(.*?)</p>'
    # Release date
    relpat = '<p class="releasetime">(.*?)</p>'
    # Score, split by the site into integer and fraction parts
    scorepat = '<i class="integer">(.*?)</i><i class="fraction">(.*?)</i>'

    indexlist = re.findall(indexpat, html)

    piclist = re.findall(picpat, html)

    titlelist = re.findall(titlepat, html)

    starlist = re.findall(starpat, html, re.S)

    rellist = re.findall(relpat, html)

    scorelist = re.findall(scorepat, html)

    # Iterate over however many complete records were actually found
    # instead of a hard-coded 10 -- a short or partial page would
    # otherwise raise IndexError.
    count = min(len(indexlist), len(titlelist), len(starlist),
                len(rellist), len(scorelist))

    # Append mode ('a'): the file is created if missing.  Open once for
    # the whole page rather than once per record.
    with open("./result.txt", 'a', encoding='utf-8') as f:
        for i in range(count):
            # .strip() drops the newline/indent the site wraps the cast
            # line in (see the old debug print above the loop).
            info = (indexlist[i] + ' ，' + titlelist[i] + '\t'
                    + starlist[i].strip() + '' + '\t' + rellist[i] + ' ，'
                    + "\n" + '\t' + '评分：' + scorelist[i][0]
                    + scorelist[i][1] + '\n')
            print(info)
            # ensure_ascii=False keeps the Chinese characters readable;
            # json.dumps would otherwise emit \uXXXX escapes.
            f.write(json.dumps(info, ensure_ascii=False) + "\n")


# Crawler controller: walk the 10 board pages and scrape each one.


def main():
    """Fetch all 10 pages of the Maoyan Top-100 board and parse each."""
    for index in range(10):
        # Each page holds 10 movies; the offset query picks the page.
        offset = index * 10
        url = 'http://maoyan.com/board/4?offset=' + str(offset)
        # Fetch one page of source code
        html = getPage(url)
        # getPage returns None on a failed request; skip the page
        # instead of passing None into re.findall and crashing.
        if html is not None:
            analysPage(html)



# Run the crawler only when executed as a script (not on import).
if __name__ == '__main__':
    main()

# NOTE(review): the block below is leftover manual-test scaffolding kept
# as a bare string literal -- it is evaluated and discarded at import time.
'''
测试
index = input("输入第几页：")
offset = int(index) * 10
url_1 = 'http://maoyan.com/board/4?offset=' + str(offset)
print(url_1)
url = 'http://maoyan.com/board/4?offset=20'
html = getPage(url)
analysPage(html)
'''
