import csv
import re
from urllib import request
import time
import random


# 没有爬取成功，可能因为网站做了验证？
class MaoYan:
    def __init__(self):
        self.url = 'https://www.maoyan.com/board/4?offset={}'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'}
        self.i = 0
        #     打开文件，初始化写入对象
        self.f = open('maoyan.csv', 'a')
        self.writer = csv.writer(self.f)
        self.all_list = []

    def get_html(self, url):
        req = request.Request(url=url, headers=self.headers)
        res = request.urlopen(req)
        html = res.read().decode()
        print('ll'+html)
        self.parse_html(html)

    def parse_html(self, html):
        regex = '.*?<div class="movie-item-info">.*?title="(.*?)".*?<p class="star">(.*?)</p><p class="releasetime">(.*?)</p>'
        pattern = re.compile(regex, re.S)
        # r_list:[('霸王别姬，'张国荣','1993''),(..)]
        r_list = pattern.findall(html)
        print(r_list)
        self.save_html(r_list)

    def save_html(self, r_list):
        item = {}
        for r in r_list:
            # 去掉俩边空格
            item['name'] = r[0].split()
            item['star'] = r[1].split()
            item['time'] = r[2].split()
            # 打印输出
            print(item)
            self.i += 1
            # 去掉空格
            file_t = (r[0].strip(), r[1].strip(), r[2].strip())
            # 保存在大列表
            self.all_list.append(file_t)

    def run(self):
        for offset in range(0, 91, 10):
            url = self.url.format(offset)
            print(url)
            self.get_html(url)
            # 控制抓取频率
            time.sleep(random.randint(1, 2))
        self.writer.writerows(self.all_list)
        self.f.close()


if __name__ == '__main__':
    # Build the crawler, fetch all pages, then report how many movies landed.
    crawler = MaoYan()
    crawler.run()
    print('电影数量：', crawler.i)
