#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author：albert time:2019/9/3
import  requests,json,time,csv
from fake_useragent import  UserAgent
from datetime import  datetime,timedelta

def get_content(url):
    """Fetch *url* with a random User-Agent.

    Returns the response body text, or None when the request fails
    (timeout, connection error, or HTTP error status).
    """
    ua = UserAgent().random
    try:
        resp = requests.get(url, headers={'User-Agent': ua}, timeout=3)
        resp.raise_for_status()  # treat 4xx/5xx as failures, not data
        return resp.text
    except requests.RequestException:
        # Catch only request-related errors and return an explicit None.
        # The original bare `except: pass` also hid programming errors
        # (e.g. NameError) and relied on an implicit None fall-through.
        return None
def Process_data(html):
    """Parse a Maoyan comment JSON payload into a list of row lists.

    Each row holds, in order: id, nickName, userLevel, cityName,
    content, score, startTime — ready to hand to csv.writerows.
    """
    fields = ('id', 'nickName', 'userLevel', 'cityName',
              'content', 'score', 'startTime')
    comments = json.loads(html)['cmts']
    return [[comment[key] for key in fields] for comment in comments]

if __name__ == '__main__':
    # Crawl Maoyan comments for movie 1211270, paging backwards in time
    # from "now" to end_time, appending each page to comments_1.csv.
    start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # fixed: was `start_time = start_time = ...`
    end_time = '2019-07-26 08:00:00'

    # Lexicographic comparison is valid for this fixed 'YYYY-mm-dd HH:MM:SS'
    # format; end_time is already a str, so no str() wrapper is needed.
    while start_time > end_time:
        url = ('http://m.maoyan.com/mmdb/comments/movie/1211270.json'
               '?_v_=yes&offset=0&startTime=' + start_time.replace(' ', '%20'))
        print('........')
        html = get_content(url)
        if html is None:
            # get_content signals failure by returning None (it never raises),
            # so the old try/except here was dead code and json.loads(None)
            # would crash later. Retry once, then give up cleanly.
            time.sleep(0.5)
            html = get_content(url)
            if html is None:
                break
        else:
            time.sleep(1)  # be polite between successful requests
        comments = Process_data(html)
        if comments:
            # Advance the cursor using the 15th comment's timestamp when the
            # page is long enough (original paging heuristic), falling back to
            # the last comment so short pages no longer raise IndexError.
            anchor = comments[14][-1] if len(comments) > 14 else comments[-1][-1]
            cursor = datetime.strptime(anchor, '%Y-%m-%d %H:%M:%S') - timedelta(seconds=1)
            start_time = cursor.strftime('%Y-%m-%d %H:%M:%S')
            print(comments)
            with open("comments_1.csv", "a", encoding='utf-8', newline='') as csvfile:
                csv.writer(csvfile).writerows(comments)
        else:
            # An empty page means the cursor can never advance; stop instead
            # of looping forever on the same URL.
            break