# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 16:34:27 2017

@author: ahchpr

filename: re_zero_bili.py
"""

import requests, csv, re, time
from bs4 import BeautifulSoup as BS
from selenium import webdriver
import datetime
from multiprocessing import Pool
import sys   



# Re：从零开始的异世界生活 的总剧情首页
first_url = 'https://bangumi.bilibili.com/anime/3461' 
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'}
one_url = 'https://bangumi.bilibili.com/anime/3461/play#86298'
#history_danmu_url = 'https://comment.bilibili.com/dmroll,time,cid'
#now_danmu_url = 'https://comment.bilibili.com/{}.xml'.format(danmu_id)

def get_danmu_id(url):
    """Resolve the danmu (bullet-comment) file id for one episode.

    Renders the episode play page with PhantomJS and pulls the ``cid``
    out of the rendered HTML; the cid names the comment XML file on
    comment.bilibili.com.

    Args:
        url: episode play-page URL, e.g.
             'https://bangumi.bilibili.com/anime/3461/play#86298'.

    Returns:
        The cid as a string, or None when no 'cid=<digits>&' token is
        present in the page source (the original crashed with IndexError
        in that case).
    """
    driver = webdriver.PhantomJS()
    try:
        driver.get(url)
        time.sleep(3)  # crude wait for the player JS to inject the cid into the DOM
        matches = re.findall(r'cid=(\d+)&', driver.page_source)
        return matches[0] if matches else None
    finally:
        # The original never quit the driver, leaking one PhantomJS
        # subprocess per call (this runs in a 14-worker pool below).
        driver.quit()
    
    

def sele_get_first(url):
    """Render *url* with PhantomJS and return the page HTML as str.

    Args:
        url: page to render (used on the season landing page, whose
             episode list is built by JavaScript).

    Returns:
        The rendered page source as a str.
    """
    driver = webdriver.PhantomJS()
    try:
        driver.get(url)
        time.sleep(5)  # crude wait for the JS-built episode list to render
        # page_source is already str; the original's
        # .encode('utf-8').decode('utf-8') round-trip was a no-op.
        return driver.page_source
    finally:
        driver.quit()  # original leaked the PhantomJS subprocess
   

def sele_get_re_list(page):
    """Scrape the episode links out of the rendered season page.

    Args:
        page: HTML source of the season landing page.

    Returns:
        A list of (href, title) tuples, one per episode anchor that
        carries the 'v1-complete-text' class.
    """
    episode_anchor = re.compile(
        '<a.*?href="(.*?)" title="(.*?)" target.*? class="v1-complete-text"><div class='
    )
    return episode_anchor.findall(page)


def request_get_comment(url):
    """Download one danmu XML file and parse it into rows.

    Args:
        url: a line of the form "<episode> <xml-url>", e.g.
             "3 https://comment.bilibili.com/86298.xml" (episode and URL
             separated by a single space).

    Returns:
        A list of rows, one per <d> element: the comma-split 'p'
        attribute values, then the comment text, then the episode
        number. Empty list when the file contains no comments.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'}
    # Split "<episode> <url>" once instead of calling split() twice.
    episode, _, xml_url = url.partition(" ")
    response = requests.get(url=xml_url.strip(), headers=headers)

    soup = BS(response.text, 'lxml')
    items = soup.find_all('d')
    # Return a plain empty list here: the original returned the bs4
    # ResultSet on this path, so callers got two different types.
    if not items:
        return []

    all_list = []
    for item in items:
        # 'p' holds "time,mode,font,color,stamp,pool,userhash,rowid";
        # append the comment text and the episode number to each row.
        row = item.get('p').split(",")
        row.append(item.string)
        row.append(episode)
        all_list.append(row)
    return all_list
        
        
  
"""将秒转换成固定格式 "hh:mm:ss"
"""      
def sec_to_str(seconds):
    seconds = eval(seconds)
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    dtEventTime = "%02d:%02d:%02d" % (h, m, s)
    return (dtEventTime)
    

"""计算最近30天的每天的时间戳，并返回，用于获取历史弹幕
"""
def time_to_stamp():
    today = datetime.date.today()    
    end_day = datetime.datetime(today.year, today.month, today.day)
    start_day = end_day - datetime.timedelta(30)
    gap_day_sum = 30
    stamp_list = []
    for i in range(1, gap_day_sum):
        tmp = start_day + datetime.timedelta(i)
        stamp_list.append(int(time.mktime(tmp.timetuple())))
        
    return (stamp_list)


def csv_write(tablelist, num):
    """Dump one episode's danmu rows to now<num>.csv, header row first.

    Args:
        tablelist: iterable of rows as produced by request_get_comment.
        num: episode number, used to build the output file name.
    """
    tableheader = ['dtTime', 'danmu_model', 'font', 'rgb', 'stamp', 'danmu_chi', 'userID', 'rowID', 'message', 'episode']
    file_name = "now{}.csv".format(num)
    print(file_name)
    with open(file_name, 'w', newline='', errors='ignore') as fd:
        # One writerows call replaces the header write + per-row loop.
        csv.writer(fd).writerows([tableheader] + list(tablelist))
            
    
if __name__ == "__main__":
    sys.setrecursionlimit(1000000) 
    """爬取首页，获取共25话 《re:从零开始的异世界生活》 的播放连接
    """
    page = sele_get_first(first_url)
    re_list = sele_get_re_list(page)
#    print(len(re_list))
    
    
    """以字典的形式保存例如：
    {'1': ['初始的终结与结束的开始', 'https://bangumi.bilibili.com/anime/3461/play#85754'],...}
    """
    re_dict = {}
    for item in re_list:
        re_dict[item[1].split(" ")[0]] = [item[1].split(" ")[1], item[0]]
#    print(re_dict)
    
    """获取每一话的播放连接，保存成列表
    """
    re_url_list = []
    for i in range(1, len(re_dict)+1):
        re_url_list.append( re_dict[str(i)][1] )
        
    """利用进程池，获取每一话的弹幕文件连接，
    """
    re_danmu_id_list = []
    pool = Pool(14)
    re_danmu_id_list = pool.map(get_danmu_id, re_url_list)
    pool.close()    
    pool.join()
    re_danmu_id_dict = {}
    for n, p in enumerate(re_danmu_id_list):
        re_danmu_id_dict[str(n+1)] = p
    
    
    """将25话剧集的各自的最新的弹幕文件连接保存到一个文档 comment.txt,按照剧集的顺序保存
    """
    with open('comment.txt', 'w') as fd:
        for i in range(len(re_danmu_id_list)):   
            fd.write('{} https://comment.bilibili.com/{}.xml\n'.format(i+1, re_danmu_id_list[i]))
            
    
    history_danmu_url_list = []   
    stamp_list = time_to_stamp()
    for i in range(1, len(re_danmu_id_list)+1):
        for stamp in stamp_list :
            history_danmu_url = '{} https://comment.bilibili.com/dmroll,{},{}'.format(i, stamp, re_danmu_id_dict[str(i)])
            history_danmu_url_list.append(history_danmu_url)
        history_danmu_url_list.append('{} https://comment.bilibili.com/{}.xml'.format(i, re_danmu_id_dict[str(i)]))
   
    with open('history_danmu_url.txt', 'w') as fd:
        for line in history_danmu_url_list:
            fd.write("{}\n".format(line))
            
        
    all_list = []

    '''把每一集的弹幕文件链接，按照集数，整理到一个字典，
    '''
    url_dict ={}
    with open ("history_danmu_url.txt", 'r') as fd:
        url_whole = fd.readlines()
        print(len(url_whole))
        for i in range(1, len(re_danmu_id_list)+1):
            url_dict[str(i)] = [line for line in url_whole if int(line.split(" ")[0])==i]
    print (len(url_dict))
    
    '''按照集数，取出弹幕链接，进行爬虫，获取弹幕记录，并保存到.csv 文件
    '''
    for i in range(1, len(url_dict)+1):
        n = 0
        tmp_to_get_url = url_dict[(str(i))]
        file_name = "d{}.csv".format(i)
        tableheader = ['dtTime', 'danmu_model', 'font', 'rgb', 'stamp', 'danmu_chi', 'userID', 'rowID', 'message', 'episode']
        with open(file_name, 'a',  newline='', errors='ignore') as fd:
            writer = csv.writer(fd)
            writer.writerow(tableheader)
            for url in tmp_to_get_url :        
                all_list = request_get_comment(url)
                if all_list:
                    for row in all_list:
                        writer.writerow(row)
                print("\n\n\n\n\n")
                n = n+1
                print(n)
                del (all_list)
        del tmp_to_get_url
        
        
        
        
    """获取保存最新历史弹幕文件
    """
    now_danmu_all = {}
    with open ('comment.txt', 'r') as fd:
        for url in fd:
            now_danmu_all[int(url.strip().split(" ")[0])] = request_get_comment(url)

    for num, data in now_danmu_all.items():
        csv_write(data, num)
   

        
    

            
