from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json, re, time
import pandas as pd

def use_cookie(goal_url, browser_next):
    """Replay the cookies saved in cookie/cookies.txt into the driver so
    the target site sees a logged-in session, then return the driver."""
    # The target domain must be open before cookies can be attached to it.
    browser_next.get(goal_url)
    # Read the cookie list previously dumped as JSON.
    with open('cookie/cookies.txt', 'r', encoding='utf8') as f:
        saved_cookies = json.loads(f.read())
    for item in saved_cookies:
        # Some drivers reject the 'expiry' field, so drop it when present.
        item.pop('expiry', None)
        browser_next.add_cookie(item)
    return browser_next

def get_personal_history(data_page=None, out_path='personal_history/spider_result5.csv'):
    """Parse the watch-history page HTML and extract each entry's video
    title, last-watched time and link; persist them to a CSV.

    Parameters:
        data_page: full HTML source of the (fully scrolled) history page.
        out_path: destination for the CSV copy. Parameterized (with the
            original path as default) so runs/tests can redirect it.

    Returns:
        (data_page, titles, times, href_list, data_personal_history)
        where data_personal_history is a DataFrame with columns
        'title', 'time', 'href'.
    """
    titles = re.findall('<a class="title" target="_blank".*?>(.*?)</a>', data_page, re.S)
    times = re.findall('<span class="lastplay-t">(.*?)</span>', data_page, re.S)
    href_list = re.findall('<a class="title" target="_blank" href="(.*?)"', data_page, re.S)
    # History links are protocol-relative ("//www.bilibili.com/...").
    # Only prepend a scheme when one is actually missing: the previous
    # substring test ("'https' not in h") would also mangle a plain
    # http:// link into "https:http://...".
    href_list = [h if h.startswith('http') else 'https:' + h for h in href_list]
    data_personal_history = pd.DataFrame(
        {'title': titles, 'time': times, 'href': href_list})
    data_personal_history.to_csv(out_path, encoding='utf_8_sig')
    print('历史基础数据已爬取完成，共有', len(titles), '条数据')
    return data_page, titles, times, href_list, data_personal_history

def get_detail_data(data_page=None, num=None ,times=None, href=None):
    """Scrape one video page's statistics into a one-row DataFrame.

    Parameters:
        data_page: full HTML source of a single video page.
        num: index into `times` selecting this video's watch-time string.
        times: list of watch-time strings from the history page.
        href: the video's URL, stored verbatim in the result.

    Returns a one-row DataFrame with Chinese column names (title, link,
    author, likes, coins, favourites, shares, tags, author followers,
    time). Falls off the end and implicitly returns None when the page
    has no <h1 title=...> (e.g. a deleted/unloaded video) — callers must
    handle that.
    """
    title = re.findall('<h1 title="(.*?)"', data_page, re.S)
    if len(title)>0:
        good_number = re.findall('<span title="点赞数(.*?)"', data_page)   # like count
        if len(good_number)==0:
            good_number = [0]
        # Author name; this selector does not match when the video has
        # multiple co-authors, hence the fallback below.
        Up_name = re.findall('class="username.*?\\n        (.*?)\\n', data_page, re.S)
        if len(Up_name)==0:
            Up_name = re.findall('<span class="b-head-t">(.*?)</span>', data_page, re.S)
            # Keep only CJK characters; note this wraps the findall result
            # in another list, so the DataFrame cell holds a list object.
            Up_name = [re.findall(r'[\u4e00-\u9fa5]+', str(Up_name), re.S)]
        if len(Up_name)==0:
            Up_name = ['empty']
        coin_number = re.findall('<i class="van-icon-videodetails_throw" style="color:;"></i>(.*?)</span>', data_page, re.S)   # coin count
        if len(coin_number)==0:
            coin_number = ['0']
        colletion_number = re.findall('<i class="van-icon-videodetails_collec" style="color:;"></i>(.*?)</span>', data_page, re.S)  # favourite count
        if len(colletion_number)==0:
            colletion_number = ['0']
        share_number = re.findall('<i class="van-icon-videodetails_share"></i>(.*?)\\n', data_page)  # share count
        if len(share_number)==0:
            share_number = ['0']
        labels = ''.join(re.findall('class="tag-link">(.*?)</a>', data_page, re.S))   # tags
        # As with Up_name: a single-element list whose item is the list of
        # CJK runs found in the joined tag text.
        labels = [re.findall(r'[\u4e00-\u9fa5]+', str(labels), re.S)]
        if len(labels)==0:
            labels = ['0']
        fans_number = re.findall('<i class="van-icon-general_addto_s">.*?关注(.*?)</span>', data_page, re.S)  # author's follower count
        if len(fans_number)==0:
            fans_number = ['0']
        last_data = pd.DataFrame(columns=['标题','链接','作者','点赞数','投币数','收藏数','转发数','标签','作者粉丝','时间'])
        last_data['标题'] = title
        last_data['链接'] = href
        last_data['作者'] = Up_name
        last_data['点赞数'] = good_number
        last_data['投币数'] = ''.join(coin_number).strip()
        last_data['收藏数'] = ''.join(colletion_number).strip()
        last_data['转发数'] = share_number
        last_data['标签'] = labels
        # str.strip('<span>') strips any of the characters <,s,p,a,n,> from
        # both ends — a character-set strip, not substring removal.
        last_data['作者粉丝'] = ''.join(fans_number).strip().strip('<span>')
        last_data['时间'] = times[num]
        return last_data

# Optional headless mode (left disabled so the scrolling is visible):
# chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
# browser = webdriver.Chrome(options=chrome_options)
browser = webdriver.Chrome()
# Log in by replaying saved cookies, then reload the history page so the
# authenticated session takes effect.
browser = use_cookie('https://www.bilibili.com/account/history', browser)
browser.get('https://www.bilibili.com/account/history')

# Keep pressing the down-arrow key on <body> until the "end of history"
# marker image shows up in the rendered page, i.e. every lazy-loaded
# history entry has been fetched. The element is re-located on every
# pass because the page keeps re-rendering while it loads.
scroll_count = 0
while True:
    scroll_count += 1
    body = browser.find_element_by_xpath('/html/body')
    body.send_keys(Keys.ARROW_DOWN)
    page_snapshot = browser.page_source
    if 'img/historyend.png' in page_snapshot:
        print(browser.page_source)
        print('已搜索完历史')
        time.sleep(10)
        break
    if scroll_count % 100 == 0:
        print('已下滑', scroll_count, '次')

data_page, titles, times, href_list, data_personal_history = get_personal_history(browser.page_source)

# Visit every video page and collect one detail row per video. Rows are
# gathered in a list and concatenated once at the end: DataFrame.append
# was removed in pandas 2.0 and was O(n^2) when called per iteration.
detail_frames = []
for num, href in enumerate(href_list):
    if num % 100 == 0:
        print('已爬取详细的第', num, '条记录')
    browser.get(href)  # cookies added earlier keep the session logged in
    page_data = browser.page_source
    data = get_detail_data(page_data, num, times, href)
    # get_detail_data returns None when the page has no <h1 title=...>
    # (deleted/private video); skip those instead of crashing later.
    if data is not None:
        detail_frames.append(data)
if detail_frames:
    data_all = pd.concat(detail_frames, ignore_index=True)
else:
    data_all = pd.DataFrame(columns=['标题', '链接', '作者', '点赞数', '投币数', '收藏数', '转发数', '标签', '作者粉丝', '时间'])
print(data_all.shape)
data_all.to_csv('personal_history/最后的数据5.csv',index=False,encoding='utf_8_sig')  # 4