import time
import json
import re
from fnmatch import fnmatch
from selenium import webdriver
from bs4 import BeautifulSoup

# Running counter of videos seen across all scraped pages; also used as a
# fallback identifier for uncategorized videos (see get_name / parse_html).
nid = 0


def get_pages(driver):
    """Return the total number of pages of the uploader's video listing.

    Parses the driver's current page source and reads the pagination <li>
    whose title looks like "最后一页:N" ("last page: N").

    Args:
        driver: a Selenium WebDriver whose current page is the video list.

    Returns:
        int: the total page count.

    Raises:
        TypeError: if the pagination element is missing (find() returns None).
        ValueError: if no trailing page number can be extracted.
    """
    soup = BeautifulSoup(driver.page_source, 'html5lib')
    # NOTE(review): in a regex, ':*' means "zero or more colons"; the intent
    # is a prefix match on "最后一页", which this pattern still achieves.
    page_tag = soup.find('li', title=re.compile('最后一页:*'))
    title = page_tag['title']
    # Extract the trailing digits instead of the original fixed-offset
    # slice title[5:], which is brittle against format changes.
    match = re.search(r'(\d+)\s*$', title)
    if match is None:
        raise ValueError('unrecognized pagination title: %r' % title)
    return int(match.group(1))


def judge_type(name):
    """Classify a video by keywords found in its title.

    Args:
        name: the video title string.

    Returns:
        A short type code: 'CR' (character demo), 'CPV' (character PV),
        'SPV' (story PV), 'OID' (talk series), 'VPV' (version PV),
        'CG', or 'Other' when no keyword matches.
    """
    # Ordered rules: the first matching keyword wins, mirroring the
    # original if/elif chain. fnmatch('*kw*') is a substring test
    # (case-normalized the same way the original calls were).
    rules = (
        ('*角色演示*', 'CR'),
        ('*角色PV*', 'CPV'),
        ('*剧情PV*', 'SPV'),
        ('*拾枝杂谈*', 'OID'),
        ('*版本PV*', 'VPV'),
        ('*CG*', 'CG'),
    )
    for pattern, code in rules:
        if fnmatch(name, pattern):
            return code
    return 'Other'


def get_name(vtype, name, opt):
    """Build an identifier string for a video based on its type.

    For character/story types, scans the matching lookup table in
    opt['DefDict'] for a key appearing in the title and formats the
    mapped value. Keeps the original last-match-wins behavior when
    several keys match. For every other type the running global
    counter `nid` is used as the identifier.

    Args:
        vtype: type code from judge_type() ('CR', 'CPV', 'SPV', 'CG', ...).
        name: the video title.
        opt: options dict carrying 'DefDict' with 'char' and 'sto_cg' tables.

    Returns:
        str: the identifier; the literal string 'None' when the type is
        recognized but no table key matches (preserved original behavior).
    """
    global nid
    # vtype -> (lookup table name, format applied to the mapped value)
    table_for = {
        'CR': ('char', '%s_Pref'),
        'CPV': ('char', '%s_PV'),
        'SPV': ('sto_cg', 'SPV_%s'),
        'CG': ('sto_cg', 'CG_%s'),
    }
    if vtype not in table_for:
        # OID / VPV / Other: fall back to the running item counter.
        return str(nid)
    table, fmt = table_for[vtype]
    a = None
    for key, value in opt['DefDict'][table].items():
        # Deliberately no break: if several keys match, the last one
        # wins, exactly as the original loops behaved.
        if fnmatch(name, '*%s*' % key):
            a = fmt % value
    return str(a)


def parse_html(opt, vidg, html):
    """Parse one listing page and append its video records to vidg.

    For every <li> inside the 'clearfix cube-list' container, reads the
    video id ('data-aid') and title, classifies the title, derives an
    identifier, and appends a record dict. Increments the global counter
    `nid` once per video.

    Args:
        opt: options dict (forwarded to get_name()).
        vidg: list of video records; mutated in place.
        html: raw page source of one listing page.

    Returns:
        The same list `vidg`, for chaining.
    """
    global nid
    soup = BeautifulSoup(html, 'html5lib')
    container = soup.find(class_='clearfix cube-list')
    for item in container.children:
        if item.name != 'li':
            continue
        nid += 1
        bv = item['data-aid']
        name = item.find('a', class_='title')['title']
        vtype = judge_type(name)
        vid = get_name(vtype, name, opt)
        vidg.append(dict(id=vid, nid=nid, name=name, type=vtype, bv=bv))
    return vidg


def get_all(opt):
    """Scrape every page of the uploader's video listing.

    Launches a headless Firefox, loads opt['SpBaseSite'], reads the total
    page count, then walks pages 2..N collecting video records.

    Args:
        opt: options dict; 'SpBaseSite' is the listing base URL.

    Returns:
        list[dict]: one record per video (see parse_html()).
    """
    optw = webdriver.FirefoxOptions()
    # set_headless() was removed in Selenium 4; the argument form works
    # on both old and new Selenium versions.
    optw.add_argument('-headless')
    driver = webdriver.Firefox(options=optw)  # headless Firefox WebDriver
    try:
        vids = []  # collected video records
        driver.get(opt['SpBaseSite'])  # load the first listing page
        time.sleep(2)  # crude wait for the JS-rendered list to settle
        print('获取第1页源码成功！')
        pg = get_pages(driver)  # total number of listing pages
        vids = parse_html(opt, vids, driver.page_source)
        for i in range(2, pg + 1):
            driver.get(opt['SpBaseSite'] + '?page=' + str(i))
            time.sleep(2)
            print('获取第' + str(i) + '页源码成功！')
            vids = parse_html(opt, vids, driver.page_source)
        print('抓取视频列表成功！')
        return vids
    finally:
        # Always shut the browser down, even on error — the original
        # leaked the geckodriver/Firefox processes on any exception.
        driver.quit()


def into_json(vidg):
    """Write the video list to Videos.json in the working directory.

    Args:
        vidg: list of JSON-serializable video record dicts.
    """
    # 'with' guarantees the handle is closed even if dump() raises;
    # mode 'w' suffices — the read capability of the original 'w+' was unused.
    with open('Videos.json', 'w', encoding='utf-8') as fp:
        json.dump(vidg, fp, ensure_ascii=False, indent=4)
    print('写入视频列表成功！')


def spider(opt):
    """Entry point: scrape the uploader's video list and persist it as JSON.

    Args:
        opt: options dict forwarded to get_all().
    """
    print('开始爬取视频！')
    into_json(get_all(opt))
