from requests_html import HTMLSession
from bs4 import BeautifulSoup
import requests
import re
import json
import pymongo


# Connect to the local MongoDB server. The database and its two collections
# are created lazily by MongoDB on the first insert, so nothing needs to
# exist beforehand.
def init_mongo():
    """Return (database, bangumi collection, extra-series collection)."""
    client = pymongo.MongoClient('mongodb://localhost:27017/')
    db = client['bilibilitest']
    bangumi_col = db['jp_bangumi_info']
    extra_col = db['extra_series_info']
    return db, bangumi_col, extra_col


# Crawl the dynamically served bangumi index pages.
def spider_index_page(end_page, bangumi_col, extra_col, insert):
    """Crawl pages 1..end_page of Bilibili's Japanese-bangumi index API,
    scrape each entry's detail pages, classify it, and dispatch it to the
    matching insert function.

    insert is a list of three callables indexed by kind_judge()'s result:
    0 = first work of a series, 1 = series follow-up, 2 = movie.
    """
    bangumi_id = 1
    # Pretend to be a regular browser to get past simple anti-crawler checks.
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                      ' Chrome/101.0.4951.64 Safari/537.36 Edg/101.0.1210.53'
    }
    url = "https://api.bilibili.com/pgc/season/index/result"
    for page_num in range(1, end_page + 1):
        # Query parameters of the index API, sorted by air date.
        query_params = {
            'season_version': '-1',
            'spoken_language_type': '-1',
            'area': '2',
            'is_finish': '-1',
            'copyright': '-1',
            'season_status': '-1',
            'season_month': '-1',
            'year': '-1',
            'style_id': '-1',
            'order': '5',
            'st': '1',
            'sort': '1',
            'page': f'{page_num}',
            'season_type': '1',
            'pagesize': '20',
            'type': '1'
        }
        res = requests.get(url, params=query_params, headers=headers)
        # Parse the JSON payload directly instead of regex-matching the raw
        # response text: the old regex hard-coded "total":3087 and
        # "has_next":1, so it crashed (search() -> None) as soon as the
        # catalogue size changed or the last page was reached.
        payload = res.json()
        result_list = payload['data']['list']
        for dic in result_list:
            # One record per bangumi: [id, title, is_finish, homepage, ...].
            record = [bangumi_id, dic['title'], dic['is_finish']]
            # Resolve the homepage link from the play page.
            homepage_link = spider_play_page(dic['link'])
            record.append(homepage_link)
            # Scrape tags, stats, score, release info from the homepage.
            record = spider_home_page(homepage_link, record)
            # Classify on (title, release, judge_item) and dispatch.
            kind_num = kind_judge(record[1], record[9], record[10])
            insert[kind_num](record, bangumi_col, extra_col)
            bangumi_id += 1


# Crawl a bangumi play page and extract the link to its homepage.
def spider_play_page(link):
    """Return the absolute homepage URL found on the play page at *link*."""
    session = HTMLSession()
    response = session.get(link)
    # The anchor we need lives inside the first div.media-right container.
    media_right = response.html.find('div.media-right')[0]
    anchor = media_right.find('a', first=True)
    # Re-parse the anchor's raw HTML to read its href attribute
    # (soup.a.attrs is the dict of the <a> tag's attributes).
    parsed = BeautifulSoup(anchor.html, 'html.parser')
    return 'https:' + parsed.a.attrs['href']


# Crawl a bangumi homepage and append the scraped fields to the record list.
def spider_home_page(link, document):
    """Append [tags, total_play, total_fan, total_screen_comment, score,
    release, judge_items] to *document* and return it."""
    session = HTMLSession()
    page = session.get(link)
    # Category tags of the bangumi (list of str).
    document.append([node.text for node in page.html.find('span.media-tag')])
    # Viewing statistics; each stat span reads "<label> <value>", and only
    # the even-indexed spans carry the numbers we want.
    stat_spans = page.html.find('div.media-info-datas')[0].find('span')
    stat_texts = [span.text for span in stat_spans]
    document.append(stat_texts[0].split(' ')[1])  # total plays
    document.append(stat_texts[2].split(' ')[1])  # total followers
    document.append(stat_texts[4].split(' ')[1])  # total danmaku comments
    # The score block may be missing when a title has no rating yet.
    score_nodes = page.html.find('div.media-info-score-content')
    document.append(score_nodes[0].text if score_nodes else '暂无评分')
    # Release date plus an extra descriptor (season / episodes / duration).
    time_spans = page.html.find('div.media-info-time')[0].find('span')
    time_texts = [span.text for span in time_spans]
    document.append(time_texts[0])
    # The descriptor comes in four shapes: '连载中'; '已完结，共x话';
    # '第一季 | 已完结，共x话'; '113分钟' — split it into parts on '|'.
    document.append(time_texts[1].replace(' ', '').split('|'))
    return document


# Classify a scraped record; series movies do not get their own class.
def kind_judge(title, release, judge_item):
    """Return 0 for the first work of a bangumi series, 1 for a follow-up
    series entry, and 2 for an animated movie."""
    if not release.endswith('开播'):
        # Anything not marked as "started airing" is an animated movie.
        return 2
    # A space in the title, or a descriptor whose first part is not
    # "season one", marks the entry as part of an existing series.
    is_series = ' ' in title or (len(judge_item) > 1 and judge_item[0] != '第一季')
    return 1 if is_series else 0


# Insert a series-first bangumi into jp_bangumi_info; being the first work
# of its series, no lookup against existing records is needed.
def insert_bangumi_first(params, bangumi_col, extra_col):
    params.append('bangumi')
    # Build the document and store it directly.
    doc = create_document(params)
    bangumi_col.insert_one(doc)
    print(f'insert {params[1]} to jp_bangumi_info successfully!')


# Insert a series follow-up entry; the series' first work must first be
# looked up in jp_bangumi_info to obtain a series id.
def insert_bangumi_series(params, bangumi_col, extra_col):
    """Insert *params* into extra_series_info, or fall back to
    jp_bangumi_info when the series' first work is not on record."""
    params.append('bangumi')
    # Resolve the series id (-1 when the first work is missing).
    params.append(get_series_id(params[1], bangumi_col, params[11]))
    my_document = create_document(params)
    if params[12] == -1:
        # The first work may simply not be in our crawl window, so still
        # keep the entry — in the main bangumi collection.
        bangumi_col.insert_one(my_document)
        print(f'insert {params[1]} to jp_bangumi_info successfully!')
        return
    else:
        extra_col.insert_one(my_document)
        # Fixed log message (previously read "insert to {title} to ...").
        print(f'insert {params[1]} to extra_series_info successfully!')
        return


# Insert an animated movie into the appropriate collection.
def insert_movie(params, bangumi_col, extra_col):
    params.append('movie')
    series_id = get_series_id(params[1], bangumi_col, params[11])
    if series_id == 0:
        # First occurrence of this movie: store it in the bangumi collection.
        bangumi_col.insert_one(create_document(params))
    else:
        # Part of an existing series: record the series id and store it in
        # the extra-series collection.
        params.append(series_id)
        extra_col.insert_one(create_document(params))


# Look up the series id for a title.
def get_series_id(title, bangumi_col, category):
    """Return the bangumi_id of the series' first work when one exists in
    jp_bangumi_info; otherwise -1 for a bangumi (insert it into the main
    collection anyway) or 0 for a movie seen for the first time.
    """
    # Titles sharing the same first three characters are treated as the
    # same series (cheap heuristic used by the original crawler).
    title_clip = title[0:3]
    # find_one replaces the old Cursor.count() pattern, which was removed
    # in PyMongo 4.x and also issued the query twice; re.escape guards
    # against regex metacharacters appearing in the title.
    first_work = bangumi_col.find_one({'title': {'$regex': '^' + re.escape(title_clip)}})
    if first_work is not None:
        # A hit means the series already exists on record.
        return first_work['bangumi_id']
    if category == 'bangumi':
        # The first work may not be in our crawl window; signal the caller
        # to insert into the bangumi collection regardless.
        return -1
    if category == 'movie':
        # First time we see this movie: it goes into jp_bangumi_info.
        return 0
    # Unknown category with no match — mirrors the original implicit None.
    return None


# Build the document to insert into a collection from the scraped params.
def create_document(params):
    """params layout: [bangumi_id, title, is_finish, homepage, tags,
    total_play, total_fan, total_screen_comment, score, release,
    judge_items, category(, series_id)].
    """
    document = dict()
    document['bangumi_id'] = params[0]
    # A 13-element list carries a series id at the end; -1 means the
    # series' first work was not found, so no series_id field is stored.
    if len(params) == 13 and params[12] != -1:
        document['series_id'] = params[12]
    document['title'] = params[1]
    # 0 = still airing, 1 = finished.
    document['is_finished'] = params[2]
    # Finished bangumi additionally record their episode count.
    if document['is_finished'] and params[11] == 'bangumi':
        # The episode count lives in the last descriptor item,
        # e.g. '已完结，共12话'.
        last_item = params[10][-1]
        if last_item != '已完结':
            match = re.search(r'已完结.{0,5}(全|共)(?P<episode>.{1,4})话', last_item)
            # Guard against unexpected descriptor formats instead of
            # crashing with AttributeError on a failed match.
            if match is not None:
                document['episode_num'] = match.group('episode')
    # For movies the descriptor list has a fixed shape: item 0 is duration.
    if params[11] == 'movie':
        document['duration'] = params[10][0]
    document['homepage'] = params[3]
    document['category'] = params[11]
    document['tags'] = params[4]
    # Strip the trailing two characters ('开播'/'上映'), keeping the date.
    document['release'] = params[9][0:len(params[9]) - 2]
    document['total_play'] = params[5]
    document['total_fan'] = params[6]
    document['total_screen_comment'] = params[7]
    document['score'] = params[8]
    return document


# Entry point: crawl Bilibili bangumi info and persist it into MongoDB.
if __name__ == '__main__':
    # Dispatch table: the index matches the class returned by kind_judge()
    # (0 = series-first work, 1 = series follow-up, 2 = movie).
    inserters = [insert_bangumi_first, insert_bangumi_series, insert_movie]
    # Number of index pages to crawl, read from stdin.
    page_count = int(input())
    db, bangumi_col, extra_col = init_mongo()
    spider_index_page(page_count, bangumi_col, extra_col, inserters)
