import json
import re

import requests


# 获取网页
def get_page(page):
    url = f'https://www.bilibili.com/anime/index/#season_version=-1&area=-1&is_finish=-1&copyright=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&page={page}'
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
    }
    response = requests.get(url, headers)
    if response.status_code == 200:
        return response.content.decode('utf-8')
    return response.status_code


# 解析
def parse_page(html):
    pattern = re.compile(
        '<a href="//www.bilibili.com/bangumi/play/.*?/" target="_blank" class="bangumi-title">(.*?)</a>', re.S)
    film_name = re.findall(pattern, html)
    print(film_name)
    result = []
    for i in range(len(film_name)):
        data_dict = {}
        data_dict['film_name'] = film_name[i]
        result.append(data_dict)
    return result


# 获取多个网页
def get_pages():
    result_list = []
    for page in range(10):
        html = get_page(page)
        parse_page(html)
        result = parse_page(html)
        result_list.extend(result)
    return result_list


# 保存json
def save_json(result_list):
    json_str = json.dumps(result_list, ensure_ascii=False)
    with open('maoyan.json', 'w', encoding='utf-8') as f:
        f.write(json_str)


def main():
    """Scrape all pages, report the count, and persist the results."""
    result_list = get_pages()
    print('Total:', len(result_list))
    # FIX: save_json was defined but never called — the scraped data was
    # computed and then discarded. Persist it to the default maoyan.json.
    save_json(result_list)


if __name__ == '__main__':
    main()
