# -*- coding: utf-8 -*-
import os
import pprint
import random
import time

import scrapy
import json
from BiliBiliSpider.items import BilibilispiderItem
import re
import subprocess

# Base HTTP headers shared by every outgoing request; the spider sets a
# page-specific 'referer' entry before each request is scheduled.
HEADERS = {
    'origin': 'https://www.bilibili.com',
}


class BilibiliSpider(scrapy.Spider):
    """Crawl bilibili anime listings.

    Pipeline of callbacks:
      1. ``start_requests``  -> list API (serial + finished anime)
      2. ``parse_one_html``  -> skip region-locked titles, follow detail pages
      3. ``parse_two_html``  -> per-episode metadata, follow play pages
      4. ``parse_three_html``-> extract audio/video CDN URLs, yield the item
    """

    name = 'bilibili'
    # allowed_domains = ['bilibili.com', 'api.bilibili.com']
    custom_settings = {
        'ITEM_PIPELINES': {
            'BiliBiliSpider.pipelines.DownloadVideoPipeline': 301,
            'BiliBiliSpider.pipelines.DownloadAudioPipeline': 302,
            'BiliBiliSpider.pipelines.SaveMysqlPipeline': 349,
            'BiliBiliSpider.pipelines.MergeVideoPipeline': 320,
            'scrapy_redis.pipelines.RedisPipeline': 300,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'BiliBiliSpider.middlewares.RandomUserAgentDownloaderMiddleware': 300,
        },
    }

    # Titles containing any of these markers are region-locked
    # (HK/Macau/Taiwan only) and are skipped.
    REGION_LOCK_MARKERS = ("僅限港澳台地區", "僅限台灣地區", "僅港澳台地區", "僅台灣地區")
    # Series we deliberately never download.
    SKIP_TITLES = ("名侦探柯南", "海绵宝宝")

    def start_requests(self):
        """Seed one request per list API (first page only).

        Headers are copied per request instead of mutating the shared
        module-level HEADERS dict: Scrapy schedules requests
        asynchronously, so in-place mutation raced between the two seeds
        and could send the wrong referer.
        """
        seeds = [
            ('https://api.bilibili.com/x/web-interface/newlist?rid=33&type=0&pn={}&ps=20',
             'https://www.bilibili.com/v/anime/serial/'),
            ('https://api.bilibili.com/x/web-interface/dynamic/region?jsonp=jsonp&pn={}&ps=5&rid=32',
             'https://www.bilibili.com/v/anime/finish/'),
        ]
        for link, referer in seeds:
            yield scrapy.Request(
                url=link.format(0),
                callback=self.parse_one_html,
                headers=dict(HEADERS, referer=referer),
            )

    def parse_one_html(self, response):
        """Parse a list-API JSON response.

        Skips region-locked titles and schedules the detail page of each
        remaining anime (first 3 entries only).
        """
        json_content = json.loads(response.text)
        for entry in json_content["data"]['archives'][:3]:
            # Region-locked titles cannot be played from here; skip them.
            if any(marker in entry['title'] for marker in self.REGION_LOCK_MARKERS):
                continue
            yield scrapy.Request(
                url=entry['redirect_url'],
                callback=self.parse_two_html,
                headers=HEADERS,
            )

    def parse_two_html(self, response):
        """Parse an anime detail page.

        Extracts per-episode metadata (name, episode name, link, cover,
        serial/finished status, VIP badge), prepares the download/merge
        directories, and schedules each non-VIP episode's play page.
        """
        html = response.text
        # The page embeds its episode list as a JSON blob in __INITIAL_STATE__.
        state_json = re.findall(
            r'<script>window.__INITIAL_STATE__=(.*?);\(function\(\)', html, re.S)[0]
        # Serial ("连载") vs. finished ("完结") status from the page header.
        anime_status = re.findall(r'<span class="pub-info">(.*?)</span>', html, re.S)[0]
        state = json.loads(state_json)
        for episode in state['mediaInfo']['episodes'][:7]:
            item = BilibilispiderItem()
            # Non-empty badge marks a VIP-only episode.
            item['anime_badge'] = episode['badge']
            share_copy = episode['share_copy'].replace(" ", "").replace("/", '')
            # Series name is the text inside 《...》 — used as the directory name.
            item['anime_name'] = re.findall(r'《(.*?)》', share_copy)[0]
            # Episode name with CJK punctuation stripped — used as the file name.
            item['anime_num_name'] = (share_copy.replace("《", '').replace("》", "")
                                      .replace("！", "").replace("．", ""))
            item['anime_link'] = episode['link']
            # Cover image URL, used for GUI display.
            item['anime_image'] = episode['cover']
            item['anime_update_status'] = anime_status
            # Skip VIP episodes and deliberately excluded series.
            if item['anime_badge']:
                continue
            if any(title in item['anime_name'] for title in self.SKIP_TITLES):
                continue
            self._ensure_dirs(item['anime_name'], anime_status)
            yield scrapy.Request(
                url=item['anime_link'],
                meta={'item': item},
                callback=self.parse_three_html,
                # Per-request copy: do not mutate the shared HEADERS dict.
                headers=dict(HEADERS, referer=item['anime_link']),
            )

    def _ensure_dirs(self, anime_name, status):
        """Create the download and merge directories for *anime_name*.

        Directories are grouped under anime_serial/ or anime_end/ by the
        page's serial/finished status. Idempotent: makedirs(exist_ok=True)
        replaces the original exists()-then-mkdir() pair, which both raced
        (TOCTOU) and failed when a parent directory was missing.
        """
        if "连载" in status:
            subdir = 'anime_serial'
        elif "完结" in status:
            subdir = 'anime_end'
        else:
            # Unknown status: nothing to prepare.
            return
        os.makedirs('videos/{}/{}'.format(subdir, anime_name), exist_ok=True)
        os.makedirs('merge_audios_videos/{}/{}'.format(subdir, anime_name), exist_ok=True)

    def parse_three_html(self, response):
        """Parse a play page.

        Extracts the first (highest-priority) audio and video CDN URLs from
        the embedded __playinfo__ JSON and yields the completed item.
        """
        item = response.meta['item']
        playinfo = re.findall(
            r'<script>window.__playinfo__=(.*?)</script>', response.text, re.S)[0]
        dash = json.loads(playinfo)['data']['dash']
        # 'audio'/'video' are lists of stream dicts; only the first is taken.
        item['download_audio'] = dash['audio'][0]['baseUrl']
        item['download_video'] = dash['video'][0]['baseUrl']
        yield item
