import re
import os
import json
import logging
import requests
import threading
import datetime
import urllib.parse as urlparse
from BvAvTransform import *


def getCidinfo(bvid, cookie):
    """
    Fetch the page/cid list of a video from the Bilibili pagelist API.

    :param bvid: BV identifier of the video
    :param cookie: raw Cookie header string to authenticate the request
    :return: decoded JSON response as a dict
    """
    api_url = "https://api.bilibili.com/x/player/pagelist?bvid=" + bvid + "&jsonp=jsonp"
    response = requests.request(
        "GET",
        api_url,
        headers={'Cookie': cookie},
        data={},
    )
    return json.loads(response.text)


def getBVInfo(bvid, cid, cookie, qn="116"):
    """
    通过bvid和cid获取视频的详细信息
    :param bvid:
    :param cid:
    :param cookie:
    :param qn:
    :return:
    """
    url = "https://api.bilibili.com/x/player/playurl?bvid=" + bvid + "&cid=" + str(cid) + "&qn=" + qn + "&otype=json"

    payload = {}
    headers = {
        'Cookie': cookie
    }
    response = requests.request("GET", url, headers=headers, data=payload)
    return json.loads(response.text)


# def getSsInfo(ssid, cookie):
#     url = "https://api.bilibili.com/pgc/web/season/section?season_id=" + str(ssid)
#     payload = {}
#     headers = {
#         'Cookie': cookie
#     }
#     response = requests.request("GET", url, headers=headers, data=payload)
#     return json.loads(response.text)
#
#
# def getBvId(aid, cid, cookie):
#     url = "https://api.bilibili.com/x/web-interface/view?aid=" + str(aid) + "&cid=" + str(cid)
#     payload = {}
#     headers = {
#         'Cookie': cookie
#     }
#     response = requests.request("GET", url, headers=headers, data=payload)
#     return json.loads(response.text)


def findStartEnd(key, text):
    """
    Find, for every occurrence of ``key`` in ``text``, the span of the
    smallest pair of curly braces enclosing that occurrence.

    :param key: substring to search for
    :param text: text to scan (typically an HTML page with embedded JSON)
    :return: list of (start, end) tuples; ``text[start:end]`` begins at the
             enclosing '{' and ends one past the matching '}'. If no '{'
             precedes an occurrence the span starts at 0; if no matching '}'
             follows, the span ends at ``len(text)``.
    """
    length = len(text)
    start = 0
    end = 0
    key_pos = []
    # collect every (start, end) position of the key
    while start < len(text):
        start = text.find(key, end)
        end = start + len(key)
        if start != -1:
            key_pos.append((start, end))
        else:
            break

    result_pos = []
    for pos in key_pos:
        end_tag = 0
        start_count = pos[0]
        end_count = pos[1]
        # walk backwards to the nearest '{' (stops at index 0 otherwise)
        while start_count > 0:
            if text[start_count] == '{':
                break
            else:
                start_count -= 1

        # walk forwards counting brace depth until the enclosing '}' closes;
        # BUGFIX: original used ``end_count <= length`` and indexed
        # text[length], raising IndexError when no closing brace remains
        while end_count < length:
            if text[end_count] == '{':
                end_tag -= 1
            elif text[end_count] == '}':
                end_tag += 1

            end_count += 1

            if end_tag == 1:
                break
        result_pos.append((start_count, end_count))

    return result_pos


def epPageAnalysis(epid, cookie):
    """
    Resolve an episode id ("epNNNN") to its (bvid, cid, title).

    Downloads the bangumi play page, narrows it to the brace-delimited JSON
    fragments that mention the numeric episode id, then extracts the
    identifiers with regexes.

    :param epid: episode id string, e.g. "ep123456"
    :param cookie: raw Cookie header string
    :return: tuple (bvid, cid, title); cid is returned as a digit string
    """
    url = "https://www.bilibili.com/bangumi/play/" + epid

    payload = {}
    headers = {
        'Cookie': cookie
    }
    response = requests.request("GET", url, headers=headers, data=payload)
    # keep only the JSON fragments that contain the numeric ep id
    pos_list = findStartEnd(epid[2:], response.text)
    text = ""
    for pos in pos_list:
        text += response.text[pos[0]: pos[1]]
    # BVID: capture directly from the "bvid":"BV..." entry
    pattern = re.compile(r'"bvid":"(BV[0-9a-zA-Z]+)"')
    bvid = pattern.findall(text)[0]
    # CID: capture the digits from the "cid":N entry.
    # BUGFIX: the original re-scanned the whole fragment with r'[0-9]+' and
    # took the first run of digits anywhere in it, returning an unrelated
    # number instead of the cid value. (The parallel dead "aid" extraction
    # was removed — its result was never used.)
    pattern = re.compile(r'"cid":([0-9]+)')
    cid = pattern.findall(text)[0]
    # title: first "title":"..." value in the fragment
    pattern = re.compile(r'"title":".*?","')
    result = pattern.findall(text)[0]
    result = result.replace('"title":"', "")
    result = result.replace('","', "")
    title = result
    return bvid, cid, title


def mainPageAnalysis(url, cookie):
    """
    Main URL analysis entry point.

    Accepts a bilibili video/bangumi URL, resolves BV/av/ep identifiers,
    picks the correct page (cid, possibly via the ?p= query parameter) and
    returns the information needed to download the stream.

    :param url: full bilibili URL pasted by the user
    :param cookie: raw Cookie header string
    :return: (ok, message, bvid, cid, width, height, part, size, flv_url);
             on failure ok is False and the trailing fields are None
    """
    parsed = urlparse.urlparse(url)
    if parsed.netloc != 'www.bilibili.com':
        return False, "非bilibili网址！", None, None, None, None, None, None, None

    # scan each path segment for a BV / ep / av identifier
    bvid = None
    paths = parsed.path.split('/')
    pattern = re.compile(r'(BV[0-9a-zA-Z]{10})|(ep[0-9]+)|(av[0-9]+)')
    for path in paths:
        result = pattern.search(path)
        if result is not None:
            bvid = result[0]
            break

    if bvid is None:
        logging.error("无法找到Bvid,avid或者epid信息")
        return False, "无法找到Bvid,avid或者epid信息", None, None, None, None, None, None, None

    cid = None
    title = None
    if bvid[0:2] == "ep":
        # ep link: resolve it to a concrete bvid/cid/title first
        epid = bvid
        bvid, cid, title = epPageAnalysis(epid, cookie)
    if bvid[0:2] == "av":
        # legacy av id: convert to a bvid (enc comes from BvAvTransform)
        bvid = enc(int(bvid[2:]))

    querys = urlparse.parse_qs(parsed.query)
    p = None
    if "p" in querys.keys():
        p = int(querys["p"][0])

    cidInfo = getCidinfo(bvid, cookie)
    obj_cidInfo = None
    if len(cidInfo["data"]) == 1:
        obj_cidInfo = cidInfo["data"][0]
    else:
        if cid is not None:
            for cidTemp in cidInfo["data"]:
                # BUGFIX: cid from epPageAnalysis is a digit string while the
                # API returns ints; the original ``cid == cidTemp["cid"]``
                # could never match — compare as ints
                if int(cid) == cidTemp["cid"]:
                    obj_cidInfo = cidTemp
                    break
        else:
            for cidTemp in cidInfo["data"]:
                if p is None:
                    return False, "分P信息获取失败，请选择观看视频的右侧对应分p，点击跳转后复制URL到本程序进行下载！", None, None, None, None, None, None, None

                if p == cidTemp["page"]:
                    obj_cidInfo = cidTemp
                    break

    # BUGFIX: the original tested ``cidInfo is None`` which can never be true
    # here (it was just assigned above); test the selected page object so an
    # unmatched cid/p returns an error tuple instead of raising TypeError
    if obj_cidInfo is None:
        return False, "cid信息获取失败！", None, None, None, None, None, None, None
    else:
        cid = obj_cidInfo["cid"]
        if title is None:
            part = obj_cidInfo["part"]
        else:
            part = title

    bvInfo = getBVInfo(bvid, cid, cookie)
    size = bvInfo["data"]["durl"][0]["size"]
    flv_url = bvInfo["data"]["durl"][0]["url"]
    width = obj_cidInfo["dimension"]["width"]
    height = obj_cidInfo["dimension"]["height"]

    return True, "获取成功！", bvid, cid, width, height, part, size, flv_url


# def download(referer, url, bytes_range):
#     """
#     基本下载方法
#     :param referer:初始url
#     :param url:需要下载的URL
#     :param bytes_range: 分段下载的字节范围
#     :return:返回response
#     """
#     payload = {}
#     headers = {
#         'accept': '*/*',
#         'origin': 'https://www.bilibili.com',
#         'referer': referer,
#         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
#     }
#
#     if bytes_range is not None:
#         headers["range"] = 'bytes=' + bytes_range
#
#     response = requests.request("GET", url, headers=headers, data=payload)
#     return response


def downloadDanmu(cid, cookie, filePath, fileName):
    """
    Download the danmaku (comment) XML of a video page to disk.

    :param cid: page cid identifying the danmaku pool
    :param cookie: raw Cookie header string
    :param filePath: destination directory (assumed to exist)
    :param fileName: destination file name
    """
    logging.info("正在下载弹幕文件:" + fileName)
    url = "https://api.bilibili.com/x/v1/dm/list.so?oid=" + str(cid)

    payload = {}
    headers = {
        'Cookie': cookie
    }
    response = requests.request("GET", url, headers=headers, data=payload)
    # context manager closes the file even if write() raises
    with open(filePath + '/' + fileName, "wb") as file:
        file.write(response.content)
    logging.info("弹幕文件下载完成！")


# def download_thread(id, filePath, fileName, referer, url, bytes_list):
#     global task_state
#     global file_used
#     for bytes_range in bytes_list:
#         print(str(bytes_range["start"]) + "-" + str(bytes_range["end"]))
#         response = download(referer, url, str(bytes_range["start"]) + "-" + str(bytes_range["end"]))
#         while file_used is True:
#             pass
#         file_used = True
#         file = open(filePath + '/' + fileName, "rb+")
#         file.seek(bytes_range["start"], 0)
#         file.write(response.content)
#         file.close()
#         file_used = False
#     task_state[id - 1] = 1


def downloadVideo(referer, url, size, num_thread, filePath, fileName):
    """
    Download a video to ``filePath/fileName``, creating the directory first.

    Delegates the actual multi-threaded transfer to ``download_file`` and
    logs the elapsed wall-clock time.

    :param referer: original page URL, sent as the Referer header
    :param url: direct media URL to download
    :param size: total file size in bytes
    :param num_thread: number of parallel download threads
    :param filePath: destination directory
    :param fileName: destination file name
    """
    if not os.path.exists(filePath):
        logging.info("创建目录:" + filePath)
        os.makedirs(filePath)

    began_at = datetime.datetime.now().replace(microsecond=0)
    target = filePath + '/' + fileName
    download_file(url, target, size, referer, num_thread=num_thread)
    ended_at = datetime.datetime.now().replace(microsecond=0)
    logging.info("用时: " + str(ended_at - began_at))



def Handler(tag, start, end, url, filename, referer):
    """
    Worker for one byte range: fetch bytes [start, end] of ``url`` and write
    them into ``filename`` at offset ``start``.

    :param tag: worker index, used only for logging
    :param start: first byte of the range (inclusive)
    :param end: last byte of the range (inclusive)
    :param url: media URL to download from
    :param filename: pre-allocated target file, opened r+b so it is not truncated
    :param referer: page URL for the Referer header
    """
    headers = {
        'accept': '*/*',
        'origin': 'https://www.bilibili.com',
        'referer': referer,
        'Range': 'bytes=%d-%d' % (start, end),
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'
    }
    logging.info("下载线程" + str(tag) + " 开始下载！")
    r = requests.get(url, headers=headers, stream=True, timeout=5)
    # stream the body straight into the file slice; the original read
    # r.content (buffering the whole segment in memory, defeating stream=True)
    # and held an unused ``var = fp.tell()``
    with open(filename, "r+b") as fp:
        fp.seek(start)
        for chunk in r.iter_content(chunk_size=65536):
            fp.write(chunk)
    logging.info("线程" + str(tag) + " 下载完成！")


def download_file(url, file_name, file_size, referer, num_thread=10):
    """
    Multi-threaded ranged download of ``url`` into ``file_name``.

    Pre-allocates a file of the final size, splits it into ``num_thread``
    byte ranges and downloads each range in a worker thread (``Handler``).

    :param url: media URL to download from
    :param file_name: destination path
    :param file_size: total size in bytes (used to pre-allocate and split)
    :param referer: page URL for the Referer header
    :param num_thread: number of worker threads
    """
    # create a file of the final size so workers can seek+write concurrently
    with open(file_name, "wb") as fp:
        fp.truncate(file_size)

    part = file_size // num_thread
    threads = []
    for i in range(num_thread):
        start = part * i
        if i == num_thread - 1:
            # last block absorbs the remainder when size % num_thread != 0
            end = file_size - 1
        else:
            # BUGFIX: HTTP Range headers are inclusive; the original used
            # ``start + part``, re-downloading one boundary byte per block
            end = start + part - 1
        t = threading.Thread(target=Handler, kwargs={'tag': i, 'start': start, 'end': end, 'url': url, 'filename': file_name, 'referer': referer})
        t.daemon = True  # Thread.setDaemon() is deprecated
        t.start()
        threads.append(t)
    # join only the workers started here; the original joined every thread
    # from threading.enumerate(), which can block on unrelated live threads
    for t in threads:
        t.join()
    logging.info(file_name + ' 下载完成')