"""CopyRight(C) From WangShiheng"""
# Author: WangShiheng

import os
import shutil
import threading
import time
import urllib
import urllib.request
from typing import List

import requests

from BaseLib.util.OS.Thread.ThreadPool import ThreadPool
from BaseLib.util.base.LOGGER import LOGGER

# Default request headers: spoof a desktop-browser User-Agent so servers that
# reject obvious script clients still serve the file.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
}
# One shared session so the per-segment range requests can reuse
# keep-alive connections.
session = requests.Session()
# Size constants, in bytes.
MB = 1024 ** 2
# NOTE(review): GB == 1024 looks wrong — 1 GB is 1024 ** 3 bytes (or
# 1024 * MB). It is unused in this module; confirm before relying on it.
GB = 1024

__all__ = [
    # class (NOTE(review): DownLoadThread is a class, not a function,
    # despite the original label)
    "DownLoadThread",

    # class
    "ThreadDownload"
]


def start_download(url: str, order: int, start: int, end: int, LOGGER=LOGGER()) -> None:
    """
    Download one byte range of *url* and save it as a numbered part file
    under ``.\\.download_temp\\<file_name>\\``.

    Parameters
    ----------
    url : str
        Direct link to the file.
    order : int
        Sequence number of this segment; it becomes the part file's name so
        ``compile_file`` can stitch the segments back together in order.
    start : int
        First byte of the range (inclusive).
    end : int
        Last byte of the range (inclusive, per HTTP ``Range`` semantics).
    LOGGER :
        Log sink (LOGGER-like).  NOTE(review): the default is evaluated once
        at import time, so all default callers share one instance — confirm
        that is intended.
    """
    LOGGER.debug(f"Thread:{threading.current_thread().name} is on active")
    # Derive the temp-directory name the same way DownLoadThread.run() does,
    # so both sides agree on where the part files live.
    file_name = url.split('/')[-1].split('?')[0].replace('&', '+')
    _headers = headers.copy()
    # Core of segmented downloading: request only this byte range.
    _headers['Range'] = f'bytes={start}-{end}'
    # Size of each streamed read.
    chunk_size = 128
    # Fix: stream straight to disk instead of buffering the whole segment in a
    # list first (the old code held the entire range in memory), and close the
    # streaming response deterministically via the context manager (the old
    # code leaked the connection).
    with session.get(url, headers=_headers, stream=True) as response, \
            open(fr'.\.download_temp\{file_name}\{str(order)}.tmp_download', 'wb') as f:
        for chunk in response.iter_content(chunk_size=chunk_size):
            f.write(chunk)
    LOGGER.debug(f"Thread:{threading.current_thread().name} is off active")


def get_file_size(url: str) -> int:
    """
    Ask the server for the size of the file behind *url*.

    :param url: direct link to the file
    :return: file size in bytes, read from the ``Content-Length`` header
    :raise ValueError: if the server reports no length or a length of 0,
        in which case segmented (multi-threaded) download is not possible
    """
    # Note: requires `import urllib.request` at module level — a bare
    # `import urllib` does not reliably expose the `request` submodule.
    request = urllib.request.Request(url, headers=headers)
    # Close the connection deterministically once the headers are read
    # (the old code never closed the response).
    with urllib.request.urlopen(request) as response:
        file_size = response.headers['content-length']
    if file_size == '0' or file_size is None:
        raise ValueError('该文件不支持多线程分段下载！')
    else:
        return int(file_size)


def split(end: int, step: int) -> list[tuple[int, int]]:
    """
    Split the byte range ``[0, end)`` into inclusive ``(first, last)`` pairs
    of at most *step* bytes each, suitable for HTTP ``Range`` headers.

    :param end: total number of bytes; the last valid byte index is ``end - 1``
    :param step: maximum size of each part, in bytes
    :return: list of ``(first_byte, last_byte)`` tuples,
        e.g. ``split(10, 3) -> [(0, 2), (3, 5), (6, 8), (9, 9)]``
    """
    # Bug fix: the last part previously ended at ``end`` (``min(..., end + 1)
    # - 1``), i.e. one byte past the file, since valid indices run 0..end-1.
    # Clamp to ``end`` so the final pair ends at ``end - 1``.
    parts = [(start, min(start + step, end) - 1)
             for start in range(0, end, step)]
    return parts


def get_step(thread_num, filesize, LOGGER=LOGGER()):
    """
    Work out how many bytes each download thread should fetch.

    :param thread_num: desired number of download threads
    :param filesize: total file size in bytes
    :param LOGGER: log sink (NOTE(review): the default is created once at
        import time and shared by all default callers — confirm intended)
    :return: per-thread segment size in bytes, never below the 3 MB floor
        (small files are not worth splitting finely)
    """
    step_min = 3 * MB
    if thread_num * step_min < filesize:
        # Bug fix: use integer *ceiling* division instead of float division.
        # Float division loses precision on very large files, and flooring
        # the quotient could produce thread_num + 1 parts; ceiling guarantees
        # at most thread_num parts.
        step = -(-filesize // thread_num)
    else:
        step = step_min
    LOGGER.debug("step: " + str(step))
    return int(step)


def compile_file(root_d=None, save_path=None, LOGGER=None):
    """
    Merge the numbered ``N.tmp_download`` part files in *root_d* into one
    output file (in ascending order 0..max), then delete the temp directory.

    A missing part in the 0..max sequence raises ``FileNotFoundError``, which
    signals an incomplete download rather than silently writing a corrupt file.

    :param root_d: directory holding the part files; defaults to the current
        working directory *at call time* (the old ``os.getcwd()`` default was
        frozen at import time)
    :param save_path: path of the merged output file; defaults to a file named
        after *root_d*'s basename in the current directory (the old default
        reused the directory path itself, and ``open()`` on a directory
        always fails)
    :param LOGGER: optional log sink (LOGGER-like); None disables logging
    """
    if root_d is None:
        root_d = os.getcwd()
    if save_path is None:
        # Bug fix: derive a *file* name from the temp directory instead of
        # opening the directory itself for writing.
        save_path = os.path.basename(os.path.normpath(root_d))
    if LOGGER is not None:
        LOGGER.debug(f"compiling the files, save to {save_path}")
    # Part files are named "<order>.tmp_download"; recover the order numbers.
    orders = [int(name.split('.')[0]) for name in os.listdir(root_d)]
    max_order = max(orders)
    # Bug fix: open with 'wb', not 'ab' — appending to a leftover file from an
    # earlier run would silently corrupt the merged result.
    with open(save_path, 'wb') as out:
        for i in range(max_order + 1):
            with open(os.path.join(root_d, f'{i}.tmp_download'), 'rb') as part:
                out.write(part.read())

    # All parts merged; drop the temp directory and its part files.
    shutil.rmtree(root_d)


class DownLoadThread(threading.Thread):
    """One download job: split *url* into byte ranges, fetch every range on a
    pooled worker thread, then stitch the parts into the final file."""

    def __init__(self, url, thread_num=15, LOGGER=LOGGER()):
        """
        :param url: direct link to the file to download
        :param thread_num: desired number of segment-download threads
        :param LOGGER: log sink (NOTE(review): the default is created once at
            import time and shared by all default-constructed instances —
            confirm intended)
        """
        super().__init__()
        self.ThreadPool = ThreadPool(threading.Thread)
        self.url = url
        self.thread_num = thread_num
        self.LOGGER = LOGGER

    def run(self):
        LOGGER = self.LOGGER
        url = self.url
        # Temp-dir name must match what start_download() derives from the URL.
        file_name = url.split('/')[-1].split('?')[0].replace('&', '+')
        file_size = get_file_size(url)
        step = get_step(self.thread_num, file_size)
        parts = split(file_size, step)

        # Bug fix: the bare `except: pass` hid every makedirs failure
        # (permissions, bad path, ...); only an already-existing directory is
        # expected and tolerated.
        os.makedirs(fr'.\.download_temp\{file_name}', exist_ok=True)

        # Shortened URL label for thread names; loop-invariant, so hoisted.
        text = f"...{url[-7:]}" if len(url) > 10 else url
        with self.ThreadPool:
            for order, part in enumerate(parts):
                # Bug fix: the original used a zero-arg lambda that captured
                # `part` late-bound — any thread that began running after the
                # loop advanced saw a later (or the last) segment, so multiple
                # threads could download the same range. Passing args= binds
                # the values at Thread-creation time. enumerate() also
                # replaces the O(n^2) parts.index(part) lookups.
                self.ThreadPool.Add(
                    threading.Thread(
                        target=start_download,
                        args=(url, order, *part),
                        kwargs={'LOGGER': LOGGER},
                        name=f'Download:{text}-order:{order}'
                    )
                )

        compile_file(fr'.\.download_temp\{file_name}', fr'.\{file_name}')
        LOGGER.info("Finish downloading " + file_name)


class ThreadDownload:
    """Background download manager: queue URLs with :meth:`add_url` and a
    dispatcher thread starts up to *thread_num* concurrent DownLoadThread
    jobs, reaping them as they finish."""

    def __init__(self, thread_num=0, LOGGER=LOGGER()):
        """
        :param thread_num: maximum number of files downloaded concurrently;
            0 means "choose automatically" (currently a fixed 15)
        :param LOGGER: log sink (NOTE(review): the default is created once at
            import time and shared by all default-constructed instances —
            confirm intended)
        """
        if thread_num == 0:
            self.auto_thread_make = True
            thread_num = 15
        else:
            self.auto_thread_make = False

        self.Url_List = []
        self.Thread_pool: List[DownLoadThread] = []
        self.download_file_thread = thread_num
        self.LOGGER = LOGGER
        # NOTE(review): this dispatcher thread is non-daemon and loops
        # forever, so the process can never exit normally once a
        # ThreadDownload is constructed — confirm that is intended.
        self.download_thread = threading.Thread(target=self.download, name='DownLoadThread')
        self.download_thread.start()

    def add_url(self, url: str):
        """Queue *url* for download."""
        self.Url_List.append(url)

    def download(self):
        """Dispatch loop: start queued URLs while under the concurrency cap
        and drop finished download threads from the pool."""
        self.LOGGER.info('DOWNLOAD THREAD ACTIVE')
        while True:
            if self.Url_List and len(self.Thread_pool) < self.download_file_thread:
                url = self.Url_List.pop(0)
                self.LOGGER.info(f'download {url}')
                thread = DownLoadThread(url, LOGGER=self.LOGGER)
                thread.start()
                self.Thread_pool.append(thread)
            # Bug fix: the original popped from Thread_pool while iterating
            # it, which skips the element right after each removal; rebuilding
            # the list keeps exactly the live threads.
            self.Thread_pool = [t for t in self.Thread_pool if t.is_alive()]
            # Bug fix: without a pause this loop busy-spins a full CPU core.
            time.sleep(0.1)
