# -*- coding: utf-8 -*-

import logging
import random
import re
import threading
import time
from pathlib import Path

import requests

from py3utils.common_util import get_headers, init_proxies, int_or_zero

"""
The requests library is actually based on urllib. It encapsulates urllib and makes the experience much better. 
Now urllib has been released in version 3. The functions and performance are naturally improved a lot.
Therefore, the latest version of requests is also based on the latest urllib3 package.

In the urllib2 era, the handling of https is very simple. You only need to add verify = False when you request. 
This parameter means to ignore the verification of the https security certificate, that is, to directly request without 
verifying the reliability of the certificate.
This is actually insecure because the certificate can be forged, and the authenticity of the data cannot be guaranteed 
without verification.
As of urllib3, certificate verification for HTTPS is enforced by default: if verification
fails, the request is rejected. A parameter to skip verification still exists, but when it
is used urllib3 emits a prominent warning, which is reasonable.

InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. 
See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning,
"""
import urllib3

urllib3.disable_warnings()
# logging.NullHandler has been part of the stdlib since Python 2.7 / 3.1, and
# this file already requires Python 3.6+ (it uses f-strings below), so the old
# ImportError fallback class was dead code and has been removed.
logger = logging.getLogger('downloader')
# A NullHandler keeps the library silent unless the application configures
# logging itself (avoids "No handlers could be found" noise).
logger.addHandler(logging.NullHandler())


class DownloadWorker(threading.Thread):
    """Worker thread that consumes download jobs from a queue forever.

    A job put on the queue is either a ``(savepath, url, times)`` list/tuple,
    or a dict of the form::

        {"path": str, "url": str, "times": int (optional, default 0),
         "headers": dict (optional), "callback": callable (optional),
         "delay": bool (optional), "delay_range": (lo, hi) (optional)}
    """

    def __init__(self, queue, headers=None, use_proxies=False, try_times=5,
                 rewrite=False, delay=False, timeout=10):
        """
        :param queue: queue.Queue holding download jobs.
        :param headers: default request headers; per-job headers override them.
        :param use_proxies: forwarded to init_proxies() to build self.proxies.
        :param try_times: maximum retry attempts for a single URL.
        :param rewrite: when True, always overwrite an existing file.
        :param delay: when True, sleep a random interval between downloads.
        :param timeout: per-request timeout in seconds.
        """
        super(DownloadWorker, self).__init__()
        self.delay = delay
        self.queue = queue
        self.headers = headers
        self.proxies = init_proxies(use_proxies)
        self.try_times = try_times
        self.rewrite = rewrite
        self.timeout = timeout

    def is_max_try_times(self, times):
        """Return True once ``times`` exceeds the configured retry budget."""
        if times > self.try_times:
            print("@@@@@Try Max Times!")
            return True
        return False

    def run(self):
        """Main loop: pull jobs, download, mark task_done, fire callbacks."""
        while True:
            default_delay_range = (0.1, 0.5)
            callback = None
            headers = None
            args = self.queue.get()
            if isinstance(args, (list, tuple)):
                savepath, picurl, times = args
            elif isinstance(args, dict):
                savepath = args.get("path")
                picurl = args.get("url")
                times = args.get("times", 0)
                callback = args.get("callback")
                if not all([savepath, picurl]):
                    logger.info("Require 'path': {} 'url': {}".format(savepath, picurl))
                    # BUG FIX: task_done() was skipped here, which made
                    # queue.join() block forever on an invalid job.
                    self.queue.task_done()
                    continue
                # Per-job headers, e.g. sites that require a Referer field.
                headers = args.get("headers", {})
                # Optional per-job download delay range.
                if args.get("delay"):
                    default_delay_range = args.get("delay_range") or default_delay_range
            else:
                # BUG FIX: logger.info was called with a stray positional
                # argument, which raised a formatting error inside logging.
                logger.info("Queue Type Error! %s", type(args))
                self.queue.task_done()
                continue
            status = self.downloadfile(picurl, savepath, times, headers=headers)
            if self.delay:
                time.sleep(random.uniform(*default_delay_range))
            self.queue.task_done()
            if status and callable(callback):
                callback()

    def support_continue(self, url):
        """Return True if the server supports HTTP Range requests.

        Side effect: sets ``self.total`` to the full content length
        (0 when the server reports no usable size).
        """
        probe = requests.head(url, headers={'Range': 'bytes=0-4'})
        try:
            crange = probe.headers['content-range']
            self.total = int(re.match(r'^bytes 0-4/(\d+)$', crange).group(1))
            return True
        except (KeyError, AttributeError, ValueError):
            # Missing or malformed Content-Range header: ranges unsupported.
            pass
        try:
            self.total = int(probe.headers['content-length'])
        except (KeyError, ValueError):
            self.total = 0
        return False

    @staticmethod
    def write_content(savepath, response, chunk_size=512):
        """Stream the response body to ``savepath`` in ``chunk_size`` pieces."""
        with open(savepath, 'wb') as f:
            for chunk in response.iter_content(chunk_size=chunk_size):
                f.write(chunk)

    def downloadfile(self, picurl, savepath, times, headers=None):
        """Download ``picurl`` to ``savepath``, retrying up to self.try_times.

        :return: True when new content was written; False when the file was
                 already complete or the download ultimately failed.
        """
        result_status = False
        request_headers = get_headers(headers) if headers else get_headers(self.headers)
        save_path = Path(savepath)
        logger.debug("Request Headers:{}".format(request_headers))
        logger.debug("Request Proxies:{}".format(self.proxies))
        current_file_size = 0
        if save_path.exists():
            current_file_size = save_path.stat().st_size
        else:
            save_path.touch()
        # Connection-level retry count for the underlying pool.
        requests.adapters.DEFAULT_RETRIES = 15
        s = requests.session()
        s.keep_alive = False
        try:
            # BUG FIX: the session is now actually used for the request
            # (the original created it and then called requests.get()).
            response = s.get(url=picurl, stream=True, headers=request_headers,
                             proxies=self.proxies, timeout=self.timeout, verify=False)
            try:
                if response.status_code != 200:
                    logger.error(f"status_code:{response.status_code} err! text:{response.text}")
                    return False
                logger.info("[ {} ] [ downloading --> {} ]".format(self.name, picurl))
                if self.rewrite:
                    self.write_content(save_path, response)
                    result_status = True
                else:
                    actual_file_size = int_or_zero(response.headers.get("Content-Length", 0))
                    logger.info(f"[ {self.name} ] current_file_size {current_file_size} actual_file_size {actual_file_size}")
                    if current_file_size == actual_file_size:
                        logger.info(f"[ {self.name} ] [ {save_path} Already Download Over! ]")
                    else:
                        self.write_content(save_path, response)
                        result_status = True
            finally:
                # BUG FIX: the response is now closed even on the non-200
                # early return and on exceptions, not just on success.
                response.close()
        except Exception as e:
            line = "*-" * 50
            info = "{}\r\n" \
                   "{} Try {} Times Download: {}\r\n" \
                   "Error: {}\r\n" \
                   "File Path: {}\r\n" \
                   "Retry!!\r\n" \
                   "{}".format(line, self.name, times, picurl, e, savepath, line)
            logger.info(info)
            if self.is_max_try_times(times):
                logger.info("[ {} ] <--- Download Failed! {}".format(self.name, picurl))
                return False
            # BUG FIX: propagate the retry's result; the original discarded it
            # and always returned False after any retry, even a successful one.
            return self.downloadfile(picurl, save_path, times + 1, headers)
        else:
            logger.info("[ {} ] <--- Download Success! {}".format(self.name, picurl))
        finally:
            s.close()
        return result_status

if __name__ == '__main__':
    from queue import Queue

    queue = Queue()
    worker = DownloadWorker(queue, use_proxies=True, try_times=1000)
    # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute.
    worker.daemon = True
    worker.start()
    queue.put(
        {
            "path": '1.ts',
            "url": "https://cdn.cdn-xxx.com/videos/20200206/zCFxSjNQ/500kb/hls/bSxrQSn9.ts",
            "headers": {
                # "Referer": "https://twitter.com/home",
                # "origin": "https://twitter.com",
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 dingtalk-win/1.0.0 nw(0.14.7) DingTalk(4.6.39-Release.10) Mojo/1.0.0 Native AppType(release)',
            }
        }
    )
    # Block until every queued job has been marked task_done().
    queue.join()
