# -*- coding: utf-8 -*-

"""
zjl_util.models
~~~~~~~~~~~~~~~

This module contains some utility functions written by zjl
"""
import os
import ssl
import time
import urllib
import urllib.parse
import urllib.request

import requests
# from hyper.contrib import HTTP20Adapter

# 刷新已下载大小时间间隔 单位：秒
interval = 1
ssl._create_default_https_context = ssl._create_unverified_context

proxies = {'http': "test2.carfunny.com.cn:50000", 'https': "test2.carfunny.com.cn:50000"}

def download(down_url, file_path, header=None):
    """Download a file with a streaming GET, printing progress while it runs.

    :param down_url: URL of the resource to download
    :param file_path: path the downloaded file is saved to
    :param header: optional HTTP headers for the request; a browser-like
        default is used when omitted
    :raises requests.HTTPError: when the server answers with an error status
    """
    if header is None:
        # Built fresh on every call: a mutable default argument would be a
        # single dict shared across calls and could be silently mutated.
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
        }

    def _human(num_bytes):
        # Render a byte count as a short human-readable string (bytes/K/M).
        m = 1024 * 1024
        k = 1024
        if num_bytes > m:
            return "%.2fM" % (num_bytes / m)
        if num_bytes > k:
            return "%.2fK" % (num_bytes / k)
        return str(num_bytes)

    t1 = time.time()
    down_data_count = 0  # total bytes written so far
    speed_count = 0      # bytes written since the last progress print
    size_str = ''
    # Issue the request BEFORE opening the output file, so a failed request
    # does not leave an empty file behind; raise_for_status turns an HTTP
    # error into an exception instead of silently saving the error page.
    with requests.get(down_url, stream=True, headers=header, timeout=20) as r:
        r.raise_for_status()
        with open(file_path, 'wb') as f:
            for chunk in r.iter_content(1024):
                data_length = len(chunk)
                down_data_count += data_length
                speed_count += data_length
                f.write(chunk)

                t2 = time.time()
                # `interval` is the module-level refresh period in seconds.
                if t2 - t1 > interval:
                    size_str = _human(down_data_count)
                    print(size_str + "\t" + _human(speed_count) + "/s")
                    t1 = t2
                    speed_count = 0
    print(file_path + '\t\t' + size_str)


def merge_ts(ts_file_paths, result_file_path):
    """Concatenate .ts segment files, in order, into one output file.

    :param ts_file_paths: ordered iterable of segment file paths
    :param result_file_path: path of the merged output file; missing parent
        directories are created automatically
    """
    # os.path.dirname is portable across '/' and '\\' separators and returns
    # '' (instead of raising ValueError) when the path has no directory
    # component, unlike the previous rindex('/') approach.
    result_file_dir = os.path.dirname(result_file_path)
    if result_file_dir and not os.path.exists(result_file_dir):
        os.makedirs(result_file_dir)
    with open(result_file_path, 'wb') as result_file:
        for ts_file_path in ts_file_paths:
            with open(ts_file_path, 'rb') as ts_file:
                result_file.write(ts_file.read())


def get_doc(url, try_count=4):
    """GET *url* through the module-level ``proxies`` and return the body.

    Installs a process-wide urllib opener configured with the proxy (a
    module-level side effect affecting every later ``urlopen`` call), fetches
    the page and decodes it as UTF-8, ignoring undecodable bytes.  On any
    error the request is retried recursively with an increasing back-off;
    once all retries are exhausted the last exception is re-raised.

    :param url: URL to request
    :param try_count: number of retries remaining on error
    :return: page body as str
    :raises Exception: the last request error, after all retries fail
    """
    try:
        proxy_support = urllib.request.ProxyHandler(proxies)
        # Build a new opener that routes through the proxy ...
        opener = urllib.request.build_opener(proxy_support)
        # ... and install it process-wide.
        urllib.request.install_opener(opener)
        doc = urllib.request.urlopen(url, timeout=20).read().decode("utf-8", "ignore")
        return doc
    except Exception as e:
        print("request error url:" + url)
        print(e)
        print('-------\t重试请求  ' + url + '  \t-------')
        try_count -= 1
        if try_count < 0:
            print('[error] 多次重试后失败, 抛出错误')
            raise
        else:
            # Back-off grows as retries are used up: 35, 55, 75, 95 seconds
            # for the default try_count=4.
            time.sleep(95 - (try_count * 20))
            return get_doc(url, try_count)


def get_doc3(url, headers: dict, try_count=4):
    """GET *url* with explicit *headers* via requests and return the body text.

    On any error the request is retried recursively with an increasing
    back-off; once all retries are exhausted the last exception is re-raised.

    :param url: URL to request
    :param headers: HTTP headers sent with the request
    :param try_count: number of retries remaining on error
    :return: response body as text
    :raises Exception: the last request error, after all retries fail
    """
    try:
        session = requests.session()
        # A timeout keeps a stalled server from blocking forever; a timeout
        # error is caught below and retried like any other request error.
        res = session.get(url, headers=headers, timeout=20)
        return res.text
    except Exception as e:
        print("request error url:" + url)
        print(e)
        print('-------\t重试请求  ' + url + '  \t-------')
        try_count -= 1
        if try_count < 0:
            print('[error] 多次重试后失败, 抛出错误')
            raise
        else:
            # Back-off grows as retries are used up: 35, 55, 75, 95 seconds
            # for the default try_count=4.
            time.sleep(95 - (try_count * 20))
            return get_doc3(url, headers, try_count)
