# -*- coding: utf-8 -*-

import json
import logging
import os
import sys
from urllib.parse import urlparse, parse_qs

import requests
import urllib3
from urllib3.exceptions import InsecureRequestWarning
from src.com.ydzy.util.base_util import BaserUtil


class HttpClient:
    """Small convenience wrapper around ``requests``.

    Adds shared header merging, randomized inter-request sleeps (simple
    rate limiting via ``BaserUtil``), and a resumable file download.

    NOTE(review): every request is issued with ``verify=False`` (TLS
    certificate checks disabled). Presumably deliberate for the target
    sites, but it is vulnerable to MITM — confirm before wider reuse.
    """

    def __init__(self):
        # Helper providing sleep_period_time() used to pace requests.
        self.com_client = BaserUtil()

    def init_headers(self, person_heads, headers):
        """Merge *person_heads* into *headers* and return *headers*.

        :param person_heads: optional dict of extra headers; ignored when
            None or empty.
        :param headers: base header dict (mutated in place).
        :return: the merged *headers* dict.
        """
        # Truthiness check covers both None and empty dict
        # (replaces the old `!= None and len(...) > 0` form).
        if person_heads:
            headers.update(person_heads)
        return headers

    def post(self, data=None, url=None, headers=None, fast_falg=False, warn_flag=True):
        """POST *data* as a JSON body to *url*; return the response text.

        :param data: object serialized as the JSON request body.
        :param url: target URL.
        :param headers: optional extra request headers.
        :param fast_falg: when False (default), sleep 1-3 s after the call.
        :param warn_flag: when True, silence InsecureRequestWarning.
        :return: response body decoded as UTF-8 text.
        """
        headers = self.init_headers(headers, {})
        if warn_flag:
            # Silence the InsecureRequestWarning raised by verify=False.
            # (Uses urllib3 directly; requests.packages is a deprecated alias.)
            urllib3.disable_warnings(category=InsecureRequestWarning)
        response = requests.post(url, json=data, headers=headers, verify=False)
        response.encoding = 'utf-8'
        resp_content = response.text
        if not fast_falg:
            # Randomized pause so repeated calls do not hammer the server.
            self.com_client.sleep_period_time(min_value=1, max_value=3)
        return resp_content

    def put(self, data=None, url=None, headers=None, fast_falg=False):
        """PUT *data* as a JSON body to *url*; return the response text.

        :param data: object serialized as the JSON request body.
        :param url: target URL.
        :param headers: optional extra request headers.
        :param fast_falg: when False (default), sleep 3-6 s after the call.
        :return: response body decoded as UTF-8 text.
        """
        headers = self.init_headers(headers, {})
        response = requests.put(url, json=data, headers=headers, verify=False)
        response.encoding = 'utf-8'
        resp_content = response.text
        if not fast_falg:
            self.com_client.sleep_period_time(min_value=3, max_value=6)
        return resp_content

    def get(self, url=None, headers=None, fast_falg=False):
        """GET *url*; return the response text.

        :param url: target URL.
        :param headers: optional extra request headers (default None; the
            old mutable-default ``{}`` was a shared-state bug).
        :param fast_falg: when False (default), sleep 3-6 s after the call.
        :return: response body decoded as UTF-8 text.
        """
        headers = self.init_headers(headers, {})
        resp1 = requests.get(url=url, headers=headers, verify=False)
        resp1.encoding = 'utf-8'
        resp_content = resp1.text
        if not fast_falg:
            self.com_client.sleep_period_time(min_value=3, max_value=6)
        return resp_content

    def down_file(self, url, file_path, console_flag=False, bar_flag=False):
        """Download *url* to *file_path* with resume support.

        Retries up to 10 times; each retry resumes from the bytes already
        on disk via an HTTP ``Range`` request.

        :param url: file URL to download.
        :param file_path: local destination path (appended to on resume).
        :param console_flag: when True, print per-attempt progress lines.
        :param bar_flag: when True, draw an in-place progress bar.
        """
        # Retry counter.
        count = 0
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        # Quiet down requests' own logging.
        logging.getLogger("requests").setLevel(logging.WARNING)
        # Probe request: only the Content-Length header is needed, so close
        # the streamed response immediately to release the connection
        # (the original leaked it).
        r1 = requests.get(url, stream=True, verify=False)
        try:
            total_size = int(r1.headers['Content-Length'])
        finally:
            r1.close()

        # Size already downloaded locally (0 if starting fresh).
        if os.path.exists(file_path):
            temp_size = os.path.getsize(file_path)
        else:
            temp_size = 0

        while count < 10:
            if count != 0:
                # Re-read what actually landed on disk before retrying.
                temp_size = os.path.getsize(file_path)
            # Done once the local file reaches the advertised size.
            if temp_size >= total_size:
                break
            count += 1
            if console_flag:
                print("第[{}]次下载文件,已经下载数据大小:[{}],应下载数据大小:[{}]".format(count, temp_size, total_size), end='\n')
            # Resume from the first missing byte.
            headers = {"Range": f"bytes={temp_size}-{total_size}"}
            # Context manager closes the streamed response (original leaked it).
            with requests.get(url, stream=True, verify=False, headers=headers) as r:
                # "ab" appends; note seek() is a no-op in append mode, so the
                # original `f.seek(temp_size)` was dead code and is dropped.
                with open(file_path, "ab") as f:
                    for chunk in r.iter_content(chunk_size=1024 * 64):
                        if chunk:
                            temp_size += len(chunk)
                            f.write(chunk)
                            f.flush()
                            if bar_flag:
                                # In-place 50-char progress bar.
                                done = int(50 * temp_size / total_size)
                                sys.stdout.write("\r[%s%s] %d%%" % (
                                    '█' * done, ' ' * (50 - done), 100 * temp_size / total_size))
                                sys.stdout.flush()
            print()

    def parse_url(self, url, key=None):
        """Parse the query string of *url*.

        :param url: URL whose query string is parsed.
        :param key: optional parameter name.
        :return: the full ``{name: [values]}`` dict when *key* is None,
            otherwise the first value for *key*.
        :raises KeyError: if *key* is given but absent from the query.
        """
        parsed_url = urlparse(url)
        params = parse_qs(parsed_url.query)
        if key is None:
            return params
        return params[key][0]

if __name__ == "__main__":
    # Manual smoke test: download a sample spreadsheet from the customs
    # site into the current working directory.
    sample_url = 'http://www.customs.gov.cn/customs/resource/cms/2018/11/2018111617235099394.xls'
    client = HttpClient()
    client.down_file(url=sample_url, file_path='./a.xls')