import urllib.request
import urllib.parse
import requests
import os
import time
import chardet
import sys
import time

def savehtml(page_url, basedir, html_cont):
    """Save *html_cont* to disk under *basedir*, mirroring the URL's path.

    The host prefix (http://www.81.cn or http://tv.81.cn) is stripped from
    *page_url*; the remaining path becomes the file path relative to
    *basedir*.  Intermediate directories are created as needed and the file
    is written as UTF-8.
    """
    filepath = page_url.replace('http://www.81.cn', '')
    filepath = filepath.replace('http://tv.81.cn', '')
    filepath = basedir + '/' + filepath
    # exist_ok avoids the check-then-create race of the original code
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    # drop literal carriage-return entities left in the scraped markup
    html = html_cont.replace('&#13;', '')
    # context manager guarantees the handle is closed even if write() raises
    with open(filepath, 'w', encoding="utf-8") as f:
        f.write(html)

def getsavedir(page_url, basedir):
    """Return the local directory that mirrors *page_url* under *basedir*.

    The host prefix (http://www.81.cn or http://tv.81.cn) is stripped from
    *page_url*; the directory part of the remaining path, joined under
    *basedir*, is created if missing and returned.
    """
    filepath = page_url.replace('http://www.81.cn', '')
    filepath = filepath.replace('http://tv.81.cn', '')
    filepath = basedir + '/' + filepath
    savedir = os.path.dirname(filepath)
    # exist_ok avoids the check-then-create race of the original code
    os.makedirs(savedir, exist_ok=True)
    return savedir

def downloadhtml(url):
    """Fetch *url* and return its HTML decoded to str, or None on failure.

    Returns None when *url* is None, the HTTP status is not 200, or the
    response body is empty.  The character encoding is sniffed with chardet,
    falling back to UTF-8 when detection fails.
    """
    if url is None:
        return None
    r = requests.get(url)
    if r.status_code != 200:
        return None
    htmlcontent = r.content.strip()
    # BUG FIX: the original compared bytes to '' (str), which is always
    # False, so empty bodies were passed on to chardet/decode.
    if not htmlcontent:
        return None
    char = chardet.detect(htmlcontent)
    # chardet may return encoding=None when it cannot guess; fall back
    # to UTF-8 and replace undecodable bytes instead of raising.
    encoding = char['encoding'] or 'utf-8'
    return htmlcontent.decode(encoding, errors='replace')


def downloadfile(new_full_url, basedir):
    """Download *new_full_url* into *basedir*, mirroring the URL path.

    The host prefix (http://www.81.cn or http://tv.81.cn) is stripped to
    build the local path; the file name is taken from the URL path (query
    strings are ignored).  Files already on disk are not re-fetched.

    Returns the local file path, or '' on any failure (best-effort
    contract: callers treat '' as "download failed").
    """
    try:
        relpath = new_full_url.replace('http://www.81.cn', '')
        relpath = relpath.replace('http://tv.81.cn', '')
        # urlparse(...).path drops any query string before basename
        filename = os.path.basename(urllib.parse.urlparse(new_full_url).path)
        filepath = basedir + '/' + os.path.dirname(relpath) + '/' + filename
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        if not os.path.isfile(filepath):
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;WOW64;rv:23.0) Gecko/20100101 Firefox/23.0'}
            req = urllib.request.Request(url=new_full_url, headers=headers)
            # close the response even if read() raises (original leaked it)
            with urllib.request.urlopen(req) as webpage:
                content = webpage.read()
            with open(filepath, 'wb') as fp:
                fp.write(content)
        return filepath
    except Exception:
        return ''


def downfile2(srcUrl, localFile):
    """Stream *srcUrl* to *localFile*, printing progress to stdout.

    Downloads in 8 KiB chunks, printing the advertised content length up
    front, a running speed/size line per chunk, and a final time/speed
    summary.
    """
    startTime = time.time()
    with requests.get(srcUrl, stream=True) as r:
        # BUG FIX: servers using chunked transfer omit Content-Length; the
        # original raised KeyError here.  Default to 0 (unknown size).
        contentLength = int(r.headers.get('content-length', 0))
        line = 'content-length: %dB/ %.2fKB/ %.2fMB'
        line = line % (contentLength, contentLength / 1024, contentLength / 1024 / 1024)
        print(line)
        downSize = 0
        with open(localFile, 'wb') as f:
            for chunk in r.iter_content(8192):
                if chunk:
                    f.write(chunk)
                    downSize += len(chunk)
                # BUG FIX: elapsed time can be 0.0 on the first chunk,
                # which made the speed computation divide by zero.
                elapsed = max(time.time() - startTime, 1e-6)
                line = '\r%d KB/s - %.2f MB， 共 %.2f MB'
                line = line % (
                    downSize / 1024 / elapsed, downSize / 1024 / 1024, contentLength / 1024 / 1024)
                print(line, end=" ")
                # only break early when the size is actually known
                if contentLength and downSize >= contentLength:
                    break
        timeCost = max(time.time() - startTime, 1e-6)
        line = '共耗时: %.2f s, 平均速度: %.2f KB/s'
        line = line % (timeCost, downSize / 1024 / timeCost)
        print(line)

def downfile3(srcUrl, localFile):
    """Download *srcUrl* to *localFile* via urlretrieve.

    Progress is reported through the module-level Schedule reporthook.
    (The unused start_time local from the original was removed.)
    """
    urllib.request.urlretrieve(srcUrl, localFile, Schedule)


def Schedule(blocknum, blocksize, totalsize):
    """urlretrieve reporthook: render a one-line text progress bar.

    blocknum  -- number of blocks transferred so far
    blocksize -- block size in bytes
    totalsize -- total file size in bytes (urlretrieve passes -1 if unknown)
    """
    # BUG FIX: urlretrieve passes totalsize=-1 when the server does not
    # report a size; the original divided by it (and by 0) unguarded.
    if totalsize <= 0:
        return
    recv_size = blocknum * blocksize
    # clamp: the final block usually overshoots the exact total
    percent = min(recv_size / totalsize, 1.0)
    percent_str = "%.2f%%" % (percent * 100)
    # BUG FIX: the bar string was computed but never printed in the original
    bar = ('#' * round(percent * 50)).ljust(50, '-')
    f = sys.stdout
    # leading \r so each update overwrites the previous progress line
    f.write('\r' + bar + ' ' + percent_str.ljust(8, ' '))
    f.flush()


def format_size(bytes):
    """Format a byte count as a human-readable K/M/G string.

    Returns e.g. "1.500K", "2.000M", "3.250G".  If *bytes* cannot be
    converted to a number, prints a warning and returns "Error".
    """
    try:
        kb = float(bytes) / 1024
    # BUG FIX: bare except also caught KeyboardInterrupt/SystemExit;
    # only conversion failures are expected here.
    except (TypeError, ValueError):
        print("传入的字节格式不对")
        return "Error"
    if kb < 1024:
        return "%.3fK" % kb
    mb = kb / 1024
    if mb < 1024:
        return "%.3fM" % mb
    return "%.3fG" % (mb / 1024)