# from __future__ import unicode_literals

import pickle
from multiprocessing.dummy import Pool as ThreadPool
import threading

import os
import sys
from collections import namedtuple
# import urllib2
# from urlparse import urlsplit

import time

# global lock
import requests

from Block import Block

lock = threading.Lock()

# default parameters
defaults = {
    'thread_count': 10,          # worker threads
    'buffer_size': 500 * 1024,   # bytes per streamed read chunk
    'block_size': 1000 * 1024,   # single-block threshold (see download())
}


def progress(percent, speed, width=50):
    """Print a one-line text progress bar.

    Args:
        percent: completion percentage (0-100; may be a float, shown as int).
        speed: current speed in MB/s (printed verbatim).
        width: character width of the bar area.
    """
    bar = '=' * int(width * percent / 100) + '>'
    print("%s %d%% %s M/S" % (bar.ljust(width), percent, speed))
    if percent >= 100:
        # fix: the original did print(sys.stdout.flush()), which printed a
        # stray "None" line on completion; just flush.
        sys.stdout.flush()


def write_data(filepath, data):
    """Pickle *data* to *filepath*, overwriting any existing file."""
    with open(filepath, 'wb') as fh:
        pickle.dump(data, fh)


def read_data(filepath):
    """Unpickle and return the object stored at *filepath*.

    NOTE: pickle is only safe on files this program wrote itself
    (the '.inf' resume files).
    """
    with open(filepath, 'rb') as fh:
        return pickle.load(fh)


FileInfo = namedtuple('FileInfo', 'url name size lastmodified')


def get_file_info(url):
    """HEAD *url* and return FileInfo(url, name, size, lastmodified).

    The filename comes from the Content-Disposition header when present,
    otherwise from the last path segment of the URL.
    """
    # Follow redirects so size/name describe the final resource, matching
    # the requests.get() used for the actual download (which follows them).
    headers = dict(requests.head(url, allow_redirects=True).headers)
    size = int(headers.get('Content-Length', 0))
    lastmodified = headers.get('Last-Modified', '')
    name = None
    if 'Content-Disposition' in headers:
        # e.g. 'attachment; filename="foo.iso"' — take what follows
        # 'filename=' and strip surrounding quotes of either kind.
        # fix: the original checked only name[0] for a quote but stripped
        # both ends, and raised IndexError on an empty filename value.
        name = headers['Content-Disposition'].split('filename=')[1]
        name = name.strip().strip('"\'')
    if not name:
        # fall back to the URL path segment
        name = os.path.basename(url.split('/')[-1])

    return FileInfo(url, name, size, lastmodified)


def download(url, output,
             thread_count=defaults['thread_count'],
             buffer_size=defaults['buffer_size'],
             block_size=defaults['block_size']):
    """Download *url* to *output* with resumable, multi-threaded range requests.

    Args:
        url: source URL; HEAD-ed first for size / name / last-modified.
        output: destination path, or None to use the server-supplied name.
        thread_count: number of worker threads (and blocks for large files).
        buffer_size: chunk size handed to each worker's streamed read.
        block_size: threshold below which the file is fetched as one block.
            NOTE(review): this parameter is shadowed below when the file is
            split, so it only ever acts as a threshold — confirm whether a
            true per-block size was intended.

    Side effects: creates '<output>.ing' (work file) and '<output>.inf'
    (pickled resume info); renames/removes them on success.
    """
    # get latest file info
    file_info = get_file_info(url)
    # init path
    if output is None:
        output = file_info.name
    workpath = '%s.ing' % output  # partial download is written here
    infopath = '%s.inf' % output  # pickled (FileInfo, blocks) for resume

    # split file to blocks. every block is a array [start, offset, end],
    # then each greenlet download filepart according to a block, and
    # update the block' offset.
    blocks = []

    if os.path.exists(infopath):
        # load blocks
        _x, blocks = read_data(infopath)

        # discard saved state if the remote file changed since the last run
        if (_x.url != url or
                _x.name != file_info.name or
                _x.lastmodified != file_info.lastmodified):
            blocks = []

    if len(blocks) == 0:
        # set blocks
        if block_size > file_info.size:
            # small file: a single block covering the whole file
            blocks = [Block(0, 0, file_info.size, 0)]
        else:
            # evenly split across threads; `block_size` is re-bound here
            # (shadowing the parameter) to the per-thread span
            block_size, remain = divmod(file_info.size, thread_count)
            # Block(init offset, current offset, inclusive end, speed) —
            # assumed from usage in _worker/_monitor; TODO confirm in Block.py
            blocks = [Block(i * block_size, i * block_size,
                            (i + 1) * block_size - 1, 0) for i in range(thread_count)]
            # last block absorbs the division remainder
            blocks[-1].size += remain
        # create new blank workpath
        # NOTE(review): if the .inf file exists but the .ing file does not,
        # this branch is skipped and the open('rb+') below will fail.
        f = open(workpath, 'w')
        f.close()

    print('Downloading %s' % url)
    # start monitor
    # NOTE(review): non-daemon thread — if a worker dies the monitor can
    # keep the process alive; consider daemon=True.
    threading.Thread(target=_monitor, args=(infopath, file_info, blocks)).start()

    # start downloading
    with open(workpath, 'rb+') as fobj:
        # only blocks that still have bytes left to fetch
        args = [(url, block, fobj, buffer_size) for block in blocks if block.nowSize < block.size]
        if thread_count > len(args):
            thread_count = len(args)

        pool = ThreadPool(thread_count)
        pool.map(_worker, args)
        pool.close()
        pool.join()

    # rename workpath to output
    if os.path.exists(output):
        os.remove(output)
    os.rename(workpath, output)

    # delete infopath
    print('# delete infopath')
    print(os.path.exists(infopath))
    if os.path.exists(infopath):
        os.remove(infopath)

    # assert all([block[1] >= block[2] for block in blocks]) is True


def _worker(parm):
    """Download one block of the file.

    Args:
        parm: tuple (url, block, fobj, buffer_size); a tuple because it is
            delivered through ThreadPool.map.

    Writes each received chunk at block.nowSize under the global lock and
    advances block.nowSize / block.speed as it goes.
    """
    url, block, fobj, buffer_size = parm
    # NOTE(review): block.size appears to be the inclusive END offset of
    # this block (see how blocks are built in download()); the Range header
    # below relies on that.
    headers = {"Range": "bytes=%s-%s" % (block.nowSize, block.size)}
    res = requests.get(url, headers=headers, stream=True)
    # fix: the original left `speed` unbound when the first chunk arrived
    # with end_time == d_start_time, raising NameError at block.speed = speed
    speed = '0.00'
    d_start_time = time.time()
    for content in res.iter_content(chunk_size=buffer_size):
        with lock:
            fobj.seek(block.nowSize)
            fobj.write(content)
            fobj.flush()
            end_time = time.time()
            if end_time > d_start_time:
                # MB/s for this chunk; guarded against a zero interval
                speed = '{:.2f}'.format(len(content) / (1024 ** 2) / (end_time - d_start_time))
            block.speed = speed
            d_start_time = end_time
            block.nowSize += len(content)


def _monitor(infopath, file_info, blocks):
    """Periodically print progress and checkpoint block state to *infopath*.

    Runs in its own thread; exits once the reported percentage reaches 100.
    """
    while 1:
        with lock:
            # NOTE(review): subtracting initSize means a resumed download
            # reports only bytes fetched this session against the full file
            # size — confirm intended semantics against Block.py.
            count = sum(block.nowSize - block.initSize for block in blocks)
            # guard zero-length files against ZeroDivisionError
            percent = count * 100 / file_info.size if file_info.size else 100
            speeds = [float(block.speed) for block in blocks]
            progress(percent, round(sum(speeds), 2))
            if percent >= 100:
                break
            # snapshot resume info while still holding the lock so the
            # recorded offsets are mutually consistent
            write_data(infopath, (file_info, blocks))
        # fix: the original slept INSIDE the `with lock` block, stalling
        # every worker for 2 seconds per monitor iteration
        time.sleep(2)


if __name__ == '__main__':
    import argparse

    arg_parser = argparse.ArgumentParser(description='多线程文件下载器.')
    arg_parser.add_argument('url', type=str, help='下载连接')
    arg_parser.add_argument('-o', type=str, default=None, dest="output",
                            help='输出文件')
    arg_parser.add_argument('-t', type=int, default=defaults['thread_count'],
                            dest="thread_count", help='下载的线程数量')
    arg_parser.add_argument('-b', type=int, default=defaults['buffer_size'],
                            dest="buffer_size", help='缓存大小')
    arg_parser.add_argument('-s', type=int, default=defaults['block_size'],
                            dest="block_size", help='字区大小')

    # Fall back to a local test URL when no CLI arguments were given.
    cli_argv = sys.argv[1:] or [
        'http://localhost/download/ubuntu-18.04.4-desktop-amd64.iso']

    options = arg_parser.parse_args(cli_argv)

    started = time.time()
    download(options.url, options.output, options.thread_count,
             options.buffer_size, options.block_size)
    print('下载时间: %ds' % int(time.time() - started))
