import os
import oss2
import sys
import signal
import threading
import hashlib


class SingletonThreadSafe(type):
    """Metaclass that turns every class using it into a thread-safe singleton.

    Uses double-checked locking: the lock is only taken on the first
    construction of each class, so subsequent lookups are lock-free.
    """

    _instances = {}
    _singleton_lock = threading.Lock()

    def __call__(cls, *args, **kwargs):
        instance = cls._instances.get(cls)
        if instance is None:
            with cls._singleton_lock:
                # Re-check under the lock: another thread may have won the race.
                instance = cls._instances.get(cls)
                if instance is None:
                    instance = super().__call__(*args, **kwargs)
                    cls._instances[cls] = instance
        return instance


def root_dir():
    """Return the absolute path of the project root.

    The root is the parent of the directory containing this file;
    symlinks are resolved via ``os.path.realpath`` first.
    """
    # dirname(...) replaces os.path.split(...)[0]; the local was renamed
    # from 'dir', which shadowed the builtin of the same name.
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.abspath(os.path.join(here, '..'))


def make_dirs(dir):
    """Create directory *dir* (including parents) if it does not exist.

    ``exist_ok=True`` makes the call idempotent and removes the
    check-then-create race (TOCTOU) the original exists()/makedirs
    pair had under concurrent callers.
    """
    os.makedirs(dir, exist_ok=True)


def GetFileMd5(filename):
    """Return the hex MD5 digest of *filename*.

    Returns None when *filename* is not a regular file. The file is
    read in fixed-size chunks so arbitrarily large files never get
    loaded into memory at once.
    """
    if not os.path.isfile(filename):
        return None

    digest = hashlib.md5()
    # 'with' guarantees the descriptor is closed even if read() raises;
    # the original open()/close() pair leaked the handle on error.
    # Chunk size changed from the odd 8096 to the conventional 8192.
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()


def GetFileSize(filename):
    """Return the size of *filename* formatted in megabytes, e.g. '1.00M'.

    Returns None when *filename* is not a regular file.
    """
    if not os.path.isfile(filename):
        return None

    size_bytes = os.path.getsize(filename)
    megabytes = size_bytes / 1024 / 1024
    return '%.2fM' % megabytes


def oss_upload(config, key, file_path):
    """Upload *file_path* to Aliyun OSS under object name *key*.

    *config* must supply 'accessKeyId', 'accessKeySecret',
    'securityToken' (STS credentials), 'region' and 'bucket'.
    The file is sent as a multipart upload; per-part progress is
    printed to stdout on a single rewritten line.
    """
    credentials = oss2.StsAuth(config['accessKeyId'], config['accessKeySecret'], config['securityToken'])
    endpoint = 'https://{}.aliyuncs.com'.format(config['region'])
    bucket = oss2.Bucket(credentials, endpoint, config['bucket'])

    file_size = os.path.getsize(file_path)
    chunk_size = oss2.determine_part_size(file_size, preferred_size=10*1024 * 1024)

    # Open a multipart upload session; parts are committed at the end.
    upload_id = bucket.init_multipart_upload(key).upload_id
    uploaded_parts = []

    with open(file_path, 'rb') as stream:
        sent = 0
        part_no = 0
        while sent < file_size:
            part_no += 1
            this_chunk = min(chunk_size, file_size - sent)
            # SizedFileAdapter limits the read to this_chunk bytes,
            # continuing from the file object's current position.
            result = bucket.upload_part(key, upload_id, part_no, oss2.SizedFileAdapter(stream, this_chunk))
            uploaded_parts.append(oss2.models.PartInfo(part_no, result.etag))
            sent += this_chunk

            percent = int(100 * (float(sent) / float(file_size)))
            print('\r文件上传存储服务器进度: {0}%'.format(percent), end='')

    bucket.complete_multipart_upload(key, upload_id, uploaded_parts)

