import base64
import hashlib
import os
import pathlib
import string
import sys
import time
import uuid

import ntplib  # NTP time-sync check
import oss2  # Aliyun OSS SDK
from progressbar import *  # progress bar widgets

sys.path.append("..")
from oss_.secret import *
auth = oss2.Auth(ak, sk)
bucket = oss2.Bucket(auth, 'http://oss-cn-chengdu.aliyuncs.com', 'zthubo')

widgets = [Percentage(), ' ', Bar('#'),' ', Timer(),  ' ', ETA(), ' ', FileTransferSpeed()]
pbar=None


def getMd5(file_name, block_size=64 * 1024):
    """计算文件的MD5
    :param file_name: 文件名
    :param block_size: 计算MD5的数据块大小，默认64KB
    :return 文件内容的MD5值
    """
    with open(file_name, 'rb') as f:
        md5 = hashlib.md5()
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)

def calculate_file_crc64(file_name, block_size=64 * 1024, init_crc=0):
    """计算文件的MD5
    :param file_name: 文件名
    :param block_size: 计算MD5的数据块大小，默认64KB
    :return 文件内容的MD5值
    """
    with open(file_name, 'rb') as f:
        crc64 = oss2.utils.Crc64(init_crc)
        while True:
            data = f.read(block_size)
            if not data:
                break
            crc64.update(data)
            
    return crc64.crc
def calculate_file_blake(file_name, block_size=64 * 1024):
    blake = hashlib.blake2b()
    with open(file_name, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            blake.update(data)
    return base64.b64encode(blake.digest())

def getMd5_(file_name, block_size=64 * 1024):
    with open(file_name, 'rb') as f:
        md5 = hashlib.md5()
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
            
    return md5.hexdigest() 
def calculate_data_md5(data):
    """计算数据的MD5
    :param data: 数据
    :return MD5值
    """
    md5 = hashlib.md5()
    md5.update(data)
    return base64.b64encode(md5.digest())

def up():
    for ifile in pathlib.Path(savePath).iterdir():
        if ifile.name not in ignoreList:
            key=keyRoot+ifile.name
            crc_=str(calculate_file_crc64(ifile))
            if bucket.object_exists(key):
                fiedmeta =bucket.head_object(key)
                kcrc=fiedmeta.headers.get('x-oss-hash-crc64ecma')
                for v in fiedmeta.headers:
                    print(v,fiedmeta.headers[v])
                if kcrc==crc_:
                    Kbalke=fiedmeta.headers.get('x-oss-meta-blake')
                    #if Kbalke == calculate_file_blake(ifile).decode() :
                    print("文件{0}相同".format(key))
                    continue
            print("正在上传{}".format(key))
            multipartUpload(ifile,key)
            print(calculate_file_crc64(ifile))
            #upFile(ifile,key)
def multipartUpload(file,key):
    global pbar
    global offset
    total_size = os.path.getsize(file)
    # determine_part_size方法用于确定分片大小。
    # part_size = oss2.determine_part_size(total_size, preferred_size=2 * 1024 * 1024)
    part_size=2*1024*1024
    parts = []
    blake=calculate_file_blake(file)
    upload_id = bucket.init_multipart_upload(key).upload_id
    # progress_callback=percentage
    pbar = ProgressBar(widgets=widgets, maxval=total_size).start()  
    with open(file, 'rb') as fileobj:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            datas=fileobj.read(num_to_upload)
            md5Check = calculate_data_md5(datas)
            result = bucket.upload_part(key, upload_id, part_number,
                                        datas,progress_callback=mpercentage,headers={'Content-MD5': md5Check.decode()})
            parts.append(oss2.models.PartInfo(part_number, result.etag, part_crc = result.crc,size = len(datas)))
            offset += num_to_upload
            part_number += 1    
        bucket.complete_multipart_upload(key, upload_id, parts)
        bucket.update_object_meta(key, headers={'x-oss-meta-blake': blake.decode()})
        #bucket.complete_multipart_upload(key, upload_id, parts)

    pbar.finish()

def mpercentage(consumed_bytes, total_bytes):
    global pbar
    global offset
    pbar.update(offset+int(consumed_bytes))
	
def percentage(consumed_bytes, total_bytes):
    global pbar
    pbar.update(int(consumed_bytes))
	
def checkTime():
    c = ntplib.NTPClient()
    try:
        response = c.request('ntp.ntsc.ac.cn')
        #response = c.request('ntp.aliyun.com')
    except:
        t=input("时间校正失败! [ c 继续] [ q 退出]  \n")
        timeOffset=0
    else:
        t=""
        timeOffset=response.offset
    if(abs(timeOffset) >300):
        print("本机时间:{0}".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        print("校正时间:{0}".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()+timeOffset)) ))
        t=input("本地时间误差过大!  [ c 校正] [ q 退出] [ i 忽略]\n")
    if(t=="q" or t=="Q"):
        exit()
    if(t=="i" or t=="I"):
        timeOffset=0
    return timeOffset

def getShortId():
    array = ["a", "b", "c", "d", "e", "f",
         "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
         "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5",
         "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I",
         "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
         "W", "X", "Y", "Z"]
    id = str(uuid.uuid4()).replace("-", '')
    buffer = []
    for i in range(8):
        start = i * 4
        end = i * 4 + 4
        val = int(id[start:end], 16)
        buffer.append(array[val % 62])
    return "".join(buffer)

def getID():
    array = ["a", "b", "c", "d", "e", "f",
         "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
         "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5",
         "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I",
         "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
         "W", "X", "Y", "Z"]
    id = str(uuid.uuid4()).replace("-", '')
    buffer = []
    '''
    print(id)
    ids = codecs.encode(codecs.decode(id, 'hex'), 'base64').decode().replace("\n","")
    print(ids)
    ##print(codecs.encode(codecs.decode(ids, 'base64'), 'hex').decode())
    '''
    now=time.localtime(time.time())
    for i in range(8):
        start = i * 4
        end = i * 4 + 4
        val = int(id[start:end], 16)
        buffer.append(array[val % 62])
    buffer.append(array[now.tm_year % 62])
    buffer.append(array[now.tm_mon % 62])
    buffer.append(array[now.tm_mday % 62])
    buffer.append(array[now.tm_hour % 62])
    return "".join(buffer)
	
def getfile(savePath,file,key,meta=None):
    global pbar
    print("下载文件 {}".format(file))
    if meta==None:
        meta = bucket.head_object(key)
    pbar = ProgressBar(widgets=widgets, maxval=int(meta.headers.get('Content-Length'))).start()
    #print(os.path.isfile(savePath/'_download.temp'))
    if (os.path.isfile(savePath/'_download.temp')):
        os.remove(savePath/'_download.temp')
    ##bucket.get_object_to_file(key, file, progress_callback=percentage)
    bucket.get_object_to_file(key, savePath/'_download.temp', progress_callback=percentage)
    if (os.path.isfile(file)):
        os.remove(file)
    os.rename(savePath/'_download.temp',file)
    print("\n文件 {} 下载完成".format(file))
def getRList(key,list=[]):
    for obj in oss2.ObjectIteratorV2(bucket, prefix = key, delimiter = '/'):
        if obj.is_prefix():
            getRList(obj.key,list)
        else:
            list.append(obj.key)
    return list
'''
list=getRList("guigugame/")
for i in list :
    print(i)

id=getShortId()
print(id)
timeOffset=checkTime()
ticks = time.time()
realtime = time.asctime( time.localtime(ticks+timeOffset))
print(realtime)

#objectmeta = bucket.head_object("temp.mp4")
#print(objectmeta.headers)
#multipartUpload("D:/syncthing/Camera/VID_20180909_192250.mp4","temp.mp4")
'''