from queue import Queue
import requests
import threading
import time
import os
import redis
import json
import sys
import urllib

# download settings
# url = "https://downali.game.uc.cn/s1/5/13/20230313100047_0230311000848_23F95D8C229ABF0A904F48033AEEA773.apk"
# filename = "G:/apply/demo.apk"
thread_count = 16  # number of concurrent download worker threads
copies_count = thread_count * 4  # number of byte-range chunks each file is split into
dir = 'G:/website-uniapp/soft/'  # download target directory (NOTE: shadows the `dir` builtin)
# Suppress urllib3 InsecureRequestWarning (all requests below use verify=False)
requests.packages.urllib3.disable_warnings()


class DownloadThread(threading.Thread):
    """Worker thread for one multi-threaded download.

    Repeatedly takes a ``[chunk_index, "start-end"]`` task from the shared
    queue, fetches that byte range of ``url`` and writes it to
    ``files/<chunk_index>.tmp``.
    """

    def __init__(self, bytes_queue: Queue, url):
        super().__init__(daemon=True)
        # Shared, pre-filled queue of [chunk_index, "start-end"] range tasks.
        self.bytes_queue = bytes_queue
        self.url = url

    def run(self):
        # Local import: the module top only does `from queue import Queue`.
        from queue import Empty

        while True:
            # BUG FIX: the original tested `queue.empty()` and then called the
            # blocking `get()`. With several workers, two threads could both
            # observe one remaining item; the loser would block forever.
            # get_nowait() makes the check-and-take atomic.
            try:
                bytes_range = self.bytes_queue.get_nowait()
            except Empty:
                return
            headers = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36 Edg/92.0.902.84",
                "Range": "bytes={}".format(bytes_range[1])
            }
            try:
                response = requests.get(self.url, headers=headers, verify=False, timeout=30)
                with open("files/{}.tmp".format(bytes_range[0]), "wb") as f:
                    f.write(response.content)
            except Exception:
                # Preserve original behavior: this worker gives up on the first
                # failure; main() later detects the size mismatch and reports it.
                return


def get_file_size(url) -> int:
    """Return the remote file size in bytes via a HEAD request, or 0 on any failure.

    Callers treat 0 as "size unknown / download failed", so every error is
    deliberately collapsed into that sentinel (best-effort behavior).
    """
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36 Edg/92.0.902.84",
        }
        # BUG FIX: requests.head() does NOT follow redirects by default, so a
        # redirected download URL reported a missing/zero Content-Length.
        response = requests.head(url, headers=headers, verify=False, timeout=30,
                                 allow_redirects=True)
        file_length = int(response.headers['Content-Length'])
    except Exception:
        # Missing header, non-numeric value, network error, timeout, ... -> 0
        file_length = 0
    return file_length


def get_thread_download(file_length, copies=None) -> Queue:
    """Split *file_length* bytes into contiguous HTTP Range chunk tasks.

    Returns a Queue of ``[chunk_index, "start-end"]`` items (the original
    annotation said ``list`` but a Queue was always returned). *copies*
    defaults to the module-level ``copies_count`` for backward compatibility.

    BUG FIXES vs. original:
    - HTTP Range end indices are inclusive, so the final chunk now ends at
      ``file_length - 1`` instead of ``file_length`` (the original overshot by
      one byte and relied on the server clamping it).
    - The first chunk was a degenerate single byte ("0-0"); chunks are now
      evenly sized, with the last chunk absorbing the remainder.
    """
    if copies is None:
        copies = copies_count

    ranges = Queue(copies)
    step = file_length // copies
    for i in range(copies):
        start = i * step
        end = file_length - 1 if i == copies - 1 else (i + 1) * step - 1
        ranges.put([i, "{}-{}".format(start, end)])
    return ranges


def create_threading(bytes_queue, url):
    """Run `thread_count` DownloadThread workers over the shared task queue
    and block until every worker has finished.
    """
    workers = [DownloadThread(bytes_queue, url) for _ in range(thread_count)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


def composite_file(filename, copies=None):
    """Concatenate ``files/0.tmp`` .. ``files/{copies-1}.tmp`` (in order) into
    *filename*, then delete everything in the ``files`` temp directory.

    *copies* defaults to the module-level ``copies_count`` (backward
    compatible with the original zero-extra-argument call).
    Returns False on any failure (best-effort, matching original behavior);
    None on success.
    """
    if copies is None:
        copies = copies_count

    # Start from a clean slate so the append-mode open below builds the file
    # from scratch.
    if os.path.isfile(filename):
        os.remove(filename)
    # Guards against missing temp chunks (e.g. a worker thread failed).
    try:
        with open(filename, "ab") as out:
            for i in range(copies):
                with open("files/{}.tmp".format(i), "rb") as part:
                    out.write(part.read())

        # Clean up all temp chunks once the file is assembled.
        for name in os.listdir("files"):
            os.remove("files/{}".format(name))
    except Exception:
        return False


def hum_convert(value):
    """Convert a byte count into a human-readable string, e.g. 2048 -> "2.00KB"."""
    units = ["B", "KB", "MB", "GB", "TB", "PB"]
    size = 1024.0
    for unit in units:
        if (value / size) < 1:
            return "%.2f%s" % (value, unit)
        value = value / size
    # BUG FIX: the original fell off the loop and returned None for values
    # >= 1024**6; report such values in the largest unit instead.
    return "%.2f%s" % (value * size, units[-1])


# Fetch the pending download task list from redis
def get_redis_data(name=''):
    """Read the JSON-encoded value stored under *name* in redis and return it
    decoded (expected to be the list of download tasks used by main()).

    SECURITY NOTE(review): the redis host, port and password are hard-coded
    here — they should be moved to configuration / environment variables.
    """
    redis_db = redis.Redis(host='98.82.254.89', port=6379, db=5, password='953058009',
                           decode_responses=True, charset='UTF-8', encoding='UTF-8')
    # Renamed from `list` to avoid shadowing the builtin.
    raw = redis_db.get(name)
    return json.loads(raw)


# Recursively collect every file path under a directory
def get_file(dirname=None):
    """Return a list of all file paths below *dirname* (recursively).

    Generalized: *dirname* defaults to the module-level download directory
    ``dir``, so the original zero-argument call is unchanged.
    Exits the whole process if the directory does not exist (original behavior).
    """
    if dirname is None:
        dirname = dir
    if not os.path.exists(dirname):
        print('该文件夹不存在')
        sys.exit()

    file_list = []
    # os.walk descends into every subdirectory
    for root, dirs, files in os.walk(dirname):
        for file in files:
            file_list.append(os.path.join(root, file))
    return file_list


# Fallback: single-shot (non-chunked) re-download
def repetition(url='http://www.example.com/file.txt', filename='/path/to/save/file.txt'):
    """Download *url* to *filename* in one request via urllib.

    Generalized: the previously hard-coded URL and path are now defaulted
    parameters, so existing zero-argument calls behave identically.

    BUG FIX: the module top only does ``import urllib``, which does not make
    the ``urllib.request`` submodule available as an attribute; import it
    explicitly here.
    """
    import urllib.request
    urllib.request.urlretrieve(url, filename)


# Report a download result to the local PHP API for analysis
def send(filename, status):
    """Notify the PHP endpoint of a download result and return its JSON reply.

    :param filename: name of the processed file
    :param status: result code (1 = success, 2 = failure, per main())

    BUG FIX: the query string was built by raw concatenation, so filenames
    containing spaces, '&' or non-ASCII characters produced broken URLs.
    Passing ``params=`` lets requests URL-encode them correctly.
    """
    requests.packages.urllib3.disable_warnings()
    response = requests.get(
        url="http://127.0.0.1/api/oss/down",
        params={"filename": filename, "status": status},
        verify=False,
    )
    res = json.loads(response.text)
    print(res['msg'])
    return res


def main():
    """Entry point: read the task list from redis, skip files already on disk,
    download the rest with ranged multi-threaded requests, verify each file's
    size, and report every result back to the PHP API via send().
    """
    name = 'oss_data'
    # Renamed from `list` to avoid shadowing the builtin.
    tasks = get_redis_data(name)
    num = 0
    # Files already present under the download directory.
    file_list = get_file()

    for v in tasks:
        num += 1
        print(f"开始执行第{num}个下载")
        startt = time.time()
        url = v['download_url']
        filename = dir + v['filename']
        # BUG FIX: removed two leftover debug lines that overwrote url/filename
        # with a hard-coded apk, which made every task download the same file.
        if filename in file_list:
            print(filename + '文件已存在')
        else:
            file_length = get_file_size(url)
            print('文件大小：'+str(hum_convert(file_length)))
            if file_length == 0:
                print(f"第{num}个文件->"+v['filename']+"<-下载执行失败")
                # NOTE(review): results are reported with v['url'] while the
                # download uses v['download_url'] — confirm the API expects 'url'.
                send(v['url'], 2)
            else:
                # Ensure the temp-chunk directory exists before workers write to it.
                os.makedirs("files", exist_ok=True)
                copies_queue = get_thread_download(file_length)
                create_threading(copies_queue, url)
                composite_file(filename)
                # Verify the assembled file matches the size reported by the server.
                size = os.path.getsize(filename)
                if size != file_length:
                    os.remove(filename)
                    print(f"第{num}个文件->" + v['filename'] + "<-下载大小不一致已删除")
                    send(v['url'], 2)
                else:
                    end = round(time.time() - startt, 2)
                    print(f"第{num}个文件->"+v['filename']+"<-下载用时"+str(end)+"秒")
                    send(v['url'], 1)
        print('')


if __name__ == '__main__':
    main()
