import argparse
import gzip
import logging
import os
import re
import sys
import time
from multiprocessing import Process
from queue import Queue

import requests

from compress import CompressHandler


def clinet_logger(name: str = 'clinet-logger', level=logging.INFO,
                  log_dir: str = '/home/uwsgi/iharbor/scripts/down_clinet'):
    """Create (or fetch) the client logger writing to stdout and a log file.

    Args:
        name: logger name; repeated calls with the same name reuse one logger.
        level: logging level applied to the logger and both handlers.
        log_dir: directory receiving ``client.log`` (created if missing);
            defaults to the original hard-coded path.

    Returns:
        logging.Logger: the configured logger.
    """
    os.makedirs(log_dir, exist_ok=True)  # idempotent; replaces exists()+makedirs race

    logger = logging.getLogger(name)
    logger.setLevel(level)

    # BUG FIX: the original added two fresh handlers on EVERY call (and this
    # function is called per log()/logerr() invocation), so each message was
    # emitted more and more times. Configure handlers only once per logger.
    if not logger.handlers:
        formatter = logging.Formatter(
            fmt="%(asctime)s %(levelname)s %(message)s ",  # output log format
            datefmt='%Y-%m-%d %H:%M:%S'
        )

        file_handler = logging.FileHandler(filename=f"{log_dir}/client.log")
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)

        std_handler = logging.StreamHandler(stream=sys.stdout)
        std_handler.setLevel(level)  # was never set in the original
        std_handler.setFormatter(formatter)

        logger.addHandler(std_handler)
        logger.addHandler(file_handler)
    return logger


def log(start_time, end_time, msg=''):
    """Log a task's start/end timestamps and elapsed duration at DEBUG level."""
    elapsed = end_time - start_time
    clinet_logger().debug(msg=f'{msg} :任务开始{start_time} ， 结束时间 {end_time}， 总时间 {elapsed}')


def logerr(msg=''):
    """Log an error message through the shared client logger."""
    clinet_logger().error(msg=str(msg))


# ANSI terminal escape codes used to colorize the download-progress output.
RESET = "\033[0m"
GREEN = "\033[32m"
RED = "\033[31m"


class Task:
    """A single download job: fetch one chunk of one object and save it locally."""

    def __init__(self, down_url, file_name, token, chunk_all_size=0):
        self.down_url = down_url    # full chunk URL (offset/size already baked in)
        self.token = token          # iHarbor API token
        self.file_name = file_name  # local file name for this chunk
        self.chunk_all_size = int(chunk_all_size)  # chunk size in bytes

    def run(self):
        """Download this task's chunk; failures are logged, never raised."""
        try:
            DownClinet(token=self.token).download(
                url=self.down_url, file_name=self.file_name,
                size=self.chunk_all_size, url_chunk=self.down_url)
        except Exception as e:
            # BUG FIX: was `pass`, silently swallowing every download failure;
            # log it so failed chunks can be diagnosed after a run.
            logerr(msg=f'下载任务失败：{self.down_url} -- {str(e)}')


class TaskManager:
    """Holds a worker-pool size, a pending-task queue, and process handles."""

    def __init__(self, num_processes=4):
        self.num_processes = num_processes  # number of worker processes
        self.task_queue = Queue()           # pending Task objects
        self.processes = []                 # spawned Process handles

    def add_task(self, task):
        """Enqueue a task for later execution."""
        self.task_queue.put(task)

    def worker(self, task):
        """Run a single task, logging (not raising) any failure."""
        try:
            task.run()
        except Exception as exc:
            logerr(msg=f'进程错误：{str(exc)}')


def get_request(url, token):
    """Issue a GET with iHarbor token authentication; return the raw response."""
    return requests.get(url, headers={'Authorization': f'Token {token}'})


class DownClinet:
    """Downloads a single object chunk over HTTP and writes it to local disk."""

    def __init__(self, token, pwd='/dev/shm'):
        self.pwd = pwd        # target directory for downloaded chunks
        self.bytes_sent = 0   # total bytes written so far by this client
        self.token = token    # iHarbor API token

    def download(self, url, file_name, size, url_chunk, max_retries=3):
        """Download ``url`` into ``pwd/file_name``, retrying on failure.

        Args:
            url: chunk download URL.
            file_name: local file name for this chunk.
            size: chunk size in bytes (also used as the stream block size).
            url_chunk: label shown in the progress line (same as ``url`` here).
            max_retries: additional attempts allowed after the first failure.

        Raises:
            Exception: the last error, once all retries are exhausted.
        """
        os.makedirs(self.pwd, exist_ok=True)
        file_path = os.path.join(self.pwd, file_name)  # full local path

        retries = 0
        while True:
            try:
                start_time = time.time()
                response = get_request(url=url, token=self.token)
                try:
                    response.raise_for_status()  # non-2xx -> HTTPError
                except Exception as e:
                    logerr(msg=f'下载请求异常 {str(e)},')
                    raise

                # stream the payload to disk in `size`-byte blocks
                with open(file_path, 'wb') as file:
                    down_start_time = time.time()
                    for chunk in response.iter_content(chunk_size=size):
                        file.write(chunk)
                        self.bytes_sent += len(chunk)

                    elapsed_time = time.time() - down_start_time
                    if elapsed_time > 0:
                        speed = self.bytes_sent / elapsed_time / 1048576  # MB/s
                        print(
                            f"\r {url_chunk} Downloading... {self.bytes_sent}/{size} bytes |{RED}  Speed: {speed:.2f} M/s {RESET}",
                            end='')

                end_time = time.time()
                log(start_time, end_time)
                print(f" | File downloaded successfully: {file_path}")
                return
            except Exception as e:
                # BUG FIX: the counter originally started at 3 with an
                # `if count > 3` check, so only a SINGLE retry ever happened.
                # The two identical except blocks (RequestException/Exception)
                # are merged; the last error is re-raised when exhausted.
                if retries >= max_retries:
                    raise
                retries += 1
                print(f"An error occurred: {e}")


class IharborClient:
    """Builds chunked download URL lists from the iHarbor metadata/dir APIs."""

    def __init__(self, oss_url, bucket_name, token, obj_list=None, chunk_size: int = 1024, compress=None, ):
        self.oss_url = oss_url          # iHarbor service host (no scheme)
        self.chunk_size = chunk_size    # download chunk size in bytes
        self.compress = compress        # compression type requested from server
        self.bucket_name = bucket_name  # bucket to read from
        self.obj_list = obj_list        # optional explicit object names
        self.token = token              # API token

    def generate_offsets(self, file_size, block_size):
        """Return the byte offset of every ``block_size`` block in a file."""
        return list(range(0, file_size, block_size))

    def get_obj_list(self):
        """Build chunk download tasks for the explicitly-named objects.

        Returns:
            list: ``[url, chunk_file_name, chunk_size, object_size]`` rows,
            terminated by a ``[None, None, None, total_bytes]`` sentinel —
            the same shape produced by :meth:`get_obj_list_in_bucket`.
            (CONSISTENCY FIX: the original returned 3-element rows with no
            sentinel, which crashed ``start_process`` at ``[-1][3]``.)
        """
        obj_list = []
        all_file_size = 0
        for obj in self.obj_list:
            obj_url = f'http://{self.oss_url}/api/v1/metadata/{self.bucket_name}/{obj}/'
            r = get_request(url=obj_url, token=self.token)
            req = r.json()

            all_size = req['obj']['si']
            all_file_size += all_size
            offset_list = self.generate_offsets(int(all_size), int(self.chunk_size))

            # BUG FIX: the object name was wrapped in a set literal
            # (`{req['obj']['na']}`), which rendered as "{'name'}" in the URL.
            obs_file_na = req['obj']['na']
            file_chunk_name = req['obj']['name']

            for index, offset in enumerate(offset_list):
                url = (f'http://{self.oss_url}/api/v1/obj/{self.bucket_name}/'
                       f'{obs_file_na}/?offset={offset}&size={self.chunk_size}')
                if self.compress:
                    # only request compression when configured
                    # (mirrors get_obj_list_in_bucket)
                    url += f'&compress={self.compress}'

                # BUG FIX: the last chunk was detected by comparing the loop
                # *index* against the last *offset*; compare offsets instead.
                suffix = '_end' if offset == offset_list[-1] else ''
                obj_list.append([url, f'{file_chunk_name}_{index}{suffix}',
                                 f'{self.chunk_size}', all_size])

        obj_list.append([None, None, None, all_file_size])
        return obj_list

    def get_obj_list_in_bucket(self, url):
        """Page through a bucket listing and build chunk download tasks.

        Follows the ``next`` links returned by the dir API until exhausted,
        giving up after more than 10 request failures.

        Returns:
            list: ``[url, chunk_file_name, chunk_size, object_size]`` rows,
            terminated by a ``[None, None, None, total_bytes]`` sentinel
            (no sentinel when the error limit is hit, as before).
        """
        obj_list = []
        start_url = url
        count = 0          # error counter; gives up after 10 failures
        all_file_size = 0  # running total of all object sizes (bytes)

        while True:
            if count > 10:
                return obj_list

            try:
                r = get_request(url=start_url, token=self.token)
                if r.status_code != 200:
                    logerr(msg=f'获取信息列表失败：{start_url} -- {r.text}')
                    # ROBUSTNESS: previously fell through and called r.json()
                    # on an error response; count it as a failure and retry.
                    count += 1
                    continue
            except Exception as e:
                logerr(msg=f'获取信息列表失败：{start_url} -- {str(e)}')
                count += 1
                continue

            req = r.json()

            for obs in req['files']:
                chunk_size_t = self.chunk_size

                value_size = all_size = obs['si']
                all_file_size += value_size

                offset_list = self.generate_offsets(int(all_size), int(chunk_size_t))

                obs_file_na = obs['na']
                file_chunk_name = obs['name']
                for index, offset in enumerate(offset_list):
                    url = (f'http://{self.oss_url}/api/v1/obj/{self.bucket_name}/'
                           f'{obs_file_na}/?offset={offset}&size={chunk_size_t}')
                    if self.compress:
                        url += f'&compress={self.compress}'

                    if offset_list[-1] == offset:
                        obj_list.append([url, f'{file_chunk_name}_{index}_end', f'{chunk_size_t}', value_size])
                    else:
                        obj_list.append([url, f'{file_chunk_name}_{index}', f'{chunk_size_t}', value_size])

                    # shrink the final chunk to the remaining byte count
                    value = all_size - chunk_size_t
                    if value > chunk_size_t:
                        all_size -= chunk_size_t
                    elif value <= chunk_size_t:
                        chunk_size_t = value

            if req['next'] is None:
                break
            start_url = req['next']

        obj_list.append([None, None, None, all_file_size])
        return obj_list

    def get_bucket_all_obj_list(self):
        """List every object in the bucket root as chunk tasks.

        Returns:
            list | None: chunk rows plus the size sentinel, or ``None`` when
            the listing request raised.
        """
        base_url = f'http://{self.oss_url}/api/v1/dir/{self.bucket_name}/%2F/'
        obj_all_list = None
        try:
            obj_all_list = self.get_obj_list_in_bucket(base_url)
        except Exception as e:
            logerr(msg=f'获取信息列表失败：{base_url} -- {str(e)}')

        return obj_all_list


def worker(work_queue):
    """Consume and run tasks from ``work_queue`` until a ``None`` sentinel."""
    # iter(callable, sentinel) stops as soon as get() returns None
    for task in iter(work_queue.get, None):
        task.run()


class Clinet(object):
    """Top-level download client: builds the chunk task list, fans it out over
    multiple worker processes, then merges the chunks back into whole files."""

    def __init__(self, oss_url, bucket_name, token, obj_list=None, chunk_size=None, compress='gzip', max_process=10):
        self.oss_url = oss_url
        self.chunk_size = chunk_size
        self.compress = compress
        self.bucket_name = bucket_name
        self.obj_list = obj_list
        self.token = token
        self.max_process = int(max_process)
        self.file_directory = '/dev/shm'  # chunks are staged in shared memory
        # NOTE(review): unused — per-process queues are built in start_process
        self.work_queue = Queue()

    def find_end_files(self, ):
        """Return files in ``file_directory`` whose names end in ``_end``
        (each marks the final chunk of one downloaded object)."""
        return [fn for fn in os.listdir(self.file_directory) if fn.endswith('_end')]

    def find_and_sort_x_files(self, file_name):
        """Return the ``<file_name>_<i>`` chunk files, sorted by chunk index."""
        # BUG FIX: escape the file name so regex metacharacters in it
        # (dots, brackets, ...) match literally instead of as patterns.
        pattern = re.compile(rf'^{re.escape(file_name)}_(\d+)$')

        indexed = []  # (chunk index, filename) pairs for numeric sorting
        for filename in os.listdir(self.file_directory):
            match = pattern.match(filename)
            if match:
                indexed.append((int(match.group(1)), filename))

        indexed.sort()
        return [filename for _, filename in indexed]

    def merge_files(self):
        """Decompress each downloaded chunk (gzip) and concatenate the chunks
        of every object, in index order, into the final output file."""
        for filename in self.find_end_files():  # e.g. "file_3_end"
            # strip the trailing "_end", then the chunk index, to recover
            # the original object name
            filename_new = filename.rsplit('_', maxsplit=1)[0]
            filename_new_name = filename_new.rsplit('_', maxsplit=1)[0]

            full_new_file_path = os.path.join(self.file_directory, filename_new_name)

            chunk_files = self.find_and_sort_x_files(filename_new_name)
            chunk_files.append(filename)  # the "_end" chunk goes last

            # NOTE(review): append mode means a second run would duplicate
            # content in the merged file — confirm whether reruns are expected.
            with open(full_new_file_path, 'ab+') as outfile:
                for chunk in chunk_files:
                    file_path = os.path.join(self.file_directory, chunk)
                    try:
                        # chunks are stored gzip-compressed; decompress on read
                        with gzip.open(file_path, 'rb') as infile:
                            outfile.write(infile.read())
                    except FileNotFoundError:
                        print(f"文件未找到: {file_path}")
                    except Exception as e:
                        print(f"处理文件时出错: {file_path}, 错误信息: {e}")

            print(f'合并文件成功：: {full_new_file_path}')

    def start_process(self, obj_list, max_process):
        """Build the chunk task list, distribute it round-robin across
        ``max_process`` worker processes, wait for completion, then merge.

        Args:
            obj_list: explicit object names; falsy means "whole bucket".
            max_process: number of worker processes to spawn.
        """
        iharbor_client = IharborClient(oss_url=self.oss_url, bucket_name=self.bucket_name, token=self.token,
                                       obj_list=self.obj_list, chunk_size=self.chunk_size, compress=self.compress)

        if obj_list:
            worker_obj_list = iharbor_client.get_obj_list()
        else:
            worker_obj_list = iharbor_client.get_bucket_all_obj_list()

        if not worker_obj_list:
            return

        # the trailing sentinel row carries the total byte count
        file_ = worker_obj_list[-1][3] / 1048576  # MB
        worker_obj_list.pop()

        # one dedicated queue per process; tasks are dealt round-robin
        work_queues = [Queue() for _ in range(int(max_process))]

        len_list = len(worker_obj_list)
        count = 0
        flag = True
        while flag:
            for work_q in work_queues:
                if count > len_list - 1:
                    flag = False
                    break
                work_q.put(
                    Task(worker_obj_list[count][0], worker_obj_list[count][1], self.token,
                         int(worker_obj_list[count][2])))
                count += 1

        # one None sentinel per queue tells its worker to exit
        for work_q in work_queues:
            work_q.put(None)

        start_time = time.time()
        print(f'服务启动时间： {start_time} 下载文件大小 {file_:.2f} MB')

        # NOTE(review): pre-filled queue.Queue instances reach the children
        # only via fork-copied memory — this relies on the POSIX fork start
        # method; confirm if Windows/macOS spawn support is ever needed.
        processes = []
        for i in range(int(max_process)):
            p = Process(target=worker, args=(work_queues[i],))
            processes.append(p)
            p.start()

        for p in processes:
            p.join()

        end_time = time.time()
        time_value = end_time - start_time
        print(f'下载完成时间： {end_time} 下载文件大小 {file_:.2f} MB ')
        print(f'时间差： {time_value} 速率 {file_ / time_value:.2f} M/s')

        # merge chunks named "<file>_<index>"; only the bucket root is handled
        print(f'等待解压缩和并文件：')
        self.merge_files()


def main():
    """CLI entry point: parse arguments and launch the multi-process download.

    Raises:
        Exception: when ``--chunk_size`` is not one of 4/8/16 (MB).
    """
    parser = argparse.ArgumentParser(description="客户端多进程下载")
    parser.add_argument('--oss_url', type=str, help='oss 服务地址')
    parser.add_argument('--bucket', type=str, help='桶名称')
    parser.add_argument('--token', type=str, help='用户token')
    parser.add_argument('--chunk_size', type=str, help='块大小 MB')
    parser.add_argument('--max_process', type=str, help='并发数')
    # BUG FIX: `type=list` split the argument string into single characters;
    # accept zero or more whitespace-separated object names instead.
    parser.add_argument('--obj_list', type=str, nargs='*', required=False, help='')
    parser.add_argument('--compress', type=str, required=False, help='压缩类型')

    args = parser.parse_args()

    chunk_size = int(args.chunk_size)
    # collapse the 4/8/16 if/elif ladder into one membership check
    if chunk_size not in (4, 8, 16):
        raise Exception('chunk_size 只能设置 4/8/16M')
    chunk_size *= 1024 * 1024  # MB -> bytes

    max_process = int(args.max_process)

    client = Clinet(args.oss_url, args.bucket, args.token, obj_list=args.obj_list,
                    chunk_size=chunk_size, compress=args.compress,
                    max_process=max_process)
    client.start_process(args.obj_list, max_process)


if __name__ == '__main__':
    main()  # accepts: bucket, concurrency, chunking, chunk size, compress type, object list, server IP
