import logging
import os
import shutil
import tempfile
import time
import zipfile
from urllib.parse import unquote

import retry
from flask import g, request
from taichu_storage import Protocol, StorageInterface, create_storage

from myapp.const.response import BizError
from myapp.third.k8s.py_k8s import K8s
from myapp.third.modelarts.configs import MODELARTS_AK, MODELARTS_REGION_NAME, MODELARTS_SK
from myapp.third.redis.client import RedisClient
from myapp.utils import env
from myapp.utils.decorators import once
from myapp.utils.env import get_external_domain_name, is_private
from myapp.utils.region_storage import StorageMgrFactory
from myapp.utils.time import get_local_time


# conf = app.config

# NOTE(review): bucketName appears unused in this module — confirm before removing.
bucketName = 'mztest'
# Bucket that backs the public/publish data paths used throughout this module.
cube_bucket = 'publish-data'

# S3-compatible (JuiceFS gateway) endpoint and credentials; overridable via env.
S3_ENDPOINT = os.getenv('S3_ENDPOINT', 'http://juicefs-s3-gateway.infra:29501')
# Externally reachable endpoint; may be None when not configured.
S3_ENDPOINT_EXTERNAL = os.getenv('S3_ENDPOINT_EXTERNAL')
S3_AK = os.getenv('S3_AK', 'minio')
S3_SK = os.getenv('S3_SK', 'minio2022')

# JuiceFS metadata-engine URL consumed by the `juicefs quota` commands below.
META_URL = os.getenv('metaurl')

# Per-user storage quota defaults; units look like GiB given the *1024^3
# conversion in check_user_storage — confirm against the UserAttribute model.
DEFAULT_STORAGE_SIZE = 500
MIN_STORAGE_SIZE = 10


@once
def init_storage_mgr():
    """Build the process-wide storage client for the backend named by STORAGE_MEDIA.

    Cached by @once, so the client is constructed a single time per process.
    Supported values: 'OBS' and 'MINIO' (S3-compatible JuiceFS gateway).

    Returns:
        The client produced by taichu_storage.create_storage.

    Raises:
        ValueError: if STORAGE_MEDIA is unset or names an unknown backend.
            (Previously this surfaced as an opaque ``Protocol(None)`` enum error.)
    """
    st_type = os.getenv('STORAGE_MEDIA')
    if st_type == 'OBS':
        protocol = Protocol.OBS
        cfgs = {
            'ak': MODELARTS_AK,
            'sk': MODELARTS_SK,
            'endpoint_url': f'https://obs.{MODELARTS_REGION_NAME}.{get_external_domain_name()}',
            'bucket': 'publish-data',
        }
    elif st_type == 'MINIO':
        protocol = Protocol.BOTO3
        # Non-default regions reach their gateway through the clusterset DNS name.
        if g.region.key == 'default':
            endpoint = S3_ENDPOINT
        else:
            endpoint = 'http://' + g.region.key + '-' + 'juicefs-s3-gateway.infra.svc.clusterset.local:29501'
        cfgs = {
            'ak': S3_AK,
            'sk': S3_SK,
            'endpoint_url': endpoint,
            'bucket': 'publish-data',
            'endpoint_url_external': S3_ENDPOINT_EXTERNAL,
        }
    else:
        raise ValueError(f'Unsupported STORAGE_MEDIA: {st_type!r}')

    return create_storage(protocol, cfgs)


class StorageManager(StorageInterface):
    """Facade over the process-wide storage backend.

    Every operation fetches the singleton client (init_storage_mgr is cached
    via @once) and delegates to it; the read paths are retried to smooth over
    transient backend errors.
    """

    @staticmethod
    def _client():
        # Cheap after the first call: init_storage_mgr memoises the client.
        return init_storage_mgr()

    @retry.retry(tries=5, delay=0.2)
    def list_objects(self, key, delimiter=''):
        return self._client().list_objects(key, delimiter)

    @retry.retry(tries=5, delay=0.2)
    def get_object(self, key):
        return self._client().get_object(key)

    def put_object(self, key, content):
        return self._client().put_object(key, content)

    def upload_file(self, local_filename, key):
        return self._client().upload_file(local_filename, key)

    def upload_dir(self, local_dir, key):
        return self._client().upload_dir(local_dir, key)

    def download_file(self, key, local_filename):
        return self._client().download_file(key, local_filename)

    def download_dir(self, key, local_dir):
        return self._client().download_dir(key, local_dir)

    def copy_object(self, source_key, target_key):
        return self._client().copy_object(source_key, target_key)

    def copy_dir(self, source_dir, dest_dir):
        return self._client().copy_dir(source_dir, dest_dir)

    def create_dir(self, dirname):
        return self._client().create_dir(dirname)

    def generate_signed_url(self, key, expiration=3600, use_external_host=True, content_type=None):
        # The signed URL is percent-decoded before being handed back.
        signed = self._client().generate_signed_url(key, expiration, use_external_host, content_type)
        return unquote(signed)

    def generate_upload_credentials(self, key, expiration=3600, use_external_host=True):
        return self._client().generate_upload_credentials(key, expiration, use_external_host)


# Module-level singleton shared by the helpers below.
storage_mgr = StorageManager()


def get_host(url):
    """Pull the host portion (including any port) out of a URL-like string.

    'scheme://host/path' -> 'host'; 'host/path' -> 'host'; a string with no
    separator at all yields ''.
    """
    if '//' in url:
        tail = url.split('//')[1]
        return tail.split('/')[0]
    if '/' in url:
        return url.split('/')[0]
    return ''


def is_alluxio_storage():
    """Return True when STORAGE_MEDIA selects the MinIO or ARM backend."""
    return os.getenv('STORAGE_MEDIA') in ('MINIO', 'minio', 'arm', 'ARM')


def compress_key(key, zip_path):
    """Download every object under *key* and bundle them into a zip at *zip_path*.

    A unique scratch directory under <cwd>/download holds the downloaded
    files; it is removed even if the download or zipping fails (the old code
    leaked the directory on any exception).  Returns None.
    """
    download_root = os.path.join(os.getcwd(), 'download')
    os.makedirs(download_root, exist_ok=True)
    # mkdtemp yields a unique name, avoiding the collision two concurrent
    # calls had with the old int(time.time()) naming scheme.
    source_dir = tempfile.mkdtemp(dir=download_root)
    logging.info('source_dir: %s', source_dir)
    try:
        storage_mgr.download_dir(key, source_dir + '/')
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, _, files in os.walk(source_dir):
                for file in files:
                    file_path = os.path.join(root, file)
                    # Archive entries are relative to the scratch dir root.
                    zipf.write(file_path, arcname=os.path.relpath(file_path, source_dir))
    finally:
        shutil.rmtree(source_dir, ignore_errors=True)
    return None


def get_public_url(key):
    """Return a browser-reachable URL for *key*.

    Absolute http(s) URLs pass through untouched.  Private deployments serve
    from port 9000 on the request host; otherwise an OBS bucket URL is built.
    """
    if key.startswith('http'):
        return key
    bare_host = request.host.split(':')[0]
    if is_private():
        return f'http://{bare_host}:9000/public/{key}'
    return f'https://{cube_bucket}.obs.{env.get_region_id()}.{env.get_external_domain_name()}/{key}'


def get_prev_object_list(base_path, child_key):
    """List the immediate children of base_path + child_key for a file browser.

    Directories keep their key relative to *base_path*; files are returned
    with a signed download URL, timestamp and human-readable size.
    """
    entries = []
    full_key = base_path + child_key
    if not full_key or full_key == '/':
        return {'parent_key': child_key, 'list': entries}

    for obj in storage_mgr.list_objects(full_key, delimiter='/'):
        obj_name = obj.get('name')
        if obj.get('is_dir'):
            entries.append(
                {
                    # Strip the trailing '/' before taking the basename.
                    'name': os.path.basename(obj_name[:-1]),
                    'key': obj_name.replace(base_path, ''),
                    'is_directory': True,
                }
            )
        else:
            entries.append(
                {
                    'name': os.path.basename(obj_name),
                    'key': storage_mgr.generate_signed_url(obj_name),
                    'is_directory': False,
                    'update_at': get_local_time(obj.get('last_modified')),
                    'files_size': size_str(obj.get('size')),
                }
            )

    return {'parent_key': child_key, 'list': entries}


# Compute the size of a key; works for both files and directories.
def get_path_size(key, region_key):
    """Return the total size in bytes of *key* (file or directory).

    Private deployments with a local mount are measured on the filesystem;
    otherwise the region's storage SDK is asked to list and sum the objects.
    Listing errors are logged and reported as 0 extra bytes (best effort).
    """
    if key is None or key == '':
        return 0
    mount_path = '/mnt/publish-data/'
    key = key.strip()
    if key.startswith(mount_path):
        key = key.replace(mount_path, '')
    local_path = os.path.join(mount_path, key)

    if is_private() and os.path.exists(local_path):
        # Measure via the locally mounted filesystem.
        if not os.path.isdir(local_path):
            return os.path.getsize(local_path)
        return sum(
            os.path.getsize(os.path.join(root, name))
            for root, _, names in os.walk(local_path)
            for name in names
        )

    # Fall back to the storage SDK for the given region.
    total = 0
    try:
        for obj in StorageMgrFactory.region(region_key).list_objects(key):
            total += obj['size']
    except Exception as e:
        logging.error(f'list_objects error,key:{key}:{repr(e)}', exc_info=True, stack_info=True)

    return total


def size_str(storage_size):
    """Format a byte count as a short human-readable string (KB/MB/GB).

    Falsy input (0, None, '') yields ''.  The unit steps up once the rounded
    value in the current unit reaches 500.
    """
    if not storage_size:
        return ''
    total = int(storage_size)
    kb = total / 1024
    if round(kb) < 500:
        return f'{round(kb, 2)}KB'
    mb = kb / 1024
    if round(mb) < 500:
        return f'{round(mb, 2)}MB'
    return f'{round(mb / 1024, 2)}GB'


def size_to_gib(size):
    """Convert a byte count to GiB, rounded to two decimal places."""
    return round(int(size) / 1024 / 1024 / 1024, 2)


def get_private_path(key):
    """Map an object key to its path on the local publish-data mount."""
    return f'/mnt/publish-data/{key}'


def get_private_bucket_path(key):
    """Map an object key to its bucket-rooted path (as seen inside JuiceFS)."""
    return f'/publish-data/{key}'


def juicefs_quota_set(key, size):
    """Set a JuiceFS capacity quota on the bucket path backing *key*.

    Runs `juicefs quota set` inside the first juicefs-fuse-daemon pod in the
    `infra` namespace (quotas must be applied where the FUSE mount lives).

    Raises:
        BizError: when no daemon pod can be found.
    """
    bucket_path = get_private_bucket_path(key)
    command = ['juicefs', 'quota', 'set', META_URL, '--path', bucket_path, '--capacity', f'{size}']
    client = K8s()
    daemon_pods = client.get_pods_by_appname('infra', 'juicefs-fuse-daemon')
    if not daemon_pods:
        raise BizError(1000, '系统错误')
    return client.exec_command(daemon_pods[0], 'infra', command)


def juicefs_quota_get(key):
    """Read the JuiceFS quota for the bucket path backing *key*.

    Executes `juicefs quota get` inside the first juicefs-fuse-daemon pod in
    the `infra` namespace and returns exec_command's output.

    Raises:
        BizError: when no daemon pod can be found.
    """
    bucket_path = get_private_bucket_path(key)
    command = ['juicefs', 'quota', 'get', META_URL, '--path', bucket_path]
    client = K8s()
    daemon_pods = client.get_pods_by_appname('infra', 'juicefs-fuse-daemon')
    if not daemon_pods:
        raise BizError(1000, '系统错误')
    return client.exec_command(daemon_pods[0], 'infra', command)


def check_user_storage(user_id):
    """Return True when the user still has at least MIN_STORAGE_SIZE GiB free.

    Used space comes from a Redis counter keyed by username; the quota comes
    from UserAttribute.storage_size, falling back to DEFAULT_STORAGE_SIZE.
    Unknown users are treated as having no space (False).
    """
    from myapp.app import db
    from myapp.models.model_user import MyUser
    from myapp.models.user_attributes import UserAttribute

    user = db.session.query(MyUser).filter_by(id=user_id).first()
    if not user:
        return False

    cached = RedisClient.get(f'user_space_size_{user.username}')
    used_size = int(cached) if cached else 0

    attr = db.session.query(UserAttribute).filter_by(user_id=user_id).first()
    storage_size = attr.storage_size if attr else DEFAULT_STORAGE_SIZE

    # Quota is scaled from GiB to bytes before subtracting the used byte count.
    return size_to_gib(storage_size * 1024 * 1024 * 1024 - used_size) >= MIN_STORAGE_SIZE


def delete_local_dir(local_path: str):
    """Recursively delete *local_path*, but only under the publish-data mounts.

    Paths outside '/mnt/publish-data/' / '/publish-data/' are refused so a bad
    argument can never wipe an arbitrary directory.  Every failure is logged
    and swallowed — this is best-effort cleanup.
    """
    try:
        if local_path is None or len(local_path) == 0:
            return
        # Guard against deleting anything outside the known mount roots.
        if not local_path.startswith(('/mnt/publish-data/', '/publish-data/')):
            logging.warning(
                f"[delete_local_dir] local_path: {local_path} not start with '/mnt/publish-data/' or"
                f" '/publish-data/', do nothing."
            )
            return
        if not os.path.exists(local_path):
            logging.warning(f'[delete_local_dir] local_path: {local_path}, not exist, do nothing.')
            return
        logging.warning(f'[delete_local_dir] delete local_path: {local_path}')
        shutil.rmtree(local_path)
    except Exception as e:
        logging.error(f'[delete_local_dir] Exception: {repr(e)}', exc_info=True, stack_info=True)
