#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：BugManagementSystem 
@File    ：oss.py
@IDE     ：PyCharm 
@Author  ：kdly
@Date    ：2024/11/18 下午4:35 
'''
import os
import logging

import oss2
from oss2.models import PartInfo
from oss2 import SizedFileAdapter, determine_part_size, ObjectIterator
from oss2.models import BucketCors, CorsRule

from django.conf import settings

logger = logging.getLogger(__name__)


def set_up(bucket_name, region=''):
    """Build an ``oss2.Bucket`` client for the given bucket.

    Credentials and endpoint come from Django settings. When ``region``
    is empty it falls back to ``settings.ALIYUN_REGION`` (the region is
    mandatory under V4 signing).

    :param bucket_name: name of the OSS bucket
    :param region: OSS region id, e.g. ``cn-hangzhou``
    :return: configured ``oss2.Bucket`` instance
    """
    # V4 signature auth from the configured access key pair
    # (requires OSS access key id/secret in Django settings).
    auth = oss2.AuthV4(settings.ALIYUN_ACCESSKEY_ID, settings.ALIYUN_SECRET)

    # Fall back to the project-wide region when the caller passes none.
    effective_region = region or settings.ALIYUN_REGION

    # Endpoint example: https://oss-cn-hangzhou.aliyuncs.com
    return oss2.Bucket(auth, settings.ALIYUN_ENDPOINT, bucket_name,
                       region=effective_region)


def create_bucket(bucket_name=None, region=None):
    """Create an OSS bucket with a public-read ACL and a permissive CORS rule.

    :param bucket_name: bucket name; defaults to ``settings.ALIYUN_BUCKET_NAME``
    :param region: region id; defaults to ``settings.ALIYUN_REGION``
    :return: None
    """
    # Resolve defaults at call time instead of freezing settings values
    # into the function signature at import time (so settings overrides,
    # e.g. in tests, are honored).
    if bucket_name is None:
        bucket_name = settings.ALIYUN_BUCKET_NAME
    if region is None:
        region = settings.ALIYUN_REGION

    bucket = set_up(bucket_name, region)

    # NOTE(review): the ACL actually applied is PUBLIC READ (anyone can
    # read, only the owner can write); the original comment incorrectly
    # claimed "private read/write".
    bucket.create_bucket(oss2.models.BUCKET_ACL_PUBLIC_READ)

    # Allow browser cross-origin access for the common HTTP verbs.
    rule = CorsRule(
        allowed_origins=['*'],
        allowed_methods=['GET', 'PUT', 'DELETE', 'POST', 'HEAD'],
        allowed_headers=['*'],
        expose_headers=['ETag', 'x-oss-request-id'],
        max_age_seconds=60,
    )

    # Any CORS rules already on the bucket are overwritten.
    bucket.put_bucket_cors(BucketCors([rule]))


def fragment_upload_file(oss_file_url, file_name, file_obj, bucket_name):
    """
    Multipart-upload a file object to OSS.

    :param oss_file_url: destination prefix inside the bucket (expected to
        end with ``/`` so that ``prefix + file_name`` forms the object key)
    :param file_name: name of the file to upload; also used as the
        download filename via the Content-Disposition header
    :param file_obj: seekable file-like object containing the data
    :param bucket_name: target bucket name
    :return: on success, a list ``[public_url, size_in_bytes]``;
        on failure, the multipart ``upload_id`` string so the caller can
        abort the pending upload (see ``cancel_fragment_upload``) —
        note the two return shapes differ.
    """
    bucket = set_up(bucket_name)

    region = settings.ALIYUN_REGION

    # Full object key — must NOT include the bucket name,
    # e.g. exampledir/exampleobject.txt.
    key = oss_file_url + file_name
    # Measure the total size by seeking to the end, then rewind so the
    # parts are read from the beginning of the stream.
    file_obj.seek(0, os.SEEK_END)
    total_size = file_obj.tell()
    file_obj.seek(0)
    # determine_part_size picks a part size from the total size and a
    # preferred part size (here 100 KiB).
    part_size = determine_part_size(total_size, preferred_size=100 * 1024)

    # Extra headers applied when completing the multipart upload.
    headers = dict()
    # Filename presented to clients when the object is downloaded.
    headers['Content-Disposition'] = file_name

    upload_id = bucket.init_multipart_upload(key).upload_id
    parts = []
    # Upload the parts one by one.
    try:
        part_number = 1
        offset = 0
        while offset < total_size:
            num_to_upload = min(part_size, total_size - offset)
            # SizedFileAdapter(fileobj, size) wraps the file object so each
            # part reads exactly `size` bytes from the current position.
            result = bucket.upload_part(key, upload_id, part_number, SizedFileAdapter(file_obj, num_to_upload))
            parts.append(PartInfo(part_number, result.etag))

            offset += num_to_upload
            part_number += 1
        # Stitch the uploaded parts together into the final object.
        result = bucket.complete_multipart_upload(key, upload_id, parts, headers=headers)
    except Exception as e:
        logger.error(e)
        # Return the upload_id so the caller can abort/clean up the
        # partially-uploaded object.
        return upload_id
    size = bucket.get_object_meta(key).content_length
    # Example public URL:
    # https://kdly-bucket.oss-cn-shenzhen.aliyuncs.com/upload/6c436444eb93a8b85be73029af67728d.jpeg
    return [f"https://{bucket_name}.oss-{region}.aliyuncs.com/{key}", size]


def cancel_fragment_upload(oss_file_url, file_name, upload_id):
    """Abort a pending multipart upload; parts already uploaded are removed.

    :param oss_file_url: directory prefix of the object inside the bucket
    :param file_name: file name appended to the prefix to form the key
    :param upload_id: id returned by ``init_multipart_upload``
    :return: None
    """
    # Object key must not include the bucket name,
    # e.g. exampledir/exampleobject.txt.
    object_key = oss_file_url + file_name

    bucket = set_up(settings.ALIYUN_BUCKET_NAME)

    # Aborting deletes every part uploaded under this upload_id.
    bucket.abort_multipart_upload(object_key, upload_id)


def list_file(oss_file_url, bucket_name):
    """Yield the key of every object under the given prefix.

    :param oss_file_url: prefix ("directory") to list; objects in nested
        subdirectories are included
    :param bucket_name: bucket name
    :return: generator of object keys
    """
    bucket = set_up(bucket_name)

    # ObjectIterator pages through all objects whose key starts with
    # the prefix (imported directly at the top of the module).
    yield from (entry.key for entry in ObjectIterator(bucket, prefix=oss_file_url))


def check_file(etag, oss_file_url, bucket_name):
    """Check whether the stored object's ETag matches the expected one.

    :param etag: expected ETag value to compare against
    :param oss_file_url: full object key (must not include the bucket name)
    :param bucket_name: bucket name
    :return: True if the object's ETag equals ``etag``, else False

    NOTE(review): ``get_object_meta`` presumably raises when the object
    does not exist — this function does not catch that; confirm callers
    only pass keys that exist (or wrap in try/except).
    """
    bucket = set_up(bucket_name)

    # Lightweight metadata request; the ETag is exposed via the
    # response headers.
    meta_data = bucket.get_object_meta(oss_file_url)
    meta_data_etag = meta_data.headers['ETag']

    # Return the comparison directly instead of the
    # `if ...: return True / return False` anti-idiom.
    return meta_data_etag == etag


def delete_file(oss_file_url_list, bucket_name, msg):
    """Batch-delete objects; OSS caps a single batch at 1000 keys.

    :param oss_file_url_list: full object keys to delete (no bucket name)
    :param bucket_name: bucket name
    :param msg: log-message prefix used when a single file is deleted
    :return: list of successfully deleted keys, or False on failure
    """
    bucket = set_up(bucket_name)

    try:
        result = bucket.batch_delete_objects(oss_file_url_list)
    except Exception:
        # Keep the original "return False on any failure" contract, but
        # record why the deletion failed instead of swallowing it silently.
        logger.exception("batch_delete_objects failed for %s", oss_file_url_list)
        return False

    # Log the single deleted key. Guard against an empty result so an
    # empty input list cannot raise IndexError on deleted_keys[0].
    if len(oss_file_url_list) <= 1 and result.deleted_keys:
        logger.info(f"{msg}{result.deleted_keys[0]}")
    return result.deleted_keys


def delete_all_objects(bucket_name):
    """Delete every object in the bucket, flushing deletes in batches of 1000.

    :param bucket_name: bucket name
    :return: None
    """
    bucket = set_up(bucket_name)

    pending = []     # keys accumulated for the next batch delete
    total_seen = 0   # how many objects have been encountered overall

    for entry in oss2.ObjectIterator(bucket):
        pending.append(entry.key)
        total_seen += 1
        # Flush a full batch — OSS caps batch deletes at 1000 keys.
        if len(pending) >= 1000:
            logger.warning("批量删除1000个文件对象...")
            bucket.batch_delete_objects(pending)
            pending = []
            logger.warning("已删除1000个文件对象，仍在继续...")

    if total_seen == 0:
        logger.warning(f"{bucket.bucket_name} bucket中没有要删除的对象")
    elif pending:
        # Flush whatever is left over after the loop.
        logger.warning(f"删除最后一批 {len(pending)} 对象...")
        bucket.batch_delete_objects(pending)


def delete_all_multipart_uploads(bucket_name):
    """Abort every in-progress multipart upload in the bucket.

    :param bucket_name: bucket name
    :return: None
    """
    bucket = set_up(bucket_name)

    aborted = 0
    # MultipartUploadIterator walks all pending multipart uploads;
    # aborting each one deletes its already-uploaded parts.
    for pending in oss2.MultipartUploadIterator(bucket):
        bucket.abort_multipart_upload(pending.key, pending.upload_id)
        aborted += 1

    if aborted:
        logger.warning(f"{bucket.bucket_name} bucket中所有分片上传都已中止")
    else:
        logger.warning(f"{bucket.bucket_name} bucket中没有可中止的分片上传")


def delete_bucket(bucket_name):
    """Delete the bucket itself, best effort: failures are logged, not raised.

    :param bucket_name: bucket name
    :return: None
    """
    bucket = set_up(bucket_name)
    try:
        bucket.delete_bucket()
    except Exception:
        # logger.exception preserves the traceback, which the original
        # logger.error(e) discarded.
        logger.exception("failed to delete bucket %s", bucket_name)
