from __future__ import annotations

import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timezone
from pathlib import Path
from typing import TYPE_CHECKING

import urllib3
from minio import Minio
from minio.error import S3Error

from pymilvus.bulk_writer.constants import ConnectType
from pymilvus.bulk_writer.endpoint_resolver import EndpointResolver
from pymilvus.bulk_writer.file_utils import FileUtils
from pymilvus.bulk_writer.volume_restful import apply_volume

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from minio.helpers import DictType


class OAuthMinio(Minio):
    """Minio client that authenticates every request with an OAuth bearer token.

    Used for GCP-backed volumes, where access is granted via a short-lived
    OAuth token instead of AK/SK signing.
    """

    def __init__(self, *args, oauth_token: str, **kwargs):
        """
        Args:
            oauth_token (str): Bearer token injected into the ``Authorization``
                header of every outgoing request.
            *args/**kwargs: Forwarded unchanged to ``Minio.__init__``.
        """
        super().__init__(*args, **kwargs)
        self.oauth_token = oauth_token

    def _url_open(
        self,
        method: str,
        region: str,
        bucket_name: str | None = None,
        object_name: str | None = None,
        body: bytes | None = None,
        headers: DictType | None = None,
        query_params: DictType | None = None,
        preload_content: bool = True,
        no_body_trace: bool = False,
    ):
        # Inject the bearer token on every request before delegating to the
        # base implementation.
        headers = headers or {}
        if self.oauth_token:
            headers["Authorization"] = f"Bearer {self.oauth_token}"
        return super()._url_open(
            method,
            region,
            bucket_name=bucket_name,
            object_name=object_name,
            headers=headers,
            query_params=query_params,
            body=body,
            preload_content=preload_content,
            # BUG FIX: no_body_trace was accepted but dropped, so callers
            # asking to suppress body tracing were silently ignored.
            no_body_trace=no_body_trace,
        )


class VolumeFileManager:
    """Uploads local files or directories into a Zilliz Cloud Volume.

    Applies for volume credentials through the cloud API, then pushes files to
    the backing object store with a thread pool, refreshing credentials and the
    storage client whenever they expire or an upload attempt fails.
    """

    def __init__(
        self,
        cloud_endpoint: str,
        api_key: str,
        volume_name: str,
        connect_type: ConnectType = ConnectType.AUTO,
    ):
        """
        Args:
            cloud_endpoint (str): The fixed cloud endpoint URL.
                - For international regions: https://api.cloud.zilliz.com
                - For regions in China: https://api.cloud.zilliz.com.cn
            api_key (str): The API key associated with your organization
            volume_name (str): The name of the Volume.
            connect_type: Current value is mainly for Aliyun OSS buckets, default is Auto.
             - Default case, if the OSS bucket is reachable via the internal endpoint,
               the internal endpoint will be used
             - otherwise, the public endpoint will be used.
             - You can also force the use of either the internal or public endpoint.
        """
        self.cloud_endpoint = cloud_endpoint
        self.api_key = api_key
        self.volume_name = volume_name
        self.connect_type = connect_type
        self.local_file_paths = []  # files queued for the current upload
        self.total_bytes = 0  # total size of queued files, in bytes
        self.volume_info = {}  # latest volume metadata + credentials from the API
        self._client = None  # storage client; rebuilt on every credential refresh
        # Serializes credential/client refreshes: upload workers run in a
        # thread pool and must not rebuild self._client / self.volume_info
        # concurrently.
        self._refresh_lock = threading.Lock()

    def _convert_dir_path(self, input_path: str):
        """Normalize a volume directory path to end with '/' ('' means root)."""
        if not input_path or input_path == "/":
            return ""
        if input_path.endswith("/"):
            return input_path
        return input_path + "/"

    def _refresh_volume_and_client(self, path: str):
        """Re-apply for volume credentials and rebuild the storage client.

        Not itself thread-safe: concurrent callers must hold ``_refresh_lock``
        (the initial call in ``upload_file_to_volume`` runs single-threaded).
        """
        logger.info("refreshing volume info...")
        response = apply_volume(self.cloud_endpoint, self.api_key, self.volume_name, path)
        self.volume_info = response.json()["data"]
        logger.info("volume info refreshed.")

        creds = self.volume_info["credentials"]
        # maxsize sized for the 20-worker upload pool with headroom.
        http_client = urllib3.PoolManager(maxsize=100)

        cloud = self.volume_info["cloud"]
        region = self.volume_info["region"]
        endpoint = EndpointResolver.resolve_endpoint(
            self.volume_info["endpoint"],
            cloud,
            region,
            self.connect_type,
        )

        session_token = creds["sessionToken"]
        if cloud == "gcp":
            # GCP volumes authenticate with an OAuth bearer token, not AK/SK.
            self._client = OAuthMinio(
                endpoint=endpoint,
                region=region,
                secure=True,
                oauth_token=session_token,
                http_client=http_client,
            )
        else:
            self._client = Minio(
                endpoint=endpoint,
                access_key=creds["tmpAK"],
                secret_key=creds["tmpSK"],
                session_token=session_token,
                region=region,
                secure=True,
                http_client=http_client,
            )
        logger.info("storage client refreshed")

    def _credentials_expired(self) -> bool:
        """Return True if the current volume credentials have expired."""
        expire_time_str = self.volume_info["credentials"]["expireTime"]
        # expireTime is ISO-8601; map a trailing 'Z' to an explicit UTC offset
        # so fromisoformat accepts it on Python < 3.11.
        expire_time = datetime.fromisoformat(expire_time_str.replace("Z", "+00:00"))
        return datetime.now(timezone.utc) > expire_time

    def _validate_size(self):
        """Check queued files against the volume's size/count limits.

        Raises:
            ValueError: If total bytes or file count exceed the volume condition.
        """
        file_size_total = self.total_bytes
        file_size_limit = self.volume_info["condition"]["maxContentLength"]
        if file_size_total > file_size_limit:
            error_message = (
                f"localFileTotalSize {file_size_total} exceeds "
                f"the maximum contentLength limit {file_size_limit} defined in the condition."
                f"If you are using the free tier, "
                f"you may switch to the pay-as-you-go volume plan to support uploading larger files."
            )
            raise ValueError(error_message)

        file_number_limit = self.volume_info["condition"].get("maxFileNumber")
        if file_number_limit is not None and len(self.local_file_paths) > file_number_limit:
            error_message = (
                f"localFileTotalNumber {len(self.local_file_paths)} exceeds "
                f"the maximum fileNumber limit {file_number_limit} defined in the condition."
                f"If you are using the free tier, "
                f"you may switch to the pay-as-you-go volume plan to support uploading larger files."
            )
            raise ValueError(error_message)

    def upload_file_to_volume(self, source_file_path: str, target_volume_path: str):
        """
        uploads a local file or directory to the specified path within the Volume.

        Args:
            source_file_path: the source local file or directory path
            target_volume_path: the target directory path in the Volume
        Raises:
            Exception: If an error occurs during the upload process.
        """

        self.local_file_paths, self.total_bytes = FileUtils.process_local_path(source_file_path)
        volume_path = self._convert_dir_path(target_volume_path)
        self._refresh_volume_and_client(volume_path)
        self._validate_size()

        file_count = len(self.local_file_paths)
        logger.info(
            f"begin to upload file to volume, localDirOrFilePath:{source_file_path}, fileCount:{file_count} to volumeName:{self.volume_name}, volumePath:{volume_path}"
        )
        start_time = time.time()

        uploaded_bytes = 0
        uploaded_count = 0
        root_path = Path(source_file_path).resolve()
        uploaded_bytes_lock = threading.Lock()

        def _upload_task(file_path: str, root_path: Path, volume_path: str):
            nonlocal uploaded_bytes
            nonlocal uploaded_count
            path_obj = Path(file_path).resolve()
            if root_path.is_file():
                # Single-file upload: keep only the file name.
                relative_path = path_obj.name
            else:
                # Directory upload: preserve the layout relative to the root.
                relative_path = path_obj.relative_to(root_path).as_posix()

            volume_prefix = f"{self.volume_info['volumePrefix']}"
            file_start_time = time.time()
            try:
                size = Path(file_path).stat().st_size
                logger.info(f"uploading file, fileName:{file_path}, size:{size} bytes")
                remote_file_path = volume_prefix + volume_path + relative_path
                self._put_object(file_path, remote_file_path, volume_path)
                # Snapshot the counters while holding the lock so the progress
                # log is internally consistent under concurrent workers.
                with uploaded_bytes_lock:
                    uploaded_bytes += size
                    uploaded_count += 1
                    bytes_done = uploaded_bytes
                    count_done = uploaded_count
                # Guard against ZeroDivisionError when all queued files are empty.
                percent = (bytes_done / self.total_bytes * 100) if self.total_bytes else 100.0
                elapsed = time.time() - file_start_time
                logger.info(
                    f"Uploaded file, {count_done}/{file_count}: {file_path}, elapsed:{elapsed} s, {bytes_done}/{self.total_bytes} bytes, progress: {percent:.2f}%"
                )
            except S3Error as e:
                logger.error(f"Failed to upload {file_path}: {e!s}")
                raise

        with ThreadPoolExecutor(max_workers=20) as executor:
            futures = [
                executor.submit(_upload_task, file_path, root_path, volume_path)
                for file_path in self.local_file_paths
            ]
            for f in futures:
                f.result()  # wait for all; re-raises the first worker failure

        total_elapsed = time.time() - start_time
        logger.info(
            f"All files in {source_file_path} uploaded to volume, "
            f"volumeName:{self.volume_info['volumeName']}, volumePath: {volume_path}, "
            f"totalFileCount:{file_count}, totalFileSize:{self.total_bytes}, cost time:{total_elapsed}s"
        )
        return {"volumeName": self.volume_info["volumeName"], "path": volume_path}

    def _put_object(self, file_path: str, remote_file_path: str, volume_path: str):
        """Upload one file, refreshing credentials first if they have expired."""
        if self._credentials_expired():
            with self._refresh_lock:
                # Double-checked: another worker may have refreshed while we
                # waited for the lock; avoid redundant API calls.
                if self._credentials_expired():
                    self._refresh_volume_and_client(volume_path)

        self._upload_with_retry(file_path, remote_file_path, volume_path)

    def _upload_with_retry(
        self, file_path: str, object_name: str, volume_path: str, max_retries: int = 5
    ):
        """Upload with retries, refreshing credentials between attempts.

        Raises:
            RuntimeError: If all ``max_retries`` attempts fail (chained to the
                last underlying error).
        """
        for attempt in range(1, max_retries + 1):
            try:
                self._client.fput_object(
                    bucket_name=self.volume_info["bucketName"],
                    object_name=object_name,
                    file_path=file_path,
                )
                return
            except Exception as e:
                logger.warning(f"Attempt {attempt} failed to upload {file_path}: {e}")

                if attempt == max_retries:
                    # Raise immediately: refreshing here would be wasted work
                    # and a refresh failure could mask the original error.
                    error_message = f"Upload failed after {max_retries} attempts"
                    raise RuntimeError(error_message) from e

                # A failure may be caused by expired/rotated credentials;
                # rebuild the client (serialized across workers) before retrying.
                with self._refresh_lock:
                    self._refresh_volume_and_client(volume_path)

                time.sleep(5)
