# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Download dataset from the internet."""

import os
import bz2
import gzip
import lzma
import zipfile
import hashlib
import tarfile
import pathlib
import urllib
import urllib.request
import urllib.error
from typing import Optional, Dict, Callable, Any, Tuple, IO


class DownLoad:
    """Download files over HTTP(S) and extract tar/zip archives.

    Supports tar and zip archives, optionally compressed with bz2, gzip or
    xz, plus the aliased suffixes (.tbz, .tbz2, .tgz). Downloads follow
    redirects up to a configurable hop count and can be verified against an
    MD5 checksum.
    """

    # Some servers reject urllib's default agent, so impersonate a browser.
    USER_AGENT: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "\
                      "Chrome/92.0.4515.131 Safari/537.36"

    # Compression suffix -> stdlib opener used to read the compressed stream.
    _COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {
        ".bz2": bz2.open,
        ".gz": gzip.open,
        ".xz": lzma.open,
    }

    # Shorthand suffixes expanded to an (archive type, compression) pair.
    _FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {
        ".tbz": (".tar", ".bz2"),
        ".tbz2": (".tar", ".bz2"),
        ".tgz": (".tar", ".gz"),
    }

    # Compression suffix -> matching zipfile compression constant.
    _ZIP_COMPRESSION_MAP: Dict[str, int] = {
        ".bz2": zipfile.ZIP_BZIP2,
        ".xz": zipfile.ZIP_LZMA,
    }

    @property
    def raw_folder(self) -> str:
        """Default download folder: ./<ClassName> relative to the cwd."""
        return os.path.join('./', self.__class__.__name__)

    def _extract_tar(self, from_path: str, to_path: str, compression: Optional[str]) -> None:
        """Extract the tar archive at `from_path` into directory `to_path`.

        `compression` is a suffix like ".gz"/".bz2" (or None for plain tar)
        and selects the corresponding tarfile read mode.
        """
        with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar:
            tar.extractall(to_path)

    def _extract_zip(self, from_path: str, to_path: str, compression: Optional[str]) -> None:
        """Extract the zip archive at `from_path` into directory `to_path`."""
        with zipfile.ZipFile(
                from_path,
                "r",
                compression=self._ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED) as zip_file:
            zip_file.extractall(to_path)

    # Archive suffix -> extractor. The values are plain (unbound) functions
    # captured in the class body, so `self` must be passed explicitly at the
    # call site (see _extract_archive).
    _ARCHIVE_EXTRACTORS: Dict[str, Callable[..., None]] = {
        ".tar": _extract_tar,
        ".zip": _extract_zip,
    }

    def _url_retrieve(self, url: str, filename: str, chunk_size: int = 1024) -> None:
        """Stream the resource at `url` into `filename`, `chunk_size` bytes at a time."""
        request = urllib.request.Request(url, headers={"User-Agent": self.USER_AGENT})
        with open(filename, "wb") as fh:
            with urllib.request.urlopen(request) as response:
                # response.read() returns bytes, so the EOF sentinel is b"".
                for chunk in iter(lambda: response.read(chunk_size), b""):
                    fh.write(chunk)

    def _get_redirect_url(self, url: str, max_hops: int = 3) -> str:
        """Follow HTTP redirects and return the final url.

        Args:
            url: Starting URL.
            max_hops: Maximum number of redirects to follow.

        Raises:
            RecursionError: If the redirect chain exceeds `max_hops`.
        """
        initial_url = url
        headers = {"User-Agent": self.USER_AGENT}

        for _ in range(max_hops + 1):
            # HEAD fetches only headers, so redirect bodies are not downloaded.
            request = urllib.request.Request(url, headers=headers, method="HEAD")
            with urllib.request.urlopen(request) as response:
                if response.url == url or not response.url:
                    break
                url = response.url
        else:
            raise RecursionError(
                f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}."
            )
        return url

    @staticmethod
    def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
        """Return the hex MD5 digest of the file at `fpath`, read in chunks."""
        md5 = hashlib.md5()
        with open(fpath, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                md5.update(chunk)
        return md5.hexdigest()

    def _check_md5(self, fpath: str, md5: str, **kwargs: Any) -> bool:
        """Return True if the file's MD5 digest equals `md5`."""
        return md5 == self.calculate_md5(fpath, **kwargs)

    def _check_integrity(self, fpath: str, md5: Optional[str] = None) -> bool:
        """Return True if `fpath` exists and (when given) matches `md5`."""
        if not os.path.isfile(fpath):
            return False
        if not md5:
            # Without a checksum, mere existence counts as intact.
            return True
        return self._check_md5(fpath, md5)

    def _download_url(self, url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None,
                      max_redirect_hops: int = 3) -> None:
        """Download a file from a url and place it in root.

        Args:
            url: Source URL.
            root: Directory the file is saved into (created if missing).
            filename: Target file name; defaults to the URL basename.
            md5: Optional checksum used to validate an already-present file.
            max_redirect_hops: Maximum redirects to follow.
        """
        root = os.path.expanduser(root)

        if not filename:
            filename = os.path.basename(url)
        fpath = os.path.join(root, filename)

        os.makedirs(root, exist_ok=True)

        # Check if file is already present locally.
        if self._check_integrity(fpath, md5):
            print('Using downloaded and verified file: ' + fpath)
            return

        # Expand redirect chain if needed.
        url = self._get_redirect_url(url, max_hops=max_redirect_hops)

        # Download the file.
        try:
            print('Downloading ' + url + ' to ' + fpath)
            self._url_retrieve(url, fpath)
        except (urllib.error.URLError, IOError):
            if url.startswith('https'):
                # Some hosts only serve plain HTTP; retry once downgraded.
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                self._url_retrieve(url, fpath)
            else:
                raise

    def _detect_file_type(self, file: str) -> Tuple[str, Optional[str], Optional[str]]:
        """Classify `file` by its suffix(es).

        Returns:
            A tuple ``(suffix, archive_type, compression)``; `archive_type`
            and/or `compression` are None when not applicable, e.g.
            "a.tar.gz" -> (".tar.gz", ".tar", ".gz"), "a.gz" -> (".gz", None, ".gz").

        Raises:
            RuntimeError: If the file has no suffix or an unknown one.
        """
        suffixes = pathlib.Path(file).suffixes
        if not suffixes:
            raise RuntimeError(
                f"File '{file}' has no suffixes that could be used to detect the archive type and compression."
            )
        suffix = suffixes[-1]

        # Check if the suffix is a known alias.
        if suffix in self._FILE_TYPE_ALIASES:
            return suffix, self._FILE_TYPE_ALIASES[suffix][0], self._FILE_TYPE_ALIASES[suffix][1]

        # Check if the suffix is an archive type.
        if suffix in self._ARCHIVE_EXTRACTORS:
            return suffix, suffix, None

        # Check if the suffix is a compression.
        if suffix in self._COMPRESSED_FILE_OPENERS:
            # Check for suffix hierarchy, e.g. ".tar.gz".
            if len(suffixes) > 1:
                suffix2 = suffixes[-2]

                # Check if the suffix2 is an archive type.
                if suffix2 in self._ARCHIVE_EXTRACTORS:
                    return suffix2 + suffix, suffix2, suffix

            return suffix, None, suffix

        valid_suffixes = sorted(
            set(self._FILE_TYPE_ALIASES) | set(self._ARCHIVE_EXTRACTORS) | set(self._COMPRESSED_FILE_OPENERS))
        raise RuntimeError(f"Unknown compression or archive type: '{suffix}'.\nKnown suffixes are: '{valid_suffixes}'.")

    def _decompress(self, from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
        """Decompress a (non-archive) compressed file and return the output path.

        Args:
            from_path: Path of the compressed file.
            to_path: Output path; defaults to `from_path` with the
                compression suffix stripped (keeping any archive suffix).
            remove_finished: Delete `from_path` after decompressing.

        Raises:
            RuntimeError: If no compression suffix is detected.
        """
        suffix, archive_type, compression = self._detect_file_type(from_path)
        if not compression:
            raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.")

        if not to_path:
            to_path = from_path.replace(suffix, archive_type if archive_type else "")

        # We don't need to check for a missing key here, since this was already done in _detect_file_type().
        compressed_file_opener = self._COMPRESSED_FILE_OPENERS[compression]

        with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh:
            wfh.write(rfh.read())

        if remove_finished:
            os.remove(from_path)

        return to_path

    def _extract_archive(self, from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str:
        """Extract an archive (or decompress a plain compressed file).

        Args:
            from_path: Path of the archive.
            to_path: Output directory; defaults to the archive's directory.
            remove_finished: Delete `from_path` when done (decompress path only).

        Returns:
            The directory (or file, for bare compression) the content went to.
        """
        if not to_path:
            to_path = os.path.dirname(from_path)

        suffix, archive_type, compression = self._detect_file_type(from_path)
        if not archive_type:
            # Not an archive, only compressed: decompress next to `to_path`.
            return self._decompress(
                from_path,
                os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")),
                remove_finished=remove_finished,
            )

        # We don't need to check for a missing key here, since this was already done in _detect_file_type().
        extractor = self._ARCHIVE_EXTRACTORS[archive_type]

        # The dispatch table holds plain functions, so pass `self` explicitly.
        extractor(self, from_path, to_path, compression)

        return to_path

    def _download_and_extract_archive(self, url: str, download_root: str, extract_root: Optional[str] = None,
                                      filename: Optional[str] = None, md5: Optional[str] = None,
                                      remove_finished: bool = False) -> None:
        """Download an archive from `url` and extract it.

        Args:
            url: Source URL.
            download_root: Directory the archive is downloaded into.
            extract_root: Extraction directory; defaults to `download_root`.
            filename: Archive file name; defaults to the URL basename.
            md5: Optional checksum for the downloaded archive.
            remove_finished: Passed through to the extraction step.
        """
        download_root = os.path.expanduser(download_root)
        if not extract_root:
            extract_root = download_root
        if not filename:
            filename = os.path.basename(url)

        self._download_url(url, download_root, filename, md5)

        archive = os.path.join(download_root, filename)
        print("Extracting {} to {}".format(archive, extract_root))
        self._extract_archive(archive, extract_root, remove_finished)

    def _check_exists(self) -> bool:
        """Check whether every resource file already exists in `raw_folder`.

        NOTE(review): relies on `self.resources` (an iterable of (url, md5)
        pairs) being provided by a subclass; it is not defined on this class.
        """
        return all(
            self._check_integrity(os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0])) for url, _
            in self.resources)
