#!/usr/bin/env python3

# Copyright (c) 2020-2021 Fpemud <fpemud@sina.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.


import os
import time
import json
import glob
import errno
import fcntl
import pathlib
import subprocess
import atomicwrites
from datetime import datetime, timedelta
from ._util import Util
from ._const import SignatureFilename, AccessPointUtil
from ._errors import ConfigError, DiskCacheError, DiskCacheUpdateError
from ._backend import Backend


class SourceBase:
    """Common base for data source classes: validates and stores the source name."""

    # Maximum accepted length for a data source name.
    _NAME_LEN_MAX = 64

    def __init__(self, name):
        # Reject empty names first, then over-long ones.
        if not name:
            raise ConfigError('data source name is empty')
        if len(name) > self._NAME_LEN_MAX:
            raise ConfigError('data source name "%s" is too long' % (name))
        self._name = name

    def get_name(self):
        """Return the validated data source name."""
        return self._name


class Source(SourceBase):
    """A data source holding mirror site data fetched from a backend.

    Data lives in memory (self._dataDict) and may optionally be backed by an
    on-disk cache directory which is refreshed out-of-band (see
    SourceForDiskCacheUpdate).  In-memory data ages out after self._ageTime.
    """

    def __init__(self, name, cfg, param):
        """
        Args:
            name: data source name, validated by SourceBase.
            cfg: global configuration object.
            param: dict with mandatory key "backend" and optional keys
                   "use-disk-cache" (bool) and "aging-time" (seconds).
        """
        super().__init__(name)

        self._cfg = cfg

        # FIXME: one data source <-> one backend
        backendList = [Backend.new(cfg, self, k, v) for k, v in param["backend"].items()]
        assert len(backendList) >= 1
        self._backend = backendList[0]

        if param.get("use-disk-cache", False):
            self._cacheDir = os.path.join(self._cfg.get_cache_dir(), self._name)
        else:
            self._cacheDir = None

        self._ageTime = timedelta(seconds=param.get("aging-time", self._cfg.get_default_cache_aging_time()))

        self._full = False          # True once the complete data set has been loaded
        self._dataDict = {}         # mirror-site-name -> data

        # self._lastUpdateTimeDict exists when (self._full == False)
        # self._lastUpdateTime exists when (self._full == True)
        self._lastUpdateTimeDict = {}

    def get_data(self):
        """Return the in-memory data dict (mirror-site-name -> data)."""
        return self._dataDict

    def update(self, dtNow, mirror_name_list):
        """Refresh in-memory data from the disk cache and/or the backend.

        Args:
            dtNow: current datetime, used for age calculation.
            mirror_name_list: mirror site names to refresh, or None to
                              refresh the complete data set.

        Returns:
            bool: True if the in-memory data changed, False otherwise.
        """
        ret = False

        # always load the (relatively) newest disk cache data into memory
        if self._cacheDir is not None:
            try:
                ret |= self._fullLoadFromDiskCache()
            except DiskCacheError:
                pass

        # load from backend if necessary
        if mirror_name_list is None:
            if not self._full or dtNow - self._lastUpdateTime > self._ageTime:
                ret |= self._loadFromBackendGuarded(lambda: self._fullLoadFromBackend(dtNow))
        else:
            if self._full:
                if dtNow - self._lastUpdateTime <= self._ageTime:
                    mirror_name_list = [x for x in mirror_name_list if x not in self._dataDict]
                else:
                    # all mirrors are unqualified
                    pass
            else:
                mirror_name_list = [x for x in mirror_name_list if x not in self._dataDict or dtNow - self._lastUpdateTimeDict[x] > self._ageTime]

            if len(mirror_name_list) > 0:
                # NOTE: consistently use "<=" for the freshness test (the
                # original mixed "<" and "<=").
                if self._full and dtNow - self._lastUpdateTime <= self._ageTime:
                    # data is complete and fresh, so we strongly guess the
                    # remaining mirrors just don't exist
                    pass
                elif self._full:
                    # bugfix: data is complete but aged; per-mirror update
                    # times are no longer tracked in full mode, so a partial
                    # load is impossible (it would trip the assert in
                    # _partialLoadFromBackend) -- refresh the whole data set
                    ret |= self._loadFromBackendGuarded(lambda: self._fullLoadFromBackend(dtNow))
                else:
                    ret |= self._loadFromBackendGuarded(lambda: self._partialLoadFromBackend(dtNow, mirror_name_list))

        # return True: data changed
        # return False: data not changed
        return ret

    def _loadFromBackendGuarded(self, loadFunc):
        # Run loadFunc() unless the out-of-band cache updater is running.
        #
        # Normally, data in the disk cache never becomes aged thanks to
        # out-of-band keep-aliving.  But after poweroff for a long time,
        # although the system does its best to update the disk cache
        # immediately after reboot, mrget may still face an aged cache for
        # the (possibly long) period of updating.  We solve this problem by
        # still trusting the aged cache while the update is in progress, and
        # use the "bad" signature file to identify that periodical updating
        # failed.
        if self._cacheDir is not None and self._isCacheInitialized():
            if self._isCacheUpdating():
                return False
            # mark the cache before bypassing it; the next successful
            # out-of-band update removes the mark again
            self._createBadFile()
        return loadFunc()

    def _isCacheInitialized(self):
        # The cache counts as initialized when its directory exists and the
        # last-update-time signature file holds a valid integer.
        if not os.path.isdir(self._cacheDir):
            return False
        try:
            int(pathlib.Path(os.path.join(self._cacheDir, SignatureFilename.LAST_UPDATE_TIME)).read_text())
        except (OSError, ValueError):
            # narrowed from BaseException: only a missing/unreadable file or
            # non-integer content mean "not initialized"
            return False
        return True

    def _isCacheUpdating(self):
        # must be called after self._isCacheInitialized() returns True
        #
        # The out-of-band updater holds an exclusive lock on the "updating"
        # signature file while it runs; probe that lock with a non-blocking
        # shared lock.
        try:
            fd = os.open(os.path.join(self._cacheDir, SignatureFilename.UPDATING), os.O_RDONLY | os.O_CLOEXEC)
        except FileNotFoundError:
            return False                                # file does not exist
        try:
            fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
            return False                                # file exists but has no lock
        except OSError as e:
            # bugfix: the original errno test was inverted (it re-raised on
            # EAGAIN and treated every other IOError as "locked").  POSIX
            # allows either EAGAIN or EACCES for a held non-blocking lock.
            if e.errno in (errno.EAGAIN, errno.EACCES):
                return True                             # file exists and is locked
            raise
        finally:
            os.close(fd)

    def _fullLoadFromDiskCache(self):
        """Load the complete data set from the disk cache directory.

        Returns:
            bool: True if the in-memory data changed.

        Raises:
            DiskCacheError: cache missing, marked bad, invalid, or not newer
                            than what is already in memory.
        """
        if not os.path.isdir(self._cacheDir):
            raise DiskCacheError("cache directory does not exist")

        if os.path.exists(os.path.join(self._cacheDir, SignatureFilename.BAD)):
            raise DiskCacheError("problem found in cache directory keep-aliving")

        try:
            cacheSeconds = int(pathlib.Path(os.path.join(self._cacheDir, SignatureFilename.LAST_UPDATE_TIME)).read_text())
            dtCache = datetime.min + timedelta(seconds=cacheSeconds)
        except (OSError, ValueError, OverflowError):
            # narrowed from BaseException: missing/unreadable file, bad
            # integer, or an out-of-range timedelta
            raise DiskCacheError("cache directory has invalid content")

        # data on disk is older or smaller than data in memory
        if self._full and len(self._dataDict) > 0 and self._lastUpdateTime >= dtCache:
            raise DiskCacheError("no newer or larger data in cache directory")

        # every "<mirror-site-name>.json" file holds one data record.
        # bugfix: skip the signature files that the cache updater maintains
        # alongside the data files -- they do not end in ".json", so the
        # original loop raised DiskCacheError on every load attempt.
        signatureFilenameList = [SignatureFilename.LAST_UPDATE_TIME, SignatureFilename.BAD, SignatureFilename.UPDATING]
        dataDict = dict()
        for fn in os.listdir(self._cacheDir):
            if fn in signatureFilenameList:
                continue
            if not fn.endswith(".json"):
                raise DiskCacheError("invalid file \"%s\" in cache directory" % (fn))
            fullfn = os.path.join(self._cacheDir, fn)
            if not os.path.isfile(fullfn):
                raise DiskCacheError("invalid file \"%s\" in cache directory" % (fn))
            name = fn[:-len(".json")]
            with open(fullfn) as f:
                dataDict[name] = json.load(f)

        return self._setFullData(dataDict, dtCache)

    def _fullLoadFromBackend(self, dtNow):
        # Fetch the complete data set from the backend; returns True if the
        # in-memory data changed.
        return self._setFullData(_callBackend(self._backend, None), dtNow)

    def _setFullData(self, dataDict, dt):
        # Enter (or stay in) full mode with dataDict as the complete data
        # set; shared by the disk-cache load and the full backend load.
        ret = False
        if not self._full:
            del self._lastUpdateTimeDict        # only exists in partial mode
            self._full = True
            ret = True
        if self._dataDict != dataDict:
            self._dataDict = dataDict
            ret = True
        self._lastUpdateTime = dt
        return ret

    def _partialLoadFromBackend(self, dtNow, mirrorNameList):
        # Fetch only the mirrors in mirrorNameList and merge them into the
        # in-memory data; only valid in partial mode because per-mirror
        # update times are needed.
        assert not self._full
        ret = False

        dataDict = _callBackend(self._backend, mirrorNameList)
        for k, v in dataDict.items():
            if not ret and (k not in self._dataDict or self._dataDict[k] != v):
                ret = True
            self._dataDict[k] = v
        self._lastUpdateTimeDict.update(dict.fromkeys(dataDict, dtNow))

        return ret

    def _createBadFile(self):
        # NOTE(review): presumably this external helper creates the "bad"
        # signature file in the cache directory (perhaps needing different
        # privileges) -- confirm against mrget-helper's implementation.
        subprocess.check_output(["/usr/libexec/mrget-helper", self.get_name()])


class SourceForDiskCacheUpdate(SourceBase):
    """Out-of-band updater that refreshes the on-disk cache of a data source.

    Writes the cache directory layout consumed by Source: one
    "<mirror-site-name>.json" file per mirror site plus the signature files
    for last update time, bad state and updating state.
    """

    def __init__(self, name, cfg, param):
        """
        Args:
            name: data source name, validated by SourceBase.
            cfg: global configuration object.
            param: dict with mandatory key "backend" and optional key
                   "use-disk-cache" (bool).
        """
        super().__init__(name)

        self._cfg = cfg

        if param.get("use-disk-cache", False):
            self._cacheDir = os.path.join(self._cfg.get_cache_dir(), self._name)
            self._lastUpdateTimeFullfn = os.path.join(self._cacheDir, SignatureFilename.LAST_UPDATE_TIME)
            self._hasBadAccessFullfn = os.path.join(self._cacheDir, SignatureFilename.BAD)
            self._updateLockFullfn = os.path.join(self._cacheDir, SignatureFilename.UPDATING)
            self._backendList = [Backend.new(cfg, self, k, v) for k, v in param["backend"].items()]
        else:
            self._cacheDir = None       # disk cache disabled, update_disk_cache() is a no-op

    def update_disk_cache(self):
        """Fetch data from all backends and write it atomically to the cache
        directory.

        Raises:
            DiskCacheUpdateError: if the update lock cannot be acquired
                                  within 15 seconds.
        """
        if self._cacheDir is None:
            return

        os.makedirs(self._cacheDir, exist_ok=True)

        dtNow = datetime.now()
        fd = os.open(self._updateLockFullfn, os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC)
        try:
            # lock .updating so that Source._isCacheUpdating() sees us
            self._lockUpdatingFile(fd, dtNow)

            # update from backends; bugfix: _callBackend() operates on a
            # single backend object, the original passed the whole list and
            # would have crashed inside _callBackend.  Later backends
            # override earlier ones on key collision.
            dataDict = dict()
            for backend in self._backendList:
                dataDict.update(_callBackend(backend, None))

            # write data files atomically, then delete stale files
            fileList = [
                self._lastUpdateTimeFullfn,
                self._hasBadAccessFullfn,
                self._updateLockFullfn,
            ]
            for k, v in dataDict.items():
                fullfn = os.path.join(self._cacheDir, k + ".json")
                with atomicwrites.atomic_write(fullfn, overwrite=True) as f:
                    json.dump(v, f)
                fileList.append(fullfn)
            for fullfn in glob.glob(os.path.join(self._cacheDir, "*")):
                if fullfn not in fileList:
                    Util.forceDelete(fullfn)

            # write .last_update_time last, which also marks the cache as
            # initialized for Source._isCacheInitialized()
            with atomicwrites.atomic_write(self._lastUpdateTimeFullfn, overwrite=True) as f:
                f.write("%d" % ((dtNow - datetime.min).total_seconds()))

            # the update succeeded, clear the bad mark
            Util.forceDelete(self._hasBadAccessFullfn)
        finally:
            os.close(fd)                # closing the fd also releases the lock

    def _lockUpdatingFile(self, fd, dtStart):
        # Acquire an exclusive lock on the .updating file, retrying for up to
        # 15 seconds; raises DiskCacheUpdateError on timeout.
        while (datetime.now() - dtStart).total_seconds() < 15.0:
            try:
                fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                return
            except OSError as e:
                # EAGAIN/EACCES mean the file is already locked -- keep waiting
                if e.errno not in (errno.EAGAIN, errno.EACCES):
                    raise
            time.sleep(1.0)
        raise DiskCacheUpdateError("Failed to acquire lock for %s" % (self._updateLockFullfn))


def _callBackend(backend, mirror_name_list):
    dataDict = dict()

    for name, v in backend.get_data(mirror_name_list).items():
        dataDict[name] = {
            "sources": v.get("sources", []),
            "mirrors": v.get("mirrors", []),
        }
        for s in dataDict[name]["sources"]:
            try:
                AccessPointUtil.check_and_fill_object(s, True)
            except ValueError as e:
                raise ValueError("invalid %s source from backend %s, %s" % (name, backend.get_type().value, e))
        for m in dataDict[name]["mirrors"]:
            try:
                AccessPointUtil.check_and_fill_object(m, False)
            except ValueError as e:
                raise ValueError("invalid %s mirror from backend %s, %s" % (name, backend.get_type().value, e))

        # FIXME: should be replaced by mrselect command
        if False:
            tlist3 = []
            if True:
                import asyncio
                import asyncio_pool

                loop = asyncio.get_event_loop()
                pool = asyncio_pool.AioPool(size=999, loop=loop)

                async def _probeAndAppend(m):
                    if "protocol" in m and m["protocol"] == "rsync":
                        proc = await asyncio.create_subprocess_exec("rsync", "--list-only", "--timeout=10", m["url"], stdout=asyncio.subprocess.DEVNULL, stderr=asyncio.subprocess.DEVNULL)
                        retcode = await proc.wait()
                        if retcode == 0:
                            tlist3.append(m)
                    else:
                        tlist3.append(m)

                for m in dataDict[name]["mirrors"]:
                    pool.spawn_n(_probeAndAppend(m))
                loop.run_until_complete(pool.join())
            dataDict[name]["mirrors"] = tlist3

    return dataDict
