#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
File Name: hbase_exceptions.py
Author: morre <morre@vip.qq.com>
Create Time: 2022/12/31 17:01:29
Brief:
"""

from __future__ import absolute_import
import logging
import time
from threading import Lock, Semaphore
from collections import defaultdict


# Module logger, namespaced under the package-wide "hbase_client" logger.
logger = logging.getLogger("hbase_client." + __name__)


# All PyBase exceptions inherit from me. Assumes unrecoverable.
class PyBaseException(Exception):
    """Root of the PyBase exception hierarchy.

    Subclasses that don't override handle_exception inherit this default,
    which treats the error as unrecoverable and simply re-raises.
    """

    def handle_exception(self, hbase_client, **kwargs):
        """Re-raise a fresh instance of this exception type.

        BUG FIX: self.args must be unpacked when re-raising; passing the
        tuple whole nested the original args inside a 1-tuple.
        """
        _ = hbase_client, kwargs
        raise self.__class__(*self.args)


class DoNotRetryIOException(PyBaseException):
    """IOException flagged as not retryable; handled by the default
    re-raise behavior inherited from PyBaseException."""


# Parent of any exceptions involving Zookeeper
# Usually unrecoverable so use default _handle.
class ZookeeperException(PyBaseException):
    """Parent of every ZooKeeper-related exception. Usually unrecoverable,
    so the default re-raise handler applies.

    NOTE: ZooKeeper errors mostly go through dedicated exception handling
    in zk/client.py rather than this hierarchy's handlers.
    """


# Means we couldn't connect to ZK given a timeout.
class ZookeeperConnectionException(ZookeeperException):
    """Connecting to ZooKeeper failed within the allotted timeout."""


# ZK doesn't hold the necessary data specifying
# location of the master server.
class ZookeeperZNodeException(ZookeeperException):
    """ZooKeeper is missing the znode data that locates the master
    server."""


# ZK either returned strange data or data not prefaced
# with "PBUF"
class ZookeeperResponseException(ZookeeperException):
    """ZooKeeper returned unexpected data, or data not prefaced with
    "PBUF"."""


# Means an RS is dead or unreachable.
class RegionServerException(PyBaseException):
    """A region server is dead or unreachable.

    The failing server is identified either by an explicit (host, port)
    pair or by the region_client instance that raised the error.
    """

    def __init__(self, host=None, port=None, region_client=None):
        super().__init__()
        self.region_client = region_client
        self.host = host  # string
        self.port = port  # int
        # Fill host/port in from the client when they were not supplied.
        # BUG FIX: guard against region_client also being None — the
        # original unconditionally dereferenced it and raised
        # AttributeError when all three arguments were omitted.
        if (not self.host or not self.port) and region_client is not None:
            self.host = region_client.host
            self.port = region_client.port

    def handle_exception(self, hbase_client, **kwargs):
        """Purge the dead client from hbase_client's cache, back off, and
        return so the caller can retry.

        Only the first thread per region_client does the work; all others
        block in _let_one_through until it finishes.
        Raises self when the retry budget is exhausted.
        """
        logger.warning("process RegionServerException")
        # client not set? Then host/port must have been.
        # Fetch the client given the host, port
        if self.region_client is None:
            server_loc = f"{self.host}:{self.port}"
            self.region_client = hbase_client.reverse_client_cache.get(
                server_loc, None)
        # Let one greenlet through per region_client (returns True otherwise
        # blocks and eventually returns False)
        if _let_one_through(self, self.region_client):
            try:
                logger.warning("process let_one_through")
                if self.region_client is not None:
                    # We need to make sure that a different thread has not
                    # already reestablished to this region.
                    loc = f"{self.region_client.host}:{self.region_client.port}"
                    logger.warning("region_client loc: %s", loc)
                    if loc in hbase_client.reverse_client_cache:
                        # We are the first in and it is our job to kill the
                        # client. Purge it.
                        logger.warning(
                            "Region server %s:%d refusing connections."
                            " Purging cache, sleeping, retrying.",
                            self.region_client.host, self.region_client.port)
                        hbase_client.purge_region_client(self.region_client)
                        # Sleep for an arbitrary amount of time. If this returns
                        # False then we've hit our max retry threshold. Die.
                        if not _dynamic_sleep(self, loc):
                            logger.error("go die")
                            raise self
                    else:
                        # Likely a race (or a bug): another thread already
                        # purged and reconnected this client.
                        logger.warning(
                            "region_client not in client_cache.")
                else:
                    logger.warning("handle_exception but region_client is None")

            finally:
                # Notify all the other threads to wake up because we have
                #  handled the exception for everyone!
                logger.warning("handle_exception let_all_through")
                _let_all_through(self, self.region_client)


# RegionServer socket timeout
class RegionServerSocketTimeoutException(RegionServerException):
    """The socket to a region server timed out."""


# RegionServer stopped (gracefully).
class RegionServerStoppedException(RegionServerException):
    """The region server shut down gracefully."""


# All Master exceptions inherit from me
class MasterServerException(PyBaseException):
    """An error talking to the HBase Master server; all Master exceptions
    inherit from this class."""

    def __init__(self, host, port):
        super().__init__()
        self.host = host  # string
        self.port = port  # int

    def handle_exception(self, hbase_client, **kwargs):
        """Reestablish the Master connection once, on behalf of all
        waiting threads.

        The first thread through backs off then reopens the client; the
        rest block until it is done. Raises self when the retry budget is
        exhausted.
        """
        # Let one greenlet through. Others block and eventually return False.
        if _let_one_through(self, None):
            try:
                # Makes sure someone else hasn't already fixed the issue:
                # only act when the cached master client is gone or still
                # points at the same host:port that failed.
                if hbase_client.meta_region_client is None or \
                        (self.host == hbase_client.meta_region_client.host
                         and self.port == hbase_client.meta_region_client.port):
                    # BUG FIX: the two concatenated literals previously
                    # rendered "server.Sleeping" — a space was missing.
                    logger.warning(
                        "Encountered an exception with the Master server. "
                        "Sleeping then reestablishing.")
                    if not _dynamic_sleep(self, None):
                        raise self
                    hbase_client.open()
            finally:
                _let_all_through(self, None)


# Master gave us funky data. Unrecoverable.
class MasterMalformedResponseException(MasterServerException):
    """The Master replied with data we cannot parse. Unrecoverable."""

    def handle_exception(self, hbase_client, **kwargs):
        """Always re-raise a fresh instance for the same master."""
        _ = hbase_client, kwargs
        raise type(self)(self.host, self.port)


# All region exceptions inherit from me.
class RegionException(PyBaseException):
    """Base class for exceptions scoped to a single region."""

    def handle_exception(self, hbase_client, **kwargs):
        """Purge the cached location of kwargs["dest_region"], back off,
        then return so the caller can retry.

        Re-raises when no target region was supplied or when the retry
        budget is exhausted.
        """
        if "dest_region" not in kwargs:
            # Without knowing which region failed there is nothing to fix.
            raise self
        dest = kwargs["dest_region"]
        name = dest.region_name
        # Only the first thread per region does the purge; the rest block
        # in _let_one_through and then simply return to retry.
        if not _let_one_through(self, name):
            return
        try:
            hbase_client.purge_region_info(dest)
            if not _dynamic_sleep(self, name):
                raise self
        finally:
            _let_all_through(self, name)


# Region was moved to a different RS.
class RegionMovedException(RegionException):
    """The region was moved to a different region server."""


# Region is unavailable for whatever reason.
class NotServingRegionException(RegionException):
    """The region is unavailable for whatever reason."""


class RegionOpeningException(RegionException):
    """The region is still in the process of opening."""

    def handle_exception(self, hbase_client, **kwargs):
        """Wait out the region's opening phase.

        Re-raises when no target region was supplied or when the retry
        budget is exhausted.
        """
        if "dest_region" not in kwargs:
            raise self
        name = kwargs["dest_region"].region_name
        # Nothing to repair here - just give the region time to open.
        if not _dynamic_sleep(self, name):
            raise self


# Region is too busy now.
class RegionTooBusyException(RegionException):
    """The region is overloaded right now."""

    def handle_exception(self, hbase_client, **kwargs):
        """Back off to let the region's load drain.

        Re-raises when no target region was supplied or when the retry
        budget is exhausted.
        """
        if "dest_region" not in kwargs:
            raise self
        name = kwargs["dest_region"].region_name
        # Nothing to repair here - just give the busy region breathing room.
        if not _dynamic_sleep(self, name):
            raise self


# The user is looking up a table that doesn't
# exist. They're silly.
class NoSuchTableException(PyBaseException):
    """Lookup referenced a table that does not exist."""


# The user is looking up a CF that doesn't exist,
# also silly.
class NoSuchColumnFamilyException(PyBaseException):
    """Lookup referenced a column family that does not exist."""


# They gave us a malformed families structure.
class MalformedFamilies(PyBaseException):
    """The supplied families structure is malformed."""


# They gave us a malformed values structure.
class MalformedValues(PyBaseException):
    """The supplied values structure is malformed."""


# Input function parameter error.
class FunctionParamException(PyBaseException):
    """A function was called with invalid input parameters."""


# It starts getting a little intense below here. Why? Glad you asked.
# Reason is two fold -
#
# 1. Say 1000 greenlets all hit the same region server at the same time
# but that region server happens to be dead. Shit. 1000 exceptions were
# just thrown but we only want to handle it once because we're Mr. and
# Mrs. Efficient. How we go about doing that is we define two functions
# _let_one_through and _let_all_through. _let_one_through will instantly
# return True on the first greenlet but block for the other 999. Once the
# first greenlet handles the exception it then calls _let_all_through.
# After _let_all_through is called then _let_one_through will stop
# blocking and return False for the other 999 greenlets.
#
# 2. Say Master is down. We'll hit an exception, it'll handle it by
# reestablishing a connection to Master. But what if it's still down?
# We'll try reestablishing again. Still down? And again. Still down? And
# again. Instead of infinitely looping we need a way to measure how many
# times similar exceptions have been hit "recently" so we can have failure
# thresholds for even recoverable exceptions. We do that via the function
# _dynamic_sleep which buckets exceptions based on a few properties and
# keeps track of when similar exceptions have been thrown. We then
# exponentially increase our sleep between exceptions until eventually a
# threshold is hit which means we should give up and fail. This both
# allows us to avoid infinite loops but also dynamically backoff on the
# exception handling (it may take Master 30 seconds to come online. We
# want to hit it at 1,3,7,15,31 instead of at 1,2,3,4,5,6,7,...,30)

# Buckets are defined by a tuple (exception_class_name, affected
# client/region). Depending on the type of exception the second value can
# change to either be a client instance or a region instance. For each
# bucket we hold a Semaphore which when set indicates that someone is
# already processing the exception for that bucket, when not set means
# you're the first and it's your job to process it.
_buckets = defaultdict(Semaphore)
# We also have an access lock on the above dictionary.
_buckets_lock = Lock()


# Read above for a description.
# TODO: Use the semaphore in reverse to prevent the race condition in
# _let_all_through.
def _let_one_through(exception, data):
    my_tuple = (exception.__class__.__name__, data)
    # Grab my relevant semaphore.
    _buckets_lock.acquire()
    my_sem = _buckets[my_tuple]
    _buckets_lock.release()
    # Try to non-blocking acquire my semaphore. If I get it, woohoo! Otherwise
    # get comfy because we"re sitting on the semaphore.
    if my_sem.acquire(blocking=False):
        # Look at me - I"m the captain now.
        return True
    else:
        # Someone else is already handling
        # the exception. Sit here until they
        # release the semaphore.
        my_sem.acquire()
        my_sem.release()
        return False


# Read above for a description.
def _let_all_through(exception, data):
    my_tuple = (exception.__class__.__name__, data)
    _buckets_lock.acquire()
    my_sem = _buckets[my_tuple]
    my_sem.release()
    _buckets_lock.release()


# We want to sleep more and more with every exception retry.
def sleep_formula(x):
    """Quadratic backoff curve: seconds to sleep on retry number *x*.

    First ten values:
    [0.0, 0.44, 1.77, 4.0, 7.11, 11.11, 16.0, 21.77, 28.44, 36.0]
    """
    scaled = x / 1.5
    return scaled ** 2


# Per-bucket retry bookkeeping: (retry_count, timestamp_of_last_retry).
# First access of a bucket seeds it with zero retries as of "now".
_EXCEPTION_COUNT = defaultdict(lambda: (0, time.time()))
# Give up after this many consecutive retries within one bucket.
_MAX_RETRIES = 7
# Longest single sleep the backoff curve produces (~21.78 s at 7 retries).
_MAX_SLEEP = sleep_formula(_MAX_RETRIES)
# _MAX_AGGREGATE_SLEEP = reduce(
#     lambda x, y: x + y, [sleep_formula(x) for x in range(_MAX_RETRIES)])


# TODO: I'm not sure this function behaves as expected. Need to test further.
def _dynamic_sleep(exception, data):
    """Sleep for a quadratically growing interval per exception bucket.

    Buckets are keyed by (exception class name, data). Returns True after
    sleeping (the caller may retry), or False when this bucket has failed
    too many times too recently (the caller should give up and raise).
    """
    my_tuple = (exception.__class__.__name__, data)
    # (number of retries so far, timestamp of the last retry); the
    # defaultdict seeds first-time buckets with (0, now).
    retries, last_retry = _EXCEPTION_COUNT[my_tuple]
    logger.info(
        "exception_count: exception_name: %s, data: %s, retries: %s,"
        " last_retry: %s", str(exception.__class__.__name__), str(data),
        str(retries), str(last_retry))
    # Time elapsed since this bucket last retried.
    age = time.time() - last_retry
    if retries >= _MAX_RETRIES or age > (_MAX_SLEEP * 1.2):
        # Should we fail or was the last retry a long time ago?
        # My handwavy answer is if it's been less than half the
        # time of a max sleep since you last retried, you deserve
        # to be killed.
        #
        # Ex. _MAX_RETRIES=7. That means you've slept for 40.44
        # seconds already by the time you hit retries=7. If you
        # fail again within the next 21.77/2 = 10.9 seconds I'm
        # going to end you. Otherwise we'll restart the counter.
        if age < _MAX_SLEEP:
            # Failed again too soon after exhausting retries: give up.
            logger.error("_dynamic_sleep: go to die")
            return False
        # Quiet for long enough: reset the bucket and recurse once to
        # start a fresh retry sequence from retries=0.
        _EXCEPTION_COUNT.pop(my_tuple)
        return _dynamic_sleep(exception, data)
    new_sleep = sleep_formula(retries)
    _EXCEPTION_COUNT[my_tuple] = (retries + 1, time.time())
    # NOTE(review): "%02f" is a minimum-field-width-2 spec (a no-op here);
    # "%.2f" was likely intended — confirm before changing the log format.
    logger.info("Sleeping for %02f seconds.", new_sleep)
    time.sleep(new_sleep)
    return True
