#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
File Name: hbase_client.py
Author: morre <morre@vip.qq.com>
Create Time: 2022/12/31 16:57:06
Brief:
"""

from __future__ import absolute_import
import logging
import logging.config
import time
import struct
from threading import Lock
from collections import defaultdict
import codecs
import sqlite3
import gevent
from intervaltree import IntervalTree
from google.protobuf.json_format import MessageToDict

from . import util
from . import zk_util
from . import region_client
from . import request_util
from . import result_util
from .filters import _to_filter, PrefixFilter
from .hbase_exceptions import (
    PyBaseException, DoNotRetryIOException, RegionServerException,
    NoSuchTableException, MasterServerException, FunctionParamException,
    RegionServerSocketTimeoutException, NoSuchColumnFamilyException,
    RegionException)
from .pb.HBase_pb2 import RegionInfo as pbRegionInfo

# Using a tiered logger such that all submodules propagate through to this
# logger. Changing the logging level here should affect all other modules.
logger = logging.getLogger("hbase_client")


# TODO: To support caching hbase:meta table info, split this into separate
# Register and Parser classes.
class RegionRegister():
    """One HBase region's metadata plus its connection registration.

    Parses region info out of ``hbase:meta`` result cells and registers the
    region (together with a RegionClient connected to its region server)
    into the caches of the HbaseClient passed in as ``callback_client``.
    """
    # Magic prefix of a protobuf-encoded region-info cell: the ASCII bytes
    # b"PBUF" read as one big-endian 32-bit integer.
    header_magic = int(codecs.encode(b"PBUF", "hex"), 16)

    def __init__(self):
        # All attributes are filled in by register()/parse_cells().
        self.table = None          # table name as bytes ("ns:qualifier" form)
        self.region_name = None    # full region name (row key of meta cell)
        self.start_key = None      # first row key served by this region
        self.stop_key = None       # end row key (b"" for the last region)
        self.server_loc = None     # b"host:port" of the hosting region server
        self.region_client = None  # RegionClient connected to server_loc

    def register_by_cells(self, cells, callback_client):
        """Parse meta ``cells``, then register with ``callback_client``."""
        self.parse_cells(cells)
        self.do_register(callback_client)

    def register(
            self, table, region_name, start_key, stop_key, server_loc,
            callback_client):
        """Register a region from already-known attributes.

        All attributes are normalized to bytes before registration.
        """
        self.table = bytes(table)
        self.region_name = bytes(region_name)
        self.start_key = bytes(start_key)
        self.stop_key = bytes(stop_key)
        self.server_loc = bytes(server_loc)
        self.do_register(callback_client)

    def do_register(self, callback_client):
        """Attach a region client, then publish this region into the cache."""
        self.create_region_client(callback_client)
        # Region has set up! Add this puppy to the cache so future requests can
        # use it.
        callback_client.insert_region_cache(self)

    def create_region_client(self, callback_client):
        """Reuse or create the RegionClient that serves self.server_loc."""
        # If we have an existing client for this region server already, grab it!
        self.region_client = callback_client.reverse_client_cache.get(
            self.server_loc.decode(), None)
        if not self.region_client:
            # Otherwise we need to create a new region client instance.
            host, port = self.server_loc.split(b":")
            new_client = region_client.RegionClient(
                host.decode(), int(port), callback_client.pool_size,
                callback_client.rpc_socket_timeout)
            # NOTE(review): a constructor call never returns None, so this
            # check looks unreachable — presumably left over from an earlier
            # factory-style API; confirm before removing.
            if new_client is None:
                raise RegionServerException(host=host, port=port)
            # Welp. We can not connect to the server that the Master
            # supplied. Raise an exception.
            # if not new_client.open():
            #    logger.exception("region_client open except.")
            #    raise RegionServerException(host=host, port=port)
            # Add it to the host,port -> instance of region client map.
            callback_client.reverse_client_cache[self.server_loc.decode()] = \
                new_client
            # Attach the region_client to the region.
            self.region_client = new_client
        # Add this region to the region_client's internal
        # list of all the regions it serves.
        self.region_client.regions.append(self)
        logger.info("Created new RegionRegister: %s", (str(self)))

    def parse_cells(self, cells):
        """Populate region attributes from ``hbase:meta`` result cells.

        Raises:
            NoSuchTableException: when no cells came back (unknown table).
        """
        # We have a valid response but no cells? Apparently that means the
        # table doesn"t exist!
        if not cells:
            raise NoSuchTableException("Table does not exist.")
        # We get ~4 cells back each holding different information. We only care
        # about two of them.
        for cell in cells:
            if cell.qualifier == b"regioninfo":
                # Take the regioninfo information and parse it into our own
                # Region representation.
                self.parse_region_info_cell(cell)
            elif cell.qualifier == b"server":
                # Grab the host, port of the Region Server that this region is
                # hosted on.
                self.server_loc = cell.value
            else:
                # TODO: parse them.
                continue

    def parse_region_info_cell(self, cell):
        """Decode one protobuf-encoded ``regioninfo`` cell into attributes.

        Raises:
            RuntimeError: when the cell does not start with the PBUF magic.
        """
        magic = struct.unpack(">I", cell.value[:4])[0]
        # 4 bytes: PBUF
        if magic != self.header_magic:
            # Either it"s a corrupt message or an
            # unsupported region info version.
            raise RuntimeError(
                "HBase returned an invalid response (are you running a version"
                " of HBase supporting Protobufs?)")
        region_info = pbRegionInfo()
        # Skip the 4-byte magic; the last 4 bytes are trimmed too —
        # presumably a trailer after the pb payload, TODO confirm the
        # meta cell layout.
        region_info.ParseFromString(cell.value[4:-4])
        # merge namespace and table_name
        namespace = region_info.table_name.namespace
        if not namespace or namespace == b"default":
            namespace = b""
            self.table = region_info.table_name.qualifier
        else:
            self.table = b":".join(
                [namespace, region_info.table_name.qualifier])
        self.region_name = cell.row
        self.start_key = region_info.start_key
        self.stop_key = region_info.end_key

    def __repr__(self):
        """Debug representation: keys, names and the attached client."""
        return str({
            "table": self.table,
            "region_name": self.region_name,
            "start_key": self.start_key,
            "stop_key": self.stop_key,
            "region_client": str(self.region_client),
        })


class HbaseClient:
    """HBase客户端类"""

    def __init__(
            self, zkquorum, zk_root_path, init_socket_pool_size,
            establish_connection_timeout, missing_znode_retries,
            rpc_socket_timeout, auto_flush=False, max_write_buff_size=100,
            auto_connect=False):
        """Build a client; optionally connect to the meta region right away.

        Args:
            zkquorum: ZooKeeper quorum as a comma-separated host list.
            zk_root_path: HBase root znode path in ZooKeeper.
            init_socket_pool_size: socket pool size per region server.
            establish_connection_timeout: ZooKeeper connect timeout.
            missing_znode_retries: retries while the meta znode is missing.
            rpc_socket_timeout: socket timeout for region server RPCs.
            auto_flush: when True, writes bypass the local write buffer.
            max_write_buff_size: buffered writes kept before a flush.
            auto_connect: when True, open() is called immediately.
        """
        # ZooKeeper / meta lookup settings.
        self.zkquorum = zkquorum
        self.zk_root_path = zk_root_path
        self.establish_connection_timeout = establish_connection_timeout
        self.missing_znode_retries = missing_znode_retries

        # Region server connection settings.
        self.pool_size = init_socket_pool_size
        self.rpc_socket_timeout = rpc_socket_timeout
        # Persistent connection to the meta region server (set by open()).
        self.meta_region_client = None

        # Region cache: an interval tree keyed on b"table,rowkey" ranges so
        # any region lookup is O(log n).
        self.region_cache = IntervalTree()
        # Maps a client's "host:port" string to its RegionClient instance.
        self.reverse_client_cache = {}
        # Guards every region-cache mutation.
        self.cache_lock = Lock()
        # Serializes meta lookups so only one caller asks the master at once.
        self.search_region_cache_lock = Lock()

        # Per-operation retry policy.
        self.op_retry_times = 35
        self.op_retry_interval = 0.3

        # Client-side write buffering.
        self.auto_flush = auto_flush
        self.max_write_buff_size = max_write_buff_size
        self.current_write_buff_size = 0
        self.write_buff = []
        self.flush_commit_lock = Lock()

        if auto_connect:
            self.open()

    def set_op_retry_times(self, op_retry_times):
        self.op_retry_times = op_retry_times

    def open(self):
        """Locate the meta region through ZooKeeper and connect to it.

        Raises:
            MasterServerException: the server ZooKeeper pointed at cannot
                be connected to.
        """
        if self.meta_region_client is not None:
            # Drop any previous meta connection before reconnecting.
            # NOTE: sometimes observed as None mid-lifecycle; reason unknown.
            self.meta_region_client.close()
        # Ask ZooKeeper where the meta region currently lives.
        host, port = zk_util.locate_meta_region(
            self.zkquorum, self.establish_connection_timeout,
            self.missing_znode_retries, self.zk_root_path)
        try:
            # The meta client deliberately uses a small pool (5 sockets) to
            # go easy on the meta region server.
            self.meta_region_client = region_client.RegionClient(
                host, port, init_pool_size=5,
                socket_timeout=self.rpc_socket_timeout)
            if not (self.meta_region_client
                    and self.meta_region_client.open()):
                raise RegionServerException
        except RegionServerException as exc:
            # ZooKeeper's answer was stale or the server is down.
            raise MasterServerException(host, port) from exc

    # HERE LAY CACHE OPERATIONS

    def insert_region_cache(self, new_region):
        """Insert a freshly registered region, evicting stale overlaps.

        Any cached region overlapping the new one is considered stale: its
        client is closed and the entry removed before the insert.
        """
        end = new_region.stop_key
        if not end:
            # The interval tree needs a hard upper bound, so an open-ended
            # last region is capped at b"\xff\xff". Row keys prefixed with
            # b"\xff" will therefore miss the cache on every request.
            end = b"\xff\xff"
        # Cache intervals are keyed like b"tablename,rowkey".
        low = new_region.table + b"," + new_region.start_key
        high = new_region.table + b"," + end

        # Only one thread may touch the cache at a time.
        with self.cache_lock:
            # Overlapping entries are stale: close their attached clients.
            # TODO: ...should we really be killing a client unneccessarily?
            for stale in self.region_cache[low:high]:
                stale.data.region_client.close()
            self.region_cache.remove_overlap(low, high)
            self.region_cache[low:high] = new_region

    def search_region_cache(self, table, key, reverse):
        # Only let one person touch the cache at once.
        with self.cache_lock:
            if key is None:
                key = b""
            # Fetch the region that serves this key
            if reverse:
                key = b"\xff\xfe"
            regions = self.region_cache[b",".join([table, key])]
            try:
                # Returns a set. Pop the element from the set.
                # (there shouldn"t be more than 1 elem in the set)
                a = regions.pop()
                return a.data
            except KeyError:
                logger.debug("regions miss: %s", str(self.region_cache))
                # Returned set is empty? Cache miss!
                return None

    def remove_region_cache(self, table, start_key, stop_key):
        """Drop cached regions of ``table`` overlapping [start_key, stop_key).

        The caller must already hold ``cache_lock``; no lock is taken here.
        """
        logger.info(
            "remove_region_cache table: %s, start_key: %s, stop_key: %s",
            table, start_key, stop_key)
        # Mirror insert_region_cache: open-ended stop keys are capped.
        end = stop_key if stop_key else b"\xff\xff"
        self.region_cache.remove_overlap(
            table + b"," + start_key, table + b"," + end)

    # HERE LAY REQUESTS

    def exists(self, table):
        """Check whether ``table`` exists by probing hbase:meta.

        :param table: hbase table
        :return: True or False (None only if every retry failed)
        """
        for _ in range(self.op_retry_times):
            try:
                # Meta lookups always target the fixed meta region name.
                self.meta_region_client.region_name = b"hbase:meta,,1"
                meta_rq = request_util.Request.get(
                    self.meta_region_client, table, {}, None, False, None)
                response = self.meta_region_client.run(meta_rq)
                row = result_util.cells_to_row(
                    response.result.cell, with_timestamp=None)
                return bool(row)
            except PyBaseException as e:
                e.handle_exception(self, dest_region=None)
        # not reachable in practice: every retry raised and was handled
        return None

    def get(
            self, table, key, families=None, filters=None, exists=False,
            time_range=None, with_timestamp=False):
        """Fetch one row (or check its existence) with optional filtering.

        :param table: hbase table
        :param key: row key
        :param families: (optional) columns to fetch, e.g.
            {"columnFamily1": ["col1", "col2"], "colFamily2": "col3"}
        :param filters: (optional) column filters
        :param exists: when True, return only whether the row exists
        :param time_range: optional [min, max] timestamp range
        :param with_timestamp: include timestamps in the decoded row
        :return: decoded row dict, or a bool when ``exists`` is True
        """
        if families is None:
            families = {}

        for _ in range(self.op_retry_times):
            # Reset before the lookup so the except-clause can always
            # reference dest_region, even if find_hosting_region raises.
            dest_region = None
            try:
                # Locate the region serving this key, build the request,
                # then send it to that region's client.
                dest_region = self.find_hosting_region(table, key, False)
                request = request_util.Request.get(
                    dest_region, key, families, filters, exists, time_range)
                resp = dest_region.region_client.run(request)
                if exists:
                    return resp.result.exists
                return result_util.cells_to_row(
                    resp.result.cell, with_timestamp)
            except PyBaseException as e:
                # Exceptions know how to repair the state they broke
                # (refresh region caches, reconnect, ...). dest_region tells
                # the handler which region/client to reestablish. If the
                # error is unrecoverable, handle_exception re-raises.
                e.handle_exception(self, dest_region=dest_region)
            # State repaired — loop around and repeat the request.

    def put(self, table, key, values, timestamp=None, auto_flush=False):
        """Write ``values`` to ``key``; buffered unless auto-flushing."""
        if not (auto_flush or self.auto_flush):
            # Buffered path: queue the action, flush later in bulk.
            self.do_action(table, key, values, timestamp, rq_type_str="put")
            return None
        return self.mutate(
            table, key, values, timestamp, rq_type=request_util.Request.put)

    def delete(self, table, key, values=None, auto_flush=False):
        """Delete a row, or only the cells named in ``values``."""
        if values is None:
            values = {}

        if not (auto_flush or self.auto_flush):
            # Buffered path: queue the action, flush later in bulk.
            self.do_action(
                table, key, values, timestamp=None, rq_type_str="delete")
            return None
        return self.mutate(
            table, key, values, timestamp=None,
            rq_type=request_util.Request.delete)

    def append(self, table, key, values, auto_flush=False):
        """Append ``values`` to the existing cells of ``key``."""
        if not (auto_flush or self.auto_flush):
            # Buffered path: queue the action, flush later in bulk.
            self.do_action(
                table, key, values, timestamp=None, rq_type_str="append")
            return None
        return self.mutate(
            table, key, values, timestamp=None,
            rq_type=request_util.Request.append)

    def increment(self, table, key, values, auto_flush=False):
        """Atomically increment the counter cells named in ``values``."""
        if not (auto_flush or self.auto_flush):
            # Buffered path: queue the action, flush later in bulk.
            self.do_action(
                table, key, values, timestamp=None, rq_type_str="increment")
            return None
        return self.mutate(
            table, key, values, timestamp=None,
            rq_type=request_util.Request.increment)

    def mutate(self, table, key, values, timestamp, rq_type):
        """Run one mutation request, retrying on recoverable errors.

        Shared implementation for put/delete/append/increment — they all
        follow the same locate/build/send pattern as ``get``.
        """
        for _ in range(self.op_retry_times):
            dest_region = None
            try:
                dest_region = self.find_hosting_region(table, key, False)
                request = rq_type(dest_region, key, values, timestamp)
                return dest_region.region_client.run(request).processed
            except RegionServerSocketTimeoutException:
                # Socket timeouts on mutations are surfaced to the caller
                # rather than retried blindly.
                raise
            except PyBaseException as e:
                e.handle_exception(hbase_client=self, dest_region=dest_region)
        return None

    def do_action(self, table, key, values, timestamp, rq_type_str):
        """Queue one write-buffer action, draining the buffer if over-full."""
        action = request_util.Action(
            table=table, key=key, values=values, timestamp=timestamp,
            method=rq_type_str)
        if len(self.write_buff) > self.max_write_buff_size:
            # Opportunistically flush once the buffer exceeds capacity.
            self.flush_commits()
        self.write_buff.append(action)

    def flush_commits(self, force=False, with_timestamp=False, exists=False):
        """Flush buffered write actions as one multi request.

        Unless ``force`` is True, nothing happens until the buffer has
        reached ``max_write_buff_size``.

        Returns:
            Indexes of the actions that never completed, or None when
            nothing was flushed.

        Raises:
            The batch-level exception, if the commit ultimately failed.
        """
        logger.debug(
            "flush_commits buff size:%s force:%s", len(self.write_buff), force)
        with self.flush_commit_lock:
            if not force and len(self.write_buff) < self.max_write_buff_size:
                return None
            # Swap the buffer out under the lock; commit outside of it.
            pending = self.write_buff
            self.write_buff = []

        if not pending:
            return None
        # Returns (response, rest_list, exception).
        _, rest_list, exception = self.batch_commit(
            pending, request_util.Request.multi, with_timestamp, exists)
        if exception:
            raise exception
        return rest_list

    def deal_retry(self,
                   batch_exception,
                   handled_exceptions_class,
                   dest_client):
        """Handle a batch exception once and decide whether to retry.

        Args:
            batch_exception: the PyBaseException raised for an action/region.
            handled_exceptions_class: mutable set of exception classes
                already handled during this batch round.
            dest_client: region passed through to ``handle_exception`` so it
                knows which region/client to reestablish.

        Returns:
            None: this exception class was already handled this round;
                the caller should skip the action.
            True: the exception is recoverable; retry the action.
            False: the exception is fatal for this action; do not retry.
        """
        # Handle each exception class at most once per batch round so that
        # e.g. a region cache refresh is not repeated for every action.
        if batch_exception.__class__ in handled_exceptions_class:
            return None
        batch_exception.handle_exception(
            hbase_client=self, dest_region=dest_client)
        handled_exceptions_class.add(batch_exception.__class__)

        # These exception types mean the request itself can never succeed,
        # so retrying is pointless.
        fatal_types = (NoSuchTableException, NoSuchColumnFamilyException,
                       DoNotRetryIOException)
        return not isinstance(batch_exception, fatal_types)

    def analysis_batch_result(self,
                              action_results,
                              region_action_dict,
                              working_pool,
                              exception_pool,
                              finish_pool,
                              handled_exceptions_class,
                              region_name):
        """Sort one region's batch results into the retry/finish pools.

        Args:
            action_results: one region's result message; carries either a
                region-level ``exception`` or per-action entries in
                ``result_or_exception``.
            region_action_dict: region_name -> list of [client, action,
                index] triples, as built by ``batch``.
            working_pool: mutated in place; retryable actions are appended.
            exception_pool: mutated in place; (index, region_name, action,
                result) records for failed actions.
            finish_pool: mutated in place; indexes of actions that are done
                (succeeded, or failed permanently).
            handled_exceptions_class: exception classes already handled in
                this batch round (shared mutable set).
            region_name: key identifying the region these results belong to.

        Returns:
            (retry, result_pool): whether another batch round is needed,
            plus the successful results collected from this region.
        """
        retry = False
        result_pool = []
        # A region-level exception applies to every action sent to this
        # region; triage each one through deal_retry.
        batch_exception = action_results.exception
        if batch_exception:
            logger.warning(
                "batch_commit hit batch_exception: %s",
                batch_exception.__class__)
            for client, action, index in region_action_dict[region_name]:
                # deal_retry is tri-state: None = already handled (skip),
                # False = permanent failure, True = retryable.
                need_retry = self.deal_retry(
                    batch_exception, handled_exceptions_class, client)
                if need_retry is None:
                    continue
                elif need_retry is False:
                    finish_pool.append(index)
                elif need_retry is True:
                    working_pool.append((index, action))
                    exception_pool.append((index, region_name, action, None))

                retry |= need_retry

        # Per-action results: result["index"] points back into the order in
        # which actions were grouped for this region.
        for result in action_results.result_or_exception:
            client, action, index = \
                region_action_dict[region_name][result["index"]]
            action_exception = result.get("exception", None)
            if not action_exception:
                # Success: keep the pb result and mark the action finished.
                result_pool.append(result)
                finish_pool.append(index)
                continue

            logger.warning(
                "batch_commit hit action_exception: %s",
                action_exception.__class__)
            need_retry = self.deal_retry(
                action_exception, handled_exceptions_class, client)
            if need_retry is None:
                continue
            elif need_retry is False:
                finish_pool.append(index)
            elif need_retry is True:
                working_pool.append((index, action))
                exception_pool.append((index, region_name, action, result))

            retry |= need_retry

        return retry, result_pool

    def batch(
            self, working_pool, finish_pool, exception_pool, commit_func,
            with_timestamp, exists, **kwargs):
        """Run one round of actions, fanned out per region via gevent.

        Args:
            working_pool: list of (index, action) pairs to execute.
                NOTE(review): the pool is not drained here and retryable
                failures are appended back by analysis_batch_result —
                confirm that already-finished actions cannot be re-sent on
                the next round.
            finish_pool: mutated in place; indexes of completed actions
                (success and permanent failure).
            exception_pool: mutated in place; failure records per action.
            commit_func: batch request builder called once per region.
            with_timestamp: currently unused here (see TODO below).
            exists: forwarded to commit_func as the ``exists`` kwarg.
            kwargs: extra parameters for commit_func.

        Returns:
            (retry, response_result): whether a retry round is needed, and
            the list of successful protobuf results.
        """
        # TODO: process with_timestamp
        _ = with_timestamp
        # break up in to regionserver-sized chunks
        region_action_dict = defaultdict(list)
        for index, action in working_pool:
            dest_region = self.find_hosting_region(
                action.table, action.key, False)
            region_action_dict[dest_region.region_name].append(
                [dest_region, action, index])

        # make request: one multi-request per region server.
        region_request_dict = {}
        for region_name, items in region_action_dict.items():
            region = items[0][0]
            actions = [item[1] for item in items]
            kwargs["exists"] = exists
            request = commit_func(region, actions, **kwargs)
            region_request_dict[region_name] = (region, request)

        retry = False

        # Placeholder: greenlet failures are currently detected via empty
        # batch_response values below; this linked handler is a no-op.
        def exception_callback(g):
            _ = g
            # logger.warning("exception_callback begin")
            # try:
            #    g.get()
            # except PyBaseException as ex:
            #    logger.exception("exception_callback catch PyBaseException")
            #    ex.handle_exception(hbase_client=self)
            #    for dest_region, action, index \
            #    in region_action_dict[workers_region_dict[g]]:
            #        working_pool.append((index, action))
            #        retry = True

        # Spawn one greenlet per region request and remember which region
        # each greenlet belongs to.
        workers_region_dict = {}
        for region_name, (region, request) in region_request_dict.items():
            g = gevent.spawn(region.region_client.run, request)
            workers_region_dict[g] = region_name
            g.link_exception(exception_callback)

        # If an exception occurs, empty the worker_list and try again
        handled_exceptions_class = set()
        response_result = []

        # Consume greenlets as they finish, in completion order.
        for batch_response in gevent.iwait(list(workers_region_dict.keys())):
            region_name = workers_region_dict[batch_response]
            logger.debug(
                "batch_commit iwait worker:%s region_name:%s",
                batch_response, region_name)

            # A greenlet that died (or returned nothing) yields no value;
            # schedule another round for its actions.
            if not batch_response.value:
                logger.debug(
                    "batch_response.value is empty.",
                    exc_info=batch_response.exc_info)
                retry = True
                continue
            for action_results in batch_response.value:
                action_result_retry, response = self.analysis_batch_result(
                    action_results, region_action_dict, working_pool,
                    exception_pool, finish_pool, handled_exceptions_class,
                    region_name)
                logger.debug(
                    "action_result_retry: %s, working_pool_size: %d, "
                    "finish_pool_size: %d, exception_pool_size: %d",
                    str(action_result_retry), len(working_pool),
                    len(finish_pool), len(exception_pool))
                retry |= action_result_retry
                response_result.extend(response)
        logger.debug(
            "retry: %s, working_pool_size: %d, finish_pool_size: %d, "
            "exception_pool_size: %d, response_result: %d", str(retry),
            len(working_pool), len(finish_pool), len(exception_pool),
            len(response_result))
        return retry, response_result

    def batch_commit(
            self, actions, commit_func, with_timestamp, exists, **kwargs):
        """Commit a list of actions in batches, retrying failed rounds.

        Args:
            actions: List of Action to execute.
            commit_func: Batch commit function used to build each request.
            with_timestamp: Whether decoded results carry timestamps.
            exists: Whether this is an existence-check style batch.
            kwargs: Extra params forwarded to commit_func.

        Returns:
            Tuple of (response, unfinish_pool, ret_exception):
                response: list of protobuf results.
                unfinish_pool: indexes of actions that never completed.
                ret_exception: a PyBaseException when retries were
                    exhausted, else None.

        Raises:
            PyBaseException: when ``actions`` or ``commit_func`` is falsy.
        """
        if not (actions and commit_func):
            raise PyBaseException("batch commit parameter include null value")

        working_pool = list(enumerate(actions))
        exception_pool = []
        finish_pool = []
        response = []
        need_retry = False
        for retry_times in range(self.op_retry_times):
            if retry_times > 0:
                # Linear back-off between retry rounds.
                time.sleep(retry_times)
                logger.debug(
                    "batch_commit retries:%d working_pool size:%d "
                    "finish_actions size:%d exceptions size:%d",
                    retry_times, len(working_pool), len(finish_pool),
                    len(exception_pool))
            # NOTE(review): working_pool is shared across rounds and only
            # ever appended to (by analysis_batch_result) — confirm finished
            # actions are not re-sent on subsequent rounds.
            need_retry, response = self.batch(
                working_pool, finish_pool, exception_pool, commit_func,
                with_timestamp, exists, **kwargs)
            if not need_retry:
                logger.debug("not need retry, retry_times: %d", retry_times)
                break

        # Retries exhausted: log every failing action and prepare the
        # exception the caller should raise.
        exception_indexes = []
        ret_exception = None
        if need_retry:
            ret_exception = PyBaseException("batch commit exception!")
            for index, region_name, action, result in exception_pool:
                exception_indexes.append(index)
                exception_info = "local_exception"
                errmsg = "null"
                if result:
                    exception_info = result.get("exception", "null")
                    errmsg = result.get("errmsg", "null")
                logger.warning(
                    "region_name:%s key:%s exception:%s errmsg:%s",
                    region_name, action.key, exception_info, errmsg)

        # Collect indexes of actions that neither finished nor failed.
        done_index = set(finish_pool + exception_indexes)
        unfinish_pool = [
            index for index in range(len(actions)) if index not in done_index]

        logger.debug(
            "batch commit actions size:%d  finish size:%d "
            "exceptions size:%d rest size:%d", len(actions),
            len(finish_pool), len(exception_indexes), len(unfinish_pool))

        return (response, unfinish_pool, ret_exception)

    def multi_unpack(self, response_results, with_timestamp, exists):
        if not response_results:
            return []
        values = []
        if exists is True:
            for response_result in response_results:
                res_exists = response_result["result"].exists
                # exists return rowkey, exists pairs.
                values.append(res_exists)
        else:
            for response_result in response_results:
                unpack_value = result_util.cells_to_row(
                    response_result["result"].cell, with_timestamp)
                if not unpack_value:
                    continue
                values.append(unpack_value)
        return values

    def mget_impl(self, table, keys, families, filters, with_timestamp,
                  exists):
        """Build get-actions for ``keys`` and run them as a single batch.

        Raises:
            PyBaseException: batch-level failure, or leftover actions after
                all retries.
        """
        actions = [
            request_util.Action(
                table=table, key=key, values=None, method="get",
                timestamp=None)
            for key in keys
        ]

        response, rest_actions, exception = self.batch_commit(
            actions, request_util.Request.multi_get, with_timestamp, exists,
            families=families, filters=filters)
        if exception:
            raise exception
        if rest_actions:
            raise PyBaseException(
                f"mget finally failed. rest actions size {len(rest_actions)}")

        return self.multi_unpack(response, with_timestamp, exists)

    def mget(
            self, table, keys=None, families=None, filters=None,
            batch_size=10000, with_timestamp=False, exists=False):
        """Fetch many rows, yielding results chunk by chunk.

        Args:
            table: hbase table.
            keys: list of row keys (required despite the None default).

        Yields:
            A dict per row mapping "family:qualifier" to the cell value,
            e.g. {"data:key": value} — or booleans when ``exists`` is True.

        Raises:
            FunctionParamException: bad table/keys arguments.
            PyBaseException: batch failure.
        """
        keys = keys if keys is not None else []
        families = families if families is not None else {}

        if not (table and keys and isinstance(keys, list)):
            raise FunctionParamException(
                "mget function input parameter format error")

        # Fetch in chunks so a huge key list never becomes one giant batch.
        for key_chunk in util.partition(keys, batch_size):
            yield from self.mget_impl(
                table, key_chunk, families, filters, with_timestamp, exists)

    class Scanner():
        """hbase scan类, 用于给scan操作提供游标等状态管理"""

        def __init__(
                self, table, batch_size, caching, number_of_rows, start_key,
                stop_key, families, filters, reverse, limit_of_rows, is_meta,
                time_range):
            self.table = table
            self.batch_size = batch_size
            self.caching = caching
            self.number_of_rows = number_of_rows
            self.start_key = start_key
            self.stop_key = stop_key
            self.families = families
            self.filters = None
            self.reverse = reverse
            self.limit_of_rows = limit_of_rows
            # We convert the filter immediately such that it does not have to be
            #  done for every region. However if the filter has already been
            #  converted then we can not convert it again. This means that even
            #  though we send out N RPCs.
            # we only have to package the filter pb type once.
            if filters is not None and type(filters).__name__ != "Filter":
                self.filters = _to_filter(filters)

            self.retry_times = 10
            self.retry_interval = 3
            # state
            self.previous_key = start_key
            if reverse:
                self.previous_key = stop_key
            self.scanner_id = None
            self.cur_region = None
            self.is_meta = is_meta
            self.time_range = time_range

    def scan(
            self, table, batch_size=1000, caching=1000, number_of_rows=128,
            start_key=b"", stop_key=None, families=None, filters=None,
            reverse=False, limit_of_rows=0, is_meta=False, time_range=None,
            with_timestamp=False):
        """Scan rows of ``table``, yielding decoded rows one at a time.

        The Scanner object bundles all scan options and carries the cursor
        state across region boundaries; scan_impl drives the actual RPCs.
        """
        families = families if families is not None else {}
        time_range = time_range if time_range is not None else []

        scanner = HbaseClient.Scanner(
            table=table, batch_size=batch_size, caching=caching,
            number_of_rows=number_of_rows, start_key=start_key,
            stop_key=stop_key, families=families, filters=filters,
            reverse=reverse, limit_of_rows=limit_of_rows, is_meta=is_meta,
            time_range=time_range)
        yield from self.scan_impl(scanner, with_timestamp)

    def scan_impl(self, scanner, with_timestamp):
        """Generator driving a scan region by region.

        Yields one row (or one RegionRegister when scanning hbase:meta) at a
        time, advancing scanner.previous_key across region boundaries until
        the stop criteria are met.
        """
        # We're going to need to loop over every relevant region. Break out
        # of this loop once we discover there are no more regions left to scan.
        while True:
            logger.debug("begin scan loop")
            # Finds the next region and sends the initial request to it.
            for has_next, once_res in self.scan_hit_region_once(
                    scanner, with_timestamp):
                if has_next:
                    # now yield one row.
                    yield once_res
                    continue
                # The final item is the refreshed scanner itself.
                scanner = once_res
                break
            # TODO: scanning the meta table never switches regions; consider
            # unifying this with the generic path.
            if scanner.is_meta is True:
                return

            try:
                # Keep pinging this region for more results until it is
                # drained. scan_hit_region_once stashed the scanner_id so the
                # follow-up requests can reuse the server-side scanner.
                for row in self.scan_region_while_more_results(
                        scanner, with_timestamp):
                    yield row
            except PyBaseException as e:
                time.sleep(scanner.retry_interval)
                # Something happened to the region/region client in the middle
                # of a scan; let the exception hierarchy repair the caches.
                e.handle_exception(self, dest_region=scanner.cur_region)
                # Recursively scan JUST the remaining range of this region (it
                # could have been split or merged, so this recursive call may
                # cover multiple regions or only half of one).
                # BUGFIX: these must be keyword arguments -- the old
                # positional form fed previous_key/stop_key into the
                # caching/number_of_rows slots of scan() and also dropped
                # with_timestamp.
                logger.debug("scan half bottom region.")
                if scanner.reverse is True:
                    rescan_start = scanner.cur_region.start_key
                    rescan_stop = scanner.previous_key
                else:
                    rescan_start = scanner.previous_key
                    rescan_stop = scanner.cur_region.stop_key
                for res_cell in self.scan(
                        scanner.table, batch_size=scanner.batch_size,
                        start_key=rescan_start, stop_key=rescan_stop,
                        families=scanner.families, filters=scanner.filters,
                        reverse=scanner.reverse,
                        time_range=scanner.time_range,
                        with_timestamp=with_timestamp):
                    yield res_cell
                # The recursive scan already re-covered everything the first
                # response contained; just advance past this region (or stop)
                # to avoid yielding duplicates.
                if self._scan_advance(scanner):
                    break
                continue
            # Both calls succeeded! Update previous_key so the next iteration
            # can look up the next region to scan.
            if self._scan_advance(scanner):
                break
            if scanner.reverse is True:
                logger.debug(
                    "loop end: previous_key(%s), scanner_strat_key(%s)",
                    scanner.previous_key, scanner.start_key)
            else:
                logger.debug(
                    "loop end: previous_key(%s), scanner_stop_key(%s)",
                    scanner.previous_key, scanner.stop_key)

    def _scan_advance(self, scanner):
        """Advance scanner.previous_key past the just-scanned region.

        Returns True when the scan is finished: the region touches the table
        edge (b"") or extends beyond the caller-supplied start/stop boundary.
        """
        if scanner.reverse is True:
            scanner.previous_key = scanner.cur_region.start_key
            return scanner.previous_key == b"" or (
                scanner.start_key is not None
                and scanner.previous_key < scanner.start_key)
        scanner.previous_key = scanner.cur_region.stop_key
        return scanner.previous_key == b"" or (
            scanner.stop_key is not None
            and scanner.previous_key > scanner.stop_key)

    def scan_hit_region_once(self, scanner, with_timestamp):
        """Locate the region hosting scanner.previous_key and fire the
        initial scan RPC at it.

        Yields (True, row) for every row of the first response, then a final
        (False, scanner) carrying the refreshed state (cur_region,
        scanner_id, more_results_in_region).
        """
        cur_region = None
        response = None
        for _ in range(scanner.retry_times):
            try:
                # Lookup the next region to scan by searching for the
                # previous_key (region keys are inclusive on the start and
                # exclusive on the end)
                cur_region = None
                # TODO: once Register/Parser are split, merge the meta region
                # path with the normal region path.
                if scanner.is_meta is True:
                    cur_region = RegionRegister()
                    cur_region.table = b"hbase:meta"
                    cur_region.region_name = b"hbase:meta,,1"
                    cur_region.start_key = scanner.start_key
                    cur_region.stop_key = None
                    # BUGFIX: this f-string used to be split into two
                    # statements, so server_loc was set to "host:" (a str,
                    # never encoded) and the port line was a discarded
                    # expression.
                    cur_region.server_loc = (
                        f"{self.meta_region_client.host}:"
                        f"{self.meta_region_client.port}").encode()
                    cur_region.region_client = self.meta_region_client
                else:
                    cur_region = self.find_hosting_region(
                        scanner.table, scanner.previous_key, scanner.reverse)

                # Create the scan request object. The last two values are
                #  "Close" and "Scanner_ID" respectively.
                if scanner.reverse is True:
                    rq = request_util.Request.scan(
                        cur_region, scanner.batch_size, scanner.caching,
                        scanner.number_of_rows, scanner.start_key,
                        scanner.previous_key, scanner.families, scanner.filters,
                        False, None, scanner.reverse, scanner.limit_of_rows,
                        scanner.time_range)
                else:
                    rq = request_util.Request.scan(
                        cur_region, scanner.batch_size, scanner.caching,
                        scanner.number_of_rows, scanner.previous_key,
                        scanner.stop_key, scanner.families, scanner.filters,
                        False, None, scanner.reverse, scanner.limit_of_rows,
                        scanner.time_range)
                # Send the request.
                response = cur_region.region_client.run(rq)
                for result in response.results:
                    # TODO: once Register/Parser are split, merge the meta
                    # region path with the normal region path.
                    if scanner.is_meta is True:
                        region_register = RegionRegister()
                        region_register.parse_cells(result.cell)
                        yield True, region_register
                    else:
                        yield True, result_util.cells_to_row(
                            result.cell, with_timestamp)
            except PyBaseException as e:
                time.sleep(scanner.retry_interval)
                # Master may be down or the META region is unhealthy: repair
                # the caches and retry the whole lookup + scan.
                e.handle_exception(self)
                continue
            break
        # Publish the refreshed scan state on the scanner.
        if cur_region:
            scanner.cur_region = cur_region
        if response:
            # The scanner_id lets follow-up requests reuse the server-side
            # scanner.
            scanner.scanner_id = response.scanner_id
            scanner.more_results_in_region = response.more_results_in_region
        yield False, scanner

    def scan_region_while_more_results(self, scanner, with_timestamp):
        """Drain the current region through its server-side scanner, then
        close that scanner."""
        # Only the scanner_id matters here: the region server remembers the
        # original query by id.
        fetch_rq = request_util.Request.scan(
            scanner.cur_region, scanner.batch_size, scanner.caching,
            scanner.number_of_rows, None, None, None, None, False,
            scanner.scanner_id, scanner.reverse, scanner.limit_of_rows,
            scanner.time_range)
        while scanner.more_results_in_region:
            # Exceptions are deliberately NOT handled here; any failure means
            # the caller must rescan the whole region, so we let them bubble
            # up.
            response = scanner.cur_region.region_client.run(fetch_rq)
            for result in response.results:
                # TODO: once Register/Parser are split, merge the meta region
                # path with the normal region path.
                if scanner.is_meta is True:
                    meta_region = RegionRegister()
                    meta_region.parse_cells(result.cell)
                    yield meta_region
                else:
                    yield result_util.cells_to_row(result.cell, with_timestamp)
            scanner.more_results_in_region = response.more_results_in_region
        # Tell the region server to close the scanner ("Close" flag = True).
        close_rq = request_util.Request.scan(
            scanner.cur_region, scanner.batch_size, scanner.caching,
            scanner.number_of_rows, None, None, None, None, True,
            scanner.scanner_id, scanner.reverse, scanner.limit_of_rows,
            scanner.time_range)
        scanner.cur_region.region_client.run(close_rq)

    # HERE LAY REGION AND CLIENT DISCOVERY

    def find_hosting_region(self, table, key, reverse):
        # Check if it"s in the cache already.
        dest_region = self.search_region_cache(table, key, reverse)
        if dest_region:
            logger.debug(
                "find_hosting_region hit cache, Table: %s, Key: %s, loc: %s",
                table, key, dest_region.server_loc)
            return dest_region
        # We have to reach out to master for the results.
        with self.search_region_cache_lock:
            # Not ideal that we have to lock every thread however we limit
            # concurrent meta requests to one. This is because of the case
            # where 1000 greenlets all fail simultaneously we don"t want
            # 1000 requests shot off to the master (all looking for the
            # same response). My solution is to only let one through at a
            # time and then when it"s your turn, check the cache again to
            # see if one of the greenlets let in before you already fetched
            # the meta or not. We can"t bucket greenlets and selectively
            # wake them up simply because we have no idea which key falls
            # into which region. We can bucket based on key but that"s a
            # lot of overhead for an unlikely scenario.
            dest_region = self.search_region_cache(table, key, reverse)
            if dest_region is None:
                # Nope, still not in the cache.
                logger.debug(
                    "Region cache miss! Table: %s, Key: %s, Reverse: %s",
                    table, key, str(reverse))
                # Ask MetaRegionServer for region information.
                dest_region = self.discover_region(table, key, reverse)
        return dest_region

    def discover_region(self, table, key, reverse):
        """Ask the meta region server which region hosts `key` of `table`,
        then register and return it.

        Region/RegionServer-level failures are translated into
        MasterServerException so callers can trigger master re-discovery.
        """
        meta_rq = request_util.Request.meta_region(table, key, reverse)
        logger.warning("meta_rq: %s", str(MessageToDict(meta_rq.pb)))
        result = None
        try:
            # This will throw standard Region/RegionServer exceptions; catch
            # them and convert to the Master equivalent.
            response = self.meta_region_client.run(meta_rq)
            if response and response.results:
                result = response.results[0]
                if result.cell is None:
                    raise RegionServerException
        except (AttributeError, RegionServerException, RegionException) as exc:
            client = self.meta_region_client
            if client is None:
                # Unexpected, but observed in the wild: the client vanished.
                raise MasterServerException(None, None) from exc
            raise MasterServerException(client.host, client.port) from exc
        # Master answered: parse the meta cells and enter the region into our
        # cache structures.
        new_region = RegionRegister()
        new_region.register_by_cells(result.cell, self)
        logger.info("Successfully discovered new region %s", new_region)
        return new_region

    # Dumps a table's region info, intended for warming the cache at startup.
    # For hadoop streaming jobs this reduces scan pressure on the meta table.
    def get_table_regions(self, table, max_cache_num):
        """Yield (table, region_name, start_key, stop_key, server_loc) for
        every region of `table` by scanning hbase:meta.

        Repeats the meta scan from the last region seen until a full pass
        makes no further progress.
        """
        cur_region_name = table + b",,"
        last_region_name = None
        while True:
            for region_register in self.scan(
                b"hbase:meta", batch_size=10000, caching=max_cache_num,
                number_of_rows=5000, start_key=cur_region_name, stop_key=None,
                families={b"info": [b"regioninfo", b"server"]},
                filters=PrefixFilter(table + b","), reverse=False,
                    limit_of_rows=0, is_meta=True):
                # Remember the last non-terminal region so a truncated scan
                # can resume from there on the next pass.
                if region_register.stop_key:
                    cur_region_name = region_register.region_name
                yield (region_register.table, region_register.region_name,
                       region_register.start_key, region_register.stop_key,
                       region_register.server_loc)
            # No progress since the previous pass: the whole table is done.
            if last_region_name == cur_region_name:
                break
            last_region_name = cur_region_name

    # HERE LAY THE MISCELLANEOUS

    def purge_region_client(self, client):
        """Purge every cached region hosted by `client`, drop the reverse
        lookup entry and close the client to release its file descriptors."""
        with self.cache_lock:
            for hosted in client.regions:
                self.remove_region_cache(
                    hosted.table, hosted.start_key, hosted.stop_key)
            loc = f"{client.host}:{client.port}"
            logger.debug("remove region_client_key: %s", loc)
            self.reverse_client_cache.pop(loc, None)
            client.close()

    def purge_region_info(self, region_info):
        # Given a region_info, deletes it's entry from the cache and removes
        #  itself from it is region list of region client.
        with self.cache_lock:
            self.remove_region_cache(
                region_info.table, region_info.start_key, region_info.stop_key)
            try:
                while region_info in region_info.region_client.regions:
                    region_info.region_client.regions.remove(region_info)
            except ValueError:
                logger.exception("purge_region_info except.")
                pass

    def close(self):
        """Shut the client down: flush pending mutations, close every region
        client (meta included) and clear the region caches."""
        logger.info("Main client received close request.")
        # Push out anything still buffered before tearing sockets down.
        self.flush_commits(force=True)
        if self.meta_region_client is not None:
            self.meta_region_client.close()
        self.region_cache.clear()
        # Close each open region client (cache maps location -> client).
        for client in self.reverse_client_cache.values():
            client.close()
        self.reverse_client_cache = {}

    def get_meta_rows(self, hbase_table_names, max_cache_num):
        meta_rows = []
        for hbase_table_name in hbase_table_names:
            for table_name, region_name, start_key, stop_key, server_loc in \
                    self.get_table_regions(hbase_table_name, max_cache_num):
                if not server_loc:
                    logger.warning(
                        "get_meta_rows: table %s hit empty server_loc: ",
                        table_name)
                    raise ValueError(
                        f"table {table_name} hit null server_loc")

                # 忽略table_name为空的情况
                if table_name is None:
                    continue
                meta_rows.append(
                    (table_name, region_name, start_key, stop_key, server_loc))
        return meta_rows

    def dump_meta_to_sqlite_impl(
            self, db_path, hbase_table_names, max_cache_num):
        ret = True
        conn = sqlite3.connect(db_path)
        c = conn.cursor()
        c.execute(
            "CREATE TABLE IF NOT EXISTS hbase_meta(table_name BLOB, "
            "region_name BLOB, start_key BLOB, stop_key BLOB, server_loc BLOB)")
        c.execute(
            "CREATE UNIQUE INDEX IF NOT EXISTS keyindex on "
            "hbase_meta(table_name, region_name)")

        try:
            for table_name, region_name, start_key, stop_key, server_loc in \
                    self.get_meta_rows(hbase_table_names, max_cache_num):
                c.execute(
                    "REPLACE INTO hbase_meta(table_name, region_name, "
                    "start_key, stop_key, server_loc) VALUES (?, ?, ?, ?, ?)",
                    (table_name, region_name, start_key, stop_key, server_loc))
        except sqlite3.IntegrityError as ex:
            logger.exception("dump_meta_to_sqlite_impl except: %s", str(ex))
            ret = False
        conn.commit()
        conn.close()
        return ret

    def load_meta_rows(self, meta_rows):
        for table, region, start_key, stop_key, server_loc in meta_rows:
            new_region = RegionRegister()
            new_region.register(
                table, region, start_key, stop_key, server_loc, self)

    # 此函数用于常驻程序预先加载tables的meta信息
    def load_meta(self, table_names, max_cache_num=100000):
        meta_rows = self.get_meta_rows(table_names, max_cache_num)
        self.load_meta_rows(meta_rows)

    def dump_meta_to_sqlite(
            self, db_path, hbase_table_name, max_cache_num=10000, retry_time=3):
        res = False
        # 为了防止dump meta失败，这里执行重试
        for _ in range(retry_time):
            res = self.dump_meta_to_sqlite_impl(
                db_path, [hbase_table_name], max_cache_num)
            if res is True:
                break
            time.sleep(30)

    def load_meta_from_sqlite(self, db_path):
        conn = sqlite3.connect(db_path)
        c = conn.cursor()
        c.execute(
            "SELECT table_name, region_name, start_key, stop_key, server_loc"
            " FROM hbase_meta")

        for row in c:
            new_region = RegionRegister()
            new_region.register(row[0], row[1], row[2], row[3], row[4], self)
        conn.close()

    def find_region(self, table, key):
        dest_region = None
        for _ in range(0, self.op_retry_times):
            try:
                dest_region = self.find_hosting_region(table, key, False)
                break
            except RegionServerSocketTimeoutException as e:
                raise e
            except PyBaseException as e:
                e.handle_exception(hbase_client=self, dest_region=dest_region)
        return dest_region
