# -*- coding: utf-8 -*-

"""
publish data to redis incrementally

The difference is that we already know what to delete and we don't need to update meta info.
"""

from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

try:
    from rediscluster import StrictRedisCluster as RedisCluster
except ImportError:
    from rediscluster import RedisCluster

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.jsonutil import entity_json_decode, entity_json_encode


# Example `redis_hosts` config value:
#     [{"host": "99.13.219.128", "port": 6379},
#      {"host": "99.13.219.129", "port": 6379},
#      {"host": "99.13.219.130", "port": 6379}]
#
# NOTE(review): the original comment embedded the real ST-cluster password
# here in plain text; it has been redacted. Credentials must come from the
# job config (`redis_password`), never from source control.
def get_redis_uploader(hosts, password, prefix, batch_size):
    """Build a partition function that SADDs items into a Redis cluster.

    Args:
        hosts: startup nodes for the cluster, e.g. [{"host": ..., "port": ...}].
        password: cluster password, or None for an unauthenticated cluster.
        prefix: key namespace; each item is stored under "<prefix>:<item_key>".
        batch_size: number of buffered pipeline commands per execute().

    Returns:
        A function ``(partition_index, items) -> iterator of one status string``
        suitable for ``RDD.mapPartitionsWithIndex``; ``items`` yields
        ``(item_key, item_values)`` pairs.
    """
    def __uploader__(index, items):
        try:
            client = RedisCluster(startup_nodes=hosts, password=password,
                                  skip_full_coverage_check=True)
            pl = client.pipeline()
            total_count = 0
            batch_count = 0
            for item_key, item_values in items:
                # SADD with zero members is a Redis protocol error; skip them.
                if not item_values:
                    continue
                batch_count += 1
                pl.sadd("{0}:{1}".format(prefix, item_key), *item_values)
                # was `>`, which flushed batches of batch_size + 1 commands
                if batch_count >= batch_size:
                    pl.execute()
                    total_count += batch_count
                    batch_count = 0
            if batch_count > 0:
                pl.execute()
                total_count += batch_count
            yield "partition {0} added total {1} keys".format(index, total_count)
        except Exception as e:
            # Deliberate best-effort: report the failure as the partition's
            # result instead of raising and killing the whole Spark job.
            yield "partition {0} failed: {1}".format(index, e)

    return __uploader__


def get_redis_remover(hosts, password, prefix, batch_size):
    """Build a partition function that DELETEs keys from a Redis cluster.

    Args:
        hosts: startup nodes for the cluster, e.g. [{"host": ..., "port": ...}].
        password: cluster password, or None for an unauthenticated cluster.
        prefix: key namespace; "<prefix>:<item_key>" is the key deleted.
        batch_size: number of buffered pipeline commands per execute().

    Returns:
        A function ``(partition_index, items) -> iterator of one status string``
        suitable for ``RDD.mapPartitionsWithIndex``; only the first element of
        each item (the key) is used.
    """
    # renamed from the copy-pasted `__uploader__` — this closure removes keys
    def __remover__(index, items):
        try:
            client = RedisCluster(startup_nodes=hosts, password=password,
                                  skip_full_coverage_check=True)
            pl = client.pipeline()
            total_count = 0
            batch_count = 0
            for item_key, _ in items:
                batch_count += 1
                pl.delete("{0}:{1}".format(prefix, item_key))
                # was `>`, which flushed batches of batch_size + 1 commands
                if batch_count >= batch_size:
                    pl.execute()
                    total_count += batch_count
                    batch_count = 0
            if batch_count > 0:
                pl.execute()
                total_count += batch_count
            yield "partition {0} deleted total {1} keys".format(index, total_count)
        except Exception as e:
            # Deliberate best-effort: report the failure as the partition's
            # result instead of raising and killing the whole Spark job.
            yield "partition {0} failed: {1}".format(index, e)

    return __remover__


class KgRedisIncJob(KgJob):
    """Publish entities to Redis incrementally.

    Expects a single input whose decoded records are ``(key, payload)`` pairs:
    a ``None`` payload marks the key for deletion, any other payload carries
    the full new member set for that key.
    """

    def process(self, inputs):
        # Exactly one input JSON file is expected for an incremental publish.
        if len(inputs) != 1:
            raise JobInputMisMatchError("needs exactly one input json file for a Redis Incremental Job")

        cfg = self.config
        batch_size = cfg.get("batch_size", 10000)

        prefix = cfg.get("prefix")
        if not prefix:
            raise JobConfigError("Input parameter `prefix` is missing!")

        hosts = cfg.get("redis_hosts")
        if not hosts:
            raise JobConfigError("Input parameter `redis_hosts` is missing!")

        # An unauthenticated cluster is allowed, so the password is optional.
        password = cfg.get("redis_password")

        df, _ = inputs[0]
        decoded = df.rdd.map(lambda row: entity_json_decode(row["value"]))

        # Split the stream: None payload => delete the key; otherwise re-encode
        # every entity and add the full member list under the key.
        to_delete = decoded.filter(lambda rec: rec[1] is None)
        to_add = decoded.filter(lambda rec: rec[1] is not None).map(
            lambda rec: (rec[0], [entity_json_encode(e) for e in rec[1]]))

        remove = get_redis_remover(hosts, password, prefix, batch_size)
        upload = get_redis_uploader(hosts, password, prefix, batch_size)

        # Deletions first, then additions; each partition reports one summary line.
        for message in to_delete.mapPartitionsWithIndex(remove).collect():
            print(message)
        for message in to_add.mapPartitionsWithIndex(upload).collect():
            print(message)


KgJob.register("redisinc", KgRedisIncJob)
