# -*- coding: utf-8 -*-

"""
kg json to elasticsearch job (incremental version)
"""

from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import

from kgpipeline.job import KgJob, JobInputMisMatchError, JobConfigError
from kgpipeline.jsonutil import entity_json_decode

try:
    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import bulk
except ImportError:
    from elasticsearch2 import Elasticsearch
    from elasticsearch2.helpers import bulk

import logging

logger = logging.getLogger(__name__)


def doc_from_kg_entity_inc(item, index):
    """Turn a decoded KG entity dict into an Elasticsearch bulk action, in place.

    Parameters
    ----------
    item : dict
        decoded entity; must contain ``@id``. An entity that carries ONLY
        ``@id`` is treated as a deletion marker.
    index : str
        target index name to stamp on the action.

    Returns
    -------
    dict
        the same ``item``, augmented with ``_index``, ``_type`` (hardcoded
        ``doc``), ``_id`` and ``_op_type`` for ``elasticsearch.helpers.bulk``.
    """
    # Decide delete-vs-index BEFORE adding bulk metadata: the previous code
    # checked ``len(item) == 1`` after inserting three extra keys, so the
    # condition could never hold and deletions were silently indexed instead.
    is_delete = len(item) == 1  # @id only

    item["_index"] = index
    item["_type"] = "doc"
    item["_id"] = item["@id"]
    item["_op_type"] = "delete" if is_delete else "index"
    return item


def get_uploader(hosts, username, password):
    """Build a partition-uploader closure for ``RDD.mapPartitionsWithIndex``.

    The returned function ignores the partition index, connects its own
    Elasticsearch client (with HTTP auth when ``username`` is non-empty),
    and yields the result of one non-raising bulk upload of the partition's
    documents.
    """
    def upload_partition(_partition_index, docs):
        auth = (username, password) if username else None
        if auth is not None:
            client = Elasticsearch(hosts=hosts, http_auth=auth)
        else:
            client = Elasticsearch(hosts=hosts)

        # stats_only=False + raise_on_error=False: collect per-doc errors
        # instead of aborting the partition.
        yield bulk(client, docs, stats_only=False, raise_on_error=False)

    return upload_partition


class KgEsIncJob(KgJob):
    """
    publish entities to elasticsearch incrementally.

    only one input support.

    config includes:
    - date: current date (``YYYY-MM-DD``; dashes are stripped to form the
      dated index suffix)
    - partition: output partitions. optional, default 10
    - alias: index alias
    - index: index name
    - hosts: es hosts
    - username: es user. optional, default empty
    - password: es password. optional, default empty

    Notes
    -----
    the type in index is hardcoded ``doc``.
    record with only one field ``@id`` is treated as ``delete``
    """

    def process(self, inputs):
        """Decode each input row as a KG entity and bulk-publish it to the
        resolved Elasticsearch index.

        Parameters
        ----------
        inputs : list
            must hold exactly one ``(DataFrame, meta)`` pair; the DataFrame
            needs a ``value`` column containing entity JSON strings.

        Raises
        ------
        JobInputMisMatchError
            when the number of inputs is not exactly one.
        JobConfigError
            when ``alias``, ``index`` or ``hosts`` is missing from config.
        Exception
            when today's dated index is absent and no index named
            ``<index>_YYYYMMDD`` exists on the cluster.
        """
        if len(inputs) != 1:
            raise JobInputMisMatchError(
                "needs exactly one input json file for an Elasticsearch Incremental Publish Job")

        # "YYYY-MM-DD" -> "YYYYMMDD": the suffix used in dated index names.
        today = self.config["date"].replace("-", "")

        partition = self.config.get("partition", 10)

        alias = self.config.get("alias")
        if not alias:
            raise JobConfigError("Input parameter `alias` is missing!")

        index = self.config.get("index")
        if not index:
            raise JobConfigError("Input parameter `index` is missing!")

        # Preferred target: today's dated index, e.g. "myindex_20240101".
        index_date = index + "_" + today

        hosts = self.config.get("hosts")
        if not hosts:
            raise JobConfigError("Input parameter `hosts` is missing!")

        username = self.config.get("username")
        password = self.config.get("password")

        if username:
            es = Elasticsearch(hosts=hosts, http_auth=(username, password))
        else:
            es = Elasticsearch(hosts=hosts)

        if es.indices.exists(index=index_date):
            index_real = index_date
        else:
            # Today's index does not exist: fall back to the newest index
            # on the cluster whose name matches "<index>_YYYYMMDD".
            index_alias = es.indices.get_alias()

            indices = []
            # only the index names matter here; alias_config is unused
            for index_name, alias_config in index_alias.items():
                # name part in name_YYYYMMDD
                if index_name[:-9] == index:
                    # date part in name_YYYYMMDD
                    indices.append(index_name[-8:])

            if not indices:
                raise Exception("No index found!")

            # Zero-padded YYYYMMDD sorts lexicographically in date order,
            # so the last element is the most recent index.
            sorted_indices = [index + "_" + d for d in sorted(indices)]

            logger.info("all indices: {}".format(",".join(sorted_indices)))
            index_real = sorted_indices[-1]

        # NOTE(review): `alias` is required and logged but never attached or
        # switched in this method — confirm alias management happens
        # elsewhere in the pipeline.
        logger.info("es alias is: {}".format(alias))
        logger.info("es index is: {}".format(index_real))

        uploader = get_uploader(hosts, username, password)

        df, _ = inputs[0]

        # Decode JSON rows into entity dicts, stamp each with bulk metadata
        # for the resolved index, and spread work over `partition` partitions.
        docs = df.rdd.map(lambda x: entity_json_decode(x["value"])).map(
            lambda x: doc_from_kg_entity_inc(x, index_real)
        ).repartition(partition)

        # Each partition opens its own ES client and bulk-uploads; collect
        # the per-partition bulk stats back on the driver.
        upload_stat = docs.mapPartitionsWithIndex(
            uploader
        ).collect()

        from pprint import pprint
        for p_stat in upload_stat:
            pprint(p_stat)


# Register this job class under the job-type key "esinc".
KgJob.register("esinc", KgEsIncJob)
