#
#  Copyright 2022 The Open Islands Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import json
import typing
from pickle import dumps as p_dumps, loads as p_loads

import pyspark
from pyspark import SparkContext
from pyspark.sql import SparkSession

from pyoi.constant import FederatedMode, DataStructure
from pyoi.constant import FederatedPacketFormat, FederatedPacketType
from pyoi.constant import StorageEngine
from pyoi.context._context import BaseContext
from pyoi.context.abc import MQChannelABC, MQManagerABC, GarbageCollectionABC
from pyoi.context.mq import MQ
from pyoi.context.types import (
    MQArgs,
    FederatedPacketRouting,
    DataStream,
    FederatedPacketHeaders,
    FederatedPacketProperties,
    ExecutorLogger
)
from pyoi.types import DataIO
from pyoi.types import DataSchema
from pyoi.types import FederatedAddress
from pyoi.types.profile import computing_profile
from pyoi.util import log_utils
from ._materialize import materialize
from ._spark_rdd_storage import SparkRDDStorage

# Sentinel tag appended to a transfer's tag for the preliminary "packet type"
# handshake that precedes every remote/get exchange.  The angle brackets are
# replaced with "_" when the tag is embedded into MQ topic names.
NAME_PACKET_TYPE_TAG = "<packet_type>"
# Separator used when composing cache keys, topic keys and packet keys.
_SPLIT_ = "^"
# Process-wide de-duplication history of (name, tag, address) triples already
# sent / received.  NOTE: module-level, so it is shared by every context
# instance in this interpreter, not scoped to a single task.
_REMOTE_HISTORY = set()
_GET_HISTORY = set()

LOGGER = log_utils.getLogger()
# Logger wrapper used inside Spark partition functions; presumably safe to
# use on executors -- NOTE(review): assumed from the name ExecutorLogger,
# confirm against pyoi.context.types.
ELOGGER = ExecutorLogger(logger=LOGGER)


class SparkRDDContext(BaseContext):
    """Federated computing context backed by Spark RDDs.

    Local dataset I/O is delegated to :class:`SparkRDDStorage`.  Cross-party
    transfer (``remote`` / ``get``) moves either a single pickled object or a
    partitioned RDD through message-queue channels obtained via
    ``MQ.get_channel``; datasets are streamed partition-by-partition from
    Spark executors as hex-encoded, chunked JSON packets.
    """

    def __init__(self,
                 context_id,
                 task_id,
                 federated_mode: FederatedMode,
                 role,
                 node_id,
                 nodes: typing.Dict[str, typing.List[str]],
                 mq_args: MQArgs, mq_manager: MQManagerABC,
                 default_storage_engine: StorageEngine):
        """Initialise the context and acquire a Spark session/context.

        Args:
            context_id: identifier of this context (forwarded to BaseContext).
            task_id: identifier of the running task; embedded in topic names.
            federated_mode: SINGLE or MULTIPLE; selects the topic layout used
                by ``_get_or_create_topic``.
            role: local party role.
            node_id: local party node id.
            nodes: mapping of role -> list of participating node ids.
            mq_args: message-queue connection / sizing arguments.
            mq_manager: manager used to create topics and clean them up.
            default_storage_engine: fallback engine for ``save_text``.
        """
        super().__init__(context_id, task_id, federated_mode, role, node_id, nodes, mq_args, mq_manager,
                         default_storage_engine)
        # This backend always exposes key/value-table semantics.
        self._data_structure = DataStructure.KVTABLE
        # Reuse any already-running session/context rather than creating new ones.
        self._spark_session = SparkSession.builder.getOrCreate()
        self._spark_context = SparkContext.getOrCreate()
        self._my_storage = SparkRDDStorage(spark_session=self._spark_session, spark_context=self._spark_context)

    @computing_profile
    def load_text(self, data_input: DataIO) -> typing.Tuple[pyspark.RDD, DataSchema]:
        """Load a text dataset via the underlying Spark storage layer."""
        return self._my_storage.load_text(data_input=data_input)

    @computing_profile
    def save_text(self, schema: DataSchema, rdd: pyspark.RDD, data_output: DataIO):
        """Save ``rdd`` as text, defaulting the engine to the context's default."""
        data_output.engine = data_output.engine or self._default_storage_engine
        return self._my_storage.save_text(schema=schema, rdd=rdd, data_output=data_output)

    def cleanup(self):
        """Release MQ resources created for this context's group."""
        self._mq_manager.cleanup_group()

    def stop(self):
        """Stop the context (no-op for the Spark RDD backend)."""
        pass

    def kill(self):
        """Forcefully kill the context (no-op for the Spark RDD backend)."""
        pass

    def get(self, name: str, tag: str, src_addresses: typing.List[FederatedAddress],
            gc: GarbageCollectionABC) -> typing.List:
        """Receive one value per source address for the transfer ``(name, tag)``.

        A small "packet type" object is exchanged first so the receiver knows
        whether to expect a partitioned DATASET (rebuilt as an RDD with one
        receiving task per partition) or a single pickled OBJECT.

        Args:
            name: variable name of the transfer.
            tag: per-round tag; each (name, tag, src) may only be used once.
            src_addresses: parties to receive from; one result per address.
            gc: garbage-collection hook (currently unused, see todo below).

        Returns:
            List of received values (``pyspark.RDD`` for datasets, plain
            objects otherwise), ordered like ``src_addresses``.

        Raises:
            ValueError: if a (name, tag, src) combination was already used.
        """
        log_str = f"[name={name}, tag={tag}, srcs={src_addresses}]"
        LOGGER.debug(f"{log_str}start to get")

        for src_address in src_addresses:
            if not self._get_tag_not_duplicate(name, tag, src_address):
                raise ValueError(f"{log_str}get from {src_address} with duplicate tag")

        # One cache key per source; used to memoise the packet-type handshake.
        _name_dtype_keys = [
            _SPLIT_.join([src_address.role, src_address.node_id, name, tag, "get"])
            for src_address in src_addresses
        ]

        if _name_dtype_keys[0] not in self._name_dtype_map:
            LOGGER.debug(f"{log_str} start to get packet_type info")
            topics = self._get_topics(src_addresses, packet_type=NAME_PACKET_TYPE_TAG)
            channels = self._get_channels(topics=topics)
            results_info = []
            for i, channel in enumerate(channels):
                obj = self._receive_obj(
                    channel, name, tag=_SPLIT_.join([tag, NAME_PACKET_TYPE_TAG])
                )
                results_info.append(obj)
                LOGGER.debug(
                    f"{log_str} _name_dtype_keys: {_name_dtype_keys}, packet_type: {obj}"
                )

            # NOTE(review): the packet-type info of the FIRST source is cached
            # for every key, i.e. all sources are assumed to send the same
            # packet type and partition count -- confirm this invariant holds
            # for every caller.
            for k in _name_dtype_keys:
                if k not in self._name_dtype_map:
                    self._name_dtype_map[k] = results_info[0]
            LOGGER.debug(f"{log_str} get packet_type info finished")

        results_info = self._name_dtype_map[_name_dtype_keys[0]]

        results = []
        packet_type = results_info.get("packet_type", None)
        num_partitions = results_info.get("num_partitions", None)

        if packet_type == FederatedPacketType.DATASET:
            LOGGER.debug(f"{log_str}start to get dataset")
            topics = self._get_topics(src_addresses, name, num_partitions=num_partitions)
            for i, topics_one_address in enumerate(topics):
                receive_func = self._get_partition_receive_func(
                    name, tag, topics_one_address, mq_args=self._mq_args
                )
                # One Spark task per incoming partition; each task consumes
                # its own MQ topic and yields the reassembled key/value rows.
                rdd = self._spark_context.parallelize(range(num_partitions), num_partitions)
                rdd = rdd.mapPartitionsWithIndex(receive_func)
                # Force evaluation now so the MQ messages are drained eagerly.
                rdd = materialize(rdd)
                # todo: add gc
                # gc.add_gc_action(tag, rdd, "__del__", {})
                results.append(rdd)
                LOGGER.debug(
                    f"{log_str}received rdd({i + 1}/{len(src_addresses)}), src: {src_addresses[i]} "
                )
        else:
            # todo: big object and move to object context module
            LOGGER.debug(f"{log_str}start to get object")
            topics = self._get_topics(src_addresses, name)
            channels = self._get_channels(topics=topics)
            for i, channel in enumerate(channels):
                obj = self._receive_obj(channel, name, tag)
                LOGGER.debug(
                    f"{log_str}received obj({i + 1}/{len(src_addresses)}), src: {src_addresses[i]} "
                )
                results.append(obj)

        LOGGER.debug(f"{log_str}finish to get")
        return results

    def remote(
            self,
            v,
            name: str,
            tag: str,
            dest_addresses: typing.List[FederatedAddress],
            gc: GarbageCollectionABC,
    ) -> typing.NoReturn:
        """Send ``v`` to every destination address under ``(name, tag)``.

        A packet-type descriptor is sent first (DATASET with the partition
        count for RDDs, OBJECT otherwise); RDDs are then streamed partition
        by partition from the executors, plain objects are pickled and
        published directly.

        Args:
            v: value to send; ``pyspark.RDD`` or any picklable object.
            name: variable name of the transfer.
            tag: per-round tag; each (name, tag, dest) may only be used once.
            dest_addresses: parties to send to.
            gc: garbage-collection hook (currently unused).

        Raises:
            ValueError: if a (name, tag, dest) combination was already used.
        """
        log_str = f"[name={name}, tag={tag}, dest addresses={dest_addresses}]"
        LOGGER.debug(f"{log_str} start to remote")

        if not self._remote_tag_not_duplicate(name, tag, dest_addresses):
            raise ValueError(f"{log_str} remote to {dest_addresses} with duplicate tag")

        _name_dtype_keys = [
            _SPLIT_.join([dest_address.role, dest_address.node_id, name, tag, "remote"])
            for dest_address in dest_addresses
        ]
        LOGGER.info(f"v type {type(v)}")

        # Send the packet-type handshake only once per (dest, name, tag).
        if _name_dtype_keys[0] not in self._name_dtype_map:
            LOGGER.debug(f"{log_str} start to remote packet_type info")
            topics = self._get_topics(dest_addresses, packet_type=NAME_PACKET_TYPE_TAG)
            channels = self._get_channels(topics=topics)
            if isinstance(v, pyspark.RDD):
                body = {"packet_type": FederatedPacketType.DATASET, "num_partitions": v.getNumPartitions()}
            else:
                body = {"packet_type": FederatedPacketType.OBJECT}

            LOGGER.debug(
                f"{log_str} _name_dtype_keys: {_name_dtype_keys}, packet_type: {body}"
            )
            self._send_obj(
                name=name,
                tag=_SPLIT_.join([tag, NAME_PACKET_TYPE_TAG]),
                data=p_dumps(body),
                channels=channels,
            )

            for k in _name_dtype_keys:
                if k not in self._name_dtype_map:
                    self._name_dtype_map[k] = body
            LOGGER.debug(f"{log_str} remote packet_type info finish")

        if isinstance(v, pyspark.RDD):
            total_count = v.count()
            num_partitions = v.getNumPartitions()
            LOGGER.debug(
                f"{log_str} start to remote table, total_count={total_count}, num_partitions={num_partitions}"
            )

            topics = self._get_topics(dest_addresses, name, num_partitions=num_partitions)
            send_func = self._get_partition_send_func(
                name, tag, num_partitions, topics, mq_args=self._mq_args
            )
            # count() is only used to force execution of the sending tasks.
            v.mapPartitionsWithIndex(send_func).count()
        else:
            # todo: big object and move to object context module
            LOGGER.debug(f"{log_str} start to remote obj")
            topics = self._get_topics(dest_addresses, name)
            channels = self._get_channels(topics=topics)
            self._send_obj(name=name, tag=tag, data=p_dumps(v), channels=channels)

        LOGGER.debug(f"{log_str} finish remote")

    def _get_topics(
            self,
            addresses: typing.List[FederatedAddress],
            name: str = None,
            num_partitions: int = None,
            packet_type: str = None,
    ) -> typing.List:
        """Return one topic list per address (see ``_get_or_create_topic``)."""
        topics = [
            self._get_or_create_topic(address, name, num_partitions, packet_type)
            for address in addresses
        ]
        return topics

    def _get_or_create_topic(
            self, address: FederatedAddress, name: str = None, num_partitions: int = None, packet_type: str = None
    ) -> typing.List[typing.Tuple[str, FederatedPacketRouting]]:
        """Resolve (creating on first use) the topics toward ``address``.

        Depending on the arguments, the key space is either a single
        packet-type topic, one topic per dataset partition, one per transfer
        name, or one per address.  Routing info is cached in
        ``self._topic_map`` and the send/receive topics are created through
        the MQ manager the first time a key is seen.

        Returns:
            List of ``(topic_key, FederatedPacketRouting)`` tuples; one entry
            per partition when ``num_partitions`` is given, otherwise one.
        """
        topic_key_list = []
        topic_list = []

        if packet_type is not None:
            # Handshake topic: the packet-type tag doubles as the name slot.
            topic_key = _SPLIT_.join([address.role, address.node_id, packet_type, packet_type])
            topic_key_list.append(topic_key)
        else:
            if num_partitions is not None:
                # One topic per partition so partitions stream independently.
                for i in range(num_partitions):
                    topic_key = _SPLIT_.join([address.role, address.node_id, name, str(i)])
                    topic_key_list.append(topic_key)
            elif name is not None:
                topic_key = _SPLIT_.join([address.role, address.node_id, name])
                topic_key_list.append(topic_key)
            else:
                topic_key = _SPLIT_.join([address.role, address.node_id])
                topic_key_list.append(topic_key)

        for topic_key in topic_key_list:
            if topic_key not in self._topic_map:
                LOGGER.debug(
                    f"key: {topic_key} address: {address} not found, start to create"
                )

                # Everything after role/node_id becomes the topic suffix.
                topic_key_splits = topic_key.split(_SPLIT_)
                topic_suffix = "-".join(topic_key_splits[2:])
                if self._federated_mode == FederatedMode.SINGLE:
                    # Single-MQ deployment: both parties talk to the same
                    # broker, so my send topic is the peer's receive topic.
                    send_topic_name = f"{self._task_id}-{self._role}-{self._node_id}-{address.role.upper()}-{address.node_id}-{topic_suffix}"
                    receive_topic_name = f"{self._task_id}-{address.role.upper()}-{address.node_id}-{self._role}-{self._node_id}-{topic_suffix}"
                    dest_send_topic_name = receive_topic_name
                    dest_receive_topic_name = send_topic_name
                elif self._federated_mode == FederatedMode.MULTIPLE:
                    # Multi-MQ deployment: send/receive prefixes distinguish
                    # the mirrored topics replicated between brokers.
                    send_topic_name = f"send-{self._task_id}-{self._role}-{self._node_id}-{address.role.upper()}-{address.node_id}-{topic_suffix}"
                    dest_receive_topic_name = send_topic_name.replace(
                        "send", "receive", 1
                    )
                    # NOTE(review): the detailed send topic above is only used
                    # to derive dest_receive; the actual local send topic is
                    # the coarser per-node one below -- confirm intended.
                    send_topic_name = f"send-{self._task_id}-{self._role}-{self._node_id}"
                    receive_topic_name = f"receive-{self._task_id}-{address.role.upper()}-{address.node_id}-{self._role}-{self._node_id}-{topic_suffix}"
                    dest_send_topic_name = (
                        f"send-{self._task_id}-{address.role.upper()}-{address.node_id}"
                    )
                else:
                    raise RuntimeError(
                        f"can not support {self._federated_mode} federated mode"
                    )

                # Angle brackets (from NAME_PACKET_TYPE_TAG) are not valid in
                # topic names; normalise them away.
                send_topic_name = send_topic_name.replace("<", "_").replace(">", "_")
                receive_topic_name = receive_topic_name.replace("<", "_").replace(">", "_")
                dest_send_topic_name = dest_send_topic_name.replace("<", "_").replace(">", "_")
                dest_receive_topic_name = dest_receive_topic_name.replace("<", "_").replace(">", "_")

                routing_info = FederatedPacketRouting(
                    task_id=self._task_id,
                    src_role=self._role,
                    src_node_id=self._node_id,
                    send_topic=send_topic_name,
                    receive_topic=receive_topic_name,
                    dest_role=address.role,
                    dest_node_id=address.node_id,
                    dest_send=dest_send_topic_name,
                    dest_receive=dest_receive_topic_name,
                )

                self._mq_manager.create_topic(routing_info.send_topic)
                self._mq_manager.create_topic(routing_info.receive_topic)

                self._topic_map[topic_key] = routing_info
                LOGGER.debug(
                    f"key: {topic_key}, address: {address} create send topic: {routing_info.send_topic}, receive topic: {routing_info.receive_topic}"
                )

            routing_info = self._topic_map[topic_key]
            topic_list.append((topic_key, routing_info))
        return topic_list

    def _get_channels(
            self, topics: typing.List[typing.Tuple[str, FederatedPacketRouting]]
    ) -> typing.List[MQChannelABC]:
        """Flatten ``topics`` into MQ channels, caching one channel per key.

        Runs on the driver; channels are memoised in ``self._channels_map``.
        """
        channels = []
        for topics_one_address in topics:
            for topic_key, routing_info in topics_one_address:
                channel = self._channels_map.get(topic_key)
                if channel is None:
                    channel = MQ.get_channel(self._mq_args, routing_info)
                    self._channels_map[topic_key] = channel
                channels.append(channel)
        return channels

    def _get_channels_by_index(
            self,
            index: int,
            topics: typing.List[typing.List[typing.Tuple[str, FederatedPacketRouting]]],
            mq_args: MQArgs,
    ):
        """Open a fresh channel for partition ``index`` of each address.

        Used inside Spark partition functions, so no driver-side channel
        cache is consulted -- each executor task creates its own channels.
        """
        channels = []
        for topics_one_address in topics:
            topic_key, routing_info = topics_one_address[index]
            LOGGER.debug(f"topic key: {topic_key} topic group: {routing_info}")
            channel = MQ.get_channel(mq_args, routing_info)
            channels.append(channel)
        return channels

    def _send_obj(self, name: str, tag: str, data, channels: typing.List[MQChannelABC]):
        """Publish one pickled-object packet (``data``) to every channel."""
        for channel in channels:
            properties = FederatedPacketProperties(
                format=FederatedPacketFormat.PLAIN.name,
                name=name,
                tag=tag,
                # delivery_mode=1: presumably non-persistent delivery --
                # NOTE(review): confirm against the MQ backend's semantics.
                delivery_mode=1,
            )
            headers = FederatedPacketHeaders(packet_type=FederatedPacketType.OBJECT, routing=channel.routing_info,
                                             properties=properties)
            LOGGER.debug(f"properties:{properties}")
            # todo: split object data?
            channel.publish(headers=headers, data=data)
            LOGGER.debug(f"publish")

    def _get_packet_cache_key(self, name: str, tag: str, node_id: str, role: str) -> str:
        """Compose the ``self._packet_cache`` key for a received object."""
        cache_key = _SPLIT_.join([name, tag, str(node_id), role])
        return cache_key

    def _receive_obj(self, channel: MQChannelABC, name: str, tag: str):
        """Consume ``channel`` until the object for ``(name, tag)`` arrives.

        Objects for other (name, tag) pairs seen on the way are unpickled and
        parked in ``self._packet_cache`` for later calls; an already-cached
        object is returned without touching the channel.

        Returns:
            The unpickled object; implicitly ``None`` if the consumer stream
            ends before the wanted packet is seen (NOTE(review): confirm the
            channel blocks rather than terminating in that case).

        Raises:
            ValueError: if a non-OBJECT packet arrives on this channel.
        """
        node_id = channel.routing_info.dest_node_id
        role = channel.routing_info.dest_role
        # todo: may be using src node id not dest node id?
        wish_cache_key = self._get_packet_cache_key(name, tag, node_id, role)
        LOGGER.debug(f"wish cache key: {wish_cache_key}")

        if wish_cache_key in self._packet_cache:
            return self._packet_cache[wish_cache_key]

        for deliver, headers, data in channel.consume():
            properties: FederatedPacketProperties = headers.properties
            if properties.name != name or properties.tag != tag:
                # todo: fix this
                LOGGER.warning(
                    f"require {name}.{tag}, got {properties.name}.{properties.tag}"
                )

            cache_key = self._get_packet_cache_key(
                properties.name, properties.tag, node_id, role
            )
            if headers.packet_type == FederatedPacketType.OBJECT:
                # todo: packet cache size will be so big?
                self._packet_cache[cache_key] = p_loads(data)
                channel.ack(deliver=deliver)
                if cache_key == wish_cache_key:
                    channel.cancel()
                    LOGGER.debug(
                        f"cache_key: {cache_key}, obj: {self._packet_cache[cache_key]}"
                    )
                    return self._packet_cache[cache_key]
            else:
                raise ValueError(f"require {FederatedPacketType.OBJECT}, but {headers.packet_type}")

    def _send_kv(
            self,
            name: str,
            tag: str,
            data,
            channels: typing.List[MQChannelABC],
            partition_size: int,
            num_partitions: int,
            packet_key: str,
    ):
        """Publish one JSON dataset packet (a batch of row chunks) to each channel.

        Args:
            data: JSON payload produced by ``DataStream.get_data()``.
            partition_size: total row count of the partition, or -1 for
                intermediate packets (the receiver uses the non-negative
                value to know when the partition is complete).
            num_partitions: partition count of the whole dataset.
            packet_key: unique key used for receiver-side de-duplication.
        """
        log_str = f"[name={name}, tag={tag}]"
        ELOGGER.debug(f"{log_str} start send kv")
        for channel in channels:
            properties = FederatedPacketProperties(
                format=FederatedPacketFormat.JSON.name,
                name=name,
                tag=tag,
                delivery_mode=1,
                num_partitions=num_partitions,
                partition_size=partition_size,
                packet_key=packet_key
            )
            headers = FederatedPacketHeaders(packet_type=FederatedPacketType.DATASET, routing=channel.routing_info,
                                             properties=properties)
            channel.publish(headers=headers, data=data)
            ELOGGER.debug(f"{log_str} publish to {channel.send_topic}, may be receive by {channel.receive_topic}")
        ELOGGER.debug(f"{log_str} start send kv done")

    def _get_partition_send_func(
            self,
            name: str,
            tag: str,
            num_partitions: int,
            topics: typing.List,
            mq_args: MQArgs,
    ):
        """Build the closure passed to ``RDD.mapPartitionsWithIndex`` for sending."""
        def _fn(index, kvs):
            # todo: send row
            return self._partition_send(
                index, kvs, name, tag, num_partitions, topics, mq_args
            )

        return _fn

    def _partition_send(
            self,
            index: int,
            kvs,
            name: str,
            tag: str,
            num_partitions: int,
            topics: typing.List,
            mq_args: MQArgs,
    ):
        """Stream one RDD partition's (k, v) rows to the peers (executor side).

        Each key/value is pickled and hex-encoded; a row becomes one "key"
        chunk plus ``row_chunk_num`` "value" chunks of at most
        ``mq_args.max_message_size`` hex characters.  Chunks accumulate in a
        ``DataStream`` buffer that is flushed via ``_send_kv`` whenever it
        would overflow, and a final packet carrying the true row count
        (``partition_size=row_index``) terminates the partition.

        Returns:
            ``[1]`` so Spark has something to count when forcing execution.
        """
        log_str = f"[name={name}, tag={tag}, partition_index={index}/{num_partitions}]"
        ELOGGER.debug(f"{log_str} start send partition, max message size: {mq_args.max_message_size}")
        channels = self._get_channels_by_index(
            index=index, topics=topics, mq_args=mq_args
        )

        datastream = DataStream()
        base_packet_key = str(index)
        packet_key_idx = 0
        row_index = 0

        for k, v in kvs:
            row_index += 1
            k_hex = p_dumps(k).hex()
            v_hex = p_dumps(v).hex()

            row_size = len(v_hex)
            # Ceiling division: number of value chunks this row needs.
            row_chunk_num = row_size // int(mq_args.max_message_size) + (
                1 if row_size % int(mq_args.max_message_size) > 0 else 0)
            ELOGGER.debug(
                f"{log_str} row: {row_index}, row size: {row_size}, row chunk num: {row_chunk_num}, start send")
            chunk = {"row": row_index, "row_chunk_num": row_chunk_num, "type": "key", "data": k_hex}
            datastream.append(chunk)

            current_position = 0
            row_chunk_index = 0
            while current_position < row_size:
                # NOTE(review): the flush test uses the WHOLE remaining row
                # size rather than the size of the next chunk, so the buffer
                # may be flushed earlier than strictly necessary -- confirm
                # this over-estimation is intended.
                if datastream.get_size() + (row_size - current_position) > mq_args.max_message_size:
                    ELOGGER.debug(f"the size of message is: {datastream.get_size()}")
                    packet_key_idx += 1
                    packet_key = f"{base_packet_key}{_SPLIT_}{packet_key_idx}"
                    self._send_kv(
                        name=name,
                        tag=tag,
                        data=datastream.get_data(),
                        channels=channels,
                        partition_size=-1,
                        num_partitions=num_partitions,
                        packet_key=packet_key,
                    )
                    datastream.clear()
                ELOGGER.debug(f"{log_str} row: {row_index}, send current chunk: {row_chunk_index}")
                datastream.append({"row": row_index, "row_chunk_num": row_chunk_num, "type": "value",
                                   "data": v_hex[current_position:current_position + mq_args.max_message_size],
                                   "row_chunk_index": row_chunk_index})
                current_position += mq_args.max_message_size
                row_chunk_index += 1
            ELOGGER.debug(f"{log_str} row: {row_index}, send done")

        packet_key_idx += 1
        packet_key = _SPLIT_.join([base_packet_key, str(packet_key_idx)])

        ELOGGER.debug(f"{log_str} send partition done")

        # Final (possibly partial) packet carries the true partition size so
        # the receiver knows when it has every row.
        self._send_kv(
            name=name,
            tag=tag,
            data=datastream.get_data(),
            channels=channels,
            partition_size=row_index,
            num_partitions=num_partitions,
            packet_key=packet_key,
        )
        ELOGGER.debug(f"{log_str} send partition {index}/{num_partitions} done")

        return [1]

    def _get_partition_receive_func(
            self,
            name: str,
            tag: str,
            topics_one_address: typing.List[typing.Tuple[str, FederatedPacketRouting]],
            mq_args: MQArgs,
    ):
        """Build the closure passed to ``RDD.mapPartitionsWithIndex`` for receiving."""
        def _fn(index, kvs):
            return self._partition_receive(index, kvs, name, tag, topics_one_address, mq_args)

        return _fn

    def _partition_receive(
            self,
            index: int,
            kvs,
            name: str,
            tag: str,
            topics_one_address: typing.List[typing.Tuple[str, FederatedPacketRouting]],
            mq_args: MQArgs,
    ):
        """Reassemble one dataset partition from its MQ topic (executor side).

        Mirrors ``_partition_send``: consumes DATASET packets, de-duplicates
        them by packet key, stitches hex value chunks back together per row,
        and unpickles each completed (key, value) pair.  Terminates once the
        completed-row count matches the ``partition_size`` announced by the
        sender's final packet.

        Returns:
            List of (key, value) rows; implicitly ``None`` if the consumer
            stream ends before the partition completes (NOTE(review): confirm
            the channel blocks until all packets arrive).

        Raises:
            ValueError: if a non-DATASET packet arrives on this channel.
            Exception: if a chunk has an unknown ``type``.
        """
        routing_info = topics_one_address[index][1]
        channel = MQ.get_channel(mq_args, routing_info)

        packet_key_cache = set()
        rows_cache = {}
        count = 0
        # -1 means "total row count not yet announced by the sender".
        partition_size = -1
        all_data = []

        log_str = f"[name={name}, tag={tag}, partition={index}]"

        ELOGGER.debug(f"{log_str} start receive partition")

        for deliver, headers, data in channel.consume():
            properties: FederatedPacketProperties = headers.properties
            if properties.name != name or properties.tag != tag:
                # todo: fix this
                channel.ack(deliver=deliver)
                ELOGGER.debug(
                    f"{log_str} got error name: {properties.name} and tag: {properties.tag}"
                )
                continue

            if headers.packet_type == FederatedPacketType.DATASET:
                # Drop redelivered packets we already processed.
                if properties.packet_key in packet_key_cache:
                    ELOGGER.debug(f"{log_str} message_key : {properties.packet_key} is duplicated")
                    channel.ack(deliver=deliver)
                    continue

                packet_key_cache.add(properties.packet_key)

                if properties.partition_size >= 0:
                    partition_size = properties.partition_size

                row_chunks = json.loads(data)
                for chunk in row_chunks:
                    row_index = chunk["row"]
                    if row_index not in rows_cache:
                        rows_cache[row_index] = {"key": None, "row_chunk_get_num": 0,
                                                 "value": [None] * chunk["row_chunk_num"], "enabled": True}
                    row = rows_cache[row_index]
                    # "enabled" flips to False once the row is fully emitted,
                    # so late duplicate chunks are ignored.
                    if not row["enabled"]:
                        ELOGGER.debug(f"{log_str} duplicate row")
                        continue
                    if chunk["type"] == "key":
                        row["key"] = chunk["data"]
                    elif chunk["type"] == "value":
                        row["value"][chunk["row_chunk_index"]] = chunk["data"]
                        row["row_chunk_get_num"] += 1
                    else:
                        raise Exception(f"not support {chunk['type']} chunk")
                    if row["row_chunk_get_num"] == chunk["row_chunk_num"] and row["key"] is not None:
                        all_data.append((
                            p_loads(bytes.fromhex(row["key"])),
                            p_loads(bytes.fromhex("".join(row["value"]))),
                        ))
                        ELOGGER.debug(
                            f"{log_str} receive row: {row_index}, row chunk num: {row['row_chunk_get_num']} done")
                        count += 1
                        # Free the chunk buffer but keep the entry as a tombstone.
                        del row["value"][:]
                        row["enabled"] = False

                ELOGGER.debug(f"{log_str} count: {count}, partition size: {partition_size}")
                channel.ack(deliver=deliver)

                if count == partition_size:
                    channel.cancel()
                    return all_data
            else:
                raise ValueError(f"require {FederatedPacketType.DATASET}, but {headers.packet_type}")
        ELOGGER.debug(f"{log_str} receive partition done")

    def _remote_tag_not_duplicate(self, name, tag, addresses) -> bool:
        """Record (name, tag, address) sends; False if any was already sent.

        NOTE: backed by the module-level ``_REMOTE_HISTORY`` set, shared by
        all context instances in this process.
        """
        for address in addresses:
            if (name, tag, address) in _REMOTE_HISTORY:
                return False
            _REMOTE_HISTORY.add((name, tag, address))
        return True

    def _get_tag_not_duplicate(self, name, tag, address) -> bool:
        """Record a (name, tag, address) get; False if it was already used.

        NOTE: backed by the module-level ``_GET_HISTORY`` set, shared by all
        context instances in this process.
        """
        if (name, tag, address) in _GET_HISTORY:
            return False
        _GET_HISTORY.add((name, tag, address))
        return True
