#
#  Copyright 2022 The Open Islands Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import os.path
import typing

import pyspark
from pyspark import SparkContext
from pyspark.sql import SparkSession

from pyoi.constant import DatasetFormat, StandardDatasetFormat
from pyoi.constant import StorageEngine
from pyoi.context.types import ExecutorLogger
from pyoi.types import DataIO
from pyoi.types import DataSchema
from pyoi.util import log_utils
from pyoi.util.base_utils import deserialize_hex
from pyoi.util.base_utils import json_dumps, serialize_hex
from pyoi.util.data_schema_utils import DataSchemaUtils
from ._materialize import materialize

# Module-level logger for driver-side messages.
LOGGER = log_utils.getLogger()
# Executor-side wrapper around the same logger; presumably forwards log
# records from Spark executors back to the driver — confirm in ExecutorLogger.
ELOGGER = ExecutorLogger(logger=LOGGER)


class SparkRDDStorage:
    """Reads and writes text datasets as Spark RDDs.

    Standard dataset formats (currently CSV) go through the Spark DataFrame
    reader/writer; any other text dataset is handled line-by-line via
    ``textFile``/``saveAsTextFile``, with optional hex (de)serialization
    recorded in the schema metadata under the ``hex_serialized`` key.
    """

    def __init__(self, spark_session: SparkSession, spark_context: SparkContext):
        self._spark_session = spark_session
        self._spark_context = spark_context

    def load_text(self, data_input: DataIO) -> typing.Tuple[pyspark.RDD, DataSchema]:
        """Load ``data_input`` and return a ``(materialized RDD, schema)`` pair.

        Ensures a schema is available, then dispatches to the standard or
        nonstandard loader based on the dataset format name.
        """
        LOGGER.info(f"load from {json_dumps(data_input)}")
        if data_input.schema is None:
            # NOTE(review): the return value is discarded — presumably
            # load_schema populates data_input.schema in place; confirm
            # against DataSchemaUtils.
            DataSchemaUtils.load_schema(data_input)
        if data_input.format.name in StandardDatasetFormat.names():
            return self.load_standard_text(data_input)
        else:
            return self.load_nonstandard_text(data_input)

    def save_text(self, schema: DataSchema, rdd: pyspark.RDD, data_output: DataIO):
        """Save ``rdd`` to ``data_output`` and persist its schema alongside.

        Dispatches to the standard or nonstandard writer based on the output
        format name. The schema is saved *after* the data so that any metadata
        set by the writer (e.g. ``hex_serialized``) is included.
        """
        LOGGER.info(f"try to save rdd to {json_dumps(data_output)}")
        if data_output.format.name in StandardDatasetFormat.names():
            self.save_standard_text(schema=schema, rdd=rdd, data_output=data_output)
        else:
            self.save_nonstandard_text(schema=schema, rdd=rdd, data_output=data_output)
        DataSchemaUtils.save_schema(schema=schema, data_output=data_output)

    def load_standard_text(self, data_input: DataIO) -> typing.Tuple[pyspark.RDD, DataSchema]:
        """Load a standard-format dataset; only LOCALFS is supported.

        Raises:
            RuntimeError: if the storage engine is not LOCALFS.
        """
        if data_input.engine == StorageEngine.LOCALFS:
            return self.standard_text_from_localfs(data_input)
        else:
            raise RuntimeError(f"not support {data_input.engine} storage engine")

    def load_nonstandard_text(self, data_input: DataIO) -> typing.Tuple[pyspark.RDD, DataSchema]:
        """Load a nonstandard text dataset; only LOCALFS is supported.

        Raises:
            RuntimeError: if the storage engine is not LOCALFS.
        """
        if data_input.engine == StorageEngine.LOCALFS:
            return self.nonstandard_text_from_localfs(data_input)
        else:
            raise RuntimeError(f"not support {data_input.engine} storage engine")

    def standard_text_from_localfs(self, data_input: DataIO) -> typing.Tuple[pyspark.RDD, DataSchema]:
        """Read a CSV dataset from local storage into a row-list RDD.

        Column names are taken from the CSV header when
        ``schema.include_header`` is set; otherwise the returned schema has an
        empty column list.

        Raises:
            RuntimeError: if the dataset format is not CSV.
        """
        if data_input.format == DatasetFormat.CSV:
            # Fixed typo in log message: was "load csv test from".
            LOGGER.info(f"load csv text from {data_input.uri}")
            df = self._spark_session.read.options(delimiter=data_input.schema.delimiter, header=(
                "true" if data_input.schema.include_header else "false")).format("csv").load(path=data_input.uri)
            # Convert each Row to a plain list so downstream code sees raw values.
            rdd = df.rdd.map(list).repartition(data_input.num_partitions)
            schema = DataSchema()
            if data_input.schema.include_header:
                schema.columns = [field.name for field in df.schema.fields]
            else:
                schema.columns = []
            schema.include_header = data_input.schema.include_header
            schema.delimiter = data_input.schema.delimiter
        else:
            raise RuntimeError(f"not support {data_input.format} format")
        return materialize(rdd), schema

    def nonstandard_text_from_localfs(self, data_input: DataIO) -> typing.Tuple[pyspark.RDD, DataSchema]:
        """Read a nonstandard text dataset from local storage line-by-line.

        If the URI is a directory, reads every entry directly under it.
        Lines are hex-deserialized when the schema metadata flags
        ``hex_serialized`` (set by :meth:`save_nonstandard_text`).
        """
        if os.path.isdir(data_input.uri):
            read_path = f"{data_input.uri}/*"
        else:
            read_path = data_input.uri
        if data_input.schema.metadata.get("hex_serialized", False):
            rdd = self._spark_context.textFile(read_path).map(deserialize_hex).repartition(data_input.num_partitions)
        else:
            rdd = self._spark_context.textFile(read_path).repartition(data_input.num_partitions)
        return materialize(rdd), data_input.schema

    def save_standard_text(self, schema: DataSchema, rdd: pyspark.RDD, data_output: DataIO):
        """Write ``rdd`` as a CSV dataset to LOCALFS or HDFS.

        Raises:
            RuntimeError: if the engine or format is unsupported.
        """
        if data_output.engine in {StorageEngine.LOCALFS, StorageEngine.HDFS}:
            if data_output.format == DatasetFormat.CSV:
                df = rdd.toDF(schema.columns)
                df.write.options(header="true" if schema.include_header else "false",
                                 delimiter=schema.delimiter).format("csv").save(path=data_output.uri)
            else:
                raise RuntimeError(f"format {data_output.format.name} is not supported")
        else:
            raise RuntimeError(f"{data_output.engine} is not supported with spark")

    def save_nonstandard_text(self, schema: DataSchema, rdd: pyspark.RDD, data_output: DataIO):
        """Hex-serialize each record and write it as text to LOCALFS or HDFS.

        Marks ``hex_serialized`` in the schema metadata so a later
        :meth:`nonstandard_text_from_localfs` knows to decode the lines.

        Raises:
            RuntimeError: if the engine is unsupported.
        """
        if data_output.engine in {StorageEngine.LOCALFS, StorageEngine.HDFS}:
            # Pass serialize_hex directly instead of wrapping it in a lambda.
            rdd.map(serialize_hex).repartition(data_output.num_partitions).saveAsTextFile(data_output.uri)
            schema.metadata["hex_serialized"] = True
        else:
            raise RuntimeError(f"{data_output.engine} is not supported with spark")
