from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import StructType, StructField, StringType

from algorithm.abstract_algorithm import AbstractAlgorithm
from common_config import *
from .abstract_runner_framework import AbstractRunnerFrameWork, AbstractFrameWorkFactory, T
from .source_reader import SparkSourceReader
from .spark_utils import get_spark_session, spark_rows_to_df

# Fallback settings used by SparkFrameWorkFactory when the caller's
# params dict does not override them (values come from common_config).
default_config = dict(
    path=result_root_path,
    save_mode=spark_save_mode,
    partition_columns=spark_partition_columns,
)


class SparkFrameWorkFactory(AbstractFrameWorkFactory):
    """Builds a :class:`SparkFrameWork` from a parameter dict.

    Missing keys fall back to ``default_config``. Note the asymmetry:
    a caller-supplied ``partition_columns`` is a comma-separated string
    and gets split into a list, while the default is used as-is.
    """

    def create(self, params) -> AbstractRunnerFrameWork:
        """Create a SparkFrameWork.

        :param params: dict that may contain 'path', 'save_mode' and
            'partition_columns' (comma-separated string) overrides.
        :return: a configured SparkFrameWork instance.
        """
        path = params.get('path', default_config['path'])
        save_mode = params.get('save_mode', default_config['save_mode'])
        if 'partition_columns' in params:
            # Caller value is a comma-separated string -> list of columns.
            partition_columns = str(params['partition_columns']).split(',')
        else:
            partition_columns = default_config['partition_columns']
        return SparkFrameWork(path, save_mode, partition_columns)


class SparkFrameWork(AbstractRunnerFrameWork[DataFrame]):
    """Spark implementation of the runner framework.

    Reads source data through a SparkSourceReader, runs a detection
    algorithm per ``sn`` partition, attaches result metadata columns,
    and writes the detections as partitioned CSV files.
    """

    # NOTE(review): created eagerly at class-definition (import) time and
    # shared by every instance — confirm this is intentional.
    spark: SparkSession = get_spark_session("detect spark session")

    # Schema of the rows emitted by algorithm.run: one detected interval
    # (start_time, end_time) per device serial number (sn).
    schema = StructType([
        StructField('sn', StringType()),
        StructField('start_time', StringType()),
        StructField('end_time', StringType())
    ])

    def __init__(self,
                 path,
                 save_mode,
                 partition_columns):
        """
        :param path: root output directory for result CSVs.
        :param save_mode: Spark DataFrameWriter save mode (e.g. 'overwrite').
        :param partition_columns: column names passed to partitionBy on write.
        """
        self.root_path = path
        self.partitions_columns = partition_columns
        self.save_mode = save_mode

    def close(self):
        """Nothing to release; the class-level SparkSession is left running."""
        pass

    def get_source(self, reader: SparkSourceReader) -> DataFrame:
        """Delegate reading to the supplied reader using the shared session."""
        return reader.read(self.spark)

    def filter(self, data: T) -> T:
        """No-op filter hook; subclasses may override to prune the input."""
        return data

    def detect(self, data: DataFrame, algorithm: AbstractAlgorithm) -> DataFrame:
        """Run ``algorithm`` over the data and attach result metadata.

        The input is repartitioned by ``sn`` (100 partitions) and sorted by
        (sn, timestamp) within each partition so the algorithm sees each
        device's records in time order.

        :param data: source DataFrame; must contain 'sn' and 'timestamp'.
        :param algorithm: detection algorithm whose ``run`` consumes one
            partition's records and yields rows matching ``self.schema``.
        :return: DataFrame of detections enriched with metadata columns.
        """
        partitioned = (
            data.repartition(100, "sn")
            .sortWithinPartitions("sn", "timestamp")
            # project helper; presumably converts partition rows into the
            # structure algorithm.run expects — see spark_utils.
            .rdd.mapPartitions(spark_rows_to_df)
        )
        detections: DataFrame = partitioned.mapPartitions(algorithm.run).toDF(self.schema)
        return (
            detections
            .withColumn('fault_code', lit(algorithm.config.fault_code().value))
            .withColumn('protocol', lit(algorithm.config.proto_col().value))
            .withColumn('detect_time', current_timestamp())
            # Spark substring() is 1-based (pos 0 is treated the same as 1);
            # take the leading "yyyy-MM-dd" portion of end_time.
            .withColumn('p_date', substring(col("end_time"), 1, 10))
            .withColumn('system_type', lit(algorithm.config.system_tye().value))
            .withColumn('version', lit(algorithm.config.version()))
        )

    def write(self, data: DataFrame):
        """Write detections as CSV (with header) under ``root_path``,
        partitioned on disk by the configured partition columns."""
        writer = (
            data.repartition(300, 'p_date', 'fault_code')
            .write
            .partitionBy(self.partitions_columns)
            .option("header", "true")
        )
        writer.mode(self.save_mode).csv(self.root_path)
