from os import scandir
from random import randint
from typing import Any

from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql import DataFrame
from sparksampling.core.job.base_job import BaseJob

import numpy as np
from pyspark.sql.types import DoubleType
from sparksampling.core.mlsamplinglib.func import vectorized_feature
from sparksampling.core.mlsamplinglib.func import df_with_column_float
from pyspark.sql.functions import monotonically_increasing_id as mi
class RandforestEvaluationJob_spark(BaseJob):
    """Evaluate sampling quality with random forests.

    Trains one random forest on the full source data and another on the
    sample, has both predict the full data, and scores how closely the
    sample model's predictions agree with the full-data model's
    (agreement measured as multiclass accuracy).
    """

    type_map = {
        'source_path': str,
        'selected_features_list': list,
        'numTrees': int,
        'maxDepth': int,
        'maxBins': int,
        'round': int,
        'label': str,
    }

    def __init__(self, source_path=None, selected_features_list=None, numTrees=20, maxDepth=10, maxBins=32, round=10, label=None, *args, **kwargs):
        super(RandforestEvaluationJob_spark, self).__init__(*args, **kwargs)
        self.source_path = source_path                        # path of the full (unsampled) dataset
        self.selected_features_list = selected_features_list  # feature column names (x)
        self.numTrees = numTrees
        self.maxDepth = maxDepth
        self.maxBins = maxBins
        self.round = round                                    # number of train/evaluate repetitions
        self.label = label                                    # label column name (y)
        self.check_type()

    def RF(self, dataset=None, numTrees=20, maxDepth=10, maxBins=32, predict=None, seed=None):
        """Train a random forest on ``dataset`` and return predictions on ``predict``.

        BUG FIX: the ``seed`` default was previously
        ``np.random.randint(1, 65535)``, which Python evaluates ONCE at
        class-definition time — every call (and thus every evaluation round
        in ``_statistics``) trained with the identical seed, defeating the
        best-of-N-rounds search. A fresh seed is now drawn per call when
        the caller does not supply one.
        """
        if seed is None:
            # np.random.randint's upper bound is exclusive, matching the
            # original range of [1, 65534].
            seed = int(np.random.randint(1, 65535))
        randforest = (RandomForestClassifier(seed=seed)
                      .setNumTrees(numTrees)
                      .setMaxDepth(maxDepth)
                      .setMaxBins(maxBins)
                      .setFeaturesCol('features')
                      .setLabelCol('label')
                      .setPredictionCol('prediction'))
        model = randforest.fit(dataset)
        # predictions on the (possibly different) `predict` frame
        return model.transform(predict)

    def evaluation_prediction(self, prediction, sample_prediction):
        """Accuracy of ``sample_prediction`` with ``prediction`` as ground truth.

        The full-data model's predictions are renamed to 'label' and joined
        to the sample model's predictions on the feature vector.
        NOTE(review): joining on 'features' assumes each feature vector is
        unique per row — duplicated rows would fan out in the join; confirm
        the upstream data guarantees this.
        """
        prediction = prediction.withColumnRenamed('prediction', 'label')
        df = sample_prediction.join(prediction, ['features'])
        # MulticlassClassificationEvaluator requires double-typed columns
        df = df.select(['prediction', 'label']).withColumn("label", df.label.cast(DoubleType())).withColumn(
            "prediction", df.prediction.cast(DoubleType()))
        evaluator_accuracy = MulticlassClassificationEvaluator(predictionCol="prediction", metricName="accuracy",
                                                               labelCol='label')
        return evaluator_accuracy.evaluate(df)

    # source_df: full dataset features (x); df: sample features (x); label: y
    def _statistics(self, df: DataFrame, *args, **kwargs) -> dict:
        """Run ``self.round`` evaluation rounds and return the best score/accuracy."""
        # Read the full source ONCE (was previously read twice — once for
        # labels, once for features) and derive both frames from it.
        source = self._get_df_from_source(self.source_path, dataio=kwargs.get('data_io'))
        source_df_label = source.select(self.label).withColumnRenamed(self.label, "label")
        df_label = df.select(self.label).withColumnRenamed(self.label, "label")
        # cast labels to float
        source_df_label = df_with_column_float(source_df_label)
        df_label = df_with_column_float(df_label)
        # select the feature columns
        source_df = source.select(*self.selected_features_list)
        df = df.select(*self.selected_features_list)
        # assemble the feature columns into a single 'features' vector column
        source_df = vectorized_feature(source_df)
        df = vectorized_feature(df)
        # Re-attach labels to features via a synthetic row id, yielding
        # frames with columns ['label', 'features'].
        # NOTE(review): monotonically_increasing_id only aligns two frames
        # derived from the same source when their partitioning is identical
        # — confirm this holds for the data io in use.
        row_id = mi()  # renamed from `id`, which shadowed the builtin
        source_df = source_df.withColumn("match_id", row_id)
        source_df_label = source_df_label.withColumn("match_id", row_id)
        source_df = source_df.join(source_df_label, source_df_label.match_id == source_df.match_id, 'inner').drop(source_df.match_id).select(['label', 'features'])

        df = df.withColumn("match_id", row_id)
        df_label = df_label.withColumn("match_id", row_id)
        df = df.join(df_label, df_label.match_id == df.match_id, 'inner').drop(df.match_id).select(['label', 'features'])

        # keep the highest-scoring round (seeds now differ per round — see RF)
        acc, score = 0.0, 0.0
        for _ in range(self.round):
            t_acc = self.__randforest_acc(source_df, df, *args, **kwargs)
            t_score = int((t_acc * 100) ** 0.5 * 10)
            if t_score > score:
                acc, score = t_acc, t_score
                print(f"acc:{t_acc}, score:{t_score}")
        return {
            "score": score,
            "accuracy": acc,
        }

    def __randforest_acc(self, source_df: DataFrame, df: DataFrame, *args, **kwargs):
        """One evaluation round: full-data model vs sample model, both predicting the full data."""
        # reference: model trained on the full data, predicting the full data
        predictions = self.RF(source_df, self.numTrees, self.maxDepth, self.maxBins, source_df).select(['prediction', 'features'])
        # candidate: model trained on the sample, predicting the full data
        sample_predictions = self.RF(df, self.numTrees, self.maxDepth, self.maxBins, source_df).select(['prediction', 'features'])
        # agreement of the candidate with the reference
        accuracy = self.evaluation_prediction(predictions, sample_predictions)
        return accuracy
