#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml import sklearn2pmml
import pandas as pd

# Load the bundled iris dataset (150 samples, 4 numeric features, 3 classes).
iris = load_iris()
x, y = iris.data, iris.target
print(iris.feature_names)

# Mirror the raw arrays as a labelled DataFrame for pipeline fitting.
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
iris_df["target"] = iris.target

# Hold out 20% of the rows for evaluation (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=42
)


# print(x_test)

def train():
    """Train a random-forest iris classifier and export it as PMML.

    Fits only on the training split (the original fitted on the full
    ``iris_df``, which leaked the held-out rows into training and made
    the reported accuracy meaningless), evaluates on the held-out
    split, and writes ``RandomForestIris.pmml`` to the working
    directory.
    """
    pipeline = PMMLPipeline([
        ("classifier", RandomForestClassifier(n_estimators=100, random_state=42))
    ])

    # Wrap the splits in DataFrames so the fitted pipeline (and the PMML
    # export) keeps the original feature names; predicting on a raw
    # array after fitting on named columns triggers a name mismatch.
    train_df = pd.DataFrame(x_train, columns=iris.feature_names)
    test_df = pd.DataFrame(x_test, columns=iris.feature_names)

    # Train on the training split only — no leakage of the test rows.
    pipeline.fit(train_df, y_train)

    predictions = pipeline.predict(test_df)
    print(f'model predict {predictions}')

    accuracy = accuracy_score(y_test, predictions)
    print(f"Accuracy: {accuracy:.2f}")

    # Export the fitted pipeline as a PMML file.
    sklearn2pmml(pipeline, "RandomForestIris.pmml", with_repr=True)


def predict():
    """Reload the exported PMML model and score one hand-written sample."""
    from pypmml import Model

    load_model = Model.fromFile('RandomForestIris.pmml')

    # One 4-feature row, matching the iris input schema.
    sample = [[5.1, 3.9, 1.8, 6.1]]

    predictions = load_model.predict(sample)
    print(f'load pmml, model predict {predictions}')


def spark_predict():
    """Score the exported PMML model inside Spark via a broadcast + UDF.

    NOTE(review): broadcasting a pypmml ``Model`` means pickling a
    py4j-backed gateway object, which commonly fails on a real cluster —
    presumably why ``spark_predict2`` (per-partition load) exists;
    confirm before relying on this path.
    """
    from pyspark.sql import SparkSession
    from pyspark.ml.feature import VectorAssembler
    from pyspark.sql.functions import udf
    from pyspark.sql.types import FloatType
    from pyspark import SparkFiles
    from pypmml import Model

    # Create the SparkSession (local mode, all cores).
    spark = SparkSession.builder.master("local[*]").appName("LoadPickleModel").getOrCreate()
    spark.sparkContext.addFile('RandomForestIris.pmml')

    model_path = SparkFiles.get("RandomForestIris.pmml")
    print(f'model path: {model_path}')
    # Load the PMML model — from the driver's working directory rather
    # than the SparkFiles copy (the commented line is the alternative).
    # model = Model.fromFile(model_path)
    model = Model.fromFile('RandomForestIris.pmml')

    # Broadcast the model so every executor can use it.
    model_sc = spark.sparkContext.broadcast(model)

    # UDF wrapping the model: predicts one row and keeps the first value
    # of the result as a float.
    # NOTE(review): ``features`` arrives as a Spark ML Vector — assumes
    # pypmml accepts it wrapped in a list and that element order matches
    # the model's input fields; verify.
    @udf(returnType=FloatType())
    def model_predict(features):
        return float(model_sc.value.predict([features])[0])

    data = [(5.1, 3.5, 1.4, 0.2), (6.2, 3.4, 5.4, 2.3)]
    columns = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
    df = spark.createDataFrame(data, columns)

    # Combine the feature columns into a single vector column.
    assembler = VectorAssembler(inputCols=columns, outputCol="features")
    df_features = assembler.transform(df)
    df_features.printSchema()

    # Apply the UDF to produce the prediction column.
    df_predictions = df_features.withColumn("prediction", model_predict("features"))

    # Show the prediction results.
    df_predictions.show()


def spark_predict2():
    """Score the exported PMML model in Spark by loading it per partition.

    Avoids broadcasting the model: ``addFile`` ships the PMML file to
    each executor, and every partition resolves and loads its own
    ``Model`` instance.
    """
    from pyspark.sql import SparkSession
    from pyspark import SparkFiles
    from pypmml import Model
    # from pypmml import PMMLContext
    # PMMLContext.getOrCreate(gateway="jpype")

    # Create the SparkSession (local mode, all cores).
    spark = SparkSession.builder.master("local[*]").appName("LoadPmmlModel").getOrCreate()
    spark.sparkContext.addFile('RandomForestIris.pmml')
    # spark.sparkContext.addFile('file:///d:/code/learn/python-learn/venv/share/py4j/py4j0.10.9.7.jar')
    # # directory
    # spark.sparkContext.addFile('file:///d:/code/learn/python-learn/venv/lib/site-packages/jpype', recursive=True)
    # spark.sparkContext.addFile('file:///d:/code/learn/python-learn/venv/lib/site-packages/_jpype.cp38-win_amd64.pyd')
    # spark.sparkContext.addFile('file:///d:/code/learn/python-learn/venv/lib/site-packages/py4j', recursive=True)
    # spark.sparkContext.addFile('file:///d:/code/learn/python-learn/venv/lib/site-packages/pypmml', recursive=True)

    def predict_partition(partition):
        # Resolve the shipped PMML file on this executor; load the model
        # once per partition, not once per row.
        model_path = SparkFiles.get("RandomForestIris.pmml")
        print(f'model path: {model_path}')
        # Load the PMML model.
        # model = Model.fromFile('RandomForestIris.pmml')
        model = Model.fromFile(model_path)
        # data = [
        #     [5.1, 3.9, 1.8, 6.1]
        # ]
        #
        # predictions = model.predict(data)
        # print(f'load pmml, model predict {predictions}')
        predictions = []
        for row in partition:
            # Assume `row` is a tuple/list containing the features.
            # NOTE(review): relies on ``model.fieldMap`` iterating the
            # input fields in the same order as the row's columns and
            # containing only input fields — verify against pypmml docs.
            features = {f: row[i] for i, f in enumerate(model.fieldMap.keys())}
            prediction = model.apply(features)
            predictions.append(prediction)
        yield from predictions

    data = [(5.1, 3.5, 1.4, 0.2), (6.2, 3.4, 5.4, 2.3)]
    columns = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
    df = spark.createDataFrame(data, columns)

    predictions = df.rdd.mapPartitions(predict_partition)

    # Convert the per-row prediction records back into a DataFrame.
    # NOTE(review): ``toDF("prediction")`` assumes each record has
    # exactly one column — PMML classification output usually carries
    # several (class probabilities + predicted label); confirm.
    prediction_df = spark.createDataFrame(predictions).toDF("prediction")

    prediction_df.show()

    spark.stop()


if __name__ == '__main__':
    # Entry point: the earlier stages (train / predict / spark_predict)
    # are left commented out — uncomment the stage you want to run.
    # train()
    # predict()
    # spark_predict()
    spark_predict2()
