#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pickle
import joblib

# Load the iris dataset; these module-level globals are read by train().
iris = load_iris()
x, y = iris.data, iris.target

# Split into train/test sets (80/20 split, fixed seed for reproducibility)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
print(x_test)  # debug: dump the held-out feature rows at import time


def train():
    """Fit a random forest on the iris training split, print its test-set
    accuracy, and persist the fitted model to disk twice: once with the
    stdlib ``pickle`` module and once with ``joblib``.

    Reads the module-level globals ``x_train``/``x_test``/``y_train``/``y_test``.
    """
    clf = RandomForestClassifier(n_estimators=100, random_state=42)
    clf.fit(x_train, y_train)

    preds = clf.predict(x_test)
    print(f'model predict {preds}')

    print(f"Accuracy: {accuracy_score(y_test, preds):.2f}")

    # Persist via plain pickle...
    with open('iris_model.pkl', 'wb') as f:
        pickle.dump(clf, f)
    # ...and again via joblib under a second filename (loaded by the
    # joblib/Spark demos below).
    joblib.dump(clf, 'random_forest_model.pkl')


def predict():
    """Deserialize the pickled model from disk and score one hard-coded sample."""
    with open('iris_model.pkl', 'rb') as f:
        model = pickle.load(f)

    # One sample: sepal length/width, petal length/width.
    # NOTE(review): a petal width of 6.1 is far outside the iris range
    # (max ~2.5); possibly a typo for 0.1 — confirm intent.
    sample = [[5.1, 3.9, 1.8, 6.1]]

    result = model.predict(sample)
    print(f'load pkl, model predict {result}')


def predict2():
    """Load the joblib-saved model and score one hard-coded sample.

    Fix: this previously loaded ``'iris_model.pkl'`` (the plain-pickle
    artifact); the joblib artifact written by ``train()`` is
    ``'random_forest_model.pkl'``, so this demo now round-trips the file
    that ``joblib.dump`` actually produced.
    """
    load_model = joblib.load('random_forest_model.pkl')

    # One sample: sepal length/width, petal length/width.
    # NOTE(review): petal width 6.1 is outside the iris range (max ~2.5) —
    # possibly a typo for 0.1; confirm intent.
    data = [
        [5.1, 3.9, 1.8, 6.1]
    ]

    predictions = load_model.predict(data)
    print(f'load pkl, model predict {predictions}')


def spark_predict():
    """Score a small Spark DataFrame with a joblib-saved sklearn model,
    distributing the model file via ``addFile`` and sharing the loaded
    object with executors through a broadcast variable."""
    from pyspark.sql import SparkSession
    from pyspark.ml.feature import VectorAssembler
    from pyspark.sql.functions import udf
    from pyspark.sql.types import FloatType
    from pyspark import SparkFiles

    # Local Spark session; ship the model file to every node.
    spark = (
        SparkSession.builder
        .master("local[*]")
        .appName("LoadPickleModel")
        .getOrCreate()
    )
    spark.sparkContext.addFile('random_forest_model.pkl')

    # Resolve the distributed copy and deserialize it once on the driver.
    local_path = SparkFiles.get("random_forest_model.pkl")
    rf_model = joblib.load(local_path)

    # Broadcast so executors reuse a single deserialized copy.
    bc_model = spark.sparkContext.broadcast(rf_model)

    @udf(returnType=FloatType())
    def pkl_predict(features):
        # `features` is the assembled vector column; wrap it so sklearn
        # receives a 2-D (1 x n_features) input.
        return float(bc_model.value.predict([features])[0])

    rows = [(5.1, 3.5, 1.4, 0.2), (6.2, 3.4, 5.4, 2.3)]
    cols = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
    df = spark.createDataFrame(rows, cols)

    # Combine the four feature columns into a single vector column.
    assembled = VectorAssembler(inputCols=cols, outputCol="features").transform(df)
    assembled.printSchema()

    # Apply the UDF and display the scored rows.
    assembled.withColumn("prediction", pkl_predict("features")).show()


def spark_predict2():
    """Score a small Spark DataFrame with a joblib-loaded sklearn model
    captured directly in the UDF closure (no explicit broadcast — Spark
    serializes the closure, model included, when shipping the UDF)."""
    from pyspark.sql import SparkSession
    from pyspark.ml.feature import VectorAssembler
    from pyspark.sql.functions import udf
    from pyspark.sql.types import FloatType

    # Single-core local Spark session.
    spark = SparkSession.builder.master("local").appName("LoadPickleModel").getOrCreate()

    # Deserialize the model on the driver.
    rf_model = joblib.load('random_forest_model.pkl')

    @udf(returnType=FloatType())
    def pkl_predict(features):
        # Wrap the vector so sklearn gets a 2-D (1 x n_features) input.
        return float(rf_model.predict([features])[0])

    rows = [(5.1, 3.5, 1.4, 0.2), (6.2, 3.4, 5.4, 2.3)]
    cols = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
    df = spark.createDataFrame(rows, cols)

    # Combine the four feature columns into a single vector column.
    assembled = VectorAssembler(inputCols=cols, outputCol="features").transform(df)
    assembled.printSchema()

    # Apply the UDF and display the scored rows.
    assembled.withColumn("prediction", pkl_predict("features")).show()


if __name__ == '__main__':
    # Uncomment the demo to run. predict()/predict2() and the spark demos
    # require the model files produced by a prior train() run.
    # train()
    # predict()
    predict2()
    # spark_predict()
    # spark_predict2()
