package com.zhny.algorithm.sql;

import com.zhny.algorithm.bean.RFDbDataBean;
import com.zhny.algorithm.bean.RFDbResultBean;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.*;

public class NaiveBayesSQLUtil {

    // NOTE(review): connection details and credentials are hard-coded; consider
    // moving them to external configuration.
    private final static String DB_URL = "jdbc:mysql://localhost:3306/zhny?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=CONVERT_TO_NULL&useSSL=false&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true";
    private final static String DB_USER = "zhny";
    private final static String DB_PASSWORD = "zhny@123";

    /** Utility class: all members are static, so instantiation is disallowed. */
    private NaiveBayesSQLUtil() {
    }

    /**
     * Builds the local SparkSession shared by every entry point.
     * getOrCreate() reuses an already-running session, so repeated calls are cheap.
     */
    private static SparkSession buildSparkSession() {
        return SparkSession
                .builder()
                .appName("rf sql")
                .config("spark.driver.allowMultipleContexts", "true")
                .master("local[4]")
                .getOrCreate();
    }

    /** JDBC connection properties (user/password) for the MySQL source. */
    private static Properties buildConnectionProperties() {
        Properties connectionProperties = new Properties();
        connectionProperties.put("user", DB_USER);
        connectionProperties.put("password", DB_PASSWORD);
        return connectionProperties;
    }

    /**
     * Writes one line per list entry to {@code target}, terminated by '\n'.
     * UTF-8 is written explicitly because Spark's textFile/loadLibSVMFile read
     * the file back as UTF-8; FileWriter's platform-default charset could
     * mismatch on non-UTF-8 platforms. Errors are logged and swallowed, matching
     * the original best-effort behavior.
     */
    private static void writeLines(File target, List<String> lines) {
        try (BufferedWriter writer = Files.newBufferedWriter(target.toPath(), StandardCharsets.UTF_8)) {
            for (String line : lines) {
                writer.write(line);
                writer.write('\n');
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Trains a NaiveBayes model from MySQL data and saves it to disk.
     *
     * Each row is converted to a libsvm line ("label 1:v1 2:v2 ..."): column 0
     * holds the label as a string (defaulting to "1" when NULL) and columns
     * 1..columnNum-1 hold the numeric features.
     *
     * @param inputSql      dbtable expression for spark.read().jdbc(); a raw
     *                      SELECT must be wrapped as "(select ...) alias" —
     *                      presumably callers already pass such an expression;
     *                      TODO confirm against call sites
     * @param modalSavePath directory the trained model is saved to
     * @param columnNum     number of columns returned by the query
     */
    public static void excMySQLSelectPredictModel(String inputSql,
                                                  String modalSavePath,
                                                  int columnNum) {
        SparkSession spark = buildSparkSession();

        Dataset<Row> jdbcDF = spark.read().jdbc(DB_URL, inputSql, buildConnectionProperties());

        jdbcDF.show();

        // Row layout: label (string) followed by numeric feature columns.
        JavaRDD<String> result = jdbcDF.javaRDD().map(el -> {
            StringBuilder line = new StringBuilder();
            String label = el.getString(0);
            line.append(label == null ? "1" : label);
            for (int i = 1; i < columnNum; i++) {
                line.append(' ').append(i).append(':').append(el.getDouble(i));
            }
            return line.toString();
        });

        // Spill the libsvm lines to a temp file so MLUtils can parse them back.
        String filePath = "tmp.txt";
        File tmpFile = new File(filePath);
        try {
            writeLines(tmpFile, result.collect());

            JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
            JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(jsc.sc(), filePath).toJavaRDD();

            // Lambda = 1.0: add-one (Laplace) smoothing.
            NaiveBayesModel model = NaiveBayes.train(data.rdd(), 1.0);

            model.save(jsc.sc(), modalSavePath);
        } finally {
            // Always remove the temp file, even when training or saving fails.
            if (tmpFile.exists()) {
                tmpFile.delete();
            }
        }
    }

    /**
     * Loads a saved NaiveBayes model, predicts every row returned by
     * {@code querySql}, and appends (prediction, key) pairs to
     * {@code db.resultTable} via JDBC.
     *
     * @param modelFilePath directory the model was saved to
     * @param querySql      dbtable expression; column 0 is the row key (string),
     *                      columns 1..columnNum-1 are the numeric features
     * @param db            target database name
     * @param resultTable   target table name
     * @param columnNum     number of columns returned by the query
     */
    public static void excMySQLDataComputeAndSave(String modelFilePath,
                                                  String querySql,
                                                  String db,
                                                  String resultTable,
                                                  int columnNum) {
        SparkSession spark = buildSparkSession();

        Dataset<Row> jdbcDF = spark.read().jdbc(DB_URL, querySql, buildConnectionProperties());

        jdbcDF.show();

        // Encode each row as "key,1 1:v1 2:v2 ..." — the part after the comma is
        // a libsvm line with a dummy label of 1 (true labels are unknown here).
        // NOTE(review): a key containing ',' would break the split below — keys
        // are presumably plain ids; verify against the source table.
        JavaRDD<String> result = jdbcDF.javaRDD().map(el -> {
            StringBuilder line = new StringBuilder(el.getString(0)).append(",1");
            for (int i = 1; i < columnNum; i++) {
                line.append(' ').append(i).append(':').append(el.getDouble(i));
            }
            return line.toString();
        });

        String filePath = "tmp.txt";
        File tmpFile = new File(filePath);
        String resultFile = "resultFile.txt";
        File resultFileHandle = new File(resultFile);
        try {
            // Split keys from the libsvm payload. keyList preserves the row
            // order of the temp file, so index i lines up with prediction i.
            List<String> keyList = new ArrayList<>();
            List<String> libsvmLines = new ArrayList<>();
            for (String line : result.collect()) {
                String[] elements = line.split(",");
                libsvmLines.add(elements[1]);
                keyList.add(elements[0]);
            }
            writeLines(tmpFile, libsvmLines);

            JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

            NaiveBayesModel model = NaiveBayesModel.load(jsc.sc(), modelFilePath);
            JavaRDD<LabeledPoint> predictData = MLUtils.loadLibSVMFile(jsc.sc(), filePath).toJavaRDD();

            // "Test error" is computed against the dummy label 1 written above,
            // so it is only meaningful when the rows carry real labels; kept as
            // a diagnostic to preserve the original output.
            JavaPairRDD<Double, Double> predictionAndLabel =
                    predictData.mapToPair(p -> new Tuple2<>(model.predict(p.features()), p.label()));
            double testErr =
                    predictionAndLabel.filter(pl -> !pl._1().equals(pl._2())).count() / (double) predictData.count();
            System.out.println("Test Error: " + testErr);

            List<String> predictions = predictData
                    .map(p -> model.predict(p.features()) + "")
                    .collect();

            // Pair each prediction with its key, "prediction key" per line.
            List<String> resultLines = new ArrayList<>(predictions.size());
            for (int i = 0; i < predictions.size(); i++) {
                resultLines.add(predictions.get(i) + " " + keyList.get(i));
            }
            writeLines(resultFileHandle, resultLines);

            // Re-read the "prediction key..." lines and persist via JDBC append.
            JavaRDD<RFDbResultBean> resultRDD = spark.read()
                    .textFile(resultFile)
                    .javaRDD()
                    .map(NaiveBayesSQLUtil::toResultBean);

            Dataset<Row> resultDF = spark.createDataFrame(resultRDD, RFDbResultBean.class);

            resultDF.printSchema();

            resultDF.write().mode(SaveMode.Append).format("jdbc")
                    .option("url", DB_URL)
                    .option("dbtable", db + "." + resultTable)
                    .option("user", DB_USER)
                    .option("password", DB_PASSWORD)
                    .save();
        } finally {
            // Clean up both intermediate files regardless of success.
            if (tmpFile.exists()) {
                tmpFile.delete();
            }
            if (resultFileHandle.exists()) {
                resultFileHandle.delete();
            }
        }
    }

    /**
     * Parses a "prediction key [key ...]" line into an RFDbResultBean.
     * Everything after the first token is re-joined with a trailing space,
     * matching the original on-disk format.
     */
    private static RFDbResultBean toResultBean(String line) {
        String[] values = line.split(" ");
        StringBuilder data = new StringBuilder();
        for (int i = 1; i < values.length; i++) {
            data.append(values[i]).append(' ');
        }
        return new RFDbResultBean(values[0], data.toString());
    }

    /**
     * Reads a text file of "intKey value value ..." lines and appends its
     * contents to {@code db.resultTable} via JDBC.
     *
     * @param saveFilePath path to the text file to import
     * @param db           target database name
     * @param resultTable  target table name
     */
    public static void excMySQLSave(String saveFilePath, String db, String resultTable) {
        SparkSession spark = buildSparkSession();

        JavaRDD<RFDbDataBean> resultRDD = spark.read()
                .textFile(saveFilePath)
                .javaRDD()
                .map(line -> {
                    String[] values = line.split(" ");
                    // Re-join everything after the integer key, keeping the
                    // original trailing-space format.
                    StringBuilder data = new StringBuilder();
                    for (int i = 1; i < values.length; i++) {
                        data.append(values[i]).append(' ');
                    }
                    return new RFDbDataBean(Integer.parseInt(values[0]), data.toString());
                });

        Dataset<Row> resultDF = spark.createDataFrame(resultRDD, RFDbDataBean.class);

        resultDF.printSchema();

        resultDF.write().mode(SaveMode.Append).format("jdbc")
                .option("url", DB_URL)
                .option("dbtable", db + "." + resultTable)
                .option("user", DB_USER)
                .option("password", DB_PASSWORD)
                .save();
    }
}
