package main.scala.spark

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.SparkSession

import scala.collection.Map

/**
 * Trains a logistic-regression model over tab-separated training data read
 * from HDFS ("label\tfeat:val;feat:val;...") and writes one
 * "featureName\tweight" line per coefficient back to HDFS.
 *
 * Master is intentionally NOT set here; it comes from spark-submit.
 */
object RecommenderModelDF {
  def main(args: Array[String]): Unit = {
    // Silence noisy Spark/Hadoop logging. NOTE: the previous logger name
    // "org.apache.main.scala.spark" matched no real logger, so nothing was
    // actually silenced; "org.apache" covers the spark/hadoop hierarchies.
    Logger.getLogger("org.apache").setLevel(Level.ERROR)

    val spark = SparkSession.builder()
      .appName("recommender-ml")
      // No .master("local[*]") — the master comes from spark-submit.
      .getOrCreate()

    import spark.implicits._

    // Each line: "<label>\t<feature:value;feature:value;...>".
    // Sample 10% (no replacement, fixed seed) and cache for the passes below.
    val rawData = spark.read.textFile("hdfs://192.168.206.129:8020/opt/data/traindata")
      .map(_.split("\t"))
      .filter(_.length >= 2)
      .sample(false, 0.1, 100L)
      .cache()

    // Build the feature-name -> dense-index dictionary from every distinct
    // feature token. (The ":value" suffix of each token is discarded; features
    // are treated as binary presence indicators.)
    val allFeaturesMap: Map[String, Long] = rawData
      .map(_(1).split(";").map(_.split(":")(0)))
      .flatMap(_.toSeq)
      .distinct()
      .rdd
      .zipWithIndex()
      .collectAsMap()

    val featureCount = allFeaturesMap.size
    // Inverse mapping, used to label coefficients on output.
    val idToFeatureMap: Map[Long, String] = allFeaturesMap.map(_.swap)

    // Convert each record into (label, SparseVector) rows.
    val data = rawData.map { arr =>
      // Map {-1, 1} labels to {0.0, 1.0}; any other value is parsed as-is.
      val label = arr(0) match {
        case "-1"  => 0.0
        case "1"   => 1.0
        case other => other.toDouble
      }
      val featureTokens = arr(1).split(";").map(_.split(":")(0))
      // Tokens missing from the dictionary are dropped; Vectors.sparse
      // requires distinct, ascending indices.
      val indices = featureTokens.flatMap(allFeaturesMap.get(_)).map(_.toInt).distinct.sorted
      val values = Array.fill(indices.length)(1.0)
      (label, Vectors.sparse(featureCount, indices, values))
    }.toDF("label", "features")

    // L2-regularized logistic regression (elasticNetParam = 0 => pure ridge).
    val lr = new LogisticRegression()
      .setMaxIter(10)
      .setRegParam(0.01)
      .setElasticNetParam(0.0)
      .setFitIntercept(true)

    val model = lr.fit(data)

    // Coefficient i corresponds to the feature whose dictionary id is i
    // (the zipWithIndex id used when building the sparse vectors).
    val output = model.coefficients.toArray.zipWithIndex.map { case (w, i) =>
      s"${idToFeatureMap.getOrElse(i.toLong, "")}\t$w"
    }

    // Overwrite the model file on HDFS (same namenode as the input path).
    val fs = org.apache.hadoop.fs.FileSystem.get(spark.sparkContext.hadoopConfiguration)
    val outputPath = new org.apache.hadoop.fs.Path("hdfs://192.168.206.129:8020/opt/data/model")
    if (fs.exists(outputPath)) fs.delete(outputPath, true)

    // try/finally so the HDFS stream is released even if a write fails;
    // PrintWriter.close() also closes the wrapped output stream, so no
    // separate out.close() is needed (the old double-close was redundant).
    val writer = new java.io.PrintWriter(
      new java.io.OutputStreamWriter(fs.create(outputPath), "UTF-8"))
    try {
      output.foreach(line => writer.println(line))
      writer.flush()
    } finally {
      writer.close()
    }

    spark.stop()
  }
}