package com.itheima.sparkml.exercise

import org.apache.spark.SparkConf
import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object _7DistributeMatrixRowMatrix {
  /**
   * Demonstrates Spark MLlib's distributed `RowMatrix`: reads space-separated
   * numeric rows from a text file, wraps each line as a dense MLlib vector,
   * computes column-wise summary statistics and prints the matrix dimensions.
   *
   * @param args optional — `args(0)` overrides the default input file path
   *             (defaults to the original hard-coded Windows path).
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("_7DistributeMatrixRowMatrix")
      .setMaster("local[*]")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")
    try {
      // Allow the data path to be supplied on the command line; fall back to
      // the original hard-coded path for backward compatibility.
      val dataPath = args.headOption.getOrElse("D:\\test\\data\\RowMatrix.txt")
      val rdd: RDD[String] = sc.textFile(dataPath)
      rdd.foreach(println)
      // Parse each line like "1.0 2.0 3.0" directly into a dense MLlib vector.
      val rows: RDD[linalg.Vector] =
        rdd.map(line => Vectors.dense(line.split(" ").map(_.toDouble)))
      // Column-wise statistics: max / min / mean / non-zero counts / row count.
      val summary: MultivariateStatisticalSummary = Statistics.colStats(rows)
      rows.foreach(println)
      // RowMatrix is the row-oriented distributed matrix; it requires RDD[Vector].
      val matrix = new RowMatrix(rows)
      // Fix: the original printed (label, value) tuples — e.g. "(matix rows:,4)";
      // use string interpolation (and correct the "matix" typo).
      println(s"matrix rows: ${matrix.numRows()}")
      println(s"matrix cols: ${matrix.numCols()}")
      println(s"max: ${summary.max}")
      println(s"min: ${summary.min}")
      println(s"mean: ${summary.mean}")
      println(s"non zeros: ${summary.numNonzeros}")
      println(s"count: ${summary.count}")
    } finally {
      // Fix: the original never stopped the SparkSession, leaking the local
      // Spark context and its threads on every run.
      spark.stop()
    }
  }
}
