package ApplicationStart.SparkService

import java.util

import ApplicationStart.Domain.{UserCredit, UserCreditRepository}
import org.apache.spark.rdd.RDD
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.stereotype.Service

@Service
class KMeans {

  // Injected wrapper that owns the SparkContext (accessed via getSC below).
  @Autowired
  var sparkService: SparkService = _

  // Spring Data repository for the user_credit table.
  @Autowired
  var userCreditRepository: UserCreditRepository = _

  /**
    * Loads the user-credit rows whose ids lie between `lowId` and `bestId`
    * and parallelizes them into an RDD.
    *
    * NOTE(review): ids are narrowed with `.toInt` before the repository call,
    * so values beyond Int range will silently wrap — confirm id domain.
    *
    * @param lowId  lower id bound (inclusivity depends on the repository's
    *               `getAllByIdBetween` semantics — TODO confirm)
    * @param bestId upper id bound
    * @return RDD of the matching [[UserCredit]] rows
    */
  def SQLRequestByIds(lowId: Long, bestId: Long): RDD[UserCredit] = {
    // Explicit JavaConverters replaces the deprecated implicit JavaConversions.
    import scala.collection.JavaConverters._
    val userCredits = userCreditRepository
      .getAllByIdBetween(lowId.toInt, bestId.toInt)
      .asScala
      .toSeq
    sparkService.getSC.parallelize(userCredits)
  }

  /**
    * Groups the user-credit rows with ids in 15..200 by their user id.
    *
    * NOTE(review): the `userId` parameter is never used — the id range is
    * hard-coded. Kept as-is to preserve the public signature; confirm intent
    * with the caller.
    *
    * @param userId unused (see note above)
    * @return RDD of (userId, rows-for-that-user) pairs
    */
  def thoughtUserIdGroup(userId: String): RDD[(Int, Iterable[UserCredit])] = {
    SQLRequestByIds(15, 200).groupBy(_.getUserId)
  }

  /**
    * Counts the records in an RDD.
    *
    * Uses the distributed `count()` instead of `collect().length`, which
    * shipped every element to the driver just to measure its length and
    * could OOM on large datasets.
    *
    * @param rDD the RDD whose records are counted
    * @tparam T element type of the RDD
    * @return number of records
    */
  def countRecode[T](rDD: RDD[T]): Long = {
    rDD.count()
  }

  /**
    * Reads the MovieLens `u.user` file from HDFS and returns every user's
    * age (field index 1 of the '|'-separated record) as a boxed Java list.
    *
    * NOTE(review): assumes the file exists at the hard-coded HDFS path and
    * that field 1 is always a parseable integer — malformed lines will throw.
    *
    * @return ages of all users, in file order
    */
  def ageDistribute(): java.util.List[Integer] = {
    // Explicit asJava replaces the deprecated implicit JavaConversions.
    import scala.collection.JavaConverters._
    //val user_data = sparkService.getSC.textFile("springdeom-api\\src\\main\\resources\\ml-100k\\u.user")
    val user_data = sparkService.getSC.textFile("hdfs://master:9000/input/springSwagger3Deom/u.user")
    // split('|')(1) indexes the array directly; the old toList(1) built an
    // intermediate List only to call apply(1).
    val user_field = user_data.map(line => Integer.valueOf(line.split('|')(1).toInt)).cache()
    user_field.collect().toList.asJava
  }
}
