package binaryclass

import org.apache.log4j.{Level, Logger}
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.mllib.tree.configuration.Algo
import org.apache.spark.mllib.tree.impurity.{Gini, Entropy}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.classification.{LogisticRegressionWithLBFGS, SVMWithSGD}
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.mllib.optimization.{SquaredL2Updater, L1Updater}

import scala.reflect.internal.util.Statistics


object BinaryClass {

  /** Learning algorithms selectable via [[Params.algorithm]]: SVM or logistic regression. */
  object Algorithm extends Enumeration {
    type Algorithm = Value
    val SVM = Value // id 0
    val LR = Value  // id 1
  }

  /** Regularization flavours selectable via [[Params.regType]]: L1 (lasso) or L2 (ridge). */
  object RegType extends Enumeration {
    type RegType = Value
    val L1 = Value // id 0
    val L2 = Value // id 1
  }

  import Algorithm._
  import RegType._

  /**
   * Hyper-parameters for a training run.
   *
   * NOTE(review): `input` defaults to "LR", which looks like a copy-paste
   * slip (it reads like an algorithm name, not a path), and it is never
   * read in `main` — the data path there is hard-coded. Confirm intent.
   *
   * `stepSize` only affects the SVM branch (SGD); LBFGS ignores it.
   */
  case class Params(
                     input: String = "LR",
                     numIterations: Int = 50,
                     stepSize: Double = 1.0,
                     algorithm: Algorithm = LR,
                     regType: RegType = L2,
                     regParam: Double = 0.01)

  /**
   * Entry point: loads a CSV of 3G→4G subscriber records, one-hot encodes the
   * categorical columns, log(+1)-transforms the numeric columns (imputing
   * column means for blanks), standardizes features, trains LR or SVM
   * (per [[Params]]), and prints confusion matrices over a small sweep of
   * decision thresholds.
   *
   * NOTE(review): procedure syntax (`def main(...) { ... }`) is deprecated;
   * prefer `def main(args: Array[String]): Unit = { ... }`.
   */
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("BinaryClassification")
    val sc = new SparkContext(conf)

    // Hard-coded input path; Params.input is ignored — TODO confirm intended.
    val path = "/home/cuidong/spark/spark-1.4/data/mytest/gxxy/3gto4g.csv"
    // Drop the first CSV column (tail), then keep rows with exactly 19 remaining
    // fields passing the filters below. Column semantics are not visible here —
    // presumably x(0)/x(1) are status flags and x(4)/x(7) usage metrics; verify.
    // NOTE(review): x(4)/x(7) toDouble will throw on blank fields; upstream data
    // is presumably clean enough — confirm.
    val data_org = sc.textFile(path).map(_.split(",").tail).filter(x => x.length == 19 && x(0).trim != "1" &&
      x(1) != "1" && x(4).trim.toDouble > 3 && x(7).toDouble > 0.0)
    data_org.cache()
    val data_count = data_org.count()
    // Positive examples: label column is x(18).
    val data_1_count = data_org.filter(x=>x(18).toDouble == 1.0).count()
    //    val data_f1 = data_org.filter{x=>
    //      x.forall(!_.trim().isEmpty())
    //    }
    //    val notNull_count = data_f1.count()
    println(s"总共条数：$data_count")
    println(s"正例条数：$data_1_count 比例 ${data_1_count * 100.0/data_count}")
    //    println(s"不含有非空字段的条数：$notNull_count")

    // Category -> index dictionaries for the one-hot encodings below.
    // Each is a driver-side map (collectAsMap), so the distinct value sets
    // are assumed small.
    val produc_base_class_dict = data_org.map(x => x(2)).distinct().zipWithIndex().collectAsMap()
    val month_fee_dict = data_org.map(x => x(3)).distinct().zipWithIndex().collectAsMap()
    val agree_type_dict = data_org.map(x => x(6)).distinct().zipWithIndex().collectAsMap()
    //        val manu_name_dict=data_org.map(x=>x(15)).distinct().zipWithIndex().collectAsMap()
    //        val model_name_dict=data_org.map(x=>x(16)).distinct().zipWithIndex().collectAsMap()
    val net_type_dict = data_org.map(x => x(17)).distinct().zipWithIndex().collectAsMap()
    Logger.getRootLogger.setLevel(Level.WARN)

    // Decision tree (whole experiment disabled below, kept for reference)
/*
    // manu_name and model_name are dropped — too many distinct categories
    val data_lp_dt = data_org.map { x =>
      val product = produc_base_class_dict(x(2)).toDouble
      val agree = agree_type_dict(x(6)).toDouble
      val net_type = net_type_dict(x(17)).toDouble
/*
      val other_features = List(x(3), x(4), x(5), x(7), x(8), x(9), x(10), x(11), x(12), x(13), x(14)).map {
        d =>
          val dd = d.trim()
          if (dd.isEmpty) 0.0 else dd.toDouble
      }
      val all_features = product :: agree :: net_type :: other_features
*/

      val other_features = List(x(3), x(9), x(10), x(12), x(14)).map {
        d =>
          val dd = d.trim()
          if (dd.isEmpty) 0.0 else dd.toDouble
      }
      val all_features =net_type :: other_features
      val lable = x(18).trim.toDouble
      LabeledPoint(lable, Vectors.dense(all_features.toArray))
    }

    // Downsample negatives to ~10% to rebalance classes.
    val data_lp_dt_true = data_lp_dt.filter(x => x.label == 1.0)
    val data_lp_dt_false = data_lp_dt.filter(x => x.label != 1.0).randomSplit(Array(0.1, 0.9))(0)

    val data_sample_dt = data_lp_dt_true.union(data_lp_dt_false)

    //    data_lp.cache()

    //    data_lp.unpersist(blocking = false)
    val splits_dt = data_sample_dt.randomSplit(Array(0.8, 0.2))
    val training_dt = splits_dt(0).cache()
    val test_dt = splits_dt(1).cache()

    val numTrainingDt = training_dt.count()
    val numTestDt = test_dt.count()
    println(s"Training: $numTrainingDt, test: $numTestDt.")
    Seq(5, 10, 15,20,30).map { treeDepth =>
      val model = DecisionTree.train(training_dt, Algo.Classification, Gini, treeDepth)
      val prediction = model.predict(training_dt.map(_.features))
      val predictionAndLabel = prediction.zip(training_dt.map(_.label))

      val numTp = predictionAndLabel.filter(x => x._1 == x._2 && x._2 == 1.0).count()
      val numFn = predictionAndLabel.filter(x => x._1 != x._2 && x._2 == 1.0).count()
      // NOTE(review): the two predicates below look swapped —
      // (pred == label && label == 0.0) counts TRUE negatives, not false
      // positives, and vice versa. Fix if this block is ever re-enabled.
      val numFp = predictionAndLabel.filter(x => x._1 == x._2 && x._2 == 0.0).count()
      val numTn = predictionAndLabel.filter(x => x._1 != x._2 && x._2 == 0.0).count()

      prediction.distinct().collect().map(println(_))

      val metrics = new BinaryClassificationMetrics(predictionAndLabel)
      println("浑浊矩阵：")
      println(s"${numTp}    ${numFn}")
      println(s"${numFp}    ${numTn}")
      println(s"Accuracy: ${(numTp + numTn) * 100.0 / numTrainingDt}")
      println(s"Test areaUnderPR = ${metrics.areaUnderPR()}.")
      println(s"Test areaUnderROC = ${metrics.areaUnderROC()}.")
    }
*/
    // Logistic regression and SVM

    val prodct_len = produc_base_class_dict.size
    val month_fee_len = month_fee_dict.size
    val agree_len = agree_type_dict.size
    val net_type_len = net_type_dict.size

//    import org.apache.spark.mllib.linalg.{Vector => LV}
    // Local import shadows the unused scala.reflect Statistics imported at the
    // top of the file — the top-level import should probably be removed.
    import org.apache.spark.mllib.stat.{MultivariateStatisticalSummary, Statistics}

    // Column statistics over the 5 numeric columns (4, 5, 8, 12, 13), skipping
    // rows with any blank among them; the means are used to impute blanks below.
    val data_tmp=data_org.map{x=>
      Array(x(4),x(5),x(8),x(12),x(13))
    }.filter{y=>y.forall(!_.isEmpty)}.map{z=>
      z.map(_.toDouble)
    }.map{x=>Vectors.dense(x)}
    import org.apache.spark.mllib.linalg.distributed.RowMatrix
//    val rm=new RowMatrix(data_tmp)
//    val summary=rm.computeColumnSummaryStatistics()
val summary = Statistics.colStats(data_tmp)
    print(summary.min)
//    print(summary.numNonzeros)
    // mean has exactly 5 entries (one per column of data_tmp).
    val mean=summary.mean.toArray

    val data_lp=data_org.map { x =>
      val product =  produc_base_class_dict(x(2)).toInt
      val month_fee=month_fee_dict(x(3)).toInt
      val agree= agree_type_dict(x(6)).toInt
      val net_type = net_type_dict(x(17)).toInt

      // One-hot encodings driven by the dictionaries built above.
      val product_feature=Array.ofDim[Double](prodct_len)
      product_feature(product) = 1.0
      val month_fee_feature=Array.ofDim[Double](month_fee_len)
      month_fee_feature(month_fee) = 1.0
      val agree_feature=Array.ofDim[Double](agree_len)
      agree_feature(agree) = 1.0
      val net_type_feature = Array.ofDim[Double](net_type_len)
      net_type_feature(net_type) = 1.0

      import scala.math.{log => sc_log}

//      val other_f = Array( sc_log(x(4).toDouble+1), sc_log(x(5).toDouble+1), sc_log(x(8).toDouble+1), sc_log(x(12).toDouble+1),
//        sc_log(x(13).toDouble+1), sc_log(x(14).toDouble +1))
      // log(v+1) transform; blank fields are imputed with log(columnMean+1).
      // NOTE(review): 6 columns are listed here but `mean` has only 5 entries,
      // so zip truncates and x(14) is SILENTLY DROPPED from the features.
      // Either add x(14) to data_tmp above or remove it here — likely a bug.
      val other_f = Array( x(4), x(5), x(8), x(12), x(13), x(14))
        .zip(mean).map{
        case (d,x) =>
          val dd=d.trim()
          if (dd.isEmpty) sc_log(x+1) else sc_log(dd.toDouble +1)
      }
      // NOTE(review): `max` is dead code — the capping logic below is disabled.
      val max = Array(20000.0,20000.0,2000.0,20.0,200.0)
//      val other_features = other_f.zip(max).map{
//        case (d,m) =>
//          if(d > m) d else m
//      }
      val all_features= other_f ++ product_feature++month_fee_feature ++ agree_feature ++ net_type_feature
      val lable = x(18).trim.toDouble
      LabeledPoint(lable, Vectors.dense(all_features))
    }.filter{x=>
      // NOTE(review): features(2..4) are already log(+1)-transformed, yet these
      // thresholds (2000, 20, 200) look like raw-value cut-offs — the filter is
      // effectively a no-op on log-scale values. Confirm intended scale.
      x.features(2) < 2000.0 && x.features(3) < 20.0 && x.features(4) <200
    }

//    val data_lp_true=data_lp.filter(x=>x.label == 1.0)
//    val data_lp_false=data_lp.filter(x=>x.label != 1.0).randomSplit(Array(0.3,0.7))(0)
//    val data_sample=data_lp_true.union(data_lp_false)

    val params = Params()
    // Unit-variance scaling only (withMean = false keeps the vectors sparse-safe).
    val scaler = new StandardScaler(withMean = false,withStd = true).fit(data_lp.map(x=>x.features))
    val data_scaler = data_lp.map(lp=>LabeledPoint(lp.label,scaler.transform(lp.features)))

    val splits = data_scaler.randomSplit(Array(0.8, 0.2))
    val training = splits(0).cache()
    val test = splits(1).cache()

    val numTraining = training.count()
    val numTest = test.count()
    println(s"Training: $numTraining, test: $numTest.")

    val updater = params.regType match {
      case L1 => new L1Updater()
      case L2 => new SquaredL2Updater()
    }

      // clearThreshold() makes predict() return raw scores/probabilities, so
      // the explicit threshold sweep below is meaningful.
      val model = params.algorithm match {
        case LR =>
          val algorithm = new LogisticRegressionWithLBFGS()
          algorithm.optimizer.setNumIterations(params.numIterations).setUpdater(updater).setRegParam(params.regParam)
          algorithm.run(training).clearThreshold()
        case SVM =>
          val algorithm = new SVMWithSGD()
          algorithm.optimizer.setNumIterations(params.numIterations).setStepSize(params.stepSize).setUpdater(updater).setRegParam(params.regParam)
          algorithm.run(training).clearThreshold()
      }

      val prediction = model.predict(test.map(_.features))
      val predictionAndLabel = prediction.zip(test.map(_.label))
    predictionAndLabel.take(10).foreach(println(_))
    // Sweep decision thresholds 0.09, 0.10, 0.11, 0.12 and report the
    // confusion matrix plus recall/precision for each.
    Range(9,13,1).map {x=>
      val splitPoint=x/100.0
//      val correct = predictionAndLabel.map(x => if ((x._1 > splitPoint && x._2 == 1.0) || (x._1 <= splitPoint && x._2 == 0.0)) 1 else 0).sum()
      val numTp = predictionAndLabel.filter(x => x._1 > splitPoint && x._2 == 1.0).count()
      val numFn = predictionAndLabel.filter(x => x._1 <= splitPoint && x._2 == 1.0).count()
      val numFp = predictionAndLabel.filter(x => x._1 > splitPoint && x._2 == 0.0).count()
      val numTn = predictionAndLabel.filter(x => x._1 <= splitPoint && x._2 == 0.0).count()

      // NOTE(review): metrics does not depend on splitPoint — it could be
      // hoisted out of the loop to avoid recomputing PR/ROC four times.
      // Also: "浑浊矩阵" in the output below is presumably a typo for
      // "混淆矩阵" (confusion matrix) — left unchanged here as it is a
      // runtime string.
      val metrics = new BinaryClassificationMetrics(predictionAndLabel)
      println(s"浑浊矩阵：$splitPoint")
      println(s"${numTp}    ${numFn}")
      println(s"${numFp}    ${numTn}")
      println(s"覆盖率：${numTp*100.0/(numTp+numFn)}")
      println(s"准确率 ：${numTp*100.0/(numTp+numFp)}")
//      println(s"Accuracy: ${correct * 100.0 / numTest}")
      println(s"Accuracy: ${(numTp + numTn) * 100.0 / (numTp + numFn + numFp + numTn)}")
      println(s"Test areaUnderPR = ${metrics.areaUnderPR()}.")
      println(s"Test areaUnderROC = ${metrics.areaUnderROC()}.")
    }

    println("所有训练都已完成")
    sc.stop()
  }
}