package Example

import org.apache.spark.sql.SparkSession

object svm_model {

  /**
   * Builds an SVM training set for TV-subscriber churn prediction from Hive tables.
   *
   * Steps:
   *  1. Engineer three per-user features: average monthly spend (`consume`),
   *     subscription tenure in years (`join_time`), and average daily viewing
   *     hours (`count_duration`).
   *  2. Label users: churned accounts (voluntarily closed/suspended) get label 0,
   *     active non-churned accounts get label 1.
   *  3. Convert the labelled rows to MLlib `LabeledPoint`s ready for SVM training.
   *
   * @param args optional CLI arguments; `args(0)` may override the reference
   *             timestamp (default `"2018-08-01 00:00:00"`) anchoring the
   *             one-month viewing window used for the activity label.
   */
  def main(args: Array[String]): Unit = {
    // Reference timestamp for the one-month viewing window. Overridable via the
    // first CLI argument so the job can be re-run on other data snapshots;
    // omitting args reproduces the original hard-coded behaviour exactly.
    val referenceDate = if (args.nonEmpty) args(0) else "2018-08-01 00:00:00"

    val spark = SparkSession
      .builder()
      .master("local[*]") // NOTE(review): hard-coded local master; prefer supplying via spark-submit for cluster runs
      .appName("Data Explore")
      // Hive connection parameters
      .config("hive.metastore.uris", "thrift://master:9083")
      .config("spark.sql.warehouse.dir", "hdfs://master:8020/user/hive/warehouse")
      .enableHiveSupport()
      .getOrCreate()

    try {
      // ---- Feature engineering -------------------------------------------

      // Feature C: average monthly spend over the 3-month billing window.
      val billevents = spark.sql("select phone_no,sum(should_pay-favour_fee)/3 as consume " +
        "from portrait.mmconsume_billevents_process where sm_name not like '%珠江宽频%' group by phone_no")
        .na.fill(0)

      // Subscription tenure in years: max(current date - run_time) / 12 months.
      val userevents = spark.sql("select phone_no,max(months_between(current_date(),run_time)/12) join_time " +
        "from portrait.mediamatch_userevent_process group by phone_no")
        .na.fill(0)

      // Feature M: average daily viewing hours per user. `count(90)` counts
      // non-null occurrences of the literal 90, i.e. the row count (same as
      // count(*)); kept verbatim to avoid touching the query text.
      val media_index = spark.sql("select phone_no,(sum(duration)/(1000*60*60))/count(90) as count_duration " +
        "from portrait.media_index_process media group by phone_no")
        .na.fill(0)

      // Combine the three features on phone_no (inner joins).
      val join1 = billevents.join(userevents, "phone_no").join(media_index, "phone_no")

      // ---- TV-activity label ---------------------------------------------

      // Start with every known user tagged 0.
      val msg = spark.sql("select distinct phone_no,0 as col1 from portrait.mediamatch_usermsg_process")
      // Keep only users holding a normal, genuinely paid TV offer (drop free
      // trials, promotions, broadband-only offers, etc.).
      val orderIndexTV = spark.sql("select * from portrait.order_index_process " +
        "where run_name='正常' and offername!='废'  and offername!='赠送' and offername!='免费体验' and offername!='提速' and offername!='提价' and offername!='转网优惠' and offername!='虚拟' and offername!='空包' and offername not like '%宽带%'")
        .select("phone_no").distinct()

      // Total viewing time per user over the month preceding the reference date.
      val mediaIndex = spark.sql("select phone_no,sum(duration) as total_one_month_seconds " +
        s"from portrait.media_index_process where origin_time>=add_months('$referenceDate',-1) group by phone_no")
      // Users with a valid TV order and more than 5.2 viewing hours in that
      // month are considered active: tag them 1.
      val activate_join2 = mediaIndex.join(orderIndexTV, Seq("phone_no"), "inner")
        .filter("total_one_month_seconds/(1000*60*60)>5.2")
        .selectExpr("phone_no", "1 as col2").distinct()
      // Left-join the all-zero table with the active-user table; na.fill turns
      // the missing col2 of inactive users into 0, then col2 is renamed col1.
      val activate_join3 = msg.join(activate_join2, Seq("phone_no"), "left_outer")
        .na.fill(0).selectExpr("phone_no", "col2 as col1")

      // ---- Training labels ------------------------------------------------

      // Churned users: voluntarily closed ('主动销户') or suspended ('主动暂停') accounts.
      val usermsg = spark.sql("select * from portrait.mediamatch_usermsg_process where  run_name ='主动销户' or run_name='主动暂停' ")

      // Label churned users 0. `consume * 0` (rather than a literal 0) keeps
      // the label column DoubleType, which getDouble(4) below depends on.
      val features_join0 = usermsg.join(join1, Seq("phone_no"), "left_outer")
        .select("phone_no", "consume", "join_time", "count_duration").na.fill(0)
      val leave_user = features_join0.withColumn("label", features_join0("consume") * 0)

      // Non-churned users: those tagged active above, labelled 1 (again via
      // `consume * 0 + 1` to stay DoubleType).
      val activate_user = activate_join3.where("col1=1")
      val join3 = activate_user.join(join1, Seq("phone_no"), "left_outer")
        .select("phone_no", "consume", "join_time", "count_duration").na.fill(0)
      val no_leave_user = join3.withColumn("label", join3("consume") * 0 + 1)

      // Stack churned and active rows into one training table.
      // NOTE(review): union does not deduplicate — a phone_no present in both
      // populations would appear twice with conflicting labels; confirm the
      // two sets are disjoint upstream.
      val union_data = leave_user.union(no_leave_user)

      // The classifier needs RDD[LabeledPoint]. Column order is fixed by the
      // selects above: (phone_no, consume, join_time, count_duration, label).
      import org.apache.spark.mllib.linalg.Vectors
      import org.apache.spark.mllib.regression.LabeledPoint

      val trainData = union_data.rdd.map(x => LabeledPoint(x.getDouble(4), Vectors.dense(x.getDouble(1), x.getDouble(2), x.getDouble(3)))).cache()
      trainData.take(5) // sanity action: forces evaluation of a few rows

      /* Remaining pipeline, kept disabled as in the original. If re-enabled,
       * the schema section additionally needs
       * `import org.apache.spark.sql.types._`.
       *
       * // Feature standardisation
       * import org.apache.spark.mllib.feature.StandardScaler
       *
       * val features_data = trainData.map(x =>x.features)
       * val scaler = new  StandardScaler(withMean=true,withStd=true).fit(features_data)
       * val new_features = scaler.transform(features_data)
       * val data = trainData.map(x => x.label).zip(new_features).map(x => LabeledPoint(x._1,x._2)).cache()
       *
       * // Split into training and validation sets
       * val split = data.distinct().randomSplit(Array(0.8,0.2))
       * val train_data = split(0).cache()
       * val validate_data = split(1).cache()
       *
       * // Train the model
       * import org.apache.spark.mllib.classification.SVMWithSGD
       * val model = SVMWithSGD.train(train_data,100,10,0.01,1.0)
       *
       * // Predict on the validation set
       * val predict = validate_data.map(x =>(x.features,model.predict(x.features)))
       *
       * // Model evaluation: compare predictions against the validation set's
       * // original labels.
       * val original = validate_data.map(x =>(x.features,x.label))
       * // Zip predictions with originals and keep just the two label columns.
       * val predict_original = predict.zip(original).map(x=>((x._1._2,x._2._2)))
       * val testCorrectRate = predict_original.filter(x => x._1 == x._2).count().toFloat / validate_data.count().toFloat
       * println("准确率："+testCorrectRate)
       *
       * // Build the scoring dataset and predict a label for every user.
       * import org.apache.spark.sql.Row
       * val test_data = join1.select("phone_no").rdd.zip(join1.select("consume","join_time","count_duration").rdd)
       * .map(x=>(x._1.getString(0),Vectors.dense(x._2.getDouble(0),x._2.getDouble(1),x._2.getDouble(2)))).cache()
       * val predict_data = test_data.map(x=>Row(x._1.toString,x._2(0).toFloat,x._2(1).toFloat,x._2(2).toFloat,model.predict(x._2).toInt))
       * val schema = StructType(List(
       * StructField("phone_no",StringType,true),
       * StructField("consume", FloatType, true),
       * StructField("join_time", FloatType, true),
       * StructField("count_duration", FloatType, true),
       * StructField("label",IntegerType,true)))
       * val result = spark.createDataFrame(predict_data,schema)
       * result.write.mode("overwrite").saveAsTable("portrait.svmModel")
       */
    } finally {
      // Always release the session (and its cluster resources), even on failure.
      spark.stop()
    }
  }
}
