package com.bonc.tjbank

import org.apache.log4j.{Level, Logger}
import org.apache.spark.examples.mllib.AbstractParams
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.{StructField, StringType, StructType}
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql._
import org.apache.spark.sql.types._

/**
 * Command-line parameters for the tjbank job.
 *
 * @param input    input path(s) of the tab-separated edge file; `null` default is
 *                 only a scopt placeholder — the option is declared `.required()`
 * @param output   output path to save results; also `.required()` in the parser
 * @param coalesce when true, merge the result into a single output file
 */
case class Params(
                   input: String = null,
                   output: String = null,
                   coalesce: Boolean = true
                   ) extends AbstractParams[Params]

object bank {

  /** Entry point: parse CLI options into [[Params]] and run the RDD-based job. */
  def main(args: Array[String]) {
    val defaultParams = Params()

    val parser = new OptionParser[Params]("tjbank") {
      head("tjbank: an example app for binary tjbank.")
      // Typo fix: "但文件" -> "单文件" (merge into a single file).
      opt[Boolean]("coalesce")
        .text("是否归并为单文件")
        .action((x, c) => c.copy(coalesce = x))
      opt[String]("input")
        .required()
        .text("input paths")
        .action((x, c) => c.copy(input = x))
      opt[String]("output")
        .required()
        .text("output paths to save")
        .action((x, c) => c.copy(output = x))
      note(
        """
          |For example, the following command runs this app on a synthetic dataset:
          |
          | bin/spark-submit --class com.bonc.tjbank \
          |  tjbank.jar \
          |  --input data/zgx.txt data --output data/out
        """.stripMargin)
    }

    parser.parse(args, defaultParams)
      .map(run)
      .getOrElse(sys.exit(1))
  }

  /**
   * RDD implementation.
   *
   * Reads tab-separated edges `(col0, col1)` and iteratively removes pairs whose
   * key never appears as a value on the other side, alternating direction once a
   * fixed point is reached (the `isRevert` swap). Terminates when the record
   * count is stable in both directions. Writes the surviving pairs as
   * tab-separated lines.
   */
  def run(params: Params) {
    val conf = new SparkConf().setAppName(s"tjbank with $params")
    val sc = new SparkContext(conf)
    Logger.getRootLogger.setLevel(Level.WARN)

    try {
      val data = sc.textFile(params.input)
      val fields = data.map(_.split("\t"))
      // NOTE(review): checkpointing was disabled; the loop below grows the RDD
      // lineage each iteration, which can become slow/deep on many iterations.
      // sc.setCheckpointDir("/tmp/")
      // Pairs keyed by the SECOND input column: (col1, col0).
      var tmp = fields.map(x => (x(1), x(0)))
      var loops = 0
      var isRevert = false
      var count = tmp.count()
      var loop = true
      while (loop) {
        loops += 1
        // Keys that also occur as values; joining keeps only those pairs.
        val tmp2 = tmp.map(x => (x._2, 1)).distinct()
        val j = tmp.join(tmp2)
        tmp = j.mapValues(_._1)
        val count2 = tmp.count()
        if (count == count2) {
          if (isRevert) {
            // Stable in both directions: done.
            loop = false
          } else {
            // Fixed point in this direction; swap key/value and prune the other way.
            isRevert = true
            tmp = tmp.map(_.swap)
          }
        } else {
          count = count2
        }
      }
      println(s"循环次数：${loops}")
      println(s"最后的记录数：${count}")
      // BUG FIX: the non-coalesce branch previously wrote Scala tuple toString
      // "(a,b)"; both branches now emit the same tab-separated format.
      val lines = tmp.map(x => s"${x._1}\t${x._2}")
      if (params.coalesce) {
        lines.coalesce(1).saveAsTextFile(params.output)
      } else {
        lines.saveAsTextFile(params.output)
      }
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }

  /**
   * DataFrame implementation of the same pruning loop (kept for comparison;
   * the author measured it markedly slower than the RDD version).
   */
  def runByDf(params: Params) {
    val conf = new SparkConf().setAppName(s"tjbank with $params")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    Logger.getRootLogger.setLevel(Level.WARN)

    try {
      val data = sc.textFile(params.input)
      val fields = data.map(_.split("\t")).map(p => Row(p(0), p(1)))
      val schema =
        StructType(
          StructField("x", StringType, true) :: StructField("y", StringType, true) :: Nil)

      var tmp = sqlContext.createDataFrame(fields, schema)
      var loops = 0
      var isRevert = false
      var count = tmp.count()
      var loop = true
      while (loop) {
        loops += 1
        // Distinct values of column y, renamed to x so the join matches on it.
        val tmp2 = tmp.select("y").distinct().toDF("x")
        val j = tmp.join(tmp2, "x")
        tmp = j
        val count2 = tmp.count()
        if (count == count2) {
          if (isRevert) {
            loop = false
          } else {
            // Swap column roles by renaming, mirroring the RDD version's swap.
            isRevert = true
            tmp = tmp.toDF("y", "x")
          }
        } else {
          count = count2
        }
      }
      println(s"循环次数：${loops}")
      println(s"最后的记录数：${count}")
      // BUG FIX: selectExpr() with no arguments projects zero columns, and
      // DataFrameWriter.text requires exactly one string column — both write
      // paths failed. Concatenate the two columns into one tab-separated
      // string column before writing.
      val lines = tmp.selectExpr("concat(x, '\t', y)")
      if (params.coalesce) {
        lines.coalesce(1).write.text(params.output)
      } else {
        lines.write.text(params.output)
      }
    } finally {
      sc.stop()
    }
  }
}
}