/*
Merges all lines of an input file into a single line using Spark,
prefixing the merged line with a fixed row number.
*/
package spark.example

import org.apache.spark._
import SparkContext._

object mergeFile {
    /**
     * Reads a text file (args(0)), merges all of its lines into a single
     * space-joined line, prefixes it with a fixed row id (1001) and a tab,
     * and writes the result to args(1) via saveAsTextFile.
     */
    def main(args: Array[String]): Unit = {
        // Fail fast with a usable message instead of an opaque
        // ArrayIndexOutOfBoundsException on missing arguments.
        require(args.length >= 2, "usage: mergeFile <input-path> <output-path>")

        val conf = new SparkConf().setAppName("mergeFile")
        val sc = new SparkContext(conf)
        try {
            val output = args(1) // args(1) is already a String; no toString needed
            val input = sc.textFile(args(0)).collect()
            // Lines folded into each merged row; using the file length merges
            // everything into one row.
            val n = input.length
            val rowId = 1001
            // Original computed input.length / n, which is always 1 for a
            // non-empty file but threw ArithmeticException (0 / 0) on an empty
            // one. Guard the empty case explicitly.
            val numSlices = if (n == 0) 1 else input.length / n
            val merged = sc.parallelize(input, numSlices)
              .zipWithIndex()
              .aggregate(List.empty[List[String]])(
                seqOp = (acc, lineWithIndex) => lineWithIndex match {
                  // `idx` (renamed from `index`) no longer shadows rowId.
                  // Start a new group every n lines; otherwise prepend to the
                  // current group. Groups are built reversed and flipped below.
                  case (line, idx) =>
                    if (idx % n == 0) List(line) :: acc
                    else (line :: acc.head) :: acc.tail
                },
                combOp = (left, right) => left ::: right
              )
              .map(_.reverse mkString " ")
            sc.parallelize(merged)
              .map(row => rowId.toString + "\t" + row)
              .saveAsTextFile(output)
        } finally {
            // Always release the SparkContext, even if the job fails.
            sc.stop()
        }
    }
}

/*
Location of mergeFile.scala: /data/project/spark/spark_workstation/src/main/scala/spark/example
Shell script used to submit the job to the Spark cluster:
[root@master spark_workstation]# pwd
/data/project/spark/spark_workstation
[root@master spark_workstation]# cat run_mergefile.sh
#!/bin/bash
/data/hadoop/bin/hadoop fs -rm -r -skipTrash /spark_mergefile_output
cd /data/project/spark/spark_workstation
/data/sbt/bin/sbt compile && /data/sbt/bin/sbt package && \
/data/spark/bin/spark-submit --master yarn-cluster \
        --num-executors 2 \
        --executor-memory '1024m' \
        --executor-cores 1 \
        --class spark.example.mergeFile ./target/scala-2.11/mergefile_2.11-2.2.1.jar \
    hdfs://master:9000/spark_ReadFile_input/592it.seg.cln.txt \
    hdfs://master:9000/spark_mergefile_output
*/