/*
 * Uses Spark to merge every file under a directory into a single line.
 */
package spark.example

import org.apache.spark._
import SparkContext._

import java.io.PrintWriter
import java.io.File
import scala.reflect.io.Directory

object mergeFile {
    /** Recursively enumerates all regular files under `dir`, depth-first.
      *
      * `File.listFiles()` returns `null` when `dir` is not a readable
      * directory, so the result is wrapped in `Option` to avoid a
      * NullPointerException; the listing is also done once instead of twice.
      */
    def subDir(dir: File): Iterator[File] = {
        val entries = Option(dir.listFiles()).getOrElse(Array.empty[File])
        val (dirs, files) = entries.partition(_.isDirectory())
        files.iterator ++ dirs.iterator.flatMap(subDir _)
    }

    /** Prefixes `line` with a tab-separated index.
      *
      * The original body kept a *local* `var` initialized to 1000 and
      * incremented it, so every call could only ever produce `"1001\t" + line`.
      * The index is now a parameter whose default preserves that historical
      * output, while callers may supply a real, advancing line number.
      *
      * @param line  the text to format
      * @param index the line number to prepend (defaults to the legacy 1001)
      * @return `index + "\t" + line`
      */
    def print_format(line: String, index: Int = 1001): String =
        index.toString + "\t" + line

    /** Reads every `*.txt` file under the input directory, merges all lines
      * into one space-separated line, and writes it back out through Spark.
      */
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("mergeFile")
        val sc = new SparkContext(conf)
        try {
            // Collect all input lines to the driver so the total count `n`
            // is known before the aggregation below.
            val input1 = sc.textFile("file:///data/nlp_3qi/input_tfidf_dir/*.txt").collect()

            val n = input1.length
            val numSlices = 1 // single partition keeps zipWithIndex order stable
            // Group boundary: index % n == 0 starts a new group. Since n is the
            // total line count, only index 0 matches, so all lines accumulate
            // (prepended, hence the final reverse) into a single group that is
            // joined with spaces. On empty input seqOp is never invoked, so
            // `index % n` is never evaluated and no division by zero occurs.
            val merged = sc.parallelize(input1, numSlices).zipWithIndex().aggregate(List.empty[List[String]])(
                seqOp = (acc, lineWithIndex) => lineWithIndex match {
                    case (line, index) =>
                        if (index % n == 0) List(line) :: acc
                        else (line :: acc.head) :: acc.tail
                },
                combOp = (left, right) => left ::: right
            ).map(_.reverse.mkString(" "))

            sc.parallelize(merged).saveAsTextFile("/spark_mergeFile_output")
        } finally {
            sc.stop() // release the SparkContext even if the job fails
        }
    }
}