package com.shujia.hbase;

import java.io.*;
import java.util.ArrayList;

public class SplitFIle {

    /**
     * Splits a large text file into {@code fileNum} roughly equal parts
     * (analogous to how data is block-split when stored on HDFS) and writes an
     * index file recording the first/last row key of each part.
     *
     * <p>Row keys are taken from the first comma-separated column of each line.
     * Output part files go to {@code hadoop/data/index/<n>}, and the index is
     * persisted to {@code hadoop/data/indexdata.txt} as lines of
     * {@code fileNo,startId,endId}.
     *
     * @param args unused
     * @throws Exception if any of the files cannot be read or written
     */
    public static void main(String[] args) throws Exception {

        long start = System.currentTimeMillis();

        // Number of output files to split into.
        int fileNum = 10;

        File file = new File("hadoop/data/students.txt");

        // Total line count; each output file receives about totalLines / fileNum
        // lines (the final file absorbs any remainder).
        long totalLines = fileLength(file);
        long linesPerFile = totalLines / fileNum;

        ArrayList<Index> indices = new ArrayList<Index>();

        int fileIndex = 0;
        long linesWritten = 0;
        // Start key of the current part; empty for the very first part,
        // meaning "from the beginning".
        String startId = "";

        FileWriter fileWriter = new FileWriter("hadoop/data/index/" + fileIndex);
        BufferedWriter bufferedWriter = new BufferedWriter(fileWriter);

        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {

                bufferedWriter.write(line);
                bufferedWriter.newLine();
                linesWritten++;

                // Roll over to the next output file once this one is full.
                // Was `flag` starting at 1 compared after increment, which
                // produced linesPerFile-1 lines per file (off-by-one).
                // The last file never rolls over so it keeps the remainder.
                if (linesWritten == linesPerFile && fileIndex < fileNum - 1) {
                    // The row key is the first comma-separated column.
                    String id = line.split(",")[0];

                    // Record this part's key range; the next part starts at
                    // the same key (half-open range semantics from the caller).
                    indices.add(new Index(startId, id));
                    startId = id;

                    // Close the finished part before switching files.
                    bufferedWriter.flush();
                    bufferedWriter.close();
                    fileWriter.close();

                    fileIndex++;
                    fileWriter = new FileWriter("hadoop/data/index/" + fileIndex);
                    bufferedWriter = new BufferedWriter(fileWriter);
                    linesWritten = 0;
                }
            }
        } finally {
            // Close the last (still open) part file even if reading failed.
            bufferedWriter.flush();
            bufferedWriter.close();
            fileWriter.close();
        }

        // The last part's range is open-ended (empty endId = "to the end").
        indices.add(new Index(startId, ""));

        long end = System.currentTimeMillis();

        System.out.println(end - start);

        System.out.println(indices);

        // Persist the index: one line per part -> "fileNo,startId,endId".
        // try-with-resources guarantees the writer is flushed and closed
        // (the original leaked it and relied on per-line flush()).
        try (BufferedWriter indexWriter =
                     new BufferedWriter(new FileWriter("hadoop/data/indexdata.txt"))) {
            for (int n = 0; n < indices.size(); n++) {
                Index idx = indices.get(n);
                indexWriter.write(n + "," + idx.getStartId() + "," + idx.getEndId());
                indexWriter.newLine();
            }
        }
    }

    /**
     * Counts the number of lines in a file.
     *
     * @param file the file to count lines of
     * @return the total number of lines
     * @throws Exception if the file cannot be opened or read
     */
    public static Long fileLength(File file) throws Exception {
        // Primitive counter avoids per-iteration autoboxing; the boxed return
        // type is kept for caller compatibility.
        long count = 0L;

        // try-with-resources closes the reader (the original leaked it).
        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(file))) {
            while (bufferedReader.readLine() != null) {
                count++;
            }
        }

        return count;
    }
}
