package com.crawler.repitle;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.Random;

//import org.apache.hadoop.conf.Configuration;
//import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.log4j.Logger;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.GroupFactory;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.ParquetReader.Builder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.GroupReadSupport;
import org.apache.parquet.hadoop.example.GroupWriteSupport;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

/**
 * Small demo utility that writes and reads Parquet files using the
 * example Group API from parquet-hadoop.
 */
public class ReadParquet {
    static Logger logger = Logger.getLogger(ReadParquet.class);

    /**
     * Reads a Parquet file and prints every record to stdout.
     *
     * <p>Fixes over the previous version: the {@code inPath} argument is now
     * actually used (it was ignored in favor of a hard-coded
     * {@code /data.parquet}), the reader is created through
     * {@link ParquetReader#builder} instead of the deprecated constructor
     * (as the original comment already suggested), the reader is closed via
     * try-with-resources, and ALL records are printed instead of exactly two.
     *
     * @param inPath path of the Parquet file to read
     * @throws Exception if the file cannot be opened or read
     */
    static void parquetReader(String inPath) throws Exception {
        GroupReadSupport readSupport = new GroupReadSupport();
        // Builder replaces the deprecated new ParquetReader(path, readSupport);
        // try-with-resources guarantees the underlying stream is released.
        try (ParquetReader<Group> reader =
                ParquetReader.builder(readSupport, new Path(inPath)).build()) {
            Group record;
            // read() returns null once the file is exhausted.
            while ((record = reader.read()) != null) {
                System.out.println(record);
//                System.out.println(record.getString("left", 0));
//                System.out.println(record.getString("right", 0));
            }
        }
    }

    /**
     * Creates a Parquet file at {@code outPath} with a fixed Pair schema,
     * deleting any pre-existing file at that location first.
     *
     * <p>NOTE(review): as in the original, no rows are written yet — the
     * text-to-Group conversion for {@code inPath} is still a TODO. The writer
     * is now closed via try-with-resources so the Parquet footer is written
     * (and the stream released) even if an exception occurs mid-way.
     *
     * @param outPath destination path for the Parquet output
     * @param inPath  path of a plain-text input file (currently unused; see TODO)
     * @throws IOException if the filesystem or the writer fails
     */
    static void parquetWriter(String outPath, String inPath) throws IOException {
        MessageType schema = MessageTypeParser.parseMessageType(
                "message Pair {\n" +
                        " required binary left (UTF8);\n" +
                        " required binary right (UTF8);\n" +
                        "required group point(LIST) {\n" +
                        "repeated group list {\n" +
                        "required int32 element;" +
                        "}\n" +
                        "}\n" +
                        "}");
        GroupFactory groupFactory = new SimpleGroupFactory(schema);
        Configuration conf = new Configuration();
        GroupWriteSupport.setSchema(schema, conf);

        // Remove stale output so the writer can create a fresh file.
        FileSystem fileSystem = FileSystem.get(conf);
        Path path = new Path(outPath);
        if (fileSystem.exists(path)) {
            fileSystem.delete(path, true);
        }

        GroupWriteSupport writeSupport = new GroupWriteSupport();
        // NOTE(review): this ParquetWriter constructor is deprecated; kept to
        // avoid introducing new imports. Consider ExampleParquetWriter.builder.
        try (ParquetWriter<Group> writer = new ParquetWriter<Group>(path, writeSupport,
                ParquetWriter.DEFAULT_COMPRESSION_CODEC_NAME,
                ParquetWriter.DEFAULT_BLOCK_SIZE,
                ParquetWriter.DEFAULT_PAGE_SIZE,
                ParquetWriter.DEFAULT_PAGE_SIZE, /* dictionary page size */
                ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED,
                ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
                ParquetProperties.WriterVersion.PARQUET_1_0, conf)) {
            // TODO(review): read lines from inPath, build Groups via
            // groupFactory, and call writer.write(group). Currently no rows
            // are written, matching the original behavior.
        }
    }

    public static void main(String[] args) throws Exception {
//        parquetWriter("/tmp/out.parquet", "/tmp/in.txt");
        // parquetReader now honors its argument, so pass the path that the
        // old hard-coded implementation actually read.
        parquetReader("/data.parquet");
    }
}