package paas.storage.distributedColumnDatabase.impl;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputFormat;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroup;
import org.apache.parquet.example.data.simple.SimpleGroupFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.example.ExampleParquetWriter;
import org.apache.parquet.hadoop.example.GroupReadSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import paas.storage.common.config.ConnectionManager;
import paas.storage.common.config.HBaseDaoUtil;
import paas.storage.common.constant.ResponseCode;
import paas.storage.common.utils.AssertUtils;
import paas.storage.distributedColumnDatabase.IManagement;
import paas.storage.exception.RRException;
import paas.storage.result.ExportDataResponse;
import paas.storage.result.ImportDataResponse;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/**
 * Import/export management for the distributed column database (HBase):
 * moves table data to and from TXT/CSV, ORC and Parquet files on HDFS
 * ({@code dfs://}) or the local filesystem ({@code local://}).
 *
 * @author xufeng
 * @email 525207937@qq.com
 * @date 2021/1/26 16:41
 */
@Service
public class IManagementImpl implements IManagement {

    private static final Logger log = LoggerFactory.getLogger(IManagementImpl.class);

    /** Path prefix marking an HDFS location in the public API. */
    private static final String DFS_PREFIX = "dfs://";
    /** Path prefix marking a local-filesystem location in the public API. */
    private static final String LOCAL_PREFIX = "local://";

    @Autowired
    private ConnectionManager connectionManager;
    @Autowired
    HBaseDaoUtil hBaseDaoUtil;

    /**
     * Imports file data into an HBase table.
     * <p>
     * Lines/rows are expected to carry exactly the four columns written by
     * {@link #exportData}: rowkey, column family, qualifier, value.
     *
     * @param connectionId   connection id, required
     * @param database       database (namespace), optional
     * @param table          table name, required
     * @param sourceFileType one of TXT, CSV, ORC, Parquet (case-insensitive)
     * @param sourceFilePath source path, prefixed with dfs:// or local://
     * @return response with taskStatus 1 on success, 0/-x plus error code and message on failure
     */
    @Override
    public ImportDataResponse importData(String connectionId, String database, String table, String sourceFileType, String sourceFilePath) {
        ImportDataResponse importDataResponse = new ImportDataResponse();
        importDataResponse.setTaskStatus(1);
        try {
            // NOTE(review): assumes AssertUtils.isTrue raises when the condition holds,
            // i.e. rejects an empty connectionId — confirm against AssertUtils.
            AssertUtils.isTrue(StringUtils.isEmpty(connectionId), "连接不能为空");
            if (database != null) {
                // Message now matches the actual 64-char limit (was "128以下").
                AssertUtils.charLengthLe(database, 64, "数据库名长度需在64以内");
            }
            if (table != null) {
                // Message now describes the length check (was "表名不能为空").
                AssertUtils.charLengthLe(table, 64, "表名长度需在64以内");
            }
            if (connectionManager.getConnection(connectionId) == null) {
                importDataResponse.setTaskStatus(0);
                importDataResponse.setErrorCode(ResponseCode.TASK_STATUS_ERROR_CONNECTION);
                importDataResponse.setErrorMsg("Please establish a connection first");
                return importDataResponse;
            }
            TableName tn = hBaseDaoUtil.getTable(database, table);
            Configuration configuration = connectionManager.getHadoopConfiguration(connectionId);

            List<Put> listPut;
            if ("TXT".equalsIgnoreCase(sourceFileType) || "CSV".equalsIgnoreCase(sourceFileType)) {
                listPut = readDelimitedPuts(configuration, sourceFilePath);
            } else if ("ORC".equalsIgnoreCase(sourceFileType)) {
                listPut = readOrcPuts(configuration, sourceFilePath);
            } else if ("Parquet".equalsIgnoreCase(sourceFileType)) {
                listPut = readParquetPuts(configuration, sourceFilePath);
            } else {
                log.info("please input right Document Format!");
                listPut = new ArrayList<>();
            }

            // try-with-resources guarantees the table handle is released even
            // when the batched put fails (the original leaked it on exception).
            try (Table importTable = connectionManager.getConnection(connectionId).getTable(tn)) {
                importTable.put(listPut);
            }
            importDataResponse.setTable(table);
        } catch (RRException e) {
            importDataResponse.setTaskStatus(ResponseCode.TASK_STATUS_ERROR);
            importDataResponse.setErrorCode(ResponseCode.BUSINESS_CODE);
            importDataResponse.setErrorMsg("业务异常," + e.getMessage());
        } catch (Exception e) {
            importDataResponse.setTaskStatus(0);
            importDataResponse.setErrorCode(ResponseCode.IMPORT_DATA_ERROR);
            importDataResponse.setErrorMsg("导入失败" + e.getMessage());
            log.error(e.getMessage(), e);
        }
        return importDataResponse;
    }

    /**
     * Reads a TXT/CSV file (layout: rowkey,family,qualifier,value per line)
     * into a list of Puts. HDFS input is decoded as UTF-8; local input keeps
     * the legacy GBK decoding.
     */
    private List<Put> readDelimitedPuts(Configuration configuration, String sourceFilePath) throws IOException {
        List<Put> puts = new ArrayList<>();
        BufferedReader reader = null;
        try {
            if (flag(sourceFilePath)) {
                FileSystem fileSystem = connectionManager.createFileSystem(configuration);
                String path = sourceFilePath.replace(DFS_PREFIX, "").trim();
                // Wrap the FSDataInputStream itself instead of getWrappedStream(),
                // so the filesystem-level stream is the one being read and closed.
                reader = new BufferedReader(
                        new InputStreamReader(fileSystem.open(new Path(path)), StandardCharsets.UTF_8));
            } else {
                String path = sourceFilePath.replace(LOCAL_PREFIX, "").trim();
                // Local files are expected to be GBK encoded (legacy behaviour).
                reader = new BufferedReader(new InputStreamReader(new FileInputStream(path), "GBK"));
            }
            String line;
            while ((line = reader.readLine()) != null) {
                if (line.isEmpty()) {
                    continue;
                }
                // Limit the split to 4 so commas inside the value column are
                // preserved (the original re-split truncated such values).
                String[] parts = line.split(",", 4);
                if (parts.length < 4) {
                    throw new RRException("数据行格式错误(需要 rowkey,family,qualifier,value): " + line);
                }
                Put put = new Put(parts[0].getBytes(StandardCharsets.UTF_8));
                put.addColumn(parts[1].getBytes(StandardCharsets.UTF_8),
                        parts[2].getBytes(StandardCharsets.UTF_8),
                        parts[3].getBytes(StandardCharsets.UTF_8));
                puts.add(put);
            }
        } finally {
            // The original never closed the reader — explicit close fixes the leak.
            if (reader != null) {
                reader.close();
            }
        }
        return puts;
    }

    /** Reads an ORC file of {@link orc.MyRow} records into a list of Puts. */
    private List<Put> readOrcPuts(Configuration configuration, String sourceFilePath) throws Exception {
        FileSystem fileSystem = connectionManager.createFileSystem(configuration);
        JobConf conf;
        String path;
        if (flag(sourceFilePath)) {
            conf = new JobConf(fileSystem.getConf());
            path = sourceFilePath.replace(DFS_PREFIX, "").trim();
        } else {
            conf = new JobConf();
            path = sourceFilePath.replace(LOCAL_PREFIX, "").trim();
        }
        orc orcReader = new orc();
        List<Put> puts = new ArrayList<>();
        for (orc.MyRow myRow : orcReader.readerOrc(path, conf)) {
            Put put = new Put(myRow.getRowkey().getBytes(StandardCharsets.UTF_8));
            put.addColumn(myRow.getFamily().getBytes(StandardCharsets.UTF_8),
                    myRow.getField().getBytes(StandardCharsets.UTF_8),
                    myRow.getValue().getBytes(StandardCharsets.UTF_8));
            puts.add(put);
        }
        return puts;
    }

    /**
     * Reads a Parquet file (schema fields rowkey/family/colume/value, matching
     * the schema written by {@link #exportData}) into a list of Puts.
     */
    private List<Put> readParquetPuts(Configuration configuration, String sourceFilePath) throws IOException {
        Configuration conf = new Configuration();
        String path;
        if (flag(sourceFilePath)) {
            FileSystem fileSystem = connectionManager.createFileSystem(configuration);
            conf = fileSystem.getConf();
            path = sourceFilePath.replace(DFS_PREFIX, "").trim();
        } else {
            path = sourceFilePath.replace(LOCAL_PREFIX, "").trim();
        }
        List<Put> puts = new ArrayList<>();
        // try-with-resources closes the reader (the original leaked it).
        try (ParquetReader<Group> reader =
                     ParquetReader.builder(new GroupReadSupport(), new Path(path)).withConf(conf).build()) {
            Group line;
            while ((line = reader.read()) != null) {
                // "colume" [sic] is the qualifier field name used by the export schema.
                Put put = new Put(line.getString("rowkey", 0).getBytes(StandardCharsets.UTF_8));
                put.addColumn(line.getString("family", 0).getBytes(StandardCharsets.UTF_8),
                        line.getString("colume", 0).getBytes(StandardCharsets.UTF_8),
                        line.getString("value", 0).getBytes(StandardCharsets.UTF_8));
                puts.add(put);
            }
        }
        return puts;
    }

    /**
     * Tells whether a source/destination path points at HDFS.
     *
     * @param sourceFilePath path possibly prefixed with dfs:// (or hdfs://)
     * @return true for an HDFS path, false for local (or null) paths
     */
    public boolean flag(String sourceFilePath) {
        return sourceFilePath != null && sourceFilePath.contains(DFS_PREFIX);
    }


    /**
     * Exports a full table scan to a file.
     *
     * @param connectionId 连接 必填
     * @param database     数据库 不必填
     * @param table        表 必填
     * @param destFileType one of TXT, CSV, ORC, Parquet (case-insensitive)
     * @param destFilePath destination path, prefixed with dfs:// or local://
     * @return response with taskStatus 1 on success, 0/-x plus error code and message on failure
     */
    @Override
    public ExportDataResponse exportData(String connectionId, String database, String table, String destFileType, String destFilePath) {
        ExportDataResponse exportDataResponse = new ExportDataResponse();
        exportDataResponse.setTaskStatus(1);
        try {
            if (connectionManager.getConnection(connectionId) == null) {
                exportDataResponse.setTaskStatus(0);
                exportDataResponse.setErrorCode(ResponseCode.TASK_STATUS_ERROR_CONNECTION);
                exportDataResponse.setErrorMsg("Please establish a connection first");
                return exportDataResponse;
            }
            TableName tn = hBaseDaoUtil.getTable(database, table);
            Configuration configuration = connectionManager.getHadoopConfiguration(connectionId);

            // Keep the table open while the scan is consumed: the original closed
            // the table in a finally block BEFORE iterating the ResultScanner,
            // which can invalidate the scanner, and it never closed the scanner.
            try (Table exportTable = connectionManager.getConnection(connectionId).getTable(tn);
                 ResultScanner resultScanner = exportTable.getScanner(new Scan())) {
                if ("TXT".equalsIgnoreCase(destFileType) || "CSV".equalsIgnoreCase(destFileType)) {
                    exportDelimited(configuration, resultScanner, destFilePath);
                } else if ("ORC".equalsIgnoreCase(destFileType)) {
                    exportOrc(configuration, resultScanner, destFilePath);
                } else if ("Parquet".equalsIgnoreCase(destFileType)) {
                    exportParquet(configuration, resultScanner, destFilePath);
                } else {
                    throw new RRException("please input right Document Format!");
                }
            }
            exportDataResponse.setTable(table);
        } catch (RRException e) {
            exportDataResponse.setTaskStatus(ResponseCode.TASK_STATUS_ERROR);
            exportDataResponse.setErrorCode(ResponseCode.BUSINESS_CODE);
            exportDataResponse.setErrorMsg("业务异常," + e.getMessage());
        } catch (Exception e) {
            exportDataResponse.setTaskStatus(0);
            exportDataResponse.setErrorCode(ResponseCode.EXPORT_DATA_ERROR);
            exportDataResponse.setErrorMsg(e.getMessage());
            log.error(e.getMessage(), e);
        }
        return exportDataResponse;
    }

    /** Renders one cell as the CSV line "rowkey,family,qualifier,value\n". */
    private String toCsvLine(Cell cell) {
        return new String(CellUtil.cloneRow(cell), StandardCharsets.UTF_8) + ","
                + new String(CellUtil.cloneFamily(cell), StandardCharsets.UTF_8) + ","
                + new String(CellUtil.cloneQualifier(cell), StandardCharsets.UTF_8) + ","
                + new String(CellUtil.cloneValue(cell), StandardCharsets.UTF_8) + "\n";
    }

    /** Deletes the target path (recursively) if it already exists. */
    private void deleteIfExists(FileSystem fileSystem, String path) throws IOException {
        Path target = new Path(path);
        if (fileSystem.exists(target)) {
            fileSystem.delete(target, true);
        }
    }

    /** Writes the scan as CSV lines to HDFS or the local filesystem. */
    private void exportDelimited(Configuration configuration, ResultScanner resultScanner, String destFilePath) throws IOException {
        if (flag(destFilePath)) {
            FileSystem fileSystem = connectionManager.createFileSystem(configuration);
            String path = destFilePath.replace(DFS_PREFIX, "").trim();
            // try-with-resources closes the stream on error paths too.
            try (FSDataOutputStream out = fileSystem.create(new Path(path))) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    if (cells == null) {
                        continue;
                    }
                    for (Cell cell : cells) {
                        out.write(toCsvLine(cell).getBytes(StandardCharsets.UTF_8));
                    }
                }
            }
        } else {
            String path = destFilePath.replace(LOCAL_PREFIX, "").trim();
            // Explicit UTF-8 instead of FileWriter's platform-default charset.
            try (Writer writer = new OutputStreamWriter(new FileOutputStream(path), StandardCharsets.UTF_8)) {
                for (Result result : resultScanner) {
                    List<Cell> cells = result.listCells();
                    if (cells == null) {
                        continue;
                    }
                    for (Cell cell : cells) {
                        writer.write(toCsvLine(cell));
                    }
                }
            }
        }
    }

    /** Writes the scan as ORC records of {@link orc.MyRow}. */
    private void exportOrc(Configuration configuration, ResultScanner resultScanner, String destFilePath) throws IOException {
        FileSystem fileSystem = connectionManager.createFileSystem(configuration);
        String path;
        JobConf conf;
        if (flag(destFilePath)) {
            path = destFilePath.replace(DFS_PREFIX, "").trim();
            conf = new JobConf(fileSystem.getConf());
            deleteIfExists(fileSystem, path);
        } else {
            path = destFilePath.replace(LOCAL_PREFIX, "").trim();
            conf = new JobConf();
        }
        StructObjectInspector inspector =
                (StructObjectInspector) ObjectInspectorFactory.getReflectionObjectInspector(
                        orc.MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
        OrcSerde serde = new OrcSerde();
        OutputFormat outFormat = new OrcOutputFormat();
        RecordWriter writer = outFormat.getRecordWriter(fileSystem, conf, new Path(path).toString(), Reporter.NULL);
        try {
            for (Result result : resultScanner) {
                List<Cell> cells = result.listCells();
                if (cells == null) {
                    continue;
                }
                for (Cell cell : cells) {
                    orc.MyRow row = new orc.MyRow(
                            new String(CellUtil.cloneRow(cell), StandardCharsets.UTF_8),
                            new String(CellUtil.cloneFamily(cell), StandardCharsets.UTF_8),
                            new String(CellUtil.cloneQualifier(cell), StandardCharsets.UTF_8),
                            new String(CellUtil.cloneValue(cell), StandardCharsets.UTF_8));
                    writer.write(NullWritable.get(), serde.serialize(row, inspector));
                }
            }
        } finally {
            // NOTE: exporting to a local path on Windows without a Hadoop runtime
            // fails here ("(null) entry in command string: null chmod 0644").
            writer.close(Reporter.NULL);
        }
        // Deliberately NOT calling fileSystem.close(): FileSystem instances are
        // typically cached and shared, and closing one here would break other
        // callers holding the same cached instance.
    }

    /** Writes the scan as a Parquet file (fields rowkey/family/colume/value). */
    private void exportParquet(Configuration configuration, ResultScanner resultScanner, String destFilePath) throws IOException {
        String schemaStr = "message schema {" + "repeated binary rowkey;"
                + "repeated binary family;" + "repeated binary colume;" + "repeated binary value;}";
        MessageType schema = MessageTypeParser.parseMessageType(schemaStr);

        ExampleParquetWriter.Builder builder;
        if (flag(destFilePath)) {
            String path = destFilePath.replace(DFS_PREFIX, "").trim();
            FileSystem fileSystem = connectionManager.createFileSystem(configuration);
            deleteIfExists(fileSystem, path);
            builder = ExampleParquetWriter.builder(new Path(path))
                    .withWriteMode(ParquetFileWriter.Mode.CREATE)
                    .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_1_0)
                    .withCompressionCodec(CompressionCodecName.SNAPPY)
                    .withConf(fileSystem.getConf())
                    .withType(schema);
        } else {
            String path = destFilePath.replace(LOCAL_PREFIX, "").trim();
            // No withConf() for local export — mirrors the original, where the
            // cluster configuration was deliberately left out for this path.
            builder = ExampleParquetWriter.builder(new Path(path))
                    .withWriteMode(ParquetFileWriter.Mode.CREATE)
                    .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_1_0)
                    .withCompressionCodec(CompressionCodecName.SNAPPY)
                    .withType(schema);
        }

        SimpleGroupFactory groupFactory = new SimpleGroupFactory(schema);
        // try-with-resources closes the writer on error paths too.
        try (ParquetWriter<Group> writer = builder.build()) {
            for (Result result : resultScanner) {
                List<Cell> cells = result.listCells();
                if (cells == null) {
                    continue;
                }
                for (Cell cell : cells) {
                    writer.write(groupFactory.newGroup()
                            .append("rowkey", new String(CellUtil.cloneRow(cell), StandardCharsets.UTF_8))
                            .append("family", new String(CellUtil.cloneFamily(cell), StandardCharsets.UTF_8))
                            .append("colume", new String(CellUtil.cloneQualifier(cell), StandardCharsets.UTF_8))
                            .append("value", new String(CellUtil.cloneValue(cell), StandardCharsets.UTF_8)));
                }
            }
        }
    }
}
