package com.wubai.bigdata.flink.sink;

import com.google.gson.Gson;
import com.wubai.bigdata.flink.sink.model.VoucherRecord;
import com.wubai.bigdata.flink.sink.util.CreateIndex;
import com.wubai.bigdata.flink.sink.util.LoginUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple1;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.flink.types.Row;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpStatus;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

/**
 * @author hecq
 * @since 8.2.0
 */
public class SinkToHbaseES {
    private static final Logger LOG = LoggerFactory.getLogger(SinkToHbaseES.class);
    // HDFS/HBase client configuration directory (from --hdfsConfDir).
    // NOTE(review): static fields assigned in main() on the submitting JVM are only
    // visible to operators running in that same JVM — confirm this holds for the
    // intended yarn-per-job deployment.
    static String hdfsConfDir;
    // Path of the HBase column-layout properties file (from --hbasePropFile).
    static Path propConfPath;
    // Shared configuration loaded from the ES properties file (--esConfFile).
    private static Properties properties = new Properties();

    // --- Elasticsearch settings, populated by esInit() from `properties` ---
    private static String isSecureMode;
    private static String esServerHost;
    private static int MaxRetryTimeoutMillis;
    private static String type;
    private static int ConnectTimeout;
    private static int SocketTimeout;
    private static String schema = "https"; // switched to "http" when ES security is off
    private static RestClientBuilder builder = null;
    private static RestClient restClient = null;
    private static String indexname;
    // Sink selector: "1" = HBase only, "2" = HBase + ES, "3" = ES only.
    static String sinkPoint;
    // Bounded queue of doc payloads awaiting an ES bulk request (capacity 1000 gives
    // back-pressure). NOTE(review): static, hence shared by every ESSink subtask in
    // this JVM — verify that is intended when the ES sink runs with parallelism > 1.
    private static BlockingQueue<Tuple1<String>> list = new LinkedBlockingQueue<Tuple1<String>>(1000);


    /**
     * Entry point. Reads {@code \u001C}-delimited text files from an HDFS directory
     * and sinks the rows to HBase and/or Elasticsearch depending on {@code --sinkPoint}.
     *
     * <p>Required arguments: {@code --tableName --hdfsConfDir --hbaseConfDir
     * --hdfsFilePath --hbasePropFile --esConfFile --sinkPoint}.
     *
     * @param args command-line arguments parsed with {@link ParameterTool}
     * @throws Exception on HDFS access failure or Flink job failure
     */
    public static void main(String[] args) throws Exception {
        printUsage();

        ParameterTool paraTool = ParameterTool.fromArgs(args);
        // BUGFIX: hdfsConfDir must be assigned BEFORE the first createConf(hdfsConfDir)
        // call; previously the ES property file was opened while it was still null, so
        // the *-site.xml resources were never added for that FileSystem.
        hdfsConfDir = paraTool.get("hdfsConfDir");
        FileSystem fileSystem = FileSystem.get(createConf(hdfsConfDir));

        // Load the ES sink configuration (hosts, index name, field mapping, ...).
        try (FSDataInputStream esPropIn = fileSystem.open(new Path(paraTool.get("esConfFile")))) {
            properties.load(esPropIn);
        } catch (IOException e) {
            LOG.error("esConfFile:", e);
        }

        propConfPath = new Path(paraTool.get("hbasePropFile")); // hbasePropFile
        if (!fileSystem.exists(propConfPath)) {
            LOG.error("The HDFS {} does not exist.", propConfPath);
        }
        Path hdfsDir = new Path(paraTool.get("hdfsFilePath")); // source hdfs directory
        if (!fileSystem.exists(hdfsDir)) {
            LOG.error("The HDFS directory does not exist.");
        }

        // The first line of the hbase property file declares the column layout:
        // "rowkeyName,family#qualifier,family#qualifier,...".
        String[] columns;
        try (FSDataInputStream hbPropIn = fileSystem.open(propConfPath);
             BufferedReader reader = new BufferedReader(new InputStreamReader(hbPropIn))) {
            String header = reader.readLine();
            if (header == null) { // BUGFIX: previously an empty file caused an NPE here
                LOG.error("The hbasePropFile {} is empty.", propConfPath);
                return;
            }
            columns = header.split(",");
        }

        // Collect all non-empty regular files under the source directory.
        FileStatus[] fileStatuses = fileSystem.listStatus(hdfsDir);
        ArrayList<Path> filePaths = new ArrayList<>();
        LOG.info("get hdfs filePaths");
        for (FileStatus fileStatus : fileStatuses) {
            if (fileStatus.isFile() && fileStatus.getLen() > 0) {
                filePaths.add(fileStatus.getPath());
            }
        }
        // Guard: a source parallelism of 0 would make env.addSource() fail.
        if (filePaths.isEmpty()) {
            Date date = new Date();
            SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss.SSS");
            System.out.println(df.format(date) + " source 端:文件无文件!");
            return;
        }

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        LOG.info("get hdfsDataReader");
        HdfsDataReader hdfsDataReader =
                new HdfsDataReader(createConf(hdfsConfDir), filePaths, propConfPath, columns);
        sinkPoint = paraTool.get("sinkPoint");

        // One source subtask per input file.
        DataStream<Row> messageStream =
                env.addSource(hdfsDataReader).setParallelism(filePaths.size()).name(filePaths.toString());

        // sinkPoint: "1" = HBase, "2" = HBase + ES, "3" = ES.
        // Null-safe comparison: a missing --sinkPoint no longer throws an NPE.
        if ("1".equals(sinkPoint) || "2".equals(sinkPoint)) {
            messageStream.addSink(
                            new HBaseWriteSink(paraTool.get("tableName"),
                                    createConf(paraTool.get("hbaseConfDir")), propConfPath, columns))
                    .name("Hdfs-->HBase: " + paraTool.get("tableName"));
        }
        if ("2".equals(sinkPoint) || "3".equals(sinkPoint)) {
            dataStream(messageStream, columns);
        }
        env.execute("Hdfs-->SinkToHBaseES");
    }

    /** Prints command-line usage and the meaning of every argument. */
    private static void printUsage() {
        System.out.println("use command as: ");
        System.out.println(
                "flink run -t yarn-per-job -p 10 -ys 1 -yt ssl/ -Dyarn.ship-files=\"/opt/client/Flink/flink/conf\" --class com.mrs.bigdata.flink.sink.SinkToHbaseES /opt/client/Flink/flink/FlinkSinkHbaseES.jar --tableName 'default:xx_prd' " +
                        "--hdfsConfDir /opt/client/HDFS/hadoop/etc/hadoop/ --hbaseConfDir /opt/client/HBase/hbase/conf/ --hdfsFilePath /tmp/2024/ --hbasePropFile hbaseConfDir/no_epd_bnk_fin_prd.properties --esConfFile esConfDir/no_epd_bnk_fin_prd_mapping.properties" +
                        " --sinkPoint 2");
        System.out.println(
                "******************************************************************************************");
        System.out.println("<sinkPoint> chooses 1/Hbase,2/HbaseAndEs,3/Es");
        System.out.println("<tableName> hbase tableName");
        System.out.println("<hdfsConfDir> hdfs conf dir");
        System.out.println("<hbaseConfDir> hbase conf dir");
        System.out.println("<hdfsFilePath> path hdfs file");
        System.out.println("<hbasePropFile> path hbasePropFile");
        System.out.println("<esConfFile> path esConfFile");
        System.out.println(
                "******************************************************************************************");
    }

    /**
     * Maps each {@link Row} back into the original {@code \u001C}-delimited line,
     * wraps it as a {@link VoucherRecord}, and attaches the ES sink.
     *
     * @param messageStream source stream of rows with {@code columns.length} fields
     * @param columns       column layout; only its length is used here
     */
    private static void dataStream(DataStream<Row> messageStream, String[] columns) {
        DataStream<VoucherRecord> records =
                messageStream.map(new MapFunction<Row, VoucherRecord>() {
                    @Override
                    public VoucherRecord map(Row row) throws Exception {
                        // Re-join the row fields with the original \u001C separator
                        // (separator between fields only, none after the last one).
                        StringBuilder joined = new StringBuilder();
                        for (int col = 0; col < columns.length; col++) {
                            if (col > 0) {
                                joined.append("\u001C");
                            }
                            joined.append(row.getField(col).toString());
                        }
                        return getRecord(joined.toString());
                    }
                });
        records.addSink(new ESSink(properties)).name("Hdfs-->ES");
    }

    /**
     * Builds a Hadoop/HBase configuration, adding {@code hbase-site.xml},
     * {@code core-site.xml} and {@code hdfs-site.xml} from the given directory
     * when they exist.
     *
     * @param hdfsConfDir client configuration directory; may be null, in which case
     *                    only the default HBase configuration is returned
     * @return the assembled configuration
     */
    private static org.apache.hadoop.conf.Configuration createConf(String hdfsConfDir) {
        LOG.info("Create HBase/HDFS configuration.");
        org.apache.hadoop.conf.Configuration hbaseConf = HBaseConfigurationUtil.getHBaseConfiguration();
        if (hdfsConfDir != null) {
            // Same lookup for each site file; previously triplicated inline.
            for (String resource : new String[]{"hbase-site.xml", "core-site.xml", "hdfs-site.xml"}) {
                File siteFile = new File(hdfsConfDir + File.separator + resource);
                if (siteFile.exists()) {
                    LOG.info("Add " + resource);
                    hbaseConf.addResource(new Path(siteFile.getPath()));
                }
            }
        }
        LOG.info("HBase configuration created successfully.");
        return hbaseConf;
    }

    private static class HBaseWriteSink extends RichSinkFunction<Row> {
        private Connection conn;
        private BufferedMutator bufferedMutator;
        private String tableName;
        private Path propConfPath;
        private final byte[] serializedConfig;
        private Admin admin;
        private org.apache.hadoop.conf.Configuration hbaseConf;
        /*      private long flushTimeIntervalMillis = 5000; // 5s to do
              private long preFlushTime;*/
        private String[] columns;
        // 计数器，用于记录已处理的数据数量
        static int count = 0;
        List<Put> puts = new ArrayList<>();

        public HBaseWriteSink(String sourceTable, org.apache.hadoop.conf.Configuration conf, Path propConfPath, String[] columns) {
            this.tableName = sourceTable;
            this.serializedConfig = HBaseConfigurationUtil.serializeConfiguration(conf);
            this.propConfPath = propConfPath;
            this.columns = columns;
        }

        private void deserializeConfiguration() {
            LOG.info("Deserialize HBase configuration.");
            hbaseConf = HBaseConfigurationUtil.deserializeConfiguration(
                    serializedConfig, HBaseConfigurationUtil.getHBaseConfiguration());
            LOG.info("Deserialization successfully.");
        }

        private void createTable() throws IOException {
            LOG.info("Create HBase Table.");
            if (admin.tableExists(TableName.valueOf(tableName))) {
                LOG.info("Table already exists.");
                return;
            }
            TableDescriptorBuilder htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
            ColumnFamilyDescriptorBuilder hcd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c"));
            hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            hcd.setCompressionType(Compression.Algorithm.SNAPPY);
            htd.setColumnFamily(hcd.build());
            try {
                admin.createTable(htd.build());
            } catch (IOException e) {
                if (!(e instanceof TableExistsException) || !admin.tableExists(TableName.valueOf(tableName))) {
                    throw e;
                }
                LOG.info("Table already exists, ignore.");
            }
            LOG.info("Table created successfully.");
        }

        @Override
        public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
            LOG.info("Write sink open");
            super.open(parameters);
            deserializeConfiguration();
            conn = ConnectionFactory.createConnection(hbaseConf);
            admin = conn.getAdmin();
//            createTable();
            bufferedMutator = conn.getBufferedMutator(TableName.valueOf(tableName));
        }

        @Override
        public void close() throws Exception {
            try {
                if (count > 0) {
                    // 如果还有剩余未写入的数据，进行批量写入
                    for (Put p : puts) {
                        bufferedMutator.mutate(p);
                    }
                    bufferedMutator.flush();
                    System.out.println("Complete Sink LastBatchData to HBASE.....");
                }
                LOG.info("Close HBase Connection.");
                if (count == 0 && admin != null) {
                    admin.close();
                    admin = null;
                }
                if (bufferedMutator != null) {
                    bufferedMutator.close();
                    bufferedMutator = null;
                }
                if (conn != null) {
                    conn.close();
                    conn = null;
                }
            } catch (IOException e) {
                LOG.error("Close HBase Exception:", e);
                throw new RuntimeException(e);
            }
            LOG.info("Close successfully.");
        }

        @Override
        public void invoke(Row value, Context context) throws Exception {
            LOG.info("Sink data to HBase.");


            Put put = new Put(Bytes.toBytes(value.getField(0).toString()));

            String columnFamily = columns[1].split("#")[0];
            for (int i = 1; i < columns.length; i++) {
                String columnQualifier = columns[i].split("#")[1];
                String valueToPut = value.getField(i).toString();
                put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columnQualifier), Bytes.toBytes(valueToPut));
            }
            puts.add(put);
            count++;

            if (count >= 1000) {
                // 批量写入数据
                for (Put p : puts) {
                    bufferedMutator.mutate(p);
                }
                LOG.info("Flush data to HBase.");
                bufferedMutator.flush();
//                preFlushTime = System.currentTimeMillis();
                LOG.info("Flush successfully.");
                count = 0;
                puts.clear();
            } /*else {
//                LOG.info("Skip Flush.");
            }*/

            LOG.info("Sink Hbase successfully.");
        }

    }

    // Sink that writes the mapped Flink results into Elasticsearch via bulk requests.
    // NOTE(review): this class works through the STATIC outer-class fields 'list',
    // 'restClient' and 'indexname'. With sink parallelism > 1 in one JVM, all
    // subtasks share one queue and one client, and one subtask's close() may flush
    // documents queued by another — confirm this is the intended design.
    private static class ESSink extends RichSinkFunction<VoucherRecord> implements Serializable {
        private Properties properties;

        public ESSink(Properties properties) {
            this.properties = properties;
        }

        // open() runs once, before any invoke(): builds the shared REST client and
        // (re)creates the target index via esInit().
        public void open(org.apache.flink.configuration.Configuration parameters) throws Exception {
            super.open(parameters);
            try {
                // Initialize the Elasticsearch client from the loaded properties.
                System.out.println("Es_properties:" + properties);
                esInit(properties);

            } catch (Exception e) {
                // NOTE(review): initialization failures are swallowed here; invoke()
                // will later fail on a null restClient — consider rethrowing.
                System.out.println(" e.getMessage() :" + e.getMessage());
//                System.out.println(" e.getCause() :" + e.getCause().toString());
                e.printStackTrace();
            }
        }

        // Queues each record's payload; once 1000 entries are buffered, issues one
        // bulk request built from the "ES.mapping" field layout.
        public void invoke(VoucherRecord voucherRecord, SinkFunction.Context context) throws Exception {
            try {
                // Enqueue the raw \u001C-delimited payload for the next bulk request.
                Tuple1<String> tuple4 = new Tuple1<String>(voucherRecord.getDocId());
                list.put(tuple4); // blocking put — provides back-pressure / consistency
                // Production batch size; lower it (e.g. to 1) only for testing.
                if (list.size() >= 1000) {
                    int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
                    System.out.println("ESSubtaskIndex: " + subtaskIndex + " DOCS:" + list.size());
                    //      if (list.size() >= putSize) {
                    System.out.println("Enter Sink data to ES....." + list.size());
                    /*Calendar calendar = Calendar.getInstance();
                    SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM");
                    indexname = properties.getProperty("ES.indexName") + sdf.format(calendar.getTime());*/
                    indexname = properties.getProperty("ES.indexName");
                    String[] esInfo = properties.getProperty("ES.mapping").split(",");
                    bulk(indexname, new ArrayList<>(list), esInfo);
                    list.clear();
                }
            } catch (Exception e) {
                // NOTE(review): failed bulks are only printed; the batch stays queued
                // and will be retried with the next flush — verify that is acceptable.
                e.printStackTrace();
            }
        }

        // close() runs at teardown: flushes any remaining queued documents, then
        // closes the shared REST client.
        public void close() throws Exception {
            if (list.size() >= 1) {

                indexname = properties.getProperty("ES.indexName");
                String[] esInfo = properties.getProperty("ES.mapping").split(",");
                bulk(indexname, new ArrayList<>(list), esInfo);
                System.out.println("Complete Sink LastBatchData to ES....." + list.size());
                list.clear();
            }
            super.close();
            // Queue is empty after the flush above, so the client is closed here.
            if (list.size() == 0 && restClient != null) {
                try {
                    restClient.close();
                    LOG.info("Close the client successful in main.");
                } catch (Exception e1) {
                    LOG.error("Close the client failed in main.", e1);
                }
            }
        }
    }

    /**
     * Wraps one raw {@code \u001C}-delimited line in a {@link VoucherRecord};
     * field extraction happens inside the record / downstream sinks.
     *
     * @param line the delimited payload, doc id first
     * @return the wrapping record
     */
    private static VoucherRecord getRecord(String line) {
        return new VoucherRecord(line);
    }

    /**
     * Initializes the shared static Elasticsearch {@code RestClient} from the loaded
     * properties and (re)creates the target index via {@link CreateIndex}.
     *
     * <p>"ES.esServerHost" is a comma-separated list "ip1:port1,ip2:port2,..." of
     * Elasticsearch node HTTP endpoints. Per vendor guidance, use data-node
     * endpoints rather than master nodes so heavy traffic cannot crash a master.
     *
     * @param properties configuration loaded from the ES properties file
     * @throws Exception on Kerberos setup or index-creation failure
     */
    private static void esInit(Properties properties) throws Exception {
        System.out.println("enter esInit");
        esServerHost = properties.getProperty("ES.esServerHost");
        MaxRetryTimeoutMillis = Integer.valueOf(properties.getProperty("ES.MaxRetryTimeoutMillis"));
        ConnectTimeout = Integer.valueOf(properties.getProperty("ES.ConnectTimeout"));
        SocketTimeout = Integer.valueOf(properties.getProperty("ES.SocketTimeout"));
        isSecureMode = properties.getProperty("ES.isSecureMode");
        type = properties.getProperty("ES.type");
        // Secure (Kerberos) login branch.
        // NOTE(review): throws NPE if "ES.isSecureMode" is absent from the file, and
        // neither branch runs if it is any value other than "true"/"false" — verify.
        if ((isSecureMode).equals("true")) {
            // Point the JVM at krb5.conf (Kerberos client configuration) and the
            // user keytab bundled on the classpath under conf/.
            String userName = properties.getProperty("Principal"); // Kerberos principal of the job user
            String userKeytabFile = SinkToHbaseES.class.getClassLoader().getResource("conf/user.keytab").getPath();
            String krb5File = SinkToHbaseES.class.getClassLoader().getResource("conf/krb5.conf").getPath();
            System.setProperty("java.security.krb5.conf", krb5File);
            // JAAS login configuration is generated by LoginUtil.setJaasFile below.
//            String jaasPath = SinkToHbaseES.class.getClassLoader().getResource("conf/User.keytab").getPath();
//            System.setProperty("java.security.auth.login.config", jaasPath);
            System.out.println("userKeytabFile: " + userKeytabFile);
            LoginUtil.setJaasFile(userName, userKeytabFile);
            System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
            // Tell the (vendor) ES client that cluster security is enabled.
            System.setProperty("es.security.indication", "true");
        } else if ((isSecureMode).equals("false")) {
            System.setProperty("es.security.indication", "false");
            schema = "http";
        }

        // Build the RestClient from the configured endpoints. Each HttpHost carries
        // hostname, port, and scheme (https unless security is off).
        List<HttpHost> hosts = new ArrayList<HttpHost>();
        String[] hostArray1 = esServerHost.split(",");

        for (String host : hostArray1) {
            String[] ipPort = host.split(":");
            HttpHost hostNew = new HttpHost(ipPort[0], Integer.valueOf(ipPort[1]), schema);
            hosts.add(hostNew);
        }
        HttpHost[] httpHosts = hosts.toArray(new HttpHost[]{});
        builder = RestClient.builder(httpHosts);
        // Request tuning:
        // 1. connect timeout (ms)  2. socket/read timeout (ms)  3. max retry timeout (ms).
        // NOTE(review): setMaxRetryTimeoutMillis only exists in pre-7.x low-level
        // REST clients — revisit on any client upgrade.
        builder = builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
            @Override
            public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                return requestConfigBuilder.setConnectTimeout(ConnectTimeout).setSocketTimeout(SocketTimeout);
            }
        }).setMaxRetryTimeoutMillis(MaxRetryTimeoutMillis);

        // Default headers sent with every request (per-request headers override these).
        Header[] defaultHeaders = new Header[]{new BasicHeader("Accept", "application/json"),
                new BasicHeader("Content-type", "application/json")};
        builder.setDefaultHeaders(defaultHeaders);
        // Build the shared client and pin the host list.
        // NOTE(review): setHosts is not part of the vanilla RestClient API —
        // presumably a vendor (FusionInsight) extension; verify against the bundled jar.
        restClient = builder.build();
        restClient.setHosts(httpHosts);

        // Create the target index with the configured mapping if it does not exist.
        CreateIndex createIndex = new CreateIndex();
        createIndex.index(restClient, properties.getProperty("ES.mapping.json"), properties.getProperty("ES.indexName"));
    }


    /**
     * Sends one Elasticsearch {@code _bulk} index request for the queued payloads.
     *
     * <p>Each {@code Tuple1.f0} is a {@code \u001C}-delimited line whose field 0 is
     * the document id. {@code esInfo} maps source fields to ES field names: entry i
     * (for i &gt;= 1) has the form {@code "sourceIndex#esFieldName"}.
     *
     * <p>Failures are logged, not rethrown — callers keep or clear the queue themselves.
     *
     * @param index  target index name
     * @param t      snapshot of the queued payloads
     * @param esInfo field mapping from "ES.mapping" (entry 0 is ignored)
     */
    private static void bulk(String index, List<Tuple1<String>> t, String[] esInfo) {
        StringBuilder bulkRequestBuilder = new StringBuilder();
        Gson gson = new Gson();

        // Build the NDJSON body: one action line + one document line per record.
        for (Tuple1<String> f : t) {
            // BUGFIX(perf): split each payload ONCE. Previously f.f0.split(...) was
            // re-executed for the id and again for every mapped field.
            String[] fields = f.f0.split("\u001c");
            String value = fields[0]; // document id
            String requestHeader = "{ \"index\" : { \"_index\" : \"" + index + "\", \"_id\" :  \"" + value + "\"} }";
            Map<String, Object> esMap = new HashMap<>();
            for (int i = 1; i < esInfo.length; i++) {
                String[] keys = esInfo[i].split("#");
                int i1 = Integer.parseInt(keys[0]);
                esMap.put(keys[1], fields[i1]);
            }
            String strJson = gson.toJson(esMap);
            bulkRequestBuilder.append(requestHeader).append("\n").append(strJson).append("\n");
        }

        StringEntity entity = null;
        try {
            entity = new StringEntity(bulkRequestBuilder.toString(), ContentType.APPLICATION_JSON);
            entity.setContentEncoding("UTF-8");
            Request request = new Request("PUT", "/_bulk");
            request.addParameter("pretty", "true");
            request.setEntity(entity);
            Response rsp = restClient.performRequest(request);
            if (HttpStatus.SC_OK == rsp.getStatusLine().getStatusCode()) {
                System.out.println("<=Bulk successful.");
            } else {
                LOG.error("<=Bulk failed.");
            }
        } catch (Exception e) {
            LOG.error("<=Bulk failed, exception occurred.", e);
        }
    }

    /**
     * Parallel source that reads {@code \u001C}-delimited text files from HDFS.
     * Input files are assigned to subtasks round-robin; each line becomes one
     * {@link Row} with {@code columns.length} string fields, emitted in batches
     * of 1000.
     */
    public static class HdfsDataReader extends RichParallelSourceFunction<Row> {
        private static final long serialVersionUID = 2174904787118597072L;
        private static final int EMIT_BATCH_SIZE = 1000;
        // BUGFIX: volatile — cancel() is called from a different thread than run(),
        // and run() previously never consulted this flag at all, so cancellation
        // had no effect.
        private volatile boolean running = true;
        private transient Configuration configuration;
        private transient FileSystem fileSystem;
        private ArrayList<Path> hdfsFilePath;
        private Path propConfPath;
        private String[] columns;

        /**
         * @param hdfsConf     HDFS configuration; NOTE(review): accepted but ignored
         *                     (Hadoop Configuration is not java.io.Serializable) —
         *                     run() rebuilds it from the static hdfsConfDir, which
         *                     only works if that static is visible in the task JVM; verify.
         * @param hdfsFilePath all non-empty input files to read
         * @param propConfPath column-layout properties file (kept for reference)
         * @param columns      column layout; its length fixes the Row arity
         */
        public HdfsDataReader(Configuration hdfsConf, ArrayList<Path> hdfsFilePath, Path propConfPath, String[] columns) {
            this.hdfsFilePath = hdfsFilePath;
            this.propConfPath = propConfPath;
            this.columns = columns;
        }

        @Override
        public void run(SourceContext<Row> ctx) throws Exception {
            int subtaskIndex = getRuntimeContext().getIndexOfThisSubtask();
            int subtaskCount = getRuntimeContext().getNumberOfParallelSubtasks();

            // Round-robin assignment of input files across subtasks.
            List<Path> partitionedPaths = new ArrayList<>();
            for (int i = subtaskIndex; i < hdfsFilePath.size(); i += subtaskCount) {
                partitionedPaths.add(hdfsFilePath.get(i));
            }

            try {
                if (configuration == null) {
                    configuration = createConf(null);
                }
                fileSystem = FileSystem.get(createConf(hdfsConfDir));
                LOG.info("fileSystem: " + fileSystem);
                for (Path filePath : partitionedPaths) {
                    if (!running) {
                        break;
                    }
                    LOG.info("read hdfs: " + filePath.toString());
                    // BUGFIX: try-with-resources per file — previously the streams were
                    // reassigned each iteration and only the LAST pair was closed in
                    // finally, leaking one input stream per additional file.
                    // NOTE(review): InputStreamReader uses the platform default charset,
                    // as the original did — confirm the files are in that encoding.
                    try (FSDataInputStream in = fileSystem.open(filePath);
                         BufferedReader fileReader = new BufferedReader(new InputStreamReader(in))) {
                        Date date = new Date();
                        SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss.SSS");
                        System.out.println(df.format(date) + " subtaskIndex: " + subtaskIndex + " " + filePath);
                        emitFile(fileReader, ctx);
                    }
                }
            } finally {
                // NOTE(review): FileSystem.get returns a JVM-wide cached instance;
                // closing it here (as the original did) can affect other subtasks
                // sharing the same cache entry — confirm this is safe.
                if (fileSystem != null) {
                    try {
                        fileSystem.close();
                    } catch (IOException e) {
                        LOG.error("Error closing fileSystem: ", e);
                    }
                }
            }
        }

        /** Parses each line into a Row and emits the rows in batches of 1000. */
        private void emitFile(BufferedReader fileReader, SourceContext<Row> ctx) throws IOException {
            String line;
            List<Row> buffer = new ArrayList<>(EMIT_BATCH_SIZE);
            while (running && (line = fileReader.readLine()) != null) {
                String[] parts = line.split("\u001C");
                Row row = new Row(columns.length);
                for (int i = 0; i < columns.length; i++) {
                    row.setField(i, parts[i]);
                }
                buffer.add(row);
                if (buffer.size() >= EMIT_BATCH_SIZE) {
                    drain(buffer, ctx);
                }
            }
            drain(buffer, ctx); // emit the final partial batch, if any
        }

        /** Emits all buffered rows downstream and clears the buffer. */
        private void drain(List<Row> buffer, SourceContext<Row> ctx) {
            for (Row r : buffer) {
                ctx.collect(r);
            }
            buffer.clear();
        }

        @Override
        public void cancel() {
            running = false;
        }
    }
}