package com.huawei.bigdata.flink.examples;

import com.google.gson.Gson;
import com.huawei.bigdata.flink.examples.model.VoucherRecord;
import com.huawei.bigdata.flink.examples.utils.CreateIndex;
import com.huawei.bigdata.flink.examples.utils.CreateTable;
import com.huawei.bigdata.flink.examples.utils.LoginUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpStatus;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple4;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.Serializable;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.*;

public class ProcessDataAndSinkToHBaseES {
    private static final Logger LOG = LoggerFactory.getLogger(ProcessDataAndSinkToHBaseES.class);

    // Shared job configuration, loaded in main() from the file passed as --app.conf.
    private static Properties properties = new Properties();

    // ---- HBase state -------------------------------------------------------
    // NOTE(review): these mutable statics are shared by the sink instances.
    // That only works because the job runs with parallelism 1 in one JVM; with
    // higher parallelism each sink instance should own its connection instead.
    private static org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
    private static TableName thisMonthTableName = null;
    private static Connection conn = null;
    private static Admin admin = null;
    private static Table table = null;
    // Intended production batch size for bulk flushes (see ESSink.invoke).
    private static int putSize = 5000;

    // ---- Elasticsearch state ----------------------------------------------
    private static String isSecureMode;
    private static String esServerHost;
    private static int MaxRetryTimeoutMillis;
    private static String type;
    private static int ConnectTimeout;
    private static int SocketTimeout;
    private static String schema = "https";
    private static RestClientBuilder builder = null;
    private static RestClient restClient = null;
    private static String indexname;
    // Buffer of tuples waiting to be flushed to ES in one _bulk request.
    private static List<Tuple4<String, String, String, String>> list =
            new ArrayList<Tuple4<String, String, String, String>>();

    /**
     * Job entry point: reads CSV records from Kafka, parses them into
     * {@link VoucherRecord}s and writes every record to both HBase and ES.
     */
    public static void main(String[] args) throws Exception {
        // Load the contents of the properties file given via --app.conf.
        ParameterTool paraTool = ParameterTool.fromArgs(args);
        String path = paraTool.get("app.conf");
        // try-with-resources: the original leaked this FileInputStream.
        try (FileInputStream in = new FileInputStream(new File(path))) {
            properties.load(in);
        } catch (IOException e) {
            LOG.error("Failed to load configuration file: " + path, e);
        }
        // StreamExecutionEnvironment is the context the streaming job runs in;
        // it controls parallelism, fault tolerance / checkpointing and access
        // to external systems. Parallelism must not exceed the total slots.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Checkpoint every 1000 ms with EXACTLY_ONCE processing semantics.
        env.enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
        // Store consistent state snapshots on the configured filesystem path.
        env.setStateBackend(new FsStateBackend(properties.getProperty("Flink.checkpoint")));
        DataStream<String> messageStream = env.addSource(new FlinkKafkaConsumer010<>(
                properties.getProperty("Kafka.topic"), new SimpleStringSchema(), paraTool.getProperties()));
        DataStream<VoucherRecord> voucherRecordStream =
                messageStream.map(new MapFunction<String, VoucherRecord>() {
                    @Override
                    public VoucherRecord map(String value) throws Exception {
                        return getRecord(value);
                    }
                });
        voucherRecordStream.addSink(new HBaseSink(properties));
        voucherRecordStream.addSink(new ESSink(properties));
        env.execute();
    }

    /** Sink that writes each record's picture payload into HBase. */
    private static class HBaseSink extends RichSinkFunction<VoucherRecord> implements Serializable {
        private Properties properties = new Properties();

        public HBaseSink(Properties properties) {
            this.properties = properties;
        }

        /** Called once before any invoke(); sets up the HBase connection. */
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            try {
                hbaseInit(properties);
            } catch (Exception e) {
                // Log the full stack trace instead of printing getCause(),
                // which NPE'd whenever the exception had no cause.
                LOG.error("Failed to initialize HBase", e);
            }
        }

        /** Writes one record: row key = picture name, base:cf1 = picture bytes. */
        @Override
        public void invoke(VoucherRecord voucherRecord, SinkFunction.Context context) throws Exception {
            try {
                Put put = new Put(Bytes.toBytes(voucherRecord.getPictureName()));
                byte[] pictureData = voucherRecord.getPictureData();
                // Column family "base", qualifier "cf1" holds the picture payload.
                put.addColumn(Bytes.toBytes("base"), Bytes.toBytes("cf1"), pictureData);
                table.put(put);
                LOG.info("MOB data inserted successfully.");
            } catch (Exception e) {
                LOG.error("Failed to write record to HBase", e);
            }
        }

        /** Teardown hook: closes table, admin and connection, in that order. */
        @Override
        public void close() throws Exception {
            super.close();
            if (table != null) {
                try {
                    table.close();
                    LOG.info("HBase table closed.");
                } catch (IOException e) {
                    LOG.error("Failed to close HBase table", e);
                }
            }
            if (admin != null) {
                try {
                    admin.close();
                    LOG.info("HBase admin closed.");
                } catch (IOException e) {
                    LOG.error("Failed to close HBase admin", e);
                }
            }
            if (conn != null) {
                try {
                    conn.close();
                    LOG.info("HBase connection closed.");
                } catch (IOException e) {
                    LOG.error("Failed to close HBase connection", e);
                }
            }
        }
    }

    /** Sink that indexes record metadata into Elasticsearch via _bulk requests. */
    private static class ESSink extends RichSinkFunction<VoucherRecord> implements Serializable {
        private Properties properties = new Properties();

        public ESSink(Properties properties) {
            this.properties = properties;
        }

        /** Called once before any invoke(); builds the ES REST client. */
        @Override
        public void open(Configuration parameters) throws Exception {
            super.open(parameters);
            try {
                esInit(properties);
            } catch (Exception e) {
                LOG.error("Failed to initialize Elasticsearch client", e);
            }
        }

        /** Buffers the record and flushes the buffer as one bulk request. */
        @Override
        public void invoke(VoucherRecord voucherRecord, SinkFunction.Context context) throws Exception {
            try {
                Tuple4<String, String, String, String> tuple4 = new Tuple4<String, String, String, String>(
                        voucherRecord.getCrownfontNumber(), voucherRecord.getEdition(),
                        voucherRecord.getCoupon(), voucherRecord.getPictureName());
                list.add(tuple4);

                // Flushes after every record for testing; in a real environment
                // use "list.size() >= putSize" to batch writes.
                if (list.size() >= 1) {
                    LOG.info("Write data to ES, batch size: {}", list.size());
                    // Index is partitioned by month: <prefix>yyyy_MM.
                    SimpleDateFormat sdf = new SimpleDateFormat("yyyy_MM");
                    indexname = properties.getProperty("ES.indexName.Prefix")
                            + sdf.format(Calendar.getInstance().getTime());
                    bulk(indexname, list);
                    list.clear();
                }
            } catch (Exception e) {
                LOG.error("Failed to write record to ES", e);
            }
        }

        /** Teardown hook: closes the shared REST client. */
        @Override
        public void close() throws Exception {
            super.close();
            if (restClient != null) {
                try {
                    restClient.close();
                    LOG.info("Close the client successful in main.");
                } catch (Exception e1) {
                    LOG.error("Close the client failed in main.", e1);
                }
            }
        }
    }

    /**
     * Parses one comma-separated Kafka line into a {@link VoucherRecord}.
     * The line must carry exactly 5 fields (passed through positionally to
     * the VoucherRecord constructor).
     *
     * @throws IllegalArgumentException if the line does not have 5 fields
     *         (replaces the original {@code assert}, which is a no-op unless
     *         the JVM runs with -ea)
     */
    private static VoucherRecord getRecord(String line) {
        String[] elems = line.split(",");
        if (elems.length != 5) {
            throw new IllegalArgumentException(
                    "Expected 5 comma-separated fields but got " + elems.length + ": " + line);
        }
        return new VoucherRecord(elems[0], elems[1], elems[2], elems[3], elems[4]);
    }

    /**
     * Initializes the HBase side: loads the Hadoop/HBase site configs, performs
     * the Kerberos login (this part can be removed on non-secure clusters),
     * ensures the monthly table exists and opens it for writing.
     */
    private static void hbaseInit(Properties properties) {
        conf.addResource(new Path(properties.getProperty("HBase.core-site")), false);
        conf.addResource(new Path(properties.getProperty("HBase.hdfs-site")), false);
        conf.addResource(new Path(properties.getProperty("HBase.hbase-site")), false);

        try {
            // "Principal" must be set to the actual cluster user name.
            String userName = properties.getProperty("Principal");
            String userKeytabFile = HBaseSink.class.getClassLoader()
                    .getResource(properties.getProperty("User.keytab")).getPath();
            String krb5File = HBaseSink.class.getClassLoader()
                    .getResource(properties.getProperty("Krb5.conf")).getPath();

            // ZooKeeper authentication: ZooKeeper provides distributed
            // coordination for the HBase cluster processes.
            LoginUtil.setJaasFile(userName, userKeytabFile);
            LoginUtil.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
            LoginUtil.login(userName, userKeytabFile, krb5File, conf);
        } catch (IOException e) {
            // Keep the cause in the log instead of string-concatenating it.
            LOG.error("Failed to init security configuration", e);
            return;
        }

        String tableNamePrefix = properties.getProperty("HBase.tableName.Prefix");
        try {
            CreateTable createTable = new CreateTable();
            createTable.create(conf, tableNamePrefix);
            conn = ConnectionFactory.createConnection(conf);
            // Tables are partitioned by month: <prefix>yyyy_MM.
            DateFormat df = new SimpleDateFormat("yyyy_MM");
            String thisMonth = df.format(Calendar.getInstance().getTime());
            thisMonthTableName = TableName.valueOf(tableNamePrefix + thisMonth);

            admin = conn.getAdmin();
            table = conn.getTable(thisMonthTableName);
        } catch (IOException e) {
            LOG.error("Failed to initialize HBase connection", e);
        }
    }

    /**
     * Initializes the Elasticsearch REST client from the job properties:
     * optional Kerberos login, host-list parsing, timeout configuration,
     * default headers, and creation of the index via {@link CreateIndex}.
     *
     * "ES.esServerHost" is a comma-separated list of "ip:port" pairs pointing
     * at ES *node* instances (do not submit load to ES Master nodes; heavy
     * traffic can crash them). The HTTP port can be read from the cluster
     * manager's Elasticsearch service configuration page.
     */
    private static void esInit(Properties properties) throws Exception {
        esServerHost = properties.getProperty("ES.esServerHost");
        // parseInt avoids the needless boxing of Integer.valueOf.
        MaxRetryTimeoutMillis = Integer.parseInt(properties.getProperty("ES.MaxRetryTimeoutMillis"));
        ConnectTimeout = Integer.parseInt(properties.getProperty("ES.ConnectTimeout"));
        SocketTimeout = Integer.parseInt(properties.getProperty("ES.SocketTimeout"));
        isSecureMode = properties.getProperty("ES.isSecureMode");
        type = properties.getProperty("ES.type");
        // Secure (Kerberos) login. Constant-first equals() is null-safe when
        // the "ES.isSecureMode" property is missing.
        if ("true".equals(isSecureMode)) {
            // "Principal" must be set to the actual cluster user name.
            String userName = properties.getProperty("Principal");
            String userKeytabFile = ESSink.class.getClassLoader()
                    .getResource(properties.getProperty("User.keytab")).getPath();
            String krb5File = ESSink.class.getClassLoader()
                    .getResource(properties.getProperty("Krb5.conf")).getPath();
            // krb5.conf configures the Kerberos client; exposed as a JVM system property.
            System.setProperty("java.security.krb5.conf", krb5File);
            // JAAS file drives the keytab-based authentication.
            LoginUtil.setJaasFile(userName, userKeytabFile);
            System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
            // Security indication flag consumed by the ES client.
            System.setProperty("es.security.indication", "true");
        } else if ("false".equals(isSecureMode)) {
            System.setProperty("es.security.indication", "false");
            schema = "http";
        }

        // Build the RestClient: one HttpHost (hostname, port, scheme) per entry
        // of the "ip1:port1,ip2:port2,..." list.
        List<HttpHost> hosts = new ArrayList<HttpHost>();
        for (String host : esServerHost.split(",")) {
            String[] ipPort = host.split(":");
            hosts.add(new HttpHost(ipPort[0], Integer.parseInt(ipPort[1]), schema));
        }
        HttpHost[] httpHosts = hosts.toArray(new HttpHost[]{});
        builder = RestClient.builder(httpHosts);
        // Request callbacks:
        // 1. connect timeout (ms), 2. socket/read timeout (ms),
        // 3. maximum retry timeout for the same request (ms).
        builder = builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
            @Override
            public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                return requestConfigBuilder.setConnectTimeout(ConnectTimeout).setSocketTimeout(SocketTimeout);
            }
        }).setMaxRetryTimeoutMillis(MaxRetryTimeoutMillis);

        // Default headers sent with every request; per-request headers always
        // override defaults.
        Header[] defaultHeaders = new Header[]{new BasicHeader("Accept", "application/json"),
                new BasicHeader("Content-type", "application/json")};
        builder.setDefaultHeaders(defaultHeaders);
        // Create the client from the fully configured builder.
        restClient = builder.build();
        restClient.setHosts(httpHosts);

        CreateIndex createIndex = new CreateIndex();
        createIndex.index(restClient, properties.getProperty("ES.mapping.json"),
                properties.getProperty("ES.indexName.Prefix"));
    }

    /**
     * Sends the buffered tuples to Elasticsearch as one _bulk request.
     *
     * @param index target index name
     * @param t     tuples of (crownfontNumber, edition, coupon, pictureName);
     *              pictureName is used as the document _id
     */
    private static void bulk(String index, List<Tuple4<String, String, String, String>> t) {
        LOG.info("Enter ES bulk...");
        Gson gson = new Gson();
        Map<String, Object> esMap = new HashMap<String, Object>();
        // Build the NDJSON bulk body: one action line plus one document line
        // per tuple. StringBuilder replaces the needlessly synchronized
        // StringBuffer (purely local use).
        StringBuilder buffer = new StringBuilder();
        for (Tuple4<String, String, String, String> f : t) {
            String requestHeader = "{ \"index\" : { \"_index\" : \"" + index + "\", \"_type\" :  \"" + type
                    + "\", \"_id\" :  \"" + f._4() + "\"} }";
            esMap.put("crownfontNumber", f._1());
            esMap.put("edition", f._2());
            esMap.put("coupon", f._3());
            buffer.append(requestHeader).append("\n");
            buffer.append(gson.toJson(esMap)).append("\n");
        }
        StringEntity entity = new StringEntity(buffer.toString(), ContentType.APPLICATION_JSON);
        // NOTE(review): "UTF-8" is not a valid Content-Encoding token (that
        // header is for gzip/deflate); kept to preserve the original request
        // shape — confirm the cluster tolerates it before removing.
        entity.setContentEncoding("UTF-8");
        Map<String, String> params = Collections.singletonMap("pretty", "true");
        try {
            Response rsp = restClient.performRequest("PUT", "/_bulk", params, entity);
            if (HttpStatus.SC_OK == rsp.getStatusLine().getStatusCode()) {
                LOG.info("Bulk successful.");
            } else {
                LOG.error("Bulk failed.");
            }
            LOG.info("Bulk response entity is : " + EntityUtils.toString(rsp.getEntity()));
        } catch (Exception e) {
            LOG.error("Bulk failed, exception occurred.", e);
        }
    }
}
