package com.example.demo.spark;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.elasticsearch.spark.sql.EsSparkSQL;

import scala.Tuple2;
import scala.collection.mutable.HashMap;

/**
 * Created by dongkelun on 2020/11/10 16:20
 * <p>
 * spark-submit --class com.inspur.info.security.spark.JavaCSV2ESAndHbase  --master yarn --deploy-mode client \
 * --principal spark/indata-10-111-24-5.indata.com@INDATA.COM --keytab /etc/security/keytabs/spark.service.keytab \
 * --conf "spark.driver.extraJavaOptions=-Dlog4j.configuration=file:log4j.properties" \
 * --jars elasticsearch-hadoop-6.8.2.jar  ./info-security-1.0.jar \
 * /data/csv/data dkl_hbase_test /custom_demo/demo indata-10-111-24-4.indata.com,indata-10-111-24-5.indata.com,indata-10-111-24-6.indata.com sysadmin-realm &lt;es_password&gt;
 */
@SuppressWarnings("all")
public class JavaCSV2ESAndHbase {

    public static void main(String[] args) {
        if (args.length < 6) {
            System.err.println("Usage: JavaCSV2ESAndHbase <csv dir> <hbase_table_name> <es resource> <es nodes> <es user> <es password>");
            System.exit(1);
        }

        String srcPath = args[0];
        String hbaseTable = args[1];
        String resource = args[2];
        String nodes = args[3];
        String user = args[4];
        String password = args[5];

        SparkSession spark = SparkSession.builder()
                .appName("JavaCSV2ESAndHbase")
                .config("cluster.name", "es")
                .config("es.index.auto.create", "true")
                .config("es.nodes", nodes)
                .config("es.port", "9200")
                .config("es.index.read.missing.as.empty", "true")
                .config("es.net.http.auth.user", user) // ES username
                .config("es.net.http.auth.pass", password) // ES password
                .config("es.nodes.wan.only", "true")
                .getOrCreate();

        // Drop rows missing any of the columns that later form the ES document id
        // and the HBase row key; a null there would NPE in save2ES/save2HBase.
        Dataset<Row> df = spark.read().schema(createSchema()).csv(srcPath)
                .where(functions.col("mobile").isNotNull())
                .where(functions.col("ac0018").isNotNull())
                .where(functions.col("tracking_number").isNotNull());

        save2ES(df, resource);
        //readES(spark, resource);

        String rowKeyColString = "mobile,ac0018,tracking_number";
        //save2HBase(df, hbaseTable, rowKeyColString);
        //readHBase(spark, hbaseTable);

        spark.stop();
    }

    /**
     * Builds the CSV schema: every column is a nullable string.
     *
     * @return StructType with one StringType field per name in the fixed column list
     */
    private static StructType createSchema() {
        String schemaString = "dbname,ac0018,name,mobile,tel,addr,org_addr,freight,totalcommodityvalue," +
                "totalorderamount,paytime,buyerid,platform,email,refundamount,discountamount," +
                "ordersubmissiontime,shippedqty,univalent,unitDiscountAmount,barcode,sku," +
                "tradename,tradename_category,brand,tracking_number,express_company";

        List<StructField> fields = new ArrayList<>();
        for (String fieldName : schemaString.split(",")) {
            fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
        }
        return DataTypes.createStructType(fields);
    }

    /**
     * Writes a projection of the DataFrame to Elasticsearch.
     * <p>
     * The "id" column (mobile + ac0018 + tracking_number) is mapped to the ES
     * document {@code _id} via {@code es.mapping.id}, so re-running the job
     * overwrites documents instead of duplicating them.
     *
     * @param df       source DataFrame (must contain mobile, ac0018, tracking_number, name, tradename, addr)
     * @param resource ES resource ("index/type")
     */
    private static void save2ES(Dataset<Row> df, String resource) {
        HashMap<String, String> map = new HashMap<>();
        map.put("es.mapping.id", "id");
        df = df.withColumn("id", functions.concat(functions.col("mobile"), functions.col("ac0018"), functions.col("tracking_number")));
        EsSparkSQL.saveToEs(df.select("id", "mobile", "name", "tradename", "addr", "ac0018", "tracking_number"), resource, map);
        System.out.println("============DF写入ES成功！！！=================");
    }

    /**
     * Reads the ES resource back: first the full index, then a query-DSL
     * filtered read (match on a fixed sample mobile number) for demonstration.
     *
     * @param spark    active session
     * @param resource ES resource ("index/type")
     */
    private static void readES(SparkSession spark, String resource) {
        Dataset<Row> resDf = EsSparkSQL.esDF(spark, resource);
        System.out.println("============esDF读取结果如下=================");
        System.out.println("es 总数：" + resDf.count());
        resDf.show();
        String esQuery = "{\n" +
                "  \"query\": {\n" +
                "     \"match\": {\n" +
                "        \"mobile\":\"15685191888\"\n" +
                "      }\n" +
                "   }\n" +
                "}";
        System.out.println(esQuery);
        Dataset<Row> resDfByQuery = EsSparkSQL.esDF(spark, resource, esQuery);
        System.out.println("====esDF根据查询条件读取结果如下（查询条件 mobile='15685191888'）====");
        resDfByQuery.show();
    }


    /**
     * Saves the DataFrame into an existing HBase table (column family "cf").
     * <p>
     * The row key is the concatenation of the columns named in
     * {@code rowKeyColString}; the "mobile" component is reversed —
     * presumably to spread sequential phone numbers across regions and
     * avoid write hotspotting (TODO confirm). Row-key columns are not
     * written as cells; all remaining columns are.
     *
     * @param df              source DataFrame
     * @param outTable        target HBase table name (must already exist with family "cf")
     * @param rowKeyColString comma-separated list of row-key column names
     */
    private static void save2HBase(Dataset<Row> df, String outTable, String rowKeyColString) {
        String[] columns = df.columns();
        String[] rowKeyCols = rowKeyColString.split(",");
        // Cell columns = all columns minus the row-key columns.
        Set<String> sets = new HashSet<>(Arrays.asList(columns));
        for (String rowKeyCol : rowKeyCols) {
            sets.remove(rowKeyCol);
        }
        try {
            Configuration configuration = HBaseConfiguration.create();
            configuration.set(TableOutputFormat.OUTPUT_TABLE, outTable);
            Job job = Job.getInstance(configuration);
            job.setOutputKeyClass(ImmutableBytesWritable.class);
            // The values we emit are Puts (Mutations), not Results.
            job.setOutputValueClass(Put.class);
            job.setOutputFormatClass(TableOutputFormat.class);

            JavaPairRDD<ImmutableBytesWritable, Put> resultRdd = df.toJavaRDD().mapToPair(row -> {
                StringBuilder rowKeyBuilder = new StringBuilder();
                for (String rowKeyCol : rowKeyCols) {
                    String part = row.getAs(rowKeyCol).toString();
                    if (rowKeyCol.equals("mobile")) {
                        part = new StringBuilder(part).reverse().toString();
                    }
                    rowKeyBuilder.append(part);
                }
                byte[] rowKey = Bytes.toBytes(rowKeyBuilder.toString());
                Put put = new Put(rowKey);

                for (String col : sets) {
                    String value = row.<String>getAs(col);
                    if (value == null) {
                        continue; // skip null cells; HBase stores sparse rows
                    }
                    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(col), Bytes.toBytes(value));
                }
                // TableOutputFormat ignores the key, but carrying the row key keeps the pair meaningful.
                return new Tuple2<>(new ImmutableBytesWritable(rowKey), put);
            });

            resultRdd.saveAsNewAPIHadoopDataset(job.getConfiguration());
            System.out.println("hbase 写入成功！！！");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Reads the HBase table back into a DataFrame and shows it.
     * <p>
     * The row key becomes the "user_id" column; the remaining columns are read
     * from family "cf" using the fixed schema below (a subset of the CSV schema).
     *
     * @param spark      active session
     * @param inputTable HBase table to scan
     */
    private static void readHBase(SparkSession spark, String inputTable) {
        try {
            Configuration configuration = HBaseConfiguration.create();
            configuration.set(TableInputFormat.INPUT_TABLE, inputTable);

            byte[] family_bytes = Bytes.toBytes("cf");
            // Scan restricted to family "cf"; without a scan it would be a full "select *".
            Scan scan = new Scan();
            scan.addFamily(family_bytes);
            // To read a single column instead:
            // scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("express_company"));
            ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
            // TableInputFormat expects the scan serialized as Base64 in the config.
            String scanToString = new String(Base64.getEncoder().encode(proto.toByteArray()));
            configuration.set(TableInputFormat.SCAN, scanToString);

            JavaSparkContext jsc = JavaSparkContext.fromSparkContext(spark.sparkContext());
            // Load the HBase table as an RDD of (row key, Result) pairs.
            JavaPairRDD<ImmutableBytesWritable, Result> HBaseRdd =
                    jsc.newAPIHadoopRDD(configuration, TableInputFormat.class, ImmutableBytesWritable.class, Result.class);

            String schemaString = "dbname,ac0018,tel,org_addr,freight,totalcommodityvalue," +
                    "totalorderamount,paytime,buyerid,platform,email,refundamount,discountamount," +
                    "ordersubmissiontime,shippedqty,univalent,unitDiscountAmount,barcode,sku," +
                    "tradename_category,brand,tracking_number,express_company";
            String[] schemaCols = schemaString.split(",");

            // Convert each Result into a Row: row key first, then one string per schema column.
            JavaRDD<Row> HBaseRow = HBaseRdd.map((Function<Tuple2<ImmutableBytesWritable, Result>, Row>) tuple2 -> {
                Result result = tuple2._2;
                ArrayList<String> columns = new ArrayList<>();
                columns.add(Bytes.toString(result.getRow()));
                for (String col : schemaCols) {
                    // Bytes.toString(null) yields null for absent cells.
                    columns.add(Bytes.toString(result.getValue(family_bytes, Bytes.toBytes(col))));
                }
                return RowFactory.create(columns.toArray());
            });

            // Schema: "user_id" (the row key) followed by the cf columns, all nullable strings.
            List<StructField> fields = new ArrayList<>();
            fields.add(DataTypes.createStructField("user_id", DataTypes.StringType, true));
            for (String fieldName : schemaCols) {
                fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
            }
            StructType schema = DataTypes.createStructType(fields);

            Dataset<Row> HBaseDF = spark.createDataFrame(HBaseRow, schema);

            System.out.println("============Spark读取Hbase结果如下=================");
            HBaseDF.show();

        } catch (Exception e) {
            e.printStackTrace();
        }
    }

}
