/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.lee;

import com.alibaba.dcm.DnsCacheManipulator;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
import org.apache.flink.util.Collector;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Date;
import java.util.Properties;

/**
 * This is an example of how to write streams into HBase. In this example the
 * stream is written into a local HBase instance, but it is possible to adapt
 * this example for an HBase cluster running in a cloud. You need a running
 * HBase with a table "pcap_attack". If your HBase configuration does not match
 * the hbase-site.xml in the resource folder, you have to temporarily delete
 * that hbase-site.xml to execute the example properly.
 */
public class PcapETL {
    private static final Logger logger = LoggerFactory.getLogger(PcapETL.class);

    /**
     * Entry point: consumes pipe-delimited pcap records from the Kafka topic
     * {@code pcap_format3}, splits each line into its fields, and writes the
     * resulting arrays to HBase via {@link HBaseOutputFormat}.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        // set up the streaming execution environment
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "10.36.8.128:9092");
        properties.setProperty("auto.offset.reset", "earliest");
        properties.setProperty("fetch.max.bytes", "1048576000");
        // Boolean config values should use the canonical lowercase literal.
        properties.setProperty("enable.auto.commit", "false");

        FlinkKafkaConsumer010<String> consumer =
                new FlinkKafkaConsumer010<>("pcap_format3", new SimpleStringSchema(), properties);
        consumer.setStartFromEarliest();

        DataStream<String[]> stream = env
                .addSource(consumer)
                .flatMap(new FlatMapFunction<String, String[]>() {
                    @Override
                    public void flatMap(String value, Collector<String[]> out)
                            throws Exception {
                        // limit -1 keeps trailing empty fields; with the default
                        // limit, records whose last columns are blank would be
                        // silently dropped downstream as "too short".
                        out.collect(value.split("\\|", -1));
                    }
                });

        stream.writeUsingOutputFormat(new HBaseOutputFormat());

        env.execute();
    }


    /**
     * An {@link OutputFormat} that writes each pipe-delimited pcap record into
     * the HBase table {@code pcap_attack}. Fields 0-1 go to the {@code origin}
     * family, 2-4 to {@code ethernet}, 5-13 to {@code ipv4} and the remainder to
     * {@code tcp}; every row is additionally tagged "Attack" in the
     * {@code NormalOrAttack} family. Row keys are built from the subtask number,
     * the format's creation time and a per-subtask counter, so parallel subtasks
     * never collide.
     */
    private static class HBaseOutputFormat implements OutputFormat<String[]> {

        private static final long serialVersionUID = 1L;

        /** Column qualifiers, positionally matched to the split record fields. */
        private String[] indexs = {"length", "time",
                "SrcMAC", "DstMAC", "NetworkProtocol",
                "IHL", "TOS", "id", "Flags", "FragOffset", "TTL", "SrcIP", "DstIP", "TransportProtol",
                "SrcPort", "DstPort", "Seq", "Ack", "FIN", "SYN", "RST", "PSH", "ACK", "URG", "ECE", "CWR", "NS", "Window", "CheckSum"};

        private org.apache.hadoop.conf.Configuration conf = null;
        // Kept as a field so close() can release it; previously the connection
        // was a local in open() and leaked on every task.
        private Connection connection = null;
        private Table table = null;
        private String taskNumber = null;
        private int rowNumber = 0;
        private long time = new Date().getTime();

        @Override
        public void configure(Configuration parameters) {
            conf = HBaseConfiguration.create();
            // The property key is case-sensitive: "clientPort" with a capital P.
            // The previous lowercase spelling was silently ignored by HBase.
            conf.set("hbase.zookeeper.property.clientPort", "2181");
            conf.set("hbase.zookeeper.quorum", "10.36.8.138:2181,10.36.8.139:2181,10.36.8.140:2181");
            // Map cluster host names to IPs so they resolve without /etc/hosts entries.
            DnsCacheManipulator.setDnsCache("hadoop-1", "10.36.8.138");
            DnsCacheManipulator.setDnsCache("hadoop-2", "10.36.8.139");
            DnsCacheManipulator.setDnsCache("hadoop-3", "10.36.8.140");
        }

        @Override
        public void open(int taskNumber, int numTasks) throws IOException {
            connection = ConnectionFactory.createConnection(conf);
            table = connection.getTable(TableName.valueOf("pcap_attack"));
            this.taskNumber = String.valueOf(taskNumber);
        }

        /** Picks the column family for the i-th record field. */
        private static String familyFor(int i) {
            if (i < 2) {
                return "origin";
            }
            if (i < 5) {
                return "ethernet";
            }
            if (i < 14) {
                return "ipv4";
            }
            return "tcp";
        }

        @Override
        public void writeRecord(String[] records) throws IOException {
            // Skip malformed records rather than failing the whole job.
            if (records.length < indexs.length) {
                logger.warn("record too short: {} fields, expected {}", records.length, indexs.length);
                return;
            }
            Put put = new Put(Bytes.toBytes(taskNumber + time + rowNumber));
            for (int i = 0; i < indexs.length; i++) {
                put.add(Bytes.toBytes(familyFor(i)), Bytes.toBytes(indexs[i]),
                        Bytes.toBytes(records[i]));
            }
            put.add(Bytes.toBytes("NormalOrAttack"), Bytes.toBytes("type"),
                    Bytes.toBytes("Attack"));
            rowNumber++;
            table.put(put);
        }

        @Override
        public void close() throws IOException {
            // Release both the table and the underlying connection; the original
            // closed only the table, leaking the connection (and its ZK session).
            if (table != null) {
                table.close();
            }
            if (connection != null) {
                connection.close();
            }
        }

    }
}