/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.lee;

import akka.io.Tcp;
import com.alibaba.dcm.DnsCacheManipulator;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.assigners.ProcessingTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.triggers.CountTrigger;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.api.windowing.windows.Window;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
import org.apache.flink.util.Collector;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Date;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * This is an example of how to write streams into HBase. In this example the
 * stream will be written into a local HBase, but it is possible to adapt this
 * example for an HBase running in a cloud. You need a running local HBase with a
 * table "flinkExample" and a column "entry". If your HBase configuration does
 * not fit the hbase-site.xml in the resource folder, then you have to temporarily
 * delete this hbase-site.xml to execute the example properly.
 */
public class Main {
    final static Logger logger = LoggerFactory.getLogger(Main.class);

    public static void main(String[] args) throws Exception {

        // Streaming environment for the whole job.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka consumer configuration.
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "10.36.8.128:9092");
        kafkaProps.setProperty("auto.offset.reset", "earliest");
        kafkaProps.setProperty("fetch.max.bytes", "1048576000");
        kafkaProps.setProperty("enable.auto.commit", "False");

        // Deserialize Avro-encoded Kafka records into TCP objects.
        AvroDeserializationSchema<TCP> avroSchema = new AvroDeserializationSchema<>(TCP.class);

        FlinkKafkaConsumer010<TCP> consumer =
                new FlinkKafkaConsumer010<>("avroTest", avroSchema, kafkaProps);
        consumer.setStartFromEarliest();

        // Key each packet by an order-independent (srcIP:srcPort, dstIP:dstPort) pair so
        // that both directions of one TCP conversation land in the same session window.
        DataStream<SessionResult> sessions = env
                .addSource(consumer)
                .keyBy(new KeySelector<TCP, String>() {
                    @Override
                    public String getKey(TCP tcp) throws Exception {
                        String src = tcp.getSrcIP().toString() + Integer.toString(tcp.getSrcPort());
                        String dst = tcp.getDstIP().toString() + Integer.toString(tcp.getDstPort());
                        // Concatenate in a canonical order so A->B and B->A share a key.
                        return src.compareTo(dst) > 0 ? src + dst : dst + src;
                    }
                })
                .window(ProcessingTimeSessionWindows.withGap(Time.seconds(5)))
                .trigger(SessionDataTrigger.New())
                .apply(new SessionWindowFunction());

        sessions.print();

        env.execute("Flink avro kafka test!");
    }

    /*
     * WindowFunction that folds all TCP packets of one session window into a
     * single SessionResult: the endpoints (normalized so the intranet host is
     * recorded as the source), the session start/end times, and the number of
     * bytes sent to / received from the external host.
     */
    public static class SessionWindowFunction implements WindowFunction<TCP,SessionResult,String,TimeWindow>{
        @Override
        public void apply(String key, TimeWindow timeWindow, Iterable<TCP> iterable, Collector<SessionResult> collector) throws Exception {
            SessionResult sr = new SessionResult();
            TCP first = iterable.iterator().next();
            // Normalize direction: whichever endpoint is on the internal network
            // becomes the SessionResult's "src" side.
            if (!isInner(first.getDstIP().toString())) {
                sr.setSrcIP(first.getSrcIP().toString());
                sr.setSrcPort(first.getSrcPort());
                sr.setDstIP(first.getDstIP().toString());
                sr.setDstPort(first.getDstPort());
            } else {
                sr.setSrcIP(first.getDstIP().toString());
                sr.setSrcPort(first.getDstPort());
                sr.setDstIP(first.getSrcIP().toString());
                sr.setDstPort(first.getSrcPort());
            }
            sr.setStartTime(first.getTime());
            for (TCP t : iterable) {
                if (t.getSrcIP().toString().compareTo(sr.getSrcIP()) == 0) {
                    // Packet sent from the internal host to the external network.
                    sr.setSendLen(sr.getSendLen() + t.getLength());
                } else if (t.getDstIP().toString().compareTo(sr.getSrcIP()) == 0) {
                    // Packet received from the external network.
                    // BUGFIX: was accumulating onto getSendLen(), which both dropped
                    // previously received bytes and mixed in the send counter.
                    sr.setRecLen(sr.getRecLen() + t.getLength());
                } else {
                    // Packet does not belong to this session key; keying upstream went wrong.
                    logger.error("你的窗口中出现了不属于该窗口的数据包，请尽快处理。");
                }
                // The iterable is in arrival order, so the last packet's time is the end time.
                sr.setEndTime(t.getTime());
            }
            collector.collect(sr);
        }
    }

    /*
     * Returns true when the given dotted-quad IPv4 address lies in one of the
     * RFC 1918 private ranges: 10.0.0.0/8, 172.16.0.0/12 or 192.168.0.0/16.
     * Malformed input (null, wrong field count, non-numeric or out-of-range
     * octets) yields false.
     *
     * BUGFIX: the previous regex used Matcher.find(), which matched substrings
     * (e.g. "110.2.3.4" was classified as internal), treated ALL of 172.x.x.x
     * and 192.x.x.x as private, and its octet alternation could not even match
     * legal values such as 226.
     */
    public  static boolean isInner(String ip)
    {
        if (ip == null) {
            return false;
        }
        String[] parts = ip.trim().split("\\.");
        if (parts.length != 4) {
            return false;
        }
        int[] octets = new int[4];
        for (int i = 0; i < 4; i++) {
            try {
                octets[i] = Integer.parseInt(parts[i]);
            } catch (NumberFormatException e) {
                return false;
            }
            if (octets[i] < 0 || octets[i] > 255) {
                return false;
            }
        }
        return octets[0] == 10
                || (octets[0] == 172 && octets[1] >= 16 && octets[1] <= 31)
                || (octets[0] == 192 && octets[1] == 168);
    }


    /**
     * OutputFormat that writes packet records into the HBase table "pcap_attack".
     * Each record is a String[] with at least 29 fields, distributed over the
     * column families origin/ethernet/ipv4/tcp by position, plus a constant
     * "Attack" marker column.
     */
    private static class HBaseOutputFormat implements OutputFormat<String[]> {
        private static final long serialVersionUID = 1L;

        // Column qualifiers, index-aligned with the incoming record array.
        private String[] indexs = {"length", "time",
                "SrcMAC", "DstMAC", "NetworkProtocol",
                "IHL", "TOS", "id", "Flags", "FragOffset", "TTL", "SrcIP", "DstIP", "TransportProtol",
                "SrcPort", "DstPort", "Seq", "Ack", "FIN", "SYN", "RST", "PSH", "ACK", "URG", "ECE", "CWR", "NS", "Window", "CheckSum"};

        private org.apache.hadoop.conf.Configuration conf = null;
        // Kept as a field so close() can release it (it was previously a local
        // in open() and therefore leaked on every task shutdown).
        private Connection connection = null;
        private Table table = null;
        private String taskNumber = null;
        private int rowNumber = 0;
        // Row-key component so rows from different job runs do not collide.
        private long time = new Date().getTime();

        @Override
        public void configure(Configuration parameters) {
            conf = HBaseConfiguration.create();
            // BUGFIX: Hadoop Configuration keys are case-sensitive; the former
            // "...clientport" spelling was silently ignored.
            conf.set("hbase.zookeeper.property.clientPort", "2181");
            conf.set("hbase.zookeeper.quorum", "10.36.8.138:2181,10.36.8.139:2181,10.36.8.140:2181");
            // Resolve the region-server host names without touching /etc/hosts.
            DnsCacheManipulator.setDnsCache("hadoop-1", "10.36.8.138");
            DnsCacheManipulator.setDnsCache("hadoop-2", "10.36.8.139");
            DnsCacheManipulator.setDnsCache("hadoop-3", "10.36.8.140");
        }

        @Override
        public void open(int taskNumber, int numTasks) throws IOException {
            connection = ConnectionFactory.createConnection(conf);
            table = connection.getTable(TableName.valueOf("pcap_attack"));
            this.taskNumber = String.valueOf(taskNumber);
        }

        @Override
        public void writeRecord(String[] records) throws IOException {
            // Drop records that do not carry all 29 expected fields.
            if (records.length < 29) {
                logger.warn("too short!");
                return;
            }
            Put put = new Put(Bytes.toBytes(taskNumber + time + rowNumber));
            for (int i = 0; i < 29; i++) {
                // Pick the column family by field position.
                final String family;
                if (i < 2) {
                    family = "origin";
                } else if (i < 5) {
                    family = "ethernet";
                } else if (i < 14) {
                    family = "ipv4";
                } else {
                    family = "tcp";
                }
                put.add(Bytes.toBytes(family), Bytes.toBytes(indexs[i]),
                        Bytes.toBytes(records[i]));
            }
            put.add(Bytes.toBytes("NormalOrAttack"), Bytes.toBytes("type"),
                    Bytes.toBytes("Attack"));
            rowNumber++;
            table.put(put);
        }

        @Override
        public void close() throws IOException {
            // Guard against a failed open(); release the table before the
            // connection that produced it.
            if (table != null) {
                table.close();
            }
            if (connection != null) {
                connection.close();
            }
        }

    }
}