package com.asap.demo.rete;


import com.asap.demo.ContextInfo;
import com.asap.demo.function.dealMapFunction;
import com.asap.demo.function.dealStreamProcessFunction;
import com.asap.demo.function.dealStreamProcessFunctionList;
import com.asap.demo.model.BeanField;
import com.asap.demo.sourcefunc.MysqlSourceFunction1;
import com.asap.demo.table.RuleParse;
import com.asap.demo.utils.Constants;
import com.asap.demo.utils.Utils;
import com.asap.rule.StandardEvent;
import com.asap.rule.orm.DbFetcher;
import com.asap.rule.util.PropTransformMap;
import com.asap.rule.util.RuleReader;
import org.apache.flink.api.common.eventtime.*;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.ListTypeInfo;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.types.Row;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;
import java.sql.Timestamp;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

/**
 * Flink streaming demo wiring: rule configuration is read from MySQL and
 * broadcast to all tasks, raw events are consumed from Kafka, and the
 * rule-processed stream is exposed as a Table for windowed SQL aggregation
 * (Blink planner).
 */
public class ReteDemo6 {

	private static final Logger logger = LoggerFactory.getLogger(ReteDemo6.class);
	//3085654
	//kafka-run-class kafka.tools.ConsumerOffsetChecker --zookeeper 10.28.184.25:1813 --group temporal2 --topic flink_pressure_test9

	/**
	 * Entry point. Builds and executes the whole pipeline:
	 * MySQL rule source -> broadcast stream; Kafka source -> StandardEvent
	 * stream -> broadcast-connected rule processing -> BeanField stream ->
	 * Table -> SQL queries printed as retract streams.
	 *
	 * @param args unused
	 * @throws Exception if pipeline construction or execution fails
	 */
	public static void main(String[] args) throws Exception {

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Blink planner in streaming mode.
		EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
				.useBlinkPlanner()
				.inStreamingMode()
				.build();
		StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(env, blinkStreamSettings);
		// Evict idle query state between 60 and 120 minutes to bound state size.
		blinkStreamTableEnv.getConfig().setIdleStateRetentionTime(Time.minutes(60), Time.minutes(120));


		blinkStreamTableEnv.getConfig()        // access high-level configuration
				.getConfiguration()   // set low-level key-value options
				.setString("table.optimizer.distinct-agg.split.enabled", "true");  // enable distinct agg split
		// access flink configuration
		Configuration configuration = blinkStreamTableEnv.getConfig().getConfiguration();
		// set low-level key-value options
		configuration.setString("table.exec.mini-batch.enabled", "true"); // local-global aggregation depends on mini-batch is enabled
		configuration.setString("table.exec.mini-batch.allow-latency", "5s");
		configuration.setString("table.exec.mini-batch.size", "5000");
		configuration.setString("table.optimizer.agg-phase-strategy", "TWO_PHASE"); // enable two-phase, i.e. local-global aggregation
		//env.setParallelism(3);
		env.enableCheckpointing(5000);  // checkpoint every 5000 ms
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		Properties browseProperties = getProperties();
		PropTransformMap.getInstance().readConfigMap("/home/asap/wbh/conf/cfg.properties");

		DbFetcher dbFetcher = getDbFetcher();
		RuleParse ruleParse = getRuleParse();

		logger.info("size:" + ruleParse.getTableDefine().getJsonFieldList());
		// 1. Read rule configuration rows from MySQL.
		DataStream<List<String>> conf = env.addSource(new MysqlSourceFunction1(dbFetcher));

		// 2. MapStateDescriptor describing the key/value types of the broadcast rule state.
		MapStateDescriptor<String, List<String>> ruleStateDescriptor = new MapStateDescriptor<>(ContextInfo.RULE_SBROAD_CAST_STATE
				, BasicTypeInfo.STRING_TYPE_INFO
				, new ListTypeInfo<>(String.class));
		// 3. Broadcast the configuration stream so every downstream task sees rule updates.
		final BroadcastStream<List<String>> confBroadcast = conf.broadcast(ruleStateDescriptor);
		DataStream<String> dataStream = env
				.addSource(new FlinkKafkaConsumer<>(
						"flink_pressure_test9",
						new SimpleStringSchema(),
						browseProperties
				)).setParallelism(1);
		DataStream<BeanField> kafkaData = getStandardEventDataStream(confBroadcast, dataStream)
				.map(new dealMapFunction(ruleParse)).setParallelism(6);
		Table inputTable = getTable(blinkStreamTableEnv, kafkaData);
		//blinkStreamTableEnv.createTemporaryView("InputTable", inputTable);
		//executeSql(blinkStreamTableEnv);
		executeSqlForTable(inputTable, blinkStreamTableEnv);
		env.execute("Broadcast test kafka");
	}

	/**
	 * Runs the demo SQL queries directly against the inlined {@link Table}
	 * (string-concatenating a Table embeds its auto-generated name in the SQL).
	 *
	 * Emits two retract streams to stdout:
	 * <ul>
	 *   <li>a global {@code count(1)} over the input table, and</li>
	 *   <li>all rows whose ({@code deviceType='TDA'}, {@code snowId}) key occurs
	 *       more than twice within a 10-minute tumbling window on
	 *       {@code createTime}.</li>
	 * </ul>
	 *
	 * @param inputTable          table created from the Kafka-derived BeanField stream
	 * @param blinkStreamTableEnv table environment used to run the queries
	 */
	private static void executeSqlForTable(Table inputTable, StreamTableEnvironment blinkStreamTableEnv) {

		// Global row count; printed as a retract stream because the count updates.
		String query = "select count(1) from " + inputTable;
		Table query1 = blinkStreamTableEnv.sqlQuery(query);
		blinkStreamTableEnv.toRetractStream(query1, Row.class).print("count(1)==");

		// Self-join: keep every row whose (deviceType, snowId) key appears more
		// than twice inside a 10-minute tumbling window keyed on createTime.
		// (The previously present querySQL7/querySQL8 variants were never
		// executed and have been removed as dead code.)
		String querySQL6 = "select b.* from (select deviceType,snowId ,count(1) as cnt from TABLE(\n" +
				"TUMBLE(TABLE " + inputTable + ", DESCRIPTOR(createTime), INTERVAL '10' MINUTES))" +
				" where deviceType='TDA' GROUP BY window_start, window_end,deviceType,snowId having count(1)>2) a INNER JOIN " + inputTable + " b" +
				" ON a.snowId =b.snowId ";

		Table resultTableIps3 = blinkStreamTableEnv.sqlQuery(querySQL6);
		blinkStreamTableEnv.toRetractStream(resultTableIps3, BeanField.class).print("BeanField==");
	}

	/**
	 * Builds the Kafka consumer configuration used by the event source.
	 *
	 * @return consumer properties: bootstrap servers, consumer group id, and
	 *         offset-reset policy
	 */
	private static Properties getProperties() {
		Properties kafkaProps = new Properties();
		kafkaProps.setProperty("bootstrap.servers", "10.28.184.25:9093");
		kafkaProps.setProperty("group.id", "temporal2");
		kafkaProps.setProperty("auto.offset.reset", "latest");
		return kafkaProps;
	}

	/**
	 * Loads the rule/table definition from the JSON config file on local disk.
	 *
	 * @return the parsed rule configuration
	 */
	private static RuleParse getRuleParse() {
		RuleParse parsed = new RuleParse();
		parsed.parseData("/home/asap/wbh/conf/cfg.json");
		return parsed;
	}

	/**
	 * Builds the JDBC/connection-pool configuration map and wraps it in a
	 * {@link DbFetcher}.
	 *
	 * NOTE(review): credentials and the JDBC URL are hard-coded here; consider
	 * externalizing them alongside cfg.properties.
	 *
	 * @return a DbFetcher backed by the MySQL SSA database
	 */
	private static DbFetcher getDbFetcher() {
		Map<String, String> configMap = new HashMap<>();
		configMap.put(Constants.DB_JDBC_USER, "root");
		configMap.put(Constants.DB_JDBC_PASSWD, "1qazXSW@3edc");
		configMap.put(Constants.DB_JDBC_URL, "jdbc:mysql://10.28.184.25:3306/SSA?useUnicode=true&characterEncoding=utf-8");
		configMap.put(Constants.DB_JDBC_DRIVER, "com.mysql.jdbc.Driver");
		configMap.put(Constants.INITAL_POOL_SIZE, "10");
		configMap.put(Constants.MIN_POOL_SIZE, "5");
		// The original code put MAX_IDLE_TIME twice ("50" then "60"); the first
		// write was a dead store and has been removed. "60" is kept to preserve
		// behavior — the "50" entry was presumably meant for a different pool
		// key (e.g. a max-pool-size setting); verify against DbFetcher's config.
		configMap.put(Constants.MAX_IDLE_TIME, "60");
		configMap.put(Constants.MAX_STATE_ELEMENTS, "100");
		return new DbFetcher(configMap);
	}

	/**
	 * Variant of {@code executeSqlForTable} that queries a registered view named
	 * {@code InputTable}.
	 *
	 * Precondition: the caller must have registered the view via
	 * {@code createTemporaryView("InputTable", ...)} — note that both the
	 * registration and the call to this method are currently commented out in
	 * {@code main()}, so this method is presently unused.
	 *
	 * Emits two retract streams to stdout: a global {@code count(1)}, and the
	 * rows matching keys that occur more than twice per 10-minute tumbling
	 * window for {@code deviceType='TDA'}.
	 *
	 * @param blinkStreamTableEnv table environment used to run the queries
	 */
	private static void executeSql(StreamTableEnvironment blinkStreamTableEnv) {

		// Global row count over the registered view.
		String query = "select count(1) from InputTable";
		Table query1 = blinkStreamTableEnv.sqlQuery(query);
		blinkStreamTableEnv.toRetractStream(query1, Row.class).print("count(1)==");

		// Join a windowed scan of the view back to the (deviceType, snowId)
		// keys that occur more than twice inside a 10-minute tumbling window.
		// (The previously present querySQL6/querySQL7 variants were never
		// executed and have been removed as dead code.)
		String querySQL8 = "select b.* from (select deviceType,snowId ,count(1) as cnt from TABLE(\n" +
				"TUMBLE(TABLE InputTable, DESCRIPTOR(createTime), INTERVAL '10' MINUTES))" +
				" where deviceType='TDA' GROUP BY window_start, window_end,deviceType,snowId having count(1)>2) a INNER JOIN (SELECT * FROM TABLE(TUMBLE(TABLE InputTable, DESCRIPTOR(createTime), INTERVAL '10' MINUTES)) where 1=1) b" +
				" ON a.snowId =b.snowId ";

		Table resultTableIps3 = blinkStreamTableEnv.sqlQuery(querySQL8);
		blinkStreamTableEnv.toRetractStream(resultTableIps3, Row.class).print("BeanField==");
	}

	/**
	 * Parses the raw Kafka payload stream into {@code StandardEvent}s, assigns
	 * event-time timestamps and watermarks from each event's CREATE_TIME field
	 * (allowing 20 seconds of out-of-orderness), keys the stream by
	 * DEVICE_PARENT_TYPE + SNOW_ID, and connects it to the broadcast rule
	 * stream for rule evaluation.
	 *
	 * (A commented-out legacy {@code AssignerWithPeriodicWatermarks}
	 * implementation, superseded by {@code WatermarkStrategy}, was removed.)
	 *
	 * @param confBroadcast broadcast stream of rule configuration rows
	 * @param dataStream    raw event payloads from Kafka
	 * @return the rule-processed, keyed StandardEvent stream
	 */
	private static DataStream<StandardEvent> getStandardEventDataStream(BroadcastStream<List<String>> confBroadcast, DataStream<String> dataStream) {

		// Parse each raw payload into a StandardEvent.
		DataStream<StandardEvent> dataStreamInfo = dataStream.map(new MapFunction<String, StandardEvent>() {
			@Override
			public StandardEvent map(String value) throws Exception {
				StandardEvent standardEvent = StandardEvent.parse(value);
				return standardEvent;
			}
		}).setParallelism(2);

		// Event time comes from the event's CREATE_TIME field; the watermark
		// lags the max seen timestamp by 20 seconds to tolerate late arrivals.
		WatermarkStrategy<StandardEvent> strategy = WatermarkStrategy
				.<StandardEvent>forBoundedOutOfOrderness(Duration.ofSeconds(20))
				.withTimestampAssigner((standardEvent, timestamp) -> Timestamp.valueOf(Utils.transforDate(standardEvent.getField("CREATE_TIME"))).getTime());

		DataStream<StandardEvent> withTimestampsAndWatermarks =
				dataStreamInfo.assignTimestampsAndWatermarks(strategy).setParallelism(3);

		return withTimestampsAndWatermarks
				.keyBy(new KeySelector<StandardEvent, String>() {
					@Override
					public String getKey(StandardEvent event) throws Exception {
						// Partition by device parent type + snowflake id so all
						// events for one device/id land on the same task.
						StringBuilder sb = new StringBuilder();
						sb.append(event.getField("DEVICE_PARENT_TYPE"))
								.append(event.getField("SNOW_ID"));
						return sb.toString();
					}
				})
				.connect(confBroadcast)
				.process(
						new dealStreamProcessFunctionList()
				).setParallelism(4);
	}

	/**
	 * Converts the BeanField stream into a Table using the explicit schema
	 * built by {@code getBuildSchema()}.
	 *
	 * @param blinkStreamTableEnv table environment performing the conversion
	 * @param kafkaData1          stream of parsed BeanField rows
	 * @return a Table view over the stream
	 */
	private static Table getTable(StreamTableEnvironment blinkStreamTableEnv, DataStream<BeanField> kafkaData1) {
		Schema schema = getBuildSchema();
		return blinkStreamTableEnv.fromDataStream(kafkaData1, schema);
	}

	/**
	 * Builds the explicit Table schema mapped onto the BeanField stream.
	 *
	 * Column names mirror BeanField's fields; ruleId, snowId and createTime are
	 * NOT NULL, (snowId, createTime) is the primary key, and the watermark is
	 * carried over from the DataStream via SOURCE_WATERMARK().
	 *
	 * @return the schema used by {@code fromDataStream}
	 */
	private static Schema getBuildSchema() {
		return Schema.newBuilder()
				.column("ruleId", DataTypes.STRING().notNull())
				.column("snowId", DataTypes.STRING().notNull())
				.column("account", "STRING")
				.column("action", "INTEGER")
				.column("actionDesc", "STRING")
				.column("assetIp", "STRING")
				.column("attackStage", "STRING")
				.column("ausIndex", "STRING")
				.column("averageByteFlow", "INTEGER")
				.column("averagePackageFlow", "INTEGER")
				.column("baseline", "STRING")
				.column("bizId", "STRING")
				.column("bizName", "STRING")
				.column("confidence", "INTEGER")
				// Event time attribute; also part of the primary key below.
				.column("createTime", DataTypes.TIMESTAMP_LTZ(3).notNull())
				.column("databaseName", "STRING")
				.column("dataType", "STRING")
				.column("deviceIp", "STRING")
				.column("deviceName", "STRING")
				.column("deviceParentType", "STRING")
				.column("deviceType", "STRING")
				.column("direction", "STRING")
				.column("directionDesc", "STRING")
				.column("downBaseLineFlow", "INTEGER")
				.column("downFlowTotal", "INTEGER")
				.column("dstAssetId", "INTEGER")
				.column("dstAssetGroup", "STRING")
				.column("dstAssetKey", "INTEGER")
				.column("dstAssetName", "STRING")
				.column("dstAssetPublic", "INTEGER")
				.column("dstAssetStatus", "INTEGER")
				.column("dstAssetSubType", "STRING")
				.column("dstAssetType", "INTEGER")
				.column("dstBizId", "STRING")
				.column("dstCity", "STRING")
				.column("dstCountry", "STRING")
				.column("dstDomainName", "STRING")
				.column("dstIntelDesc", "STRING")
				.column("dstIntelId", "STRING")
				.column("dstIntelType", "INTEGER")
				.column("dstIp", "STRING")
				.column("dstLatitude", "STRING")
				.column("dstLongitude", "STRING")
				.column("dstOrgId", "STRING")
				.column("dstOrgName", "STRING")
				.column("dstPort", "STRING")
				.column("dstPost", "STRING")
				.column("dstProvince", "STRING")
				.column("dstSubDomainName", "STRING")
				.column("eventName", "STRING")
				.column("eventOneType", "INTEGER")
				.column("eventThreeType", "INTEGER")
				.column("eventTwoType", "INTEGER")
				.column("eventThreeTypeDesc", "STRING")
				.column("eventOneTypeDesc", "STRING")
				.column("eventTwoTypeDesc", "STRING")
				.column("eventType", "STRING")
				.column("extAttr", "STRING")
				.column("fileHash", "STRING")
				.column("fileName", "STRING")
				.column("filePath", "STRING")
				.column("idCard", "STRING")
				.column("infectionFile", "STRING")
				.column("insertTime", "TIMESTAMP_LTZ(3)")
				.column("installNum", "INTEGER")
				.column("intelId", "STRING")
				.column("intelType", "STRING")
				.column("logSubType", "STRING")
				.column("logType", "STRING")
				.column("mailFileSize", "STRING")
				.column("mailNum", "INTEGER")
				.column("mailProtocol", "STRING")
				.column("mailRecipient", "STRING")
				.column("mailSender", "STRING")
				.column("mailType", "STRING")
				.column("mailTypeDesc", "STRING")
				.column("mainAccount", "STRING")
				.column("malwareName", "STRING")
				.column("malwareSubType", "STRING")
				.column("malwareType", "STRING")
				.column("mobile", "STRING")
				.column("msg", "STRING")
				.column("name", "STRING")
				.column("operateContent", "STRING")
				.column("operateType", "STRING")
				.column("orgId", "INTEGER")
				.column("orgPath", "STRING")
				.column("password", "STRING")
				.column("payload", "STRING")
				.column("pcap", "STRING")
				.column("peakByteFlow", "INTEGER")
				.column("peakPackageFlow", "INTEGER")
				.column("percentBaseLineFlow", "STRING")
				.column("percentFlowTotal", "STRING")
				.column("policy", "STRING")
				.column("position", "STRING")
				.column("protocol", "STRING")
				.column("rawMsg", "STRING")
				.column("registerNum", "INTEGER")
				.column("responseCode", "STRING")
				.column("result", "INTEGER")
				.column("riskLevel", "INTEGER")
				.column("riskLevelDesc", "STRING")
				.column("sceneId", "INTEGER")
				.column("source", "INTEGER")
				.column("sourceEventThreeType", "INTEGER")
				.column("sourceEventThreeTypeDesc", "STRING")
				.column("sourceType", "INTEGER")
				.column("srcAssetId", "INTEGER")
				.column("srcAssetGroup", "STRING")
				.column("srcAssetKey", "INTEGER")
				.column("srcAssetName", "STRING")
				.column("srcAssetPublic", "INTEGER")
				.column("srcAssetStatus", "INTEGER")
				.column("srcAssetSubType", "STRING")
				.column("srcAssetType", "STRING")
				.column("srcBizId", "STRING")
				.column("srcCity", "STRING")
				.column("srcCountry", "STRING")
				.column("srcDomainName", "STRING")
				.column("srcDomain2Name", "STRING")
				.column("srcIntelDesc", "STRING")
				.column("srcIntelId", "STRING")
				.column("srcIntelType", "INTEGER")
				.column("srcIp", "STRING")
				.column("srcLatitude", "STRING")
				.column("srcLongitude", "STRING")
				.column("srcOrgId", "STRING")
				.column("srcOrgName", "STRING")
				.column("srcPort", "STRING")
				.column("srcPost", "STRING")
				.column("srcProvince", "STRING")
				.column("srcSubDomainName", "STRING")
				.column("subAccount", "STRING")
				// NOTE(review): "tabelName" looks like a typo for "tableName",
				// but it must match the corresponding BeanField field name —
				// do not rename here without renaming the POJO field.
				.column("tabelName", "STRING")
				.column("tags", "STRING")
				.column("tenantId", "INTEGER")
				.column("terminalNum", "INTEGER")
				.column("threatType", "STRING")
				.column("totalByteFlow", "INTEGER")
				.column("totalPackageFlow", "INTEGER")
				.column("type", "STRING")
				.column("upBaseLineFlow", "INTEGER")
				.column("upFlowTotal", "INTEGER")
				.column("url", "STRING")
				.column("user", "STRING")
				.column("userName", "STRING")
				.column("userOrgName", "STRING")
				.column("userType", "STRING")
				.column("vulnId", "STRING")
				.column("vulnInfo", "STRING")
				.column("vulnLevel", "STRING")
				.column("alertSignatureIdL", "STRING")
				.column("vulnType", "STRING")
				.column("alertInfo", "STRING")
				.column("domain", "STRING")
				.column("flag", "STRING")
				.column("flow", "INTEGER")
				.column("flowUp", "INTEGER")
				.column("flowDown", "INTEGER")
				.column("command", "STRING")
				.primaryKey("snowId", "createTime")
				// Reuse the watermark already assigned on the DataStream.
				.watermark("createTime", "SOURCE_WATERMARK()")
				.build();
	}

}
