package rete;


import alink.AlsRateRecommStreamOpTest;
import com.asap.demo.ContextInfo;
import com.asap.demo.function.dealMapFunction;
import com.asap.demo.function.dealStreamProcessFunction;
import com.asap.demo.model.BeanField;
import com.asap.demo.sourcefunc.MysqlSourceFunction1;
import com.asap.demo.table.RuleParse;
import com.asap.demo.utils.Constants;
import com.asap.demo.utils.Utils;
import com.asap.rule.StandardEvent;
import com.asap.rule.orm.DbFetcher;
import com.asap.rule.util.PropTransformMap;
import com.asap.rule.util.RuleReader;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.typeutils.ListTypeInfo;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.test.util.MiniClusterWithClientResource;
import org.apache.flink.types.Row;
import org.junit.ClassRule;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nullable;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

public class ReteDemo5 {

	// Bound to ReteDemo5 (was mistakenly AlsRateRecommStreamOpTest.class, which
	// misattributed every log line to an unrelated test class).
	private static final Logger logger = LoggerFactory.getLogger(ReteDemo5.class);

	/** Shared mini Flink cluster for this test class: 2 TaskManagers x 3 slots. */
	@ClassRule
	public static MiniClusterWithClientResource flinkCluster =
			new MiniClusterWithClientResource(
					new MiniClusterResourceConfiguration.Builder()
							.setNumberSlotsPerTaskManager(3)
							.setNumberTaskManagers(2)
							.build());

	/**
	 * End-to-end pipeline test: builds a table from the DDL in
	 * {@code ./conf/createTable.cfg}, converts it to a {@code DataStream<Row>},
	 * parses each row into a {@link StandardEvent}, assigns event-time
	 * timestamps/watermarks from the event's {@code CREATE_TIME} field
	 * (max out-of-orderness 5s), keys the stream, and applies
	 * {@code dealMapFunction} with rules loaded from a local config file.
	 *
	 * @throws Exception if table creation, rule parsing, or job execution fails
	 */
	@Test
	public void test() throws Exception {

		final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
				.useBlinkPlanner()
				.inStreamingMode()
				.build();
		StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(env, blinkStreamSettings);

		env.setParallelism(1);
		env.enableCheckpointing(5000);  // checkpoint every 5000 ms
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

		// Load the CREATE TABLE DDL from a local config file and register it.
		String createTableDdl = Utils.read("./conf/createTable.cfg");
		logger.info("createTable DDL: {}", createTableDdl);
		blinkStreamTableEnv.executeSql(createTableDdl);

		Table table = blinkStreamTableEnv.from("asap_superset");
		DataStream<Row> dataStream = blinkStreamTableEnv.toDataStream(table);

		// Parse rule definitions from an absolute local path.
		// NOTE(review): hard-coded machine-specific path — consider making this configurable.
		RuleParse ruleParse = new RuleParse();
		ruleParse.parseData("/home/asap/wbh/conf/cfg.json");
		logger.info("1111size:{}", ruleParse.getTableDefine().getJsonFieldList());

		DataStream<StandardEvent> kafkaData = dataStream
				.map(new MapFunction<Row, StandardEvent>() {
					@Override
					public StandardEvent map(Row value) throws Exception {
						// StandardEvent.parse consumes the Row's string form.
						return StandardEvent.parse(value.toString());
					}
				})
				// Periodic watermarks: event time comes from CREATE_TIME;
				// watermark lags the max seen timestamp by 5 seconds.
				.assignTimestampsAndWatermarks(
						new AssignerWithPeriodicWatermarks<StandardEvent>() {
							Long currentMaxTimestamp = 0L;
							Long maxDelayTime = 5000L;

							@Override
							public long extractTimestamp(StandardEvent s, long l) {
								currentMaxTimestamp = Timestamp.valueOf(Utils.transforDate(s.getField("CREATE_TIME"))).getTime();
								return currentMaxTimestamp;
							}

							@Nullable
							@Override
							public Watermark getCurrentWatermark() {
								long time = currentMaxTimestamp - maxDelayTime;
								//logger.info("getCurrentWatermark.............."+time);
								return new Watermark(time);
							}
						}
				)
				// Constant key: routes all events to a single key group
				// (effectively serializes downstream keyed processing).
				.keyBy(new KeySelector<StandardEvent, Object>() {
					@Override
					public Object getKey(StandardEvent event) throws Exception {
						return 1;
					}
				});

		DataStream<BeanField> kafkaData1 = kafkaData.map(new dealMapFunction(ruleParse));
		env.execute("Broadcast test kafka");
	}
}
