package cloudy.trident;

import cloudy.trident.tools.OrderSplit;
import cloudy.trident.tools.Print;
import cloudy.trident.tools.Split;
import cloudy.trident.tools.SplitBy;
import kafka.examples.KafkaProperties;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.LocalDRPC;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.kafka.trident.TransactionalTridentKafkaSpout;
import org.apache.storm.kafka.trident.TridentKafkaConfig;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.trident.TridentState;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.trident.operation.builtin.Count;
import org.apache.storm.trident.operation.builtin.FirstN;
import org.apache.storm.trident.operation.builtin.MapGet;
import org.apache.storm.trident.operation.builtin.Sum;
import org.apache.storm.trident.testing.MemoryMapState;
import org.apache.storm.tuple.Fields;
import sun.plugin.com.Utils;

/**
 * Trident topology that consumes order records from Kafka and maintains two
 * DRPC-queryable aggregates: sales amount and order count, both grouped by
 * (create_date, province_id).
 *
 * @author Elwyn
 * @version 2017/9/30
 * Copyright 上海云辰信息科技有限公司
 **/
public class TridentTopo {

	/**
	 * Builds the Trident topology.
	 *
	 * <p>Order lines are read from Kafka, split into
	 * (order_id, order_amt, create_date, province_id), and aggregated into two
	 * in-memory states: total sales amount and order count per
	 * (create_date, province_id). Each state is exposed through a DRPC stream
	 * ("getOrderAmt" / "getOrderNum") that accepts space-separated
	 * "date:province" arguments and returns the top 5 results.
	 *
	 * @param localDRPC local DRPC server for in-process testing; pass
	 *                  {@code null} when submitting to a real cluster (the
	 *                  cluster then serves DRPC itself)
	 * @return the assembled topology
	 */
	public static StormTopology build(LocalDRPC localDRPC) {
		BrokerHosts zkHosts = new ZkHosts("192.168.241.18");
		String topic = KafkaProperties.ORDER_TOPIC;
		TridentKafkaConfig tridentKafkaConfig = new TridentKafkaConfig(zkHosts, topic, "test");
		// In production, ignoreZkOffsets must remain false (the default) so the
		// spout resumes from the offsets recorded in ZooKeeper after a restart.
		// NOTE(review): 100 bytes is an extremely small fetch size for Kafka and
		// will throttle throughput (the original comment itself warned it must
		// not be too small) — confirm and raise for real workloads.
		tridentKafkaConfig.fetchSizeBytes = 100;
		tridentKafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
		TransactionalTridentKafkaSpout spout = new TransactionalTridentKafkaSpout(tridentKafkaConfig);
		TridentTopology tridentTopology = new TridentTopology();

		// Sales amount per (create_date, province_id).
		// NOTE(review): the shuffle() before groupBy() forces an extra
		// repartition that groupBy() performs anyway; kept as-is because
		// removing a repartition step changes task placement — verify before
		// dropping it.
		TridentState tridentState = tridentTopology.newStream("spout", spout)
				.parallelismHint(1)
				.each(new Fields(StringScheme.STRING_SCHEME_KEY), new OrderSplit("\\t"), new Fields("order_id", "order_amt", "create_date", "province_id"))
				.shuffle()
				.groupBy(new Fields("create_date", "province_id"))
				.persistentAggregate(new MemoryMapState.Factory(), new Fields("order_amt"), new Sum(), new Fields("sum_amt"));
		tridentTopology.newDRPCStream("getOrderAmt", localDRPC)
				.each(new Fields("args"), new Split(" "), new Fields("arg"))
				.each(new Fields("arg"), new SplitBy("\\:"), new Fields("create_date", "province_id"))
				.groupBy(new Fields("create_date", "province_id"))
				.stateQuery(tridentState, new Fields("create_date", "province_id"), new MapGet(), new Fields("sum_amt"))
				.applyAssembly(new FirstN(5, "sum_amt", true));

		// Order count per (create_date, province_id).
		// NOTE(review): this reuses the same spout instance and Kafka client id
		// ("test") as the stream above; two transactional streams sharing one
		// spout/client id may collide on ZooKeeper offset state — confirm, or
		// give the second stream its own TridentKafkaConfig client id.
		TridentState orderState = tridentTopology.newStream("orderSpout", spout)
				.parallelismHint(1)
				.each(new Fields(StringScheme.STRING_SCHEME_KEY), new OrderSplit("\\t"), new Fields("order_id", "order_amt", "create_date", "province_id"))
				.groupBy(new Fields("create_date", "province_id"))
				.persistentAggregate(new MemoryMapState.Factory(), new Fields("order_id"), new Count(), new Fields("order_num"));

		tridentTopology.newDRPCStream("getOrderNum", localDRPC)
				.each(new Fields("args"), new Split(" "), new Fields("arg"))
				.each(new Fields("arg"), new SplitBy("\\:"), new Fields("create_date", "province_id"))
				.groupBy(new Fields("create_date", "province_id"))
				.stateQuery(orderState, new Fields("create_date", "province_id"), new MapGet(), new Fields("order_num"))
				.applyAssembly(new FirstN(5, "order_num", true));

		return tridentTopology.build();
	}


	/**
	 * Entry point. With a command-line argument, submits the topology to a
	 * remote cluster under that name (DRPC is then served by the cluster).
	 * With no arguments, runs a local cluster and polls both DRPC functions
	 * once per second, printing results to stderr.
	 *
	 * @param args optional; args[0] is the remote topology name
	 */
	public static void main(String[] args) {
		Config config = new Config();
		config.setDebug(false);
		config.setNumWorkers(10);

		if (args.length > 0) {
			// Remote submission: the cluster serves DRPC, so no LocalDRPC is passed.
			try {
				StormSubmitter.submitTopology(args[0], config, build(null));
			} catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
				e.printStackTrace();
			}
		} else {
			// Local in-process test: run the topology and poll DRPC forever.
			LocalDRPC localDRPC = new LocalDRPC();
			LocalCluster localCluster = new LocalCluster();
			localCluster.submitTopology("mytopology", config, build(localDRPC));
			while (true) {
				String getOrderAmt = localDRPC.execute("getOrderAmt", "2017-09-30:1 2017-09-30:2 2017-09-30:3 2017-09-30:4 2017-09-30:5 2017-09-30:6 2017-09-30:7 2017-09-30:8");
				System.err.println("amt" + getOrderAmt);
				String getOrderNum = localDRPC.execute("getOrderNum", "2017-09-30:1 2017-09-30:2 2017-09-30:3 2017-09-30:4 2017-09-30:5 2017-09-30:6 2017-09-30:7 2017-09-30:8");
				System.err.println("num" + getOrderNum);
				org.apache.storm.utils.Utils.sleep(1000);
			}
		}
	}
}
