package com.chb.storm.trident;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.storm.hbase.bolt.mapper.HBaseProjectionCriteria;
import org.apache.storm.hbase.bolt.mapper.HBaseValueMapper;
import org.apache.storm.hbase.trident.mapper.SimpleTridentHBaseMapper;
import org.apache.storm.hbase.trident.mapper.TridentHBaseMapper;
import org.apache.storm.hbase.trident.state.HBaseQuery;
import org.apache.storm.hbase.trident.state.HBaseState;
import org.apache.storm.hbase.trident.state.HBaseStateFactory;
import org.apache.storm.hbase.trident.state.HBaseUpdater;

import storm.trident.Stream;
import storm.trident.TridentState;
import storm.trident.TridentTopology;
import storm.trident.state.StateFactory;
import storm.trident.testing.FixedBatchSpout;
import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.StormTopology;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

import com.chb.storm.topology.WordCountValueMapper;

/**
 * HBase table DDL: create 'storm_ns:WordCount', { NAME => 'd', COMPRESSION => 'snappy'}
 * NOTE: the column family here must match the one configured in the code!
 * @author Administrator
 *
 */
public class KafkaTopologyHbaseWithTrident {

	/**
	 * Builds the Trident topology: a cycling {@code FixedBatchSpout} emits
	 * (word, count) tuples which are persisted into the HBase table
	 * {@code storm_ns:WordCount}.
	 *
	 * @param hbaseRoot config key under which the HBase client configuration
	 *                  map is stored in the topology {@code Config}
	 * @return the assembled topology, ready for submission
	 */
	public static StormTopology buildTopology(String hbaseRoot) {
		Fields fields = new Fields("word", "count");
		FixedBatchSpout spout = new FixedBatchSpout(fields, 4,
				new Values("storm", 1),
				new Values("trident", 1),
				new Values("needs", 1),
				new Values("javadoc", 1)
		);
		// Cycle forever so the topology keeps emitting batches.
		spout.setCycle(true);

		// Maps Trident tuples to HBase cells. The column family ("d") must
		// match the family defined on the HBase table.
		TridentHBaseMapper tridentHBaseMapper = new SimpleTridentHBaseMapper()
				.withColumnFamily("d")
				.withColumnFields(new Fields("word"))
				.withCounterFields(new Fields("count"))
				.withRowKeyField("word");

		// Converts HBase result rows back into Storm values on queries.
		HBaseValueMapper rowToStormValueMapper = new WordCountValueMapper();

		// Projection: only the d:count column is read back from HBase.
		HBaseProjectionCriteria projectionCriteria = new HBaseProjectionCriteria();
		projectionCriteria.addColumn(new HBaseProjectionCriteria.ColumnMetaData("d", "count"));

		// Options bundle for the HBase state: config key, durability,
		// tuple/row mappers, projection and target table.
		AdHBaseState.Options options = new AdHBaseState.Options()
				.withConfigKey(hbaseRoot)
				.withDurability(Durability.SYNC_WAL)
				.withMapper(tridentHBaseMapper)
				.withProjectionCriteria(projectionCriteria)
				.withRowToStormValueMapper(rowToStormValueMapper)
				.withTableName("storm_ns:WordCount");

		// Factory that produces HBaseState instances from the options above.
		StateFactory factory = new AdHBaseStateFactory(options);

		TridentTopology topology = new TridentTopology();
		// Stream of words read from the FixedBatchSpout.
		Stream stream = topology.newStream("spout1", spout);

		// Persist the per-word counts into HBase.
		stream.partitionPersist(factory, fields, new AdHBaseUpdater(), new Fields());

		return topology.build();
	}

	/**
	 * Submits the topology to the cluster under the name
	 * {@code wordCounterTrident}.
	 *
	 * <p>NOTE(review): the HBase config key {@code "testRooKey"} looks like a
	 * typo for {@code "testRootKey"} — it must match the key under which the
	 * HBase client configuration is placed in {@code conf}; confirm against
	 * the deployment config before renaming.
	 *
	 * @param args unused; topology name and config key are hard-coded
	 * @throws Exception if topology submission fails
	 */
	public static void main(String[] args) throws Exception {
		Config conf = new Config();
		conf.setMaxSpoutPending(5);
		conf.setNumWorkers(3);

		StormSubmitter.submitTopology("wordCounterTrident", conf, buildTopology("testRooKey"));
	}

}
