package state.inmemony;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

import java.util.Map;
import java.util.Random;
import java.util.UUID;

/**
 * @Author:RenPu
 * @Date: 2020/4/20 11:13
 * @Version: 1.0
 * @description:在内存storm状态管理
 */
public class LineSpolt extends BaseRichSpout {


    // Pool of sentences used as the source data for emitted tuples.
    private String[] line = null;


    // Collector used to emit tuples to the downstream bolt.
    private SpoutOutputCollector collector;

    // Single Random instance reused across nextTuple() calls
    // (avoids constructing a new Random on every emit).
    private Random random;

    /**
     * Initialization callback, invoked once when the spout task starts.
     *
     * @param map                  topology configuration
     * @param topologyContext      task/topology context (unused here)
     * @param spoutOutputCollector collector for emitting tuples downstream
     */
    @Override
    public void open(Map<String, Object> map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {

        // BUG FIX: the original did "this.collector = collector", a self-assignment
        // that left the field null and caused an NPE in nextTuple().
        this.collector = spoutOutputCollector;

        this.random = new Random();

        line = new String[]{"Hello Storm","Hello kafka","Hello Spark"};


    }


    /**
     * Builds a tuple and emits it to the next processor (Bolt).
     */
    @Override
    public void nextTuple() {

        // Every five seconds, produce a new tuple and send it downstream.
        // BUG FIX: the original slept 50000 ms although its comment said five seconds.
        Utils.sleep(5000);


        // Pick a random index within the array bounds and fetch that sentence.
        int num = random.nextInt(line.length);
        String word = line[num];

        // Unique message id so ack()/fail() callbacks can be correlated with this tuple.
        String msgId = UUID.randomUUID().toString();

        System.out.println("send bolt tuplt:"+msgId);

        // Anchored emit: passing msgId enables at-least-once reliability tracking.
        collector.emit(new Values(word),msgId);


    }


    /**
     * Called by Storm when the tuple identified by msgId was fully processed.
     */
    @Override
    public void ack(Object msgId) {
        System.out.println("处理成功："+msgId);
    }

    /**
     * Called by Storm when processing of the tuple identified by msgId failed.
     */
    @Override
    public void fail(Object msgId) {
        System.out.println("处理失败："+msgId);
    }

    /**
     * Declares the output field schema for this spout.
     *
     * @param declarer used to register the output fields
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {

        // Declare the field name "line" so downstream components can
        // look up the value by that key.
        declarer.declare(new Fields("line"));

    }
}
