package cn.ruc.dbiir.storm.generator.bolt;

import java.io.FileWriter;
import java.util.Map;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;

import cn.ruc.dbiir.storm.generator.util.Config;
import cn.ruc.dbiir.storm.generator.util.HDFSOperate;

/**
 * Processes records flowing out of the source (spout).
 *
 * <p>Each incoming tuple carries a record string in its "content" field.
 * Based on the configured error rate, roughly one record out of every
 * {@code Config.ERROR_RATE_BASE} is treated as a "bad debt" record: it is
 * published to the error Kafka topic and appended to the error-water HDFS
 * file. All other records are appended to the normal water HDFS file.
 *
 * <p>author: mark, createTime: May 31, 2018 8:39:16 PM
 *
 * @version
 */
public class DataDealBolt extends BaseBasicBolt
{
	// BaseBasicBolt is Serializable; declare a stable serialization id.
	private static final long serialVersionUID = 1L;

	private final String waterPath = Config.PATH_WATER;
	private final String errWaterPath = Config.PATH_ERR_WATER;

	// HDFS handles for the normal and error outputs. They are opened in
	// prepare() on the worker (never populated at topology-submission time)
	// and are not serializable, hence transient.
	private transient FileSystem fs_water = null;
	private transient FSDataOutputStream outputStream_water = null;
	private transient FileSystem fs_errwater = null;
	private transient FSDataOutputStream outputStream_errwater = null;

	/**
	 * Runs once before the bolt starts processing tuples; the hook for
	 * environment setup. Opens the HDFS file systems and append streams for
	 * both the normal water path and the error water path.
	 *
	 * @param stormConf topology configuration (unused here)
	 * @param context   bolt runtime context (unused here)
	 */
	@Override
	public void prepare(Map stormConf, TopologyContext context)
	{
		fs_water = HDFSOperate.initialFS(waterPath);
		outputStream_water = HDFSOperate.initialFSOS(fs_water, waterPath);

		fs_errwater = HDFSOperate.initialFS(errWaterPath);
		outputStream_errwater = HDFSOperate.initialFSOS(fs_errwater, errWaterPath);
	}

	/**
	 * Receives one subscribed tuple from the stream; field values can be read
	 * via {@code tuple.getString()} or {@code tuple.getStringByField()}.
	 *
	 * <p>Routing: when the running counter {@code Config.ACC_RATE} reaches
	 * {@code Config.ERROR_RATE_BASE}, the record is emitted as a bad-debt
	 * record (Kafka error topic + error HDFS file) and the counter resets;
	 * otherwise the counter increments and the record goes to the normal
	 * HDFS file. This bolt emits nothing downstream.
	 */
	@Override
	public void execute(Tuple tuple, BasicOutputCollector collector)
	{
		String content = tuple.getStringByField("content");

		// Derive the error interval from the configured error rate.
		// NOTE(review): recomputed for every tuple; could be hoisted to
		// prepare() if Config.ERROR_RATE never changes at runtime — confirm.
		Config.ERROR_RATE_BASE = (int) (1000 / Config.ERROR_RATE);

		if (Config.ACC_RATE == Config.ERROR_RATE_BASE)
		{
			// Inject a bad-debt record: publish to the error topic, append to
			// the error HDFS file, and restart the counter.
			Config.producer.send(new ProducerRecord<String, String>(Config.TOPIC_ERRWATER, "key_errWater", content));
			Config.ACC_RATE = 0;
			HDFSOperate.insertDataToHDFS(outputStream_errwater, content);
		}
		else
		{
			Config.ACC_RATE++;
			HDFSOperate.insertDataToHDFS(outputStream_water, content);
		}
	}

	/**
	 * Declares the output fields for this bolt's (currently unused) stream.
	 */
	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer)
	{
		declarer.declare(new Fields(Config.SCHEMA));
	}

	/**
	 * Releases the resources held by this bolt; Storm calls this before
	 * terminating the bolt. Streams are closed before their file systems.
	 */
	@Override
	public void cleanup()
	{
		HDFSOperate.closeFSOS(outputStream_water);
		HDFSOperate.closeFSOS(outputStream_errwater);

		HDFSOperate.closeFS(fs_water);
		HDFSOperate.closeFS(fs_errwater);
	}

}
