import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
//class HiveInputFormat extends InputFormat<>{
//
//    @Override
//    public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
//        return null;
//    }
//
//    @Override
//    public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
//        return null;
//    }
//}
/**
 * Map-only mapper that forwards each input line to a Kafka topic.
 *
 * <p>Input lines are expected as {@code "<key> <value>"} (space-separated);
 * the first field becomes the Kafka record key, the second the record value.
 * The topic name is hard-coded to {@code "test"} and the broker to
 * {@code localhost:9092}. Emits only {@code NullWritable} pairs to HDFS —
 * Kafka is the real sink.
 */
class KafkaMap extends Mapper<Object, Text, NullWritable, NullWritable> {
    private Properties properties;
    private KafkaProducer<String, String> kafkaProducer;

    /** Builds the Kafka producer once per map task (not per record). */
    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        properties = new Properties();
        properties.setProperty("bootstrap.servers","localhost:9092");
        properties.setProperty("acks","all");
        properties.setProperty("key.serializer","org.apache.kafka.common.serialization.StringSerializer");
        properties.setProperty("value.serializer","org.apache.kafka.common.serialization.StringSerializer");
        kafkaProducer = new KafkaProducer<>(properties);
        super.setup(context);
    }

    /**
     * Sends one input line to Kafka as a (key, value) record.
     *
     * @param key   input offset (unused)
     * @param value one line of the input file, {@code "<key> <value>"}
     */
    @Override
    protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] fields = line.split(" ");
        // BUG FIX: guard against lines without a separator — fields[1] used to
        // throw ArrayIndexOutOfBoundsException and kill the whole task attempt.
        if (fields.length < 2) {
            return;
        }
        kafkaProducer.send(new ProducerRecord<>("test", fields[0], fields[1]));
        context.write(NullWritable.get(), NullWritable.get());
        // BUG FIX: removed the original super.map(key, value, context) call.
        // The default Mapper.map() re-emits (key, value) as (Object, Text)
        // into a (NullWritable, NullWritable) context — wrong output types
        // at runtime, plus a duplicate emit per record.
    }

    /** Flushes and closes the producer when the task finishes. */
    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Null-check: setup() may have failed before the producer was created.
        if (kafkaProducer != null) {
            kafkaProducer.close();
        }
        super.cleanup(context);
    }
}

/**
 * Driver for a map-only MapReduce job that streams an HDFS text file into
 * Kafka (see {@code KafkaMap}). The HDFS output contains only NullWritable
 * placeholders; Kafka is the actual destination.
 */
public class KafkaMapReduce {
    /**
     * Configures and submits the job, then exits with the job's status.
     *
     * @param args optional: args[0] = input path (default "/opt/hive.txt"),
     *             args[1] = output path (default "/opt/out")
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration);

        // Locate this program's jar for distribution to the cluster.
        job.setJarByClass(KafkaMapReduce.class);

        job.setMapperClass(KafkaMap.class);

        // BUG FIX: the mapper is declared Mapper<Object, Text, NullWritable,
        // NullWritable>, but the original driver set Text/Text here, which
        // makes Hadoop reject the mapper's output with "wrong key class".
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(NullWritable.class);

        // Map-only job: no reducers needed, Kafka receives the real data.
        job.setNumReduceTasks(0);

        // Paths may be overridden on the command line; defaults preserve the
        // original hard-coded behavior.
        String inputPath = args.length > 0 ? args[0] : "/opt/hive.txt";
        String outputPath = args.length > 1 ? args[1] : "/opt/out";
        FileInputFormat.setInputPaths(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Submit to the cluster and block until completion; exit code reflects
        // job success so shell scripts can detect failure.
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
