package com.nanohadoop.mapreduce.core;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Context passed to a Mapper task. It exposes the input stream of
 * key/value pairs (via the wrapped {@link RecordReader}) and a sink for
 * the map phase's intermediate output.
 *
 * <p>NOTE(review): {@link #write(Object, Object)} only updates the output
 * counter here — the actual spill/write logic is implemented in
 * TaskRunner (per the original comment); confirm against that class.
 *
 * <p>This class is not thread-safe except for the output counter, which
 * is an {@link AtomicLong} shared with the framework.
 */
public class MapperContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
    /** Key most recently produced by {@link #nextKeyValue()}. */
    private KEYIN currentKey;
    /** Value most recently produced by {@link #nextKeyValue()}. */
    private VALUEIN currentValue;
    /** Source of input records; may be null when the no-arg constructor is used. */
    private RecordReader<KEYIN, VALUEIN> reader;
    /** Fraction of the input consumed, updated from the reader on each advance. */
    private float progress = 0.0f;
    /** Counter of records emitted by the mapper; shared with the framework. */
    private AtomicLong mapOutputRecords;

    /**
     * Creates a detached context with no input reader. A private counter is
     * installed so that {@link #write(Object, Object)} does not throw NPE on
     * a default-constructed context; {@link #nextKeyValue()} simply reports
     * end-of-input in this state.
     */
    public MapperContext() {
        // Fix: previously mapOutputRecords stayed null here, so write() NPE'd.
        this.mapOutputRecords = new AtomicLong();
    }

    /**
     * Creates a context bound to an input reader and a shared output counter.
     *
     * @param mapOutputRecords counter incremented once per {@code write};
     *                         must not be null
     * @param reader           source of input key/value pairs; must not be null
     */
    public MapperContext(RecordReader<KEYIN, VALUEIN> reader, AtomicLong mapOutputRecords) {
        if (reader == null) {
            throw new NullPointerException("reader");
        }
        if (mapOutputRecords == null) {
            throw new NullPointerException("mapOutputRecords");
        }
        this.reader = reader;
        this.mapOutputRecords = mapOutputRecords;
    }

    /**
     * Emits one intermediate key/value pair from the map phase.
     *
     * <p>Only the record counter is updated here; the actual write-out is
     * implemented in TaskRunner.
     *
     * @param key   intermediate key
     * @param value intermediate value
     * @throws IOException declared for subclasses/framework overrides that
     *                     perform real I/O
     */
    public void write(KEYOUT key, VALUEOUT value) throws IOException {
        mapOutputRecords.incrementAndGet();
        // Actual write-out logic is implemented in TaskRunner.
    }

    /**
     * Returns the task's progress as a fraction in [0, 1], as last reported
     * by the input reader.
     *
     * @return current progress fraction
     */
    public float getProgress() {
        return progress;
    }

    /**
     * Reports liveness to the framework. Currently a no-op placeholder.
     */
    public void progress() {
        // Notify the framework that the task is still making progress.
    }

    /**
     * Advances to the next input key/value pair, caching the pair and the
     * reader's progress on success.
     *
     * @return {@code true} if a pair was read; {@code false} at end of input
     *         (including when this context has no reader)
     * @throws IOException          if the underlying reader fails
     * @throws InterruptedException if the read is interrupted
     */
    public boolean nextKeyValue() throws IOException, InterruptedException {
        // Fix: a default-constructed context has no reader; treat that as
        // end-of-input instead of throwing an opaque NullPointerException.
        if (reader == null) {
            return false;
        }
        if (reader.nextKeyValue()) {
            currentKey = reader.getCurrentKey();
            currentValue = reader.getCurrentValue();
            progress = reader.getProgress();
            return true;
        }
        return false;
    }

    /**
     * Returns the key cached by the last successful {@link #nextKeyValue()}.
     *
     * @return current input key, or {@code null} before the first advance
     */
    public KEYIN getCurrentKey() throws IOException, InterruptedException {
        return currentKey;
    }

    /**
     * Returns the value cached by the last successful {@link #nextKeyValue()}.
     *
     * @return current input value, or {@code null} before the first advance
     */
    public VALUEIN getCurrentValue() throws IOException, InterruptedException {
        return currentValue;
    }
}