package im.composer.media.sound.storage;

import im.composer.audio.engine.InOut;
import im.composer.media.sound.codec.FloatOutputStream;

import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;

import org.jaudiolibs.audioservers.AudioConfiguration;
import org.tritonus.share.sampled.FloatInputStream;
import org.tritonus.share.sampled.FloatSampleBuffer;
/**
 * Records incoming audio into {@link AudioFragment}s and, on playback, mixes
 * previously recorded fragments back into the output buffer. Frame boundary
 * times are tracked in milliseconds and fragments are indexed by start time.
 *
 * @author David Zhang (zdl@zdl.hk)
 * @NonThreadSafe
 */
public abstract class AudioRecorder extends InOut {

	private static final Logger LOG = Logger.getLogger(AudioRecorder.class.getName());

	// True while incoming audio is being written to a fragment.
	private transient boolean recording;
	// True while recorded fragments are mixed back into the output.
	private transient boolean playingback;
	// All recorded fragments, keyed by start time (milliseconds).
	private AudioFragmentMap frags = new AudioFragmentMap();
	// Sorted view over frags; lazily initialised on first playback frame.
	private transient SortedMap<Long,Collection<AudioFragment>> sorted_frags;
	// Fragments overlapping the current frame; refreshed every playback frame.
	private transient Set<AudioFragment> playback_frags = new HashSet<>();
	// Fragment currently being recorded into, or null when none is open.
	private transient AudioFragment cur_rcd;
	// Current frame boundaries in milliseconds (derived from millisecond_per_frame).
	private transient long frame_start_time;
	private transient long frame_end_time;

	public AudioRecorder() {
		super(null);
	}

	public AudioRecorder(AudioConfiguration context) {
		super(context);
	}

	/** @return whether incoming audio is currently being recorded */
	public boolean isRecording() {
		return recording;
	}

	public void setRecording(boolean recording) {
		this.recording = recording;
	}

	/** @return whether recorded fragments are currently played back */
	public boolean isPlayingBack() {
		return playingback;
	}

	public void setPlayingBack(boolean playingback) {
		this.playingback = playingback;
	}

	/**
	 * Per-frame processing: pass input through, then record and/or play back
	 * depending on the current mode flags.
	 */
	@Override
	protected void computeBuffer() {
		doThru();
		frame_start_time = (long) (getTime() * millisecond_per_frame);
		frame_end_time = (long) ((getTime() + 1) * millisecond_per_frame);
		if (recording) {
			try {
				doRecord();
			} catch (IOException e) {
				// Previously swallowed silently; surface the failure so that
				// dropped audio frames do not go unnoticed.
				LOG.log(Level.WARNING, "failed to record audio frame", e);
			}
		}
		if (playingback) {
			doPlayback();
		}
	}

	/** Mixes all source buffers into the (cleared) output buffer. */
	protected void doThru() {
		buf.makeSilence();
		for (FloatSampleBuffer src : srcBuffers) {
			buf.mix(src);
		}
	}

	/**
	 * Appends the current frame's mixed input to the active fragment, opening
	 * a new fragment on the first recorded frame.
	 *
	 * @throws IOException if the fragment's output stream cannot be written
	 */
	protected void doRecord() throws IOException {
		if (cur_rcd == null) {
			// First frame after recording started: open a new fragment.
			cur_rcd = newFragment();
			cur_rcd.setStartTime(frame_start_time);
			frags.put(frame_start_time, cur_rcd);
		}
		AudioConfiguration ctx = getContext();
		FloatOutputStream fos = cur_rcd.getDataSource().getOutputStream(ctx);
		// Dedicated buffer — deliberately NOT named 'buf', which would shadow
		// the inherited output buffer used by doThru()/doPlayback().
		FloatSampleBuffer recBuf = new FloatSampleBuffer(ctx.getInputChannelCount(), ctx.getMaxBufferSize(), ctx.getSampleRate());
		for (FloatSampleBuffer src : srcBuffers) {
			recBuf.mix(src);
		}
		fos.write(recBuf);
	}

	/**
	 * Mixes every fragment overlapping the current frame into the output
	 * buffer, seeking each fragment's stream to the frame-relative position.
	 */
	protected void doPlayback() {
		AudioConfiguration ctx = getContext();
		if (sorted_frags == null) {
			sorted_frags = frags;
		}
		// Fragments starting inside this frame that still have data to play.
		// NOTE(review): fragments that started BEFORE frame_start_time but are
		// still sounding are excluded by subMap(); confirm that is intended.
		playback_frags = sorted_frags.subMap(frame_start_time, frame_end_time)
				.values().stream()
				.flatMap(Collection::stream)
				.filter(frag -> frag.getStartTime() + frag.getLength() > frame_start_time)
				.collect(Collectors.toSet());
		FloatSampleBuffer mixBuf = new FloatSampleBuffer(ctx.getInputChannelCount(), ctx.getMaxBufferSize(), ctx.getSampleRate());
		for (AudioFragment frag : playback_frags) {
			// NOTE(review): start times look like milliseconds while
			// getSampleRate() is per second — these products appear off by a
			// factor of 1000; verify the intended units before changing.
			long frag_start_pos = (long) (frag.getStartTime() * ctx.getSampleRate()); // in samples
			long cur_start_pos = (long) (frame_start_time * ctx.getSampleRate()); // in samples
			long target_start_pos = cur_start_pos - frag_start_pos;
			target_start_pos += frag.getOffset() * ctx.getSampleRate();
			if (target_start_pos < 0) {
				// Fragment begins later within this frame; nothing to play yet.
				continue;
			}
			FloatInputStream fis;
			try {
				fis = frag.getDataSource().getInputStream();
			} catch (IOException e1) {
				LOG.log(Level.WARNING, "cannot open fragment for playback", e1);
				continue;
			}

			long target_pos_sample = (long) (target_start_pos / 1000f / ctx.getSampleRate());
			long cur_pos = fis.getPosition();
			long len = fis.getFrameLength();
			long read_len = bufferSize;
			while (cur_pos < target_pos_sample) {
				try {
					// Skip only the REMAINING distance. The original passed the
					// absolute target position to skipFrames() on every pass,
					// overshooting whenever cur_pos > 0.
					fis.skipFrames(target_pos_sample - cur_pos);
					long pos = fis.getPosition();
					if (pos == cur_pos) {
						break; // no progress — avoid spinning forever
					}
					cur_pos = pos;
				} catch (IOException e) {
					break;
				}
			}
			if (read_len > len - cur_pos) {
				read_len = len - cur_pos;
			}
			if (read_len <= 0) {
				// Stream exhausted (or over-skipped): nothing left to mix.
				continue;
			}
			fis.read(mixBuf, 0, (int) read_len);
			buf.mix(mixBuf, 0, 0, (int) read_len);
			mixBuf.makeSilence();
		}
	}

	/**
	 * Creates a fresh, empty fragment to record into; called when recording
	 * starts and no fragment is open.
	 */
	protected abstract AudioFragment newFragment();
}
