package com.ibm.cps.dft;

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.KryoSerializable;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.log4j.Logger;

/**
 * Maintains a sliding window of input samples and computes its DFT, updating
 * the coefficient vector incrementally (one rotation + correction per slide
 * step) instead of recomputing the full transform for every new sample.
 * <p>
 * Implements {@link KryoSerializable} so the operator state can be shipped
 * around by Spark.
 */
public class IncDFTOperator implements KryoSerializable {

	/**
	 * Length of the sliding window, i.e. the number of input samples a
	 * single DFT computation consumes.
	 */
	private int slidewin;
	/** Normalization factor 1/sqrt(slidewin) applied on each slide step. */
	private double slidwin_factor;
	/*
	 * static final so the logger survives Kryo deserialization: Kryo may
	 * instantiate this class without running instance initializers, which
	 * would leave an instance-level logger null and NPE on first use.
	 */
	private static final Logger logger = Logger
			.getLogger(IncDFTOperator.class);

	/** Buffer holding the most recent {@code slidewin} input samples. */
	private FixedSizeQueue<Double> inputdatavector;

	/**
	 * Number of buffered values in inputdatavector (the fill cursor). While
	 * datacursor < slidewin the DFT computation returns null.
	 */
	private int datacursor;

	/** Current DFT coefficients; null until the window has filled once. */
	private Complex[] dftvector = null;

	/**
	 * No-arg constructor for Kryo's default instantiation; all fields are
	 * populated afterwards in {@link #read(Kryo, Input)}.
	 */
	private IncDFTOperator() {
	}

	/**
	 * @param slid_win sliding-window length; must be positive
	 * @throws IllegalArgumentException if {@code slid_win <= 0}
	 */
	public IncDFTOperator(int slid_win) {
		if (slid_win <= 0) {
			throw new IllegalArgumentException(
					"sliding window length must be positive: " + slid_win);
		}
		this.slidewin = slid_win;
		inputdatavector = new FixedSizeQueue<Double>(this.slidewin);
		datacursor = 0;
		slidwin_factor = 1 / Math.sqrt(slidewin);
	}

	/**
	 * @return a defensive copy of the buffered input samples, or null if the
	 *         buffer has not been created
	 */
	public FixedSizeQueue<Double> getdataVectorCopy() {
		if (inputdatavector != null)
			return inputdatavector.clone();
		else
			return null;
	}

	/**
	 * Appends {@code update_batch} to the window and, once the window is
	 * full, returns the modulus (magnitude) of every DFT coefficient.
	 * <p>
	 * NOTE: the misspelled method name is kept for backward compatibility
	 * with existing callers.
	 *
	 * @param update_batch new samples to append
	 * @return modulus vector (one entry per coefficient), or null while the
	 *         window is not yet full
	 */
	public Double[] updateAndComputDFTModulusVecotr(Double[] update_batch) {
		Complex[] res = updateAndComputeDFT(update_batch);
		if (res == null)
			return null;

		// size from res (== slidewin) so the loop can never overrun
		Double[] ret = new Double[res.length];
		for (int i = 0; i < res.length; i++) {
			ret[i] = res[i].modulus();
		}
		return ret;
	}

	/**
	 * Appends {@code update_batch} to the window and returns the DFT of the
	 * current window contents.
	 *
	 * @param update_batch new samples to append
	 * @return a copy of the DFT coefficient vector, or null while the window
	 *         is not yet full
	 */
	public Complex[] updateAndComputeDFT(Double[] update_batch) {
		int next_cursor = datacursor + update_batch.length;
		if (next_cursor < slidewin) {
			/* window still filling: just buffer the samples */
			for (Double sample : update_batch) {
				inputdatavector.add(sample);
			}
			datacursor = next_cursor;
			logger.info("Return null");
			return null;
		}
		/* from here on the window is (or becomes) full; pin the cursor */
		datacursor = slidewin;
		if (dftvector == null) {
			/* first time the window fills: compute the full DFT from scratch */
			for (Double sample : update_batch) {
				inputdatavector.add(sample);
			}
			Double[] dataarray = inputdatavector
					.toArray(new Double[slidewin]);
			dftvector = DFTOperator.computeDFT(dataarray);
		} else {
			/*
			 * window already full: slide one step per new sample, updating
			 * each coefficient incrementally instead of recomputing the DFT
			 */
			for (Double next_tail : update_batch) {
				// head is unchanged until add() below evicts it, so the
				// lookup is hoisted out of the per-coefficient loop
				Double cur_head = inputdatavector.get(0);
				for (int j = 0; j < slidewin; j++) {
					computeOneSlideStep(cur_head, next_tail, j);
				}
				inputdatavector.add(next_tail);
			}
		}
		// defensive copy so callers cannot mutate internal state
		return dftvector.clone();
	}

	/**
	 * Incrementally updates dftvector[curidx] for one slide of the window:
	 * the normalized difference between the entering and leaving sample is
	 * added, then the coefficient is rotated by the index-dependent phasor.
	 *
	 * @param cur_head sample leaving the window
	 * @param next_tail sample entering the window
	 * @param curidx coefficient index, 0..slidewin-1
	 */
	private void computeOneSlideStep(Double cur_head, Double next_tail,
			int curidx) {
		double cur_xita = 2 * Math.PI * curidx / (double) slidewin;
		// NOTE(review): assumes Complex(1, cur_xita, 0) constructs the unit
		// phasor e^(i*cur_xita) in polar form — confirm against Complex ctor
		Complex factor = new Complex(1, cur_xita, 0);
		dftvector[curidx] = dftvector[curidx].plus(
				new Complex(((next_tail - cur_head) * slidwin_factor), 0))
				.times(factor);
	}

	/**
	 * Kryo serialization (used by Spark). dftvector may legitimately be null
	 * before the window first fills, so it is written with
	 * {@code writeObjectOrNull} — plain {@code writeObject} would throw.
	 */
	@Override
	public void write(Kryo kryo, Output output) {
		output.writeInt(slidewin);
		output.writeDouble(slidwin_factor);
		output.writeInt(datacursor);
		kryo.writeObjectOrNull(output, dftvector, Complex[].class);
		List<Double> cachedData = new ArrayList<>();
		for (Double data : inputdatavector) {
			cachedData.add(data);
		}
		kryo.writeObject(output, cachedData);
	}

	/** Kryo deserialization; mirrors {@link #write(Kryo, Output)} exactly. */
	@Override
	@SuppressWarnings("unchecked")
	public void read(Kryo kryo, Input input) {
		this.slidewin = input.readInt();
		this.slidwin_factor = input.readDouble();
		this.datacursor = input.readInt();
		this.dftvector = kryo.readObjectOrNull(input, Complex[].class);
		List<Double> cachedData = kryo.readObject(input, ArrayList.class);
		inputdatavector = new FixedSizeQueue<Double>(this.slidewin);
		for (Double data : cachedData) {
			inputdatavector.add(data);
		}
	}

	/** For Spark debug output. */
	@Override
	public String toString() {
		return "IncDFTOperator{" + "slidewin=" + slidewin + ", slidwin_factor="
				+ slidwin_factor + ", inputdatavector=" + inputdatavector
				+ ", datacursor=" + datacursor + ", dftvector="
				+ Arrays.toString(dftvector) + '}';
	}
}
