package com.devbobcorn.nekoration.sounds.music;

import java.io.File;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;

import com.devbobcorn.nekoration.client.ClientHelper;
import com.devbobcorn.nekoration.client.rendering.MusicRenderer;
import com.devbobcorn.nekoration.utils.Complex;
import com.devbobcorn.nekoration.utils.FastFourier;

import net.minecraft.client.Minecraft;

public final class VisualHelper {
    // Number of PCM sample frames read and analyzed per visual update.
    public static final int DEF_BUFFER_SAMPLE_SZ = 1024;

    // Background worker that decodes audio and feeds the visual buffers;
    // null until startThread() is first called.
    private static MusicVisualThread visualThread = null;

    // Number of spectrum bars shown by MusicRenderer.
    public static final int BAR_NUM = 64;
	// Band boundaries (BAR_NUM + 2 entries). Filled with frequencies by
	// calculateBins(), then overwritten in place with FFT bin INDICES —
	// updateBuffer() reads them as indices.
	public static double frequencyBins[];
	// Smoothed bar heights consumed by the renderer.
	public static double[] buffer = new double[BAR_NUM];
	// Per-bar fall speed; grows while a bar is decaying (see updateBuffer()).
	public static double[] bufferDecrease = new double[BAR_NUM];
	// Raw per-bar magnitudes (dB) of the latest FFT frame.
	public static double[] magnitudes = new double[BAR_NUM];
	
    // Count of audio frames decoded so far; used to pace decoding against playback.
    public static long frames = 0L;

    /**
     * (Re)starts the background visual thread for the given track, interrupting
     * any previously running one first.
     *
     * @param musicName name of the music file (without extension) under "nekomusic/"
     */
    public static void startThread(String musicName){
        try {
            if (visualThread != null){
                visualThread.interrupt();
            }
            // Fix: the musicName parameter was previously ignored in favor of
            // ClientHelper.chosenMusic; honor the argument callers pass in.
            // (Callers appear to pass chosenMusic anyway, so this is compatible.)
            visualThread = new MusicVisualThread(musicName);
            visualThread.start();
        } catch (Exception e){
            e.printStackTrace();
        }
    }

	/**
	 * Stops the visual thread (if one is running) and zeroes the smoothed
	 * bar-height buffer so the display goes blank.
	 */
	public static void reset(){
		try {
			final MusicVisualThread running = visualThread;
			if (running != null) {
				running.interrupt();
			}
			java.util.Arrays.fill(buffer, 0.0);
		} catch (Exception e){
			e.printStackTrace();
		}
	}

    /**
     * Worker that streams the chosen audio file from disk, decodes it to
     * signed PCM, and pushes Hamming-windowed FFT frames into the static
     * visual buffers, pacing itself against MusicRenderer's playback position.
     *
     * NOTE(review): this class extends Thread yet spawns a second wrapper
     * Thread in start(), so run() executes on the wrapper. The pattern is
     * redundant but kept as-is, since interrupt() and the Thread.interrupted()
     * check inside run() are wired to that wrapper.
     */
    public static class MusicVisualThread extends Thread {
        private Thread thread;      // wrapper thread that actually executes run()
        private String threadName;  // music name, used for logging and thread naming

        MusicVisualThread(String name) {
            threadName = name;
            System.out.println("Creating " + threadName);
        }

        @Override
        public void run() {
            System.out.println("Running " + threadName);
            AudioInputStream ais = null;
            frames = 0L;
            try {
                // Supported file types: wave / au / aiff
                ais = AudioSystem.getAudioInputStream(new File(Minecraft.getInstance().gameDirectory, "nekomusic/" + ClientHelper.chosenMusic + ".au"));
                // Re-wrap so we always read signed PCM regardless of source encoding.
                ais = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, ais);

                AudioFormat audioFormat = ais.getFormat();
                System.out.println("Audio Format: " + audioFormat);
                DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
                System.out.println("Info: " + info);

                final int normalBytes = normalBytesFromBits(audioFormat.getSampleSizeInBits());
                // Duration of one DEF_BUFFER_SAMPLE_SZ frame.
                // NOTE(review): 44100 is hard-coded; audioFormat.getSampleRate()
                // may differ for some files — confirm.
                final double milSecsPerFrame = 1024.0 * 1000.0 / 44100.0;
                System.out.println("milSecsPerFrame: " + milSecsPerFrame);
                float[] samples = new float[DEF_BUFFER_SAMPLE_SZ * audioFormat.getChannels()];
                long[] transfer = new long[samples.length];
                byte[] bytes = new byte[samples.length * normalBytes];
                int bread = 0;

                calculateBins();

                while ((bread != -1) && !Thread.interrupted()){
                    final int audioMilSecs = (int)(frames * milSecsPerFrame);
                    final int playedMilSecs = MusicRenderer.getPlayedMilSecs();
                    if (playedMilSecs > audioMilSecs){ // Playback is ahead: decode one more frame.
                        bread = ais.read(bytes);
                        samples = unpack(bytes, transfer, samples, bread, audioFormat);
                        samples = hamming(samples, bread / normalBytes, audioFormat);
                        updateBuffer(samples);
                        ++frames;
                    } else {
                        // NOTE(review): Math.min caps the sleep at 1 ms, so this
                        // effectively polls every millisecond; Math.max may have
                        // been intended. Behavior kept as-is.
                        Thread.sleep(Math.min(1, audioMilSecs - playedMilSecs));
                    }
                }
            } catch (Exception e) {
                System.out.println("Thread " + threadName + " interrupted.");
                e.printStackTrace();
            } finally {
                // Fix: the stream was previously leaked (masked by the old
                // @SuppressWarnings("resource")); always release it.
                if (ais != null) {
                    try {
                        ais.close();
                    } catch (Exception ignored) {
                        // best-effort cleanup
                    }
                }
            }
            System.out.println("Thread " + threadName + " exiting.");
        }

        @Override
        public void start() {
            System.out.println("Starting " + threadName);
            if (thread == null) {
                thread = new Thread(this, threadName);
                thread.start();
            }
        }

        @Override
        public void interrupt(){
            super.interrupt();
            // Fix: guard against NPE when interrupt() is called before start().
            if (thread != null) {
                thread.interrupt();
            }
        }
    }

	/**
	 * Returns the number of whole bytes needed to hold one sample of the
	 * given bit depth, i.e. ceil(bitsPerSample / 8) for non-negative inputs.
	 */
	public static int normalBytesFromBits(int bitsPerSample) {
		final int roundedUp = bitsPerSample + 7;
		return roundedUp >> 3;
	}

	/**
	 * Converts raw PCM bytes into normalized float samples in [-1.0, 1.0],
	 * handling both endiannesses and both signed and unsigned encodings.
	 *
	 * @param bytes    raw bytes read from the audio stream
	 * @param transfer scratch array (same length as samples) holding 64-bit intermediates
	 * @param samples  destination array; also returned for convenience
	 * @param bvalid   number of valid bytes in {@code bytes}; may be -1 at EOF,
	 *                 in which case the unpacking loops simply do not run
	 * @param fmt      source format; anything other than PCM_SIGNED / PCM_UNSIGNED
	 *                 is returned unmodified
	 * @return the {@code samples} array
	 */
	public static float[] unpack(byte[] bytes, long[] transfer, float[] samples, int bvalid, AudioFormat fmt) {
		if (fmt.getEncoding() != AudioFormat.Encoding.PCM_SIGNED && fmt.getEncoding() != AudioFormat.Encoding.PCM_UNSIGNED) {
			return samples;
		}

		final int bitsPerSample = fmt.getSampleSizeInBits();
		final int normalBytes = normalBytesFromBits(bitsPerSample);
		/*
		 * not the most DRY way to do this but it's a bit more efficient.
		 * otherwise there would either have to be 4 separate methods for each
		 * combination of endianness/signedness or do it all in one loop and
		 * check the format for each sample.
		 * 
		 * a helper array (transfer) allows the logic to be split up but without
		 * being too repetetive.
		 * 
		 * here there are two loops converting bytes to raw long samples.
		 * integral primitives in Java get sign extended when they are promoted
		 * to a larger type so the & 0xffL mask keeps them intact.
		 */
		if (fmt.isBigEndian()) {
			for (int i = 0, k = 0, b; i < bvalid; i += normalBytes, k++) {
				transfer[k] = 0L;

				int least = i + normalBytes - 1;
				for (b = 0; b < normalBytes; b++) {
					transfer[k] |= (bytes[least - b] & 0xffL) << (8 * b);
				}
			}
		} else {
			for (int i = 0, k = 0, b; i < bvalid; i += normalBytes, k++) {
				transfer[k] = 0L;

				for (b = 0; b < normalBytes; b++) {
					transfer[k] |= (bytes[i + b] & 0xffL) << (8 * b);
				}
			}
		}

		final long fullScale = (long) Math.pow(2.0, bitsPerSample - 1);

		/*
		 * the OR is not quite enough to convert, the signage needs to be
		 * corrected.
		 */
		if (fmt.getEncoding() == AudioFormat.Encoding.PCM_SIGNED) {
			/*
			 * if the samples were signed, they must be extended to the 64-bit
			 * long.
			 * 
			 * the arithmetic right shift in Java will fill the left bits with
			 * 1's if the MSB is set.
			 * 
			 * so sign extend by first shifting left so that if the sample is
			 * supposed to be negative, it will shift the sign bit in to the
			 * 64-bit MSB then shift back and fill with 1's.
			 * 
			 * as an example, imagining these were 4-bit samples originally and
			 * the destination is 8-bit, if we have a hypothetical sample -5
			 * that ought to be negative, the left shift looks like this:
			 * 
			 * 00001011 << (8 - 4) =========== 10110000
			 * 
			 * (except the destination is 64-bit and the original bit depth from
			 * the file could be anything.)
			 * 
			 * and the right shift now fills with 1's:
			 * 
			 * 10110000 >> (8 - 4) =========== 11111011
			 */
			final long signShift = 64L - bitsPerSample;

			for (int i = 0; i < transfer.length; i++) {
				transfer[i] = ((transfer[i] << signShift) >> signShift);
			}
		} else {
			/*
			 * unsigned samples are easier since they will be read correctly in
			 * to the long.
			 * so just sign them: subtract 2^(bits - 1) so the center is 0.
			 */
			for (int i = 0; i < transfer.length; i++) {
				transfer[i] -= fullScale;
			}
		}

		/* finally normalize to range of -1.0f to 1.0f */
		for (int i = 0; i < transfer.length; i++) {
			samples[i] = (float) transfer[i] / (float) fullScale;
		}

		return samples;
	}

    /**
     * Applies a Hamming window, w(k) = 0.54 - 0.46*cos(2*pi*k/(N-1)), to each
     * channel of an interleaved sample frame, in place.
     *
     * Bug fix: the denominator previously used samples.length (the full
     * interleaved buffer), so for multi-channel audio only the rising half of
     * the window was ever applied. The window is now computed over the
     * per-channel frame length N = svalid / channels.
     *
     * @param samples interleaved samples, modified in place
     * @param svalid  number of valid samples across all channels
     * @param fmt     audio format, used only for the channel count
     * @return the {@code samples} array
     */
    public static float[] hamming(float[] samples, int svalid, AudioFormat fmt) {
		int channels = fmt.getChannels();
		int perChannel = svalid / channels;
		if (perChannel < 2) {
			return samples; // nothing to window; also avoids division by zero below
		}
		double denom = perChannel - 1;
		for (int ch = 0; ch < channels; ch++) {
			int k = 0;
			for (int i = ch; i < svalid; i += channels) {
				samples[i] = (float)(samples[i] * (0.54 - 0.46 * Math.cos(2.0 * Math.PI * k++ / denom)));
			}
		}
		return samples;
	}

	/**
	 * Precomputes the BAR_NUM + 2 band boundaries used by updateBuffer().
	 *
	 * Two passes: the array is first filled with frequencies spaced linearly
	 * on the Mel scale (hence logarithmically in Hz), then each entry is
	 * overwritten IN PLACE with the index of the first FFT bin whose
	 * frequency reaches it. After this method returns, frequencyBins holds
	 * FFT bin indices, not frequencies.
	 */
	private static void calculateBins(){
		// NOTE(review): 44100 is the full sample rate; the Nyquist limit
		// (22050, see the old comment) may have been intended — confirm.
		double maxFreq = 44100; // 22050;
		double time = (DEF_BUFFER_SAMPLE_SZ / 2) / maxFreq;
		double minFreq = 1 / time;
		
		frequencyBins = new double[BAR_NUM + 2];
		frequencyBins[0] = minFreq;
		frequencyBins[frequencyBins.length-1] = maxFreq;
		
		minFreq = melTransform(minFreq);
		maxFreq = melTransform(maxFreq);
		
		double amount = (maxFreq - minFreq)/(BAR_NUM + 1);
		
		/* Mel's scale is logarithmic so we can set the distances between
		frequencies to be linear, once we convert the values back 
		from Mel's scale we get logarithmic distances between frequencies 
		the distance increases as the frequencies increase
		which corresponds to how humans hear sound
		(we can detect fewer differences in higher frequencies) */
		for (int i = 1; i < frequencyBins.length-1; i++){
			frequencyBins[i] = iMelTransform(minFreq + i * amount);
		}
		
		// Second pass: walk the FFT bins in ascending frequency order and
		// replace each boundary frequency with the index of the first bin
		// at or above it. Relies on frequencyBins being monotonically increasing.
		int index = 0;
		//System.out.println(Arrays.toString(frequencyBins));
		for (int i = 1; i <= DEF_BUFFER_SAMPLE_SZ / 2; i++){
			double freq = i / time;
			if (freq >= frequencyBins[index]){
				frequencyBins[index++] = i - 1;
			}
			if (index == BAR_NUM + 2) break;
		}
		// The last boundary is pinned to the final FFT bin.
		frequencyBins[frequencyBins.length-1] = DEF_BUFFER_SAMPLE_SZ / 2;
		//System.out.println(Arrays.toString(frequencyBins));
	}

	/** Maps a frequency in Hz onto the (logarithmic) Mel scale. */
	private static double melTransform(double freq){
		final double ratio = freq / 700.0;
		return 1125.0 * Math.log(1.0 + ratio);
	}
	
	/** Inverse of melTransform: maps a Mel-scale value back to a frequency in Hz. */
	private static double iMelTransform(double freq){
		final double scaled = freq / 1125.0;
		return 700.0 * (Math.pow(Math.E, scaled) - 1.0);
	}

	/**
	 * Runs an FFT over the current sample frame and updates the per-bar
	 * magnitude and smoothing buffers read by the renderer.
	 *
	 * Each bar i covers FFT bins [frequencyBins[i], frequencyBins[i+2]) with a
	 * triangular weighting (rising to the band center, then falling), keeping
	 * the peak weighted power as the bar's raw magnitude in dB. Bars fall
	 * with accelerating speed (bufferDecrease) until a louder frame arrives.
	 *
	 * @param samples windowed, normalized samples for this frame
	 */
	public static void updateBuffer(float samples[]) {
		Complex data[] = new Complex[samples.length];
		for (int i = 0;i < samples.length;i++){
			data[i] = new Complex(samples[i], 0);
		}
		Complex niz[] = FastFourier.fft(data);

		for (int i = 0;i < magnitudes.length;i++){
			// frequencyBins holds FFT bin indices here (see calculateBins()).
			int startIndex = (int)frequencyBins[i];
			int endIndex = (int)frequencyBins[i+2];
			
			int amount = (endIndex - startIndex) / 2;
			int amountFull = endIndex - startIndex;
			double maxFreq = 0;
			
			for (int j = startIndex; j < endIndex; j++){
				// Triangular weight over the band, peaking at its center.
				double weight;
				if (j <= startIndex + amount) {
					weight = ((j - (startIndex - 1)) * 1.0) / (amount + 1);
				} else {
					weight = ((amountFull - (j - startIndex)) * 1.0) / amount;
				}
				double power = niz[j].re() * niz[j].re() + niz[j].im() * niz[j].im();
				double weighted = weight * power;
				if (weighted > maxFreq) maxFreq = weighted;
			}
			
			// Fix: Math.log10(0) is -Infinity, which previously leaked into
			// magnitudes (and the decay arithmetic below) whenever a band was
			// silent or empty. Clamp to a finite floor far below audible levels.
			magnitudes[i] = (maxFreq > 0.0) ? 20 * Math.log10(maxFreq) : -300.0;
			
			if (magnitudes[i] > buffer[i]){
				buffer[i] = magnitudes[i];
				bufferDecrease[i] = 0.9;
			}
			
			if (magnitudes[i] < buffer[i]){
				buffer[i] -= bufferDecrease[i];
				bufferDecrease[i] *= 1.3;
			}
		}
	}
}