package com.aij.starter.jna;

import com.sun.jna.Structure;
import com.sun.jna.Callback;
import com.sun.jna.Library;
import com.sun.jna.Pointer;

import com.aij.starter.event.LlamaEventHandler;
import com.aij.starter.utils.NativeUtils;

/**
 * Loads the llama.cpp shared library and exposes a thin JNA wrapper around it.
 * @author aij
 */
public class Llama {

    /** Handle to the loaded native library; remains null if loading failed. */
    static LlamaLibrary LLAMA;

    static {
        try {
            // JNA uses this property when converting Java Strings to native
            // char*; UTF-8 is required for non-ASCII prompts (e.g. Chinese).
            System.setProperty("jna.encoding", "UTF-8");
            LLAMA = (LlamaLibrary) NativeUtils.loadFromJar("llama", LlamaLibrary.class);
        } catch (Exception e) {
            // FIX: concatenating e.getStackTrace() into a String printed the
            // array's identity hash, not the trace — print the real trace.
            System.out.println("Llama Load: " + e.getMessage());
            e.printStackTrace();
        }
    }

    /** JNA mapping of the llama.cpp C API functions used by this wrapper. */
    public interface LlamaLibrary extends Library {

        /**
         * Mirrors the native {@code llama_context_params} struct.
         * Field order and types must match the C declaration exactly;
         * do not reorder fields.
         */
        class LlamaContextParams extends Structure {

            /** By-value variant, required when the struct is passed/returned by value. */
            public static class ByValue extends LlamaContextParams implements Structure.ByValue { }

            public int seed;           // RNG seed, -1 for random
            public int n_ctx;          // text context size
            public int n_batch;        // prompt processing batch size
            public int n_gqa;          // grouped-query attention (TEMP - will be moved to model hparams)
            public float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
            public int n_gpu_layers;   // number of layers to store in VRAM
            public int main_gpu;       // the GPU that is used for scratch and small tensors

            public Pointer tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)

            // ref: https://github.com/ggerganov/llama.cpp/pull/2054
            public float rope_freq_base;  // RoPE base frequency
            public float rope_freq_scale; // RoPE frequency scaling factor

            // called with a progress value between 0 and 1; pass NULL to disable
            public Pointer progress_callback;
            // opaque context pointer handed back to the progress callback
            public Pointer progress_callback_user_data;

            // Keep the booleans together to avoid misalignment during copy-by-value.
            public byte low_vram;   // if true, reduce VRAM usage at the cost of performance
            public byte mul_mat_q;  // if true, use experimental mul_mat_q kernels
            public byte f16_kv;     // use fp16 for KV cache
            public byte logits_all; // the llama_eval() call computes all logits, not just the last one
            public byte vocab_only; // only load the vocabulary, no weights
            public byte use_mmap;   // use mmap if possible
            public byte use_mlock;  // force system to keep model in RAM
            public byte embedding;  // embedding mode only
        }

        void llama_backend_init(boolean numa);

        void llama_backend_free();

        LlamaContextParams.ByValue llama_context_default_params();

        void llama_free(Pointer ctx);

        void llama_free_model(Pointer model);

        Pointer llama_load_model_from_file(String path_model, LlamaContextParams.ByValue parameters);

        Pointer llama_new_context_with_model(Pointer model, LlamaContextParams.ByValue parameters);

        void llama_tokenize_prompt(Pointer ctx, String prompt);

        /** Native callback invoked once per generated token/word. */
        public interface OnNewWord extends Callback {
            void callback(String text);
        }

        void llama_token_to_parts(
                Pointer ctx,
                String word,
                OnNewWord cb,
                int length,
                int n_threads);
    }

    /** Native llama context handle; null until Initialize succeeds. */
    static Pointer ctx;
    /** Native model handle; null until Initialize succeeds. */
    static Pointer m;

    /**
     * Initializes the model with an empty prompt.
     * @param model path to the GGML model file
     */
    public static void Initialize(String model) {
        Initialize(model, "");
    }

    /**
     * Initializes the llama backend, loads the model, creates a context and
     * tokenizes the initial prompt. Errors are reported to stdout/stderr
     * rather than propagated.
     * @param model  path to the GGML model file
     * @param prompt initial prompt to tokenize (may be empty)
     */
    public static void Initialize(String model, String prompt) {
        try {
            // Guard: the static loader may have failed, leaving LLAMA null.
            if (LLAMA == null) {
                throw new IllegalStateException("native llama library is not loaded");
            }
            LLAMA.llama_backend_init(false);
            LlamaLibrary.LlamaContextParams.ByValue param = LLAMA.llama_context_default_params();
            m = LLAMA.llama_load_model_from_file(model, param);
            if (m == null) {
                throw new IllegalStateException(String.format("Error: failed to load model '%s'\n", model));
            }
            ctx = LLAMA.llama_new_context_with_model(m, param);
            if (ctx == null) {
                // Release the model so a failed context creation does not leak it.
                LLAMA.llama_free_model(m);
                m = null;
                throw new IllegalStateException(String.format("Error: failed to create context with model '%s'\n", model));
            }
            LLAMA.llama_tokenize_prompt(ctx, prompt);
        } catch (Exception e) {
            // FIX: e.getStackTrace() concatenated into a String prints the
            // array's identity hash; print the actual trace instead.
            System.out.println("Llama Initialize: " + model + "\n" + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Generates up to 500 tokens continuing {@code word}, streaming each piece
     * to a new {@link LlamaEventHandler}. Must be called after a successful
     * {@link #Initialize(String, String)}.
     * @param word input text to continue from
     */
    public static void Run(String word) {
        try {
            // Guard against use before (or after a failed) initialization.
            if (LLAMA == null || ctx == null) {
                throw new IllegalStateException("Llama is not initialized");
            }
            LLAMA.llama_token_to_parts(ctx, word, new LlamaEventHandler(), 500, 1);
        } catch (Exception e) {
            System.out.println("Llama Run: " + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Frees the context, the model and the backend. Handles are nulled after
     * release so calling Destroy more than once does not double-free.
     */
    public static void Destroy() {
        if (LLAMA == null) {
            return; // nothing was ever loaded
        }
        if (ctx != null) {
            LLAMA.llama_free(ctx);
            ctx = null;
        }
        if (m != null) {
            LLAMA.llama_free_model(m);
            m = null;
        }
        LLAMA.llama_backend_free();
    }

    /**
     * Test driver. The model path may be supplied as {@code args[0]};
     * otherwise the hard-coded default below is used.
     */
    public static void main(String[] args) {
        String url = args.length > 0
                ? args[0]
                : "D:/Documents/cmake/llama.cpp/models/Chinese-Llama-2-7b-ggml-q4.bin";
        Llama.Initialize(url);  // Allocate an instance and invoke the native method
        Llama.Run("你好");
        Llama.Destroy();
    }
}
