use serde::{Deserialize, Serialize};
use wasm_bindgen::prelude::*;
use std::{fs::File, path::Path};

use anyhow::Result;
use candle_core::{IndexOp, Tensor, D};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{self, BertModel};
use tokenizers::{EncodeInput, PaddingParams, Tokenizer};

// A sentence-embedding engine: a BERT encoder plus its tokenizer,
// exported to JavaScript via wasm-bindgen.
#[wasm_bindgen]
pub struct Embd {
    // HuggingFace tokenizer; padding is forced to BatchLongest at load time
    // so batched encodings can be stacked into a single rectangular tensor.
    tokenizer: Tokenizer,
    // BERT model that produces per-token hidden states; embeddings are
    // derived from these by mean pooling (see `mean_pooling`).
    model: BertModel,
}

// load
impl Embd {
    pub fn load<P>(path: P, dtype: candle_core::DType, device: &candle_core::Device) -> Result<Self>
    where
        P: AsRef<Path>,
    {
        let path = path.as_ref();
        let vb = unsafe {
            VarBuilder::from_mmaped_safetensors(&[path.join("model.safetensors")], dtype, device)?
        };
        //let vb = VarBuilder::from_pth(path.join("pytorch_model.bin"), dtype, device)?;
        let config = Self::load_bert_config(&path.join("config.json"))?;
        println!("config = {:?}",config);
        let model = BertModel::load(vb, &config)?;
        let mut tokenizer = Tokenizer::from_file(&path.join("tokenizer.json")).unwrap();
        if let Some(pp) = tokenizer.get_padding_mut() {
            pp.strategy = tokenizers::PaddingStrategy::BatchLongest
        } else {
            let pp = PaddingParams {
                strategy: tokenizers::PaddingStrategy::BatchLongest,
                ..Default::default()
            };
            tokenizer.with_padding(Some(pp));
        }
        Ok(Self { tokenizer, model })
    }
    pub fn load_bert_config(path: &Path) -> Result<bert::Config> {
        let rdr = File::open(path)?;
        let config: bert::Config = serde_json::from_reader(rdr)?;
        Ok(config)
    }
}

#[wasm_bindgen]
impl Embd {
    #[wasm_bindgen(constructor)]
    pub fn from_raw(vb_raw : Vec<u8>,config_raw : Vec<u8>,tokenizer_raw : Vec<u8>) -> Result<Embd,JsError> {
        let vb = VarBuilder::from_buffered_safetensors(vb_raw, candle_core::DType::F32, &candle_core::Device::Cpu)?;
        let config : bert::Config = serde_json::from_slice(&config_raw)?;
        let model = BertModel::load(vb, &config)?;
        let mut tokenizer = Tokenizer::from_bytes(tokenizer_raw).map_err(|e| JsError::new(&format!("Failed to initailize tokenizer. Error : {}",e)))?;
        if let Some(pp) = tokenizer.get_padding_mut() {
            pp.strategy = tokenizers::PaddingStrategy::BatchLongest
        } else {
            let pp = PaddingParams {
                strategy: tokenizers::PaddingStrategy::BatchLongest,
                ..Default::default()
            };
            tokenizer.with_padding(Some(pp));
        }
        Ok(Self { tokenizer, model })
    }
    pub fn embedding_single(&self,input: JsValue) -> Result<JsValue, JsError> {
        let input : EmbdSingleParm = serde_wasm_bindgen::from_value(input).map_err(|e| JsError::new(&format!("Failed to deseriaize js parms .Error : {}",e)))?;
        let res = self.get_batch_embedding(vec![input.sentence.as_str()]).unwrap().i(0).unwrap();
        let re = res.to_vec1().unwrap();
        let re = EmbdSingleOutput {
            embedding : re
        };
        Ok(
            serde_wasm_bindgen::to_value(&re).map_err(|e| JsError::new(&format!("Failed to serialize embedding result for {} .Error : {}",input.sentence,e)))?
        )
    }
    pub fn embedding_batch(&self,input: JsValue) -> Result<JsValue, JsError> {
        let input : EmbdBatchParm = serde_wasm_bindgen::from_value(input).map_err(|e| JsError::new(&format!("Failed to deseriaize js parms .Error : {}",e)))?;
        let res = self.get_batch_embedding(input.sentences).unwrap().i(0).unwrap();
        let re = res.to_vec1().unwrap();
        let re = EmbdSingleOutput {
            embedding : re
        };
        Ok(
            serde_wasm_bindgen::to_value(&re).map_err(|e| JsError::new(&format!("Failed to serialize embedding result for batch .Error : {}",e)))?
        )
    }
}
impl Embd {
    pub fn get_batch_embedding<'a,E>(&self, sentences: Vec<E>) -> candle_core::Result<Tensor>
    where  E: Into<EncodeInput<'a>> + Send,
    {
        let encoding = self.tokenizer.encode_batch(sentences, true).unwrap();
        let encoded_ids = encoding
            .iter()
            .map(|encoded| {
                let encoded = encoded.get_ids();
                Tensor::new(encoded, &self.model.device).unwrap()
            })
            .collect::<Vec<Tensor>>();
        let attention_mask = encoding
            .iter()
            .map(|encoded| {
                let attention_mask = encoded.get_attention_mask();
                Tensor::new(attention_mask, &self.model.device).unwrap()
            })
            .collect::<Vec<Tensor>>();
        let input_ids = Tensor::stack(&encoded_ids, 0)?;
        let input_type_ids = input_ids.zeros_like()?;
        let model_output = self.model.forward(&input_ids, &input_type_ids)?;
        let attention_mask = Tensor::stack(&attention_mask, 0)?;
        let sentence_embeddings = mean_pooling(&model_output, &attention_mask)?;
        Ok(sentence_embeddings)
    }
    pub fn get_single_embedding(&self, sentence: &str) -> candle_core::Result<Tensor> {
        let encoding = self.tokenizer.encode(sentence, true).unwrap();
        let encoded_ids = encoding.get_ids();
        let attention_mask = encoding.get_attention_mask();
        let input_ids = Tensor::new(&*encoded_ids, &self.model.device)?.unsqueeze(0)?;
        let input_type_ids = input_ids.zeros_like()?;
        let model_output = self.model.forward(&input_ids, &input_type_ids)?;
        let atttention_mask = Tensor::new(attention_mask, &self.model.device)?;
        let sentence_embeddings = mean_pooling(&model_output, &atttention_mask)?;
        Ok(sentence_embeddings)
    }
}

/// Mean-pools token embeddings, weighting by the attention mask so padding
/// tokens contribute nothing.
///
/// `model_output` is `(batch, seq, hidden)`; `attention_mask` broadcasts to
/// that shape. The denominator (number of real tokens per row) is clamped to
/// at least `1e-9` to avoid division by zero on fully-masked rows.
///
/// # Errors
/// Propagates any tensor-operation failure.
fn mean_pooling(model_output: &Tensor, attention_mask: &Tensor) -> candle_core::Result<Tensor> {
    let mask = attention_mask
        .unsqueeze(D::Minus1)?
        .expand(model_output.dims())?
        .to_dtype(model_output.dtype())?;
    // Sum over the sequence axis (dim 1), then divide by the token count.
    let weighted_sum = (model_output * &mask)?.sum(1)?;
    let token_counts = mask.sum(1)?.clamp(1e-9, f32::MAX)?;
    weighted_sum / token_counts
}

// JS input for `embedding_single`: `{ sentence: string }`.
#[derive(Debug,Clone,Deserialize)]
pub struct EmbdSingleParm {
    sentence : String
}

// JS output for `embedding_single`: `{ embedding: number[] }`,
// one f32 per hidden dimension.
#[derive(Debug,Clone,Serialize)]
pub struct EmbdSingleOutput {
    embedding : Vec<f32>
}

// JS input for `embedding_batch`: `{ sentences: string[] }`.
#[derive(Debug,Clone,Deserialize)]
pub struct EmbdBatchParm {
    sentences : Vec<String>,
}

// JS output for batch embedding: `{ embedding: number[][] }`, one row per
// input sentence. NOTE(review): the `Option` wrapper appears unnecessary —
// no code path in this file produces `None`; confirm before relying on it.
#[derive(Debug,Clone,Serialize)]
pub struct EmbdBatchOutput {
    embedding : Option<Vec<Vec<f32>>>,
}



