use std::{fs::File, path::Path};

use anyhow::Result;
use candle_core::{IndexOp, Tensor, D};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{self, BertModel};
use embedding_model::EmbeddingModel;
use log::info;
use tokenizers::{EncodeInput, PaddingParams, Tokenizer};

/// A BERT-based sentence-embedding model: a tokenizer paired with the
/// loaded transformer weights, used to encode text into dense vectors.
pub struct Embd {
    /// HuggingFace tokenizer; `load` configures it with fixed-length padding.
    tokenizer: Tokenizer,
    /// Candle BERT model whose hidden states are mean-pooled into embeddings.
    model: BertModel,
}

// Constructors and configuration loading.
impl Embd {
    pub fn load<P>(path: P, dtype: candle_core::DType, device: &candle_core::Device) -> Result<Self>
    where
        P: AsRef<Path>,
    {
        let path = path.as_ref();
        let vb = unsafe {
            VarBuilder::from_mmaped_safetensors(&[path.join("model.safetensors")], dtype, device)?
        };
        //let vb = VarBuilder::from_pth(path.join("pytorch_model.bin"), dtype, device)?;
        let config = Self::load_bert_config(&path.join("config.json"))?;
        info!("Loading model with config : {:?} ...", config);
        let model = BertModel::load(vb, &config)?;
        let mut tokenizer = Tokenizer::from_file(&path.join("tokenizer.json")).unwrap();
        if let Some(pp) = tokenizer.get_padding_mut() {
            pp.strategy = tokenizers::PaddingStrategy::Fixed(512)
        } else {
            let pp = PaddingParams {
                strategy: tokenizers::PaddingStrategy::Fixed(512),
                ..Default::default()
            };
            tokenizer.with_padding(Some(pp));
        }
        Ok(Self { tokenizer, model })
    }
    pub fn load_bert_config(path: &Path) -> Result<bert::Config> {
        let rdr = File::open(path)?;
        let config: bert::Config = serde_json::from_reader(rdr)?;
        Ok(config)
    }
}

#[cfg(feature = "legacy")]
impl Embd {
    /// Embeds a batch of sentences and returns the result as a
    /// `Vec<Vec<f32>>`, one row per sentence.
    ///
    /// Falls back to manual extraction when `to_vec2` fails (e.g. a rank-1
    /// result), and reports an error for any other rank.
    pub fn get_batch_embedding_vec<'a, E>(
        &self,
        sentences: Vec<E>,
    ) -> candle_core::Result<Vec<Vec<f32>>>
    where
        E: Into<EncodeInput<'a>> + Send,
    {
        let encoded = self.get_batch_embedding(sentences)?;
        match encoded.to_vec2::<f32>() {
            Ok(vec) => Ok(vec),
            Err(_) => {
                let dims = encoded.dims();
                match dims.len() {
                    1 => Ok(vec![encoded.to_vec1::<f32>()?]),
                    // BUG FIX: iterate over the batch dimension (dims[0]);
                    // the original looped `0..dims[1]` (the embedding size)
                    // while indexing rows, mixing up the two axes.
                    2 => (0..dims[0])
                        .map(|i| encoded.i(i)?.to_vec1::<f32>())
                        .collect::<candle_core::Result<Vec<Vec<f32>>>>(),
                    _ => Err(candle_core::Error::UnexpectedNumberOfDims {
                        expected: 2,
                        got: dims.len(),
                        shape: encoded.shape().clone(),
                    }),
                }
            }
        }
    }

    /// Embeds a batch of sentences and returns a 2-D tensor of mean-pooled
    /// sentence embeddings. Delegates to the `EmbeddingModel` trait impl so
    /// the tokenize/forward/pool pipeline lives in one place.
    pub fn get_batch_embedding<'a, E>(&self, sentences: Vec<E>) -> candle_core::Result<Tensor>
    where
        E: Into<EncodeInput<'a>> + Send,
    {
        self.embedding_batch_tensor(sentences)
    }

    /// Embeds a single sentence, returning its mean-pooled embedding tensor.
    pub fn get_single_embedding(&self, sentence: &str) -> candle_core::Result<Tensor> {
        // Propagate tokenizer failures instead of panicking.
        let encoding = self
            .tokenizer
            .encode(sentence, true)
            .map_err(|e| candle_core::Error::Msg(e.to_string()))?;
        let input_ids = Tensor::new(encoding.get_ids(), &self.model.device)?.unsqueeze(0)?;
        // Segment ids are all zero for single-sentence inputs.
        let input_type_ids = input_ids.zeros_like()?;
        let model_output = self.model.forward(&input_ids, &input_type_ids)?;
        let attention_mask = Tensor::new(encoding.get_attention_mask(), &self.model.device)?;
        mean_pooling(&model_output, &attention_mask)
    }
}

/// Mean-pools token embeddings into sentence embeddings, weighting each
/// token by its attention mask so padding tokens contribute nothing.
///
/// Assumes `model_output` is `(batch, seq, hidden)` and `attention_mask`
/// broadcasts to that shape — TODO confirm against callers. The token sum
/// is divided by the clamped mask sum so an all-zero mask cannot divide
/// by zero.
fn mean_pooling(model_output: &Tensor, attention_mask: &Tensor) -> candle_core::Result<Tensor> {
    let mask = attention_mask
        .unsqueeze(D::Minus1)?
        .expand(model_output.dims())?
        .to_dtype(model_output.dtype())?;
    let weighted_sum = (model_output * &mask)?.sum(1)?;
    let token_counts = mask.sum(1)?.clamp(1e-9, f32::MAX)?;
    weighted_sum / token_counts
}

impl EmbeddingModel for Embd {
    fn load<P>(
        path: P,
        dtype: candle_core::DType,
        device: &candle_core::Device,
    ) -> anyhow::Result<Self>
    where
        Self: Sized,
        P: AsRef<Path>,
    {
        let path = path.as_ref();
        let vb = unsafe {
            VarBuilder::from_mmaped_safetensors(&[path.join("model.safetensors")], dtype, device)?
        };
        let config = Self::load_bert_config(&path.join("config.json"))?;
        info!("Loading model with config : {:?} ...", config);
        let model = BertModel::load(vb, &config)?;
        let mut tokenizer = Tokenizer::from_file(&path.join("tokenizer.json")).unwrap();
        if let Some(pp) = tokenizer.get_padding_mut() {
            pp.strategy = tokenizers::PaddingStrategy::Fixed(512)
        } else {
            let pp = PaddingParams {
                strategy: tokenizers::PaddingStrategy::Fixed(512),
                ..Default::default()
            };
            tokenizer.with_padding(Some(pp));
        }
        Ok(Self { tokenizer, model })
    }

    fn embedding_batch_tensor<'a, E>(&self, sentences: Vec<E>) -> candle_core::Result<Tensor>
    where
        E: Into<EncodeInput<'a>> + Send,
    {
        let encoding = self.tokenizer.encode_batch(sentences, true).unwrap();
        let encoded_ids = encoding
            .iter()
            .map(|encoded| {
                let encoded = encoded.get_ids();
                Tensor::new(encoded, &self.model.device).unwrap()
            })
            .collect::<Vec<Tensor>>();
        let attention_mask = encoding
            .iter()
            .map(|encoded| {
                let attention_mask = encoded.get_attention_mask();
                Tensor::new(attention_mask, &self.model.device).unwrap()
            })
            .collect::<Vec<Tensor>>();
        let input_ids = Tensor::stack(&encoded_ids, 0)?;
        let input_type_ids = input_ids.zeros_like()?;
        let model_output = self.model.forward(&input_ids, &input_type_ids)?;
        let attention_mask = Tensor::stack(&attention_mask, 0)?;
        let sentence_embeddings = mean_pooling(&model_output, &attention_mask)?;
        Ok(sentence_embeddings)
    }
}
