import re
from typing import Any, List


import torch
from langchain.embeddings.base import Embeddings
from sentence_transformers import SentenceTransformer

# Device used for inference; hard-coded to CPU (set to "cuda" to use a GPU).
device = "cpu"


class BceEmbedding(Embeddings):
	"""LangChain-compatible embedding model backed by a local BCE
	(bce-embedding-base_v1) checkpoint loaded via sentence-transformers.

	Attributes:
		client: the underlying ``SentenceTransformer`` instance.
		tokenizer: placeholder, never assigned here — TODO confirm it is
			populated elsewhere or remove.
		context_sequence_length: max sequence length for context passages.
		query_sequence_length: max sequence length for queries.
		model_name: display/identifier name for the model.
	"""

	client: Any
	tokenizer: Any
	context_sequence_length: int = 512
	query_sequence_length: int = 512
	model_name: str = ""

	def __init__(self, **kwargs: Any):
		"""Load the model and record its configuration.

		Keyword Args:
			path: filesystem path to the model checkpoint
				(defaults to a local Windows path).
			dim: embedding dimensionality (default 768).
			model_name: model identifier (default "768").
		"""
		path: str = kwargs.get("path") or r'F:\code\models\bce-embedding-base_v1'
		dim: int = kwargs.get("dim") or 768
		model_name: str = kwargs.get("model_name") or "768"
		self.path = path
		self.dim = dim
		self.model_name = model_name
		self.client = SentenceTransformer(path, device=device, trust_remote_code=True)
		# BUGFIX: these were set to 521 — a typo for 512, the value the
		# class-level defaults declare and the model's actual max length.
		self.query_sequence_length = 512
		self.context_sequence_length = 512

	class Config:
		# Allow extra attributes (path, dim, ...) on the pydantic model
		# LangChain's Embeddings base may impose.
		extra = 'allow'

	@staticmethod
	def mean_pooling(model_output, attention_mask):
		"""Mask-aware mean pooling over token embeddings.

		Args:
			model_output: transformer output; element 0 holds the token
				embeddings of shape (batch, seq_len, hidden).
			attention_mask: (batch, seq_len) mask of valid tokens.

		Returns:
			(batch, hidden) tensor of mean-pooled sentence embeddings.
		"""
		token_embeddings = model_output[0]
		input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
		# Clamp avoids division by zero for all-padding rows.
		return torch.sum(token_embeddings * input_mask_expanded, 1) / \
			   torch.clamp(input_mask_expanded.sum(1), min=1e-9)

	def embed_documents(self, texts: List[str]) -> List[List[float]]:
		"""Embed a batch of documents.

		Args:
			texts: documents to embed.

		Returns:
			One float32 embedding vector per input text.
		"""
		with torch.no_grad():
			embeddings = self.client.encode(texts)
		embeddings = embeddings.astype('float32')
		return embeddings.tolist()

	def embed_query(self, text: str) -> List[float]:
		"""Embed a single query string; delegates to :meth:`embed_documents`."""
		return self.embed_documents([text])[0]

	@staticmethod
	def split_text(text: str, sentence_size: int = 150):
		"""Split Chinese/English text into sentences no longer than
		``sentence_size``, using progressively weaker punctuation.

		Args:
			text: raw text to split.
			sentence_size: soft maximum length per fragment.

		Returns:
			List of non-empty sentence fragments.
		"""
		# Insert newlines after terminal punctuation (unless followed by a
		# closing quote), then after ellipses, then after punctuation+quote.
		text = re.sub(r'([;；!?。！？\?])([^”’])', r"\1\n\2", text)
		text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)
		text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)
		text = re.sub(r'([;；!?。！？\?]["’”」』]{0,2})([^;；!?。！？\?])', r'\1\n\2', text)
		text = text.rstrip()

		ls = [i for i in text.split("\n") if i]
		# NOTE(review): the `.index()`-based splicing below picks the FIRST
		# occurrence, so duplicate fragments could be replaced at the wrong
		# position — behavior kept as-is; confirm inputs rarely repeat.
		for ele in ls:
			if len(ele) > sentence_size:
				# Still too long: re-split on sentence-final punctuation
				# not followed by a comma.
				ele1 = re.sub(r'([。.？?!！]["’”」』]{0,2})([^,，])', r'\1\n\2', ele)
				ele1_ls = ele1.split("\n")
				for ele_ele1 in ele1_ls:
					if len(ele_ele1) > sentence_size:
						# Fall back to whitespace runs.
						ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
						ele2_ls = ele_ele2.split("\n")
						for ele_ele2 in ele2_ls:
							if len(ele_ele2) > sentence_size:
								# Last resort: split on single spaces.
								ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
								ele2_pos = ele2_ls.index(ele_ele2)
								ele2_ls = ele2_ls[:ele2_pos] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
																									   ele2_pos + 1:]
						ele_pos = ele1_ls.index(ele_ele1)
						ele1_ls = ele1_ls[:ele_pos] + [i for i in ele2_ls if i] + ele1_ls[ele_pos + 1:]

				# Renamed from `id` to avoid shadowing the builtin.
				pos = ls.index(ele)
				ls = ls[:pos] + [i for i in ele1_ls if i] + ls[pos + 1:]
		return ls


if __name__ == '__main__':
	# Smoke test: load the local BCE checkpoint and embed one query.
	embeddings = BceEmbedding(
		model_name="text-embedding-3-large",
		path=r'F:\code\models\bce-embedding-base_v1',
		dim=768,
	)
	print(embeddings.embed_query("你好"))
