import numpy as np
import torch
from tqdm import tqdm

from sentence_transformers import SentenceTransformer
from cde_benchmark.embedders.base_embedder import Embedder


class LateChunkingEmbedder(Embedder):
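    """Embeds documents in a single forward pass: the chunks of a document are
    concatenated (separated by the tokenizer's [SEP] token) and encoded
    together, and each chunk embedding is then mean-pooled from the token
    embeddings between consecutive [SEP] positions.
    """
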
    def __init__(
        self,
        model: SentenceTransformer,
        batch_size: int = 16,
        max_length: int = 8192,
        show_progress_bar: bool = True,
    ):
        super().__init__(is_contextual_model=True)
        self.model: SentenceTransformer = model
        self.show_progress_bar = show_progress_bar
        self.batch_size = batch_size
        self.max_length = max_length
        self.sep_token = self.model.tokenizer.sep_token

    def embed_queries(self, queries):
        return self.model.encode(
            queries,
            show_progress_bar=self.show_progress_bar,
            batch_size=self.batch_size,
        )

    def embed_documents(self, documents):
        # `documents` is a list of documents, each given as a list of chunks.
        # Note: this is demo code only; the resulting embeddings are not
        # contextual at all.
        embeddings = []
        for document in tqdm(documents):
            # Join the chunks with [SEP] so chunk boundaries can be recovered
            # from the token ids after encoding.
            doc = self.sep_token + self.sep_token.join(document)
            encodings = self.model.tokenizer(
                [doc],
                max_length=self.max_length,
                truncation=True,
                padding=True,
                return_tensors="pt",
            ).to(self.model.device)

            # locate the [SEP] tokens that mark the chunk boundaries
            sep_indices = (
                encodings["input_ids"] == self.model.tokenizer.sep_token_id
            ).nonzero(as_tuple=True)[1]

            # the tokenizer appends a final [SEP], so the last one must sit at
            # the very end of the sequence
            assert (sep_indices[-1] == encodings.input_ids.shape[1] - 1).item()
            if len(document) != len(sep_indices) - 1:
                print(
                    f"Warning: number of chunks ({len(document)}) does not match "
                    f"number of [SEP] tokens - 1 ({len(sep_indices) - 1}); the "
                    f"document was likely truncated and trailing chunks will be "
                    f"truncated or dropped."
                )
                print(
                    f"The document was {len(doc)} characters "
                    f"({len(encodings.input_ids[0])} tokens) while the model "
                    f"max_length is {self.max_length}."
                )

            # single forward pass through the underlying transformer to get
            # token-level hidden states for the whole document
            with torch.no_grad():
                model_outputs = self.model[0].auto_model(**encodings).last_hidden_state
            tmp_embeddings = []
            for i in range(len(sep_indices) - 1):
                # mean-pool the token embeddings between consecutive [SEP]
                # tokens to get one embedding per chunk
                tmp_embeddings.append(
                    model_outputs[0, sep_indices[i] + 1 : sep_indices[i + 1], :]
                    .mean(dim=0)
                    .cpu()
                    .numpy()
                )
            # stack into a (num_chunks, hidden_dim) array and L2-normalize
            # each chunk embedding
            tmp_embeddings = np.array(tmp_embeddings)
            tmp_embeddings = (
                tmp_embeddings / np.linalg.norm(tmp_embeddings, axis=1)[:, None]
            )

            embeddings.append(tmp_embeddings)

        return embeddings
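

# Minimal usage sketch. The model name below is an assumption for illustration
# only (any SentenceTransformer whose tokenizer defines a sep_token and whose
# context window covers max_length should work); it is not prescribed by this
# module.
if __name__ == "__main__":
    model = SentenceTransformer(
        "jinaai/jina-embeddings-v2-base-en", trust_remote_code=True
    )
    embedder = LateChunkingEmbedder(model=model)

    query_embeddings = embedder.embed_queries(["What is late chunking?"])
    doc_embeddings = embedder.embed_documents(
        [["First chunk of the document.", "Second chunk, same document."]]
    )
    # one (num_chunks, hidden_dim) array of L2-normalized vectors per document
    print(query_embeddings.shape, doc_embeddings[0].shape)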