# cognitivess_model/tokenization_cognitivess.py

import os

from transformers import PreTrainedTokenizer


class CognitivessTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, merges_file=None, unk_token="[UNK]", **kwargs):
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        # Load the vocabulary before calling the parent constructor, which may
        # need to convert special tokens to ids during initialization.
        self.load_vocab()
        super().__init__(unk_token=unk_token, **kwargs)

    def load_vocab(self):
        # Load vocabulary: one token per line, id = line index
        with open(self.vocab_file, "r", encoding="utf-8") as f:
            self.vocab = {line.strip(): idx for idx, line in enumerate(f)}
        self.ids_to_tokens = {idx: token for token, idx in self.vocab.items()}

        # Load merges file if it was provided
        self.merges = []
        if self.merges_file:
            with open(self.merges_file, "r", encoding="utf-8") as f:
                self.merges = [line.strip() for line in f]

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab)

    def _tokenize(self, text):
        # Tokenization logic (basic example): simple whitespace splitting
        return text.split()

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        prefix = f"{filename_prefix}-" if filename_prefix else ""
        vocab_path = os.path.join(save_directory, f"{prefix}vocab.txt")
        with open(vocab_path, "w", encoding="utf-8") as f:
            for token in self.vocab:
                f.write(f"{token}\n")

        if self.merges_file:
            merges_path = os.path.join(save_directory, f"{prefix}merges.txt")
            with open(merges_path, "w", encoding="utf-8") as f:
                for merge in self.merges:
                    f.write(f"{merge}\n")
            return (vocab_path, merges_path)
        return (vocab_path,)
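

# Minimal usage sketch (assumption: a plain-text "vocab.txt" with one token
# per line, including an "[UNK]" entry, exists in the working directory --
# the file name is illustrative, not part of the class above).
if __name__ == "__main__":
    tokenizer = CognitivessTokenizer(vocab_file="vocab.txt")
    tokens = tokenizer.tokenize("hello world")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens, ids)
    print(tokenizer.convert_ids_to_tokens(ids))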