repo_id | file_path | content | __index_level_0__
---|---|---|---|
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/decoders.js | const native = require("./native");
module.exports = {
byteLevelDecoder: native.decoders_ByteLevel,
replaceDecoder: native.decoders_Replace,
wordPieceDecoder: native.decoders_WordPiece,
byteFallbackDecoder: native.decoders_ByteFallback,
fuseDecoder: native.decoders_Fuse,
stripDecoder: native.decoders_Strip,
metaspaceDecoder: native.decoders_Metaspace,
bpeDecoder: native.decoders_BPEDecoder,
ctcDecoder: native.decoders_CTC,
sequenceDecoder: native.decoders_Sequence,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/decoders.test.ts | import {
bpeDecoder,
byteFallbackDecoder,
ctcDecoder,
fuseDecoder,
metaspaceDecoder,
replaceDecoder,
sequenceDecoder,
stripDecoder,
wordPieceDecoder,
} from "./decoders";
describe("wordPieceDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(wordPieceDecoder(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(wordPieceDecoder("test", undefined)).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(
wordPieceDecoder().decode(["Hel", "##lo", "there", "my", "fr", "##iend"])
).toEqual("Hello there my friend");
});
});
describe("byteFallbackDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(byteFallbackDecoder()).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(byteFallbackDecoder().decode(["Hel", "lo"])).toEqual("Hello");
expect(byteFallbackDecoder().decode(["<0x61>"])).toEqual("a");
expect(byteFallbackDecoder().decode(["<0x61>"])).toEqual("a");
expect(byteFallbackDecoder().decode(["My", " na", "me"])).toEqual("My name");
expect(byteFallbackDecoder().decode(["<0x61>"])).toEqual("a");
expect(byteFallbackDecoder().decode(["<0xE5>"])).toEqual("�");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>"])).toEqual("��");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>", "<0xab>"])).toEqual("叫");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>", "a"])).toEqual("��a");
expect(byteFallbackDecoder().decode(["<0xE5>", "<0x8f>", "<0xab>", "a"])).toEqual(
"叫a"
);
});
});
describe("replaceDecoder", () => {
it("can decode arrays of strings", () => {
expect(replaceDecoder("_", " ").decode(["Hello", "_Hello"])).toEqual("Hello Hello");
});
});
describe("fuseDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(fuseDecoder()).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(fuseDecoder().decode(["Hel", "lo"])).toEqual("Hello");
});
});
describe("stripDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(stripDecoder("_", 0, 0)).toBeDefined();
});
it("can decode arrays of strings", () => {
expect(stripDecoder("_", 1, 0).decode(["_Hel", "lo", "__there"])).toEqual(
"Hello_there"
);
});
});
describe("metaspaceDecoder", () => {
it("accepts `undefined` as first parameter", () => {
expect(metaspaceDecoder(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(metaspaceDecoder("t", undefined)).toBeDefined();
});
});
describe("bpeDecoder", () => {
it("accepts `undefined` as parameter", () => {
expect(bpeDecoder(undefined)).toBeDefined();
});
});
describe("ctcDecoder", () => {
it("accepts `undefined` as parameter", () => {
expect(ctcDecoder(undefined)).toBeDefined();
});
it("encodes correctly", () => {
expect(
ctcDecoder().decode(["<pad>", "h", "h", "e", "e", "l", "l", "<pad>", "l", "l", "o"])
).toEqual("hello");
});
});
describe("sequenceDecoder", () => {
it("accepts `empty list` as parameter", () => {
expect(sequenceDecoder([])).toBeDefined();
});
it("encodes correctly", () => {
expect(
sequenceDecoder([ctcDecoder(), metaspaceDecoder()]).decode([
"▁",
"▁",
"H",
"H",
"i",
"i",
"▁",
"y",
"o",
"u",
])
).toEqual("Hi you");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/enums.ts | export enum TruncationStrategy {
LongestFirst = "longest_first",
OnlyFirst = "only_first",
OnlySecond = "only_second",
}
export enum TruncationDirection {
Left = "left",
Right = "right",
}
export enum PaddingDirection {
Left = "left",
Right = "right",
}
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/models.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of
* a Model will return an instance of this class when instantiated.
*/
interface Model {
/**
* Save the current model in the given folder, using the given name
* for the various files that will get created.
* Any file with the same name that already exists in this folder will be overwritten.
*
* @param folder Name of the destination folder
* @param name Prefix to use in the name of created files
*/
save(folder: string, name?: string): string[];
}
type ModelCallback = (err: Error, model: Model) => void;
export interface BPEOptions {
/**
* The number of words that the BPE cache can contain. The cache speeds up
* the process by keeping the result of the merge operations for a number
* of words.
* @default 10_000
*/
cacheCapacity?: number;
/**
* The BPE dropout to use. Must be a float between 0 and 1
*/
dropout?: number;
/**
* The unknown token to be used by the model
*/
unkToken?: string;
/**
* The prefix to attach to subword units that don't represent the beginning of a word
*/
continuingSubwordPrefix?: string;
/**
* The suffix to attach to subword units that represent the end of a word
*/
endOfWordSuffix?: string;
}
export namespace BPE {
/**
* Instantiate a BPE model from the given vocab and merges
*
* @param vocab A dict mapping strings to numbers, representing the vocab
* @param merges An array of tuples of strings, representing two tokens to be merged
* @param options BPE model options
*/
export function init(
vocab: { [token: string]: number },
merges: [string, string][],
options?: BPEOptions
): Model;
/**
* Instantiate a BPE model from the given vocab and merges files
*
* @param vocab Path to a vocabulary JSON file
* @param merges Path to a merge file
* @param options BPE model options
* @param __callback Callback called when model is loaded
*/
export function fromFile(
vocab: string,
merges: string,
optionsOrCallback?: BPEOptions | ModelCallback,
__callback?: ModelCallback
): void;
/**
* Instantiate an empty BPE Model
*/
export function empty(): Model;
}
export interface WordPieceOptions {
/**
* The prefix to attach to subword units that don't represent the beginning of a word
* @default "##"
*/
continuingSubwordPrefix?: string;
/**
* The maximum number of characters allowed in a single word.
* @default 100
*/
maxInputCharsPerWord?: number;
/**
* The unknown token to be used by the model.
* @default "[UNK]"
*/
unkToken?: string;
}
export namespace WordPiece {
/**
* Instantiate a WordPiece model from the given vocab
*
* @param vocab A dict mapping strings to numbers, representing the vocab
* @param options WordPiece model options
*/
export function init(
vocab: { [token: string]: number },
options?: WordPieceOptions
): Model;
/**
* Instantiate a WordPiece model from the given vocab file
*
* @param vocab Path to a vocabulary file
* @param options WordPiece model options
* @param __callback Callback called when model is loaded
*/
export function fromFile(
vocab: string,
optionsOrCallback?: WordPieceOptions | ModelCallback,
__callback?: ModelCallback
): void;
/**
* Instantiate an empty WordPiece model
*/
export function empty(): Model;
}
export interface WordLevelOptions {
/**
* The unknown token to be used by the model.
* @default "[UNK]"
*/
unkToken?: string;
}
export namespace WordLevel {
/**
* Instantiate a WordLevel model from the given vocab
*
* @param vocab A dict mapping strings to numbers, representing the vocab
* @param options WordLevel model options
*/
export function init(
vocab: { [token: string]: number },
options?: WordLevelOptions
): Model;
/**
* Instantiate a WordLevel model from the given vocab file
*
* @param vocab Path to a vocabulary file
* @param options WordLevel model options
* @param __callback Callback called when model is loaded
*/
export function fromFile(
vocab: string,
optionsOrCallback?: WordLevelOptions | ModelCallback,
__callback?: ModelCallback
): void;
/**
* Instantiate an empty WordLevel model
*/
export function empty(): Model;
}
export interface UnigramOptions {
/**
* The unknown token id to be used by the model.
* @default undefined
*/
unkId?: number;
/**
* Whether or not byte fallback support should be enabled.
* @default false
*/
byte_fallback?: boolean;
}
export namespace Unigram {
/**
* Instantiate a Unigram model from the given vocab
*
* @param vocab An array of token and id tuples
* @param options Unigram model options
*/
export function init(vocab: [string, number][], options?: UnigramOptions): Model;
/**
* Instantiate an empty Unigram model
*/
export function empty(): Model;
}
| 0 |
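The model declarations above mix synchronous constructors (`init`, `empty`) with callback-based `fromFile` loaders. A minimal sketch of both styles, following the pattern used in the tests below; the file paths, output folder, and the `loadWordPiece` helper name are illustrative only:

```ts
import { promisify } from "util";
import { BPE, Model, WordPiece, WordPieceOptions } from "./models";

// Synchronous: build a tiny BPE model from an in-memory vocab and merge list.
const bpe = BPE.init({ a: 0, b: 1, ab: 2 }, [["a", "b"]]);
console.log(bpe.constructor.name); // "Model"

// Callback-based: wrap `fromFile` with promisify for async/await usage.
const wordPieceFromFile = promisify<string, WordPieceOptions, Model>(WordPiece.fromFile);

async function loadWordPiece(): Promise<Model> {
  const model = await wordPieceFromFile("./__mocks__/vocab.txt", {
    continuingSubwordPrefix: "##",
  });
  // Persist the model files into a folder, prefixing them with "my-wordpiece".
  console.log(model.save("./out", "my-wordpiece"));
  return model;
}
```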
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/models.js | const native = require("./native");
module.exports = {
BPE: {
init: native.models_BPE_init,
fromFile: native.models_BPE_from_file,
empty: native.models_BPE_empty,
},
WordPiece: {
init: native.models_WordPiece_init,
fromFile: native.models_WordPiece_from_file,
empty: native.models_WordPiece_empty,
},
WordLevel: {
init: native.models_WordLevel_init,
fromFile: native.models_WordLevel_from_file,
empty: native.models_WordLevel_empty,
},
Unigram: {
init: native.models_Unigram_init,
empty: native.models_Unigram_empty,
},
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/models.test.ts | /* eslint-disable @typescript-eslint/no-empty-function */
/* eslint-disable @typescript-eslint/no-explicit-any */
import { BPE, Unigram, WordPiece } from "./models";
const MOCKS_DIR = __dirname + "/__mocks__";
describe("WordPiece", () => {
describe("fromFile", () => {
it("throws if called with only one argument", () => {
expect(() => (WordPiece as any).fromFile("test")).toThrow("not enough arguments");
});
it("throws if called with 2 arguments without a callback as third argument", () => {
expect(() => (WordPiece as any).fromFile("test", {})).toThrow(
"not enough arguments"
);
});
describe("when called with 2 correct arguments", () => {
it("returns `undefined` ", () => {
expect(WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, () => {})).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, (err, model) => {
expect(model).toBeDefined();
done();
});
});
});
});
describe("when called with 3 correct arguments", () => {
it("returns `undefined`", () => {
expect(
WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, {}, () => {})
).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`, {}, (err, model) => {
expect(model).toBeDefined();
done();
});
});
});
});
});
});
describe("BPE", () => {
describe("fromFile", () => {
it("throws if called with only two arguments", () => {
expect(() => (BPE as any).fromFile("test", "bis")).toThrow("not enough arguments");
});
it("throws if called with 3 arguments without a callback as last argument", () => {
expect(() => (BPE as any).fromFile("test", "bis", {})).toThrow(
"not enough arguments"
);
});
});
describe("when called with 3 correct arguments", () => {
it("returns `undefined`", () => {
expect(
BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`, () => {})
).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
BPE.fromFile(
`${MOCKS_DIR}/vocab.json`,
`${MOCKS_DIR}/merges.txt`,
(err, model) => {
expect(model).toBeDefined();
done();
}
);
});
});
});
describe("when called with 4 correct arguments", () => {
it("returns `undefined`", () => {
expect(
BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`, {}, () => {})
).toBeUndefined();
});
it("has its callback called with the loaded model", () => {
return new Promise((done) => {
BPE.fromFile(
`${MOCKS_DIR}/vocab.json`,
`${MOCKS_DIR}/merges.txt`,
{},
(err, model) => {
expect(model).toBeDefined();
done();
}
);
});
});
});
describe("When initialized from memory", () => {
it("returns the loaded Model", () => {
const bpe = BPE.init({ a: 0, b: 1, ab: 2 }, [["a", "b"]]);
expect(bpe.constructor.name).toEqual("Model");
});
});
});
describe("Unigram", () => {
it("can be initialized from memory", () => {
const unigram = Unigram.init(
[
["<unk>", 0],
["Hello", -1],
["there", -2],
],
{
unkId: 0,
byte_fallback: false,
}
);
expect(unigram.constructor.name).toEqual("Model");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/native.js | const addon = require("../../native");
module.exports = addon;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/native.prod.js | const native = require("../bin-package");
module.exports = native;
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/normalizers.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of a
* Normalizer will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface Normalizer {
normalizeString(s: string): string;
}
export interface BertNormalizerOptions {
/**
* Whether to clean the text by removing any control characters
* and replacing all whitespace characters with classic spaces.
* @default true
*/
cleanText?: boolean;
/**
* Whether to handle Chinese characters by putting spaces around them.
* @default true
*/
handleChineseChars?: boolean;
/**
* Whether to lowercase.
* @default true
*/
lowercase?: boolean;
/**
* Whether to strip all accents.
* @default undefined
*/
stripAccents?: boolean;
}
/**
* Instantiate a Bert Normalizer with the given options
*
* @param [options] Normalizer options
* @returns Bert Normalizer. Takes care of normalizing raw text before giving it to a Bert model.
* This includes cleaning the text, handling accents and Chinese characters, and lowercasing
*/
export function bertNormalizer(options?: BertNormalizerOptions): Normalizer;
/**
* Returns a new NFC Unicode Normalizer
*/
export function nfcNormalizer(): Normalizer;
/**
* Returns a new NFD Unicode Normalizer
*/
export function nfdNormalizer(): Normalizer;
/**
* Returns a new NFKC Unicode Normalizer
*/
export function nfkcNormalizer(): Normalizer;
/**
* Returns a new NFKD Unicode Normalizer
*/
export function nfkdNormalizer(): Normalizer;
/**
* Instantiate a new Normalization Sequence using the given normalizers
* @param normalizers A list of Normalizer to be run as a sequence
*/
export function sequenceNormalizer(normalizers: Normalizer[]): Normalizer;
/**
* Returns a new Lowercase Normalizer
*/
export function lowercaseNormalizer(): Normalizer;
/**
* Returns a new Strip Normalizer
* @param [left=true] Whether or not to strip on the left (defaults to `true`)
* @param [right=true] Whether or not to strip on the right (defaults to `true`)
*/
export function stripNormalizer(left?: boolean, right?: boolean): Normalizer;
/**
* Returns a new Prepend Normalizer
* @param [prepend] The string to prepend
*/
export function prependNormalizer(prepend: string): Normalizer;
/**
* Returns a new StripAccents Normalizer
*/
export function stripAccentsNormalizer(): Normalizer;
/**
* Returns a new Nmt Normalizer
*/
export function nmtNormalizer(): Normalizer;
/**
* Returns a new Precompiled Normalizer
*/
export function precompiledNormalizer(): Normalizer;
/**
* Returns a new Replace Normalizer
*/
export function replaceNormalizer(): Normalizer;
| 0 |
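A short sketch composing the normalizers declared above into a `Sequence` and running it through `normalizeString`; the exact output depends on the Bert clean-text rules, so the expected string in the comment is approximate:

```ts
import {
  bertNormalizer,
  lowercaseNormalizer,
  sequenceNormalizer,
  stripNormalizer,
} from "./normalizers";

// Chain several normalizers; the sequence applies them in order.
const normalizer = sequenceNormalizer([
  bertNormalizer({ lowercase: false, stripAccents: true }),
  lowercaseNormalizer(),
  stripNormalizer(true, true),
]);

// Accents stripped, lowercased, surrounding whitespace removed:
// expected to be roughly "hello there".
console.log(normalizer.normalizeString("  Héllo There  "));
```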
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/normalizers.js | const native = require("./native");
module.exports = {
bertNormalizer: native.normalizers_BertNormalizer,
nfcNormalizer: native.normalizers_NFC,
nfdNormalizer: native.normalizers_NFD,
nfkcNormalizer: native.normalizers_NFKC,
nfkdNormalizer: native.normalizers_NFKD,
sequenceNormalizer: native.normalizers_Sequence,
lowercaseNormalizer: native.normalizers_Lowercase,
stripNormalizer: native.normalizers_Strip,
prependNormalizer: native.normalizers_Prepend,
stripAccentsNormalizer: native.normalizers_StripAccents,
nmtNormalizer: native.normalizers_Nmt,
precompiledNormalizer: native.normalizers_Precompiled,
replaceNormalizer: native.normalizers_Replace,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/normalizers.test.ts | import {
prependNormalizer,
stripAccentsNormalizer,
stripNormalizer,
} from "./normalizers";
describe("stripNormalizer", () => {
it("instantiates with no parameters", () => {
const normalizer = stripNormalizer();
expect(normalizer.constructor.name).toEqual("Normalizer");
});
it("accepts `undefined` as first parameter", () => {
expect(stripNormalizer(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(stripNormalizer(false, undefined)).toBeDefined();
});
it("instantiates with one parameter", () => {
const normalizer = stripNormalizer(false);
expect(normalizer.constructor.name).toEqual("Normalizer");
});
it("instantiates with two parameters", () => {
const normalizer = stripNormalizer(false, true);
expect(normalizer.constructor.name).toEqual("Normalizer");
});
it("prepend instantiates with one parameter", () => {
const normalizer = prependNormalizer("_");
expect(normalizer.constructor.name).toEqual("Normalizer");
expect(normalizer.normalizeString("Hello")).toEqual("_Hello");
});
it("can normalize strings", () => {
const normalizer = stripNormalizer();
expect(normalizer.normalizeString(" Hello there ")).toEqual("Hello there");
});
});
describe("stripAccentsNormalizer", () => {
it("initialize", () => {
const normalizer = stripAccentsNormalizer();
expect(normalizer.constructor.name).toEqual("Normalizer");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/post-processors.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of
* a PostProcessor will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface PostProcessor {}
/**
* Instantiate a new BertProcessing with the given tokens
*
* @param sep A tuple with the string representation of the SEP token, and its id
* @param cls A tuple with the string representation of the CLS token, and its id
*/
export function bertProcessing(
sep: [string, number],
cls: [string, number]
): PostProcessor;
/**
* Instantiate a new ByteLevelProcessing.
*
* @param [trimOffsets=true] Whether to trim the whitespaces from the produced offsets.
* By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you
* don't want the offsets to include these whitespaces, then this processing step must be used.
* @since 0.6.0
*/
export function byteLevelProcessing(trimOffsets?: boolean): PostProcessor;
/**
* Instantiate a new RobertaProcessing with the given tokens
*
* @param sep A tuple with the string representation of the SEP token, and its id
* @param cls A tuple with the string representation of the CLS token, and its id
* @param [trimOffsets=true] Whether to trim the whitespaces in the produced offsets
* @param [addPrefixSpace=true] Whether addPrefixSpace was ON during the pre-tokenization
*/
export function robertaProcessing(
sep: [string, number],
cls: [string, number],
trimOffsets?: boolean,
addPrefixSpace?: boolean
): PostProcessor;
/**
* Instantiate a new TemplateProcessing.
*
* @param single A string describing the template for a single sequence
* @param pair A string describing the template for a pair of sequences
* @param specialTokens An array with all the special tokens
*/
export function templateProcessing(
single: string,
pair?: string,
specialTokens?: [string, number][]
): PostProcessor;
/**
* Instantiate a new SequenceProcessing.
*
* @param processors The list of PostProcessors to use
* @since 0.13.0
*/
export function sequenceProcessing(processors: PostProcessor[]): PostProcessor;
| 0 |
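A sketch of the `TemplateProcessing` and `Sequence` processors declared above, mirroring the tests that follow; note that any special token referenced in a template must be declared together with its id, otherwise instantiation throws:

```ts
import {
  byteLevelProcessing,
  sequenceProcessing,
  templateProcessing,
} from "./post-processors";

// A BERT-style template for single sequences and pairs of sequences.
const template = templateProcessing(
  "[CLS] $A [SEP]",
  "[CLS] $A [SEP] $B:1 [SEP]:1",
  [
    ["[CLS]", 1],
    ["[SEP]", 2],
  ]
);

// Post-processors can be chained; the sequence applies them in order.
const processor = sequenceProcessing([byteLevelProcessing(true), template]);
```

The resulting processor would typically be attached to a `Tokenizer` via `setPostProcessor` (declared in `tokenizer.d.ts` further below).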
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/post-processors.js | const native = require("./native");
module.exports = {
bertProcessing: native.processors_BertProcessing,
byteLevelProcessing: native.processors_ByteLevel,
robertaProcessing: native.processors_RobertaProcessing,
templateProcessing: native.processors_TemplateProcessing,
sequenceProcessing: native.processors_Sequence,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/post-processors.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import {
bertProcessing,
byteLevelProcessing,
robertaProcessing,
sequenceProcessing,
templateProcessing,
} from "./post-processors";
describe("bertProcessing", () => {
it("instantiates correctly with only two parameters", () => {
const processor = bertProcessing(["sep", 1], ["cls", 2]);
expect(processor.constructor.name).toEqual("Processor");
});
it("throws if only one argument is provided", () => {
expect(() => (bertProcessing as any)(["sep", 1])).toThrow("Argument 1 is missing");
});
it("throws if arguments are malformed", () => {
expect(() => (bertProcessing as any)(["sep", "1"], ["cls", "2"])).toThrow(
'invalid type: string "1", expected u32'
);
expect(() => (bertProcessing as any)(["sep"], ["cls"])).toThrow(
"invalid length 1, expected a tuple of size 2"
);
});
});
describe("byteLevelProcessing", () => {
it("instantiates correctly without any parameter", () => {
const processor = byteLevelProcessing();
expect(processor.constructor.name).toEqual("Processor");
});
it("accepts `undefined` as first parameter", () => {
expect(byteLevelProcessing(undefined)).toBeDefined();
});
it("accepts `boolean` as first parameter", () => {
expect(byteLevelProcessing(true)).toBeDefined();
});
});
describe("robertaProcessing", () => {
it("instantiates correctly with only two parameters", () => {
const processor = robertaProcessing(["sep", 1], ["cls", 2]);
expect(processor.constructor.name).toEqual("Processor");
});
it("accepts `undefined` as third and fourth parameters", () => {
expect(robertaProcessing(["sep", 1], ["cls", 2], undefined, undefined)).toBeDefined();
});
it("accepts `boolean` as third and fourth parameter", () => {
expect(robertaProcessing(["sep", 1], ["cls", 2], true, true)).toBeDefined();
});
});
describe("templateProcessing", () => {
it("instantiates correctly with only a single template", () => {
const processor = templateProcessing("$A $A");
expect(processor.constructor.name).toEqual("Processor");
});
it("throws if special tokens are missing", () => {
expect(() => templateProcessing("[CLS] $A [SEP]")).toThrow(
"Missing SpecialToken(s) with id(s)"
);
});
it("instantiates correctly with both templates", () => {
const processor = templateProcessing(
"[CLS] $A [SEP]",
"[CLS] $A [SEP] $B:1 [SEP]:1",
[
["[CLS]", 1],
["[SEP]", 2],
]
);
expect(processor.constructor.name).toEqual("Processor");
});
});
describe("sequenceProcessing", () => {
it("accepts `PostProcessor[]` as first parameter", () => {
const template = templateProcessing("[CLS] $A [SEP]", "[CLS] $A [SEP] $B:1 [SEP]:1", [
["[CLS]", 1],
["[SEP]", 2],
]);
const bytelevel = byteLevelProcessing(true);
expect(sequenceProcessing([bytelevel, template])).toBeDefined();
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/pre-tokenizers.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of a
* PreTokenizer will return an instance of this class when instantiated.
*/
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface PreTokenizer {
preTokenizeString(s: string): [string, [number, number]][];
}
/**
* Instantiate a new ByteLevel PreTokenizer
*
* @param [addPrefixSpace=true] Whether to add a space to the first word if there isn't already one.
* This lets us treat `hello` exactly like `say hello`.
* @returns ByteLevel PreTokenizer.
* This pre-tokenizer takes care of replacing all bytes of the given string
* with a corresponding representation, as well as splitting into words.
*/
export function byteLevelPreTokenizer(addPrefixSpace?: boolean): PreTokenizer;
/**
* Returns the alphabet used by the ByteLevel PreTokenizer.
* Since the ByteLevel works as its name suggests, at the byte level, it
* encodes any byte to one visible character. This means that there is a
* total of 256 different characters composing this alphabet.
*/
export function byteLevelAlphabet(): string[];
/**
* Returns a Whitespace PreTokenizer
* This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
*/
export function whitespacePreTokenizer(): PreTokenizer;
/**
* Returns a WhitespaceSplit PreTokenizer
* This pre-tokenizer simply splits on whitespaces only. Works almost like the `.split(' ')`
* function, except that it accounts for multiple consecutive spaces
*/
export function whitespaceSplitPreTokenizer(): PreTokenizer;
/**
* Returns a Split PreTokenizer
* This versatile pre-tokenizer splits using the provided pattern and
* according to the provided behavior. The pattern can be inverted by
* making use of the invert flag.
*
* @param [pattern] A pattern used to split the string. Usually a string or a Regex.
* @param [behavior] The behavior to use when splitting.
* Choices: "removed", "isolated", "mergedWithPrevious", "mergedWithNext",
* "contiguous".
* @param [invert=false] Whether to invert the pattern.
*/
export function splitPreTokenizer(
pattern?: string,
behavior?: string,
invert?: boolean
): PreTokenizer;
/**
* Returns a new Bert PreTokenizer.
* This pre-tokenizer splits tokens on spaces, and also on punctuation.
* Each occurrence of a punctuation character will be treated separately.
*/
export function bertPreTokenizer(): PreTokenizer;
/**
* Returns a new Metaspace PreTokenizer.
* This pre-tokenizer replaces any whitespace by the provided replacement character.
* It then tries to split on these spaces.
*
* @param [replacement="▁"] The replacement character. Must be exactly one character.
* By default we use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
* @param [addPrefixSpace] Whether to add a space to the first word if there isn't already one.
* This lets us treat `hello` exactly like `say hello`.
*/
export function metaspacePreTokenizer(
replacement?: string,
addPrefixSpace?: boolean
): PreTokenizer;
/**
* Returns a CharDelimiterSplit PreTokenizer
* This pre-tokenizer simply splits on the provided delimiter. Works almost like the `.split(delimiter)`
* function, except that it accounts for multiple consecutive spaces
*
* @param delimiter The delimiter character on which the sequence will be split.
*/
export function charDelimiterSplitPreTokenizer(delimiter: string): PreTokenizer;
/**
* Returns a new Punctuation PreTokenizer.
* This pre-tokenizer splits tokens on punctuation according to the provided behavior.
* Each occurrence of a punctuation character is treated separately.
*
* @param [behavior="isolated"] The behavior to use when splitting.
* Choices: "removed", "isolated", "mergedWithPrevious", "mergedWithNext",
* "contiguous"
*/
export function punctuationPreTokenizer(behavior?: string): PreTokenizer;
/**
* Returns a new Sequence PreTokenizer.
* This pre-tokenizer combines other pre-tokenizers and applies them
* sequentially.
*/
export function sequencePreTokenizer(pretokenizers: PreTokenizer[]): PreTokenizer;
/**
* Returns a new Digits PreTokenizer.
* This pre-tokenizer splits on numbers. Optionally, it can split on individual digits.
*
* @param [individualDigits=false] Whether to split on individual digits.
*/
export function digitsPreTokenizer(individualDigits?: boolean): PreTokenizer;
| 0 |
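A sketch of the pre-tokenizers declared above; the metaspace example mirrors the test that follows, while the chained example simply prints whatever splits the sequence produces:

```ts
import {
  digitsPreTokenizer,
  metaspacePreTokenizer,
  sequencePreTokenizer,
  whitespacePreTokenizer,
} from "./pre-tokenizers";

// Metaspace replaces whitespace with "▁" and splits on it:
// "Hello there" -> [["▁Hello", [0, 5]], ["▁there", [5, 11]]]
const metaspace = metaspacePreTokenizer();
console.log(metaspace.preTokenizeString("Hello there"));

// Pre-tokenizers can be chained: split on whitespace first, then split
// numbers into individual digits.
const chained = sequencePreTokenizer([
  whitespacePreTokenizer(),
  digitsPreTokenizer(true),
]);
console.log(chained.preTokenizeString("version 2 of 10"));
```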
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/pre-tokenizers.js | const native = require("./native");
module.exports = {
byteLevelPreTokenizer: native.pre_tokenizers_ByteLevel,
byteLevelAlphabet: native.pre_tokenizers_ByteLevel_Alphabet,
whitespacePreTokenizer: native.pre_tokenizers_Whitespace,
whitespaceSplitPreTokenizer: native.pre_tokenizers_WhitespaceSplit,
bertPreTokenizer: native.pre_tokenizers_BertPreTokenizer,
metaspacePreTokenizer: native.pre_tokenizers_Metaspace,
charDelimiterSplitPreTokenizer: native.pre_tokenizers_CharDelimiterSplit,
punctuationPreTokenizer: native.pre_tokenizers_Punctuation,
sequencePreTokenizer: native.pre_tokenizers_Sequence,
digitsPreTokenizer: native.pre_tokenizers_Digits,
splitPreTokenizer: native.pre_tokenizers_Split,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts | import {
byteLevelPreTokenizer,
metaspacePreTokenizer,
punctuationPreTokenizer,
sequencePreTokenizer,
splitPreTokenizer,
whitespaceSplitPreTokenizer,
} from "./pre-tokenizers";
describe("byteLevelPreTokenizer", () => {
it("instantiates correctly", () => {
const processor = byteLevelPreTokenizer();
expect(processor.constructor.name).toEqual("PreTokenizer");
});
});
describe("metaspacePreTokenizer", () => {
it("instantiates correctly without any parameter", () => {
const processor = metaspacePreTokenizer();
expect(processor.constructor.name).toEqual("PreTokenizer");
});
it("accepts `undefined` as first parameter", () => {
expect(metaspacePreTokenizer(undefined)).toBeDefined();
});
it("accepts `undefined` as second parameter", () => {
expect(metaspacePreTokenizer("t", undefined)).toBeDefined();
});
it("can pre-tokenize strings", () => {
const pretok = metaspacePreTokenizer();
expect(pretok.preTokenizeString("Hello there friend")).toEqual([
["▁Hello", [0, 5]],
["▁there", [5, 11]],
["▁friend", [11, 18]],
]);
});
});
describe("punctuationPreTokenizer", () => {
it("instantiates correctly without any parameter", () => {
const processor = punctuationPreTokenizer();
expect(processor.constructor.name).toEqual("PreTokenizer");
});
it("instantiates correctly with non-default split delimeter", () => {
const processor = punctuationPreTokenizer("removed");
expect(processor.constructor.name).toEqual("PreTokenizer");
});
});
describe("splitPreTokenizer", () => {
it("instantiates correctly with invert parameter", () => {
const processor = splitPreTokenizer(" ", "mergedWithPrevious", false);
expect(processor.constructor.name).toEqual("PreTokenizer");
});
});
describe("sequencePreTokenizer", () => {
it("instantiates correctly", () => {
const punctuation = punctuationPreTokenizer();
const whitespace = whitespaceSplitPreTokenizer();
const sequence2 = sequencePreTokenizer([]);
expect(sequence2.constructor.name).toEqual("PreTokenizer");
const sequence3 = sequencePreTokenizer([punctuation, whitespace]);
expect(sequence3.constructor.name).toEqual("PreTokenizer");
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/raw-encoding.d.ts | import { PaddingDirection } from "./enums";
/**
* An Encoding as returned by the Tokenizer
*/
export interface RawEncoding {
/**
* Get the encoded tokens corresponding to the word at the given index in one of the input
* sequences, with the form [startToken, endToken+1]
* @param word The position of a word in one of the input sequences
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToTokens(word: number, seqId?: number): [number, number] | undefined;
/**
* Get the offsets of the word at the given index in the input sequence
* @param word The index of the word in the input sequence
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToChars(word: number, seqId?: number): [number, number] | undefined;
/**
* Get the index of the sequence that contains the given token
* @param token The index of the token in the encoded sequence
*/
tokenToSequence(token: number): number | undefined;
/**
* Get the offsets of the token at the given index
*
* The returned offsets are related to the input sequence that contains the
* token. In order to determine in which input sequence it belongs, you
* must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToChars(token: number): [number, number] | undefined;
/**
* Get the word that contains the token at the given index
*
* The returned index is related to the input sequence that contains the
* token. In order to determine in which input sequence it belongs, you
* must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToWord(token: number): number | undefined;
/**
* Find the index of the token at the position of the given char
* @param pos The position of a char in one of the input strings
* @param seqId The index of the input sequence that contains said char
* @since 0.6.0
*/
charToToken(pos: number, seqId?: number): number | undefined;
/**
* Get the word that contains the given char
* @param pos The position of a char in the input string
* @param seqId The index of the input sequence that contains said char
* @since 0.7.0
*/
charToWord(pos: number, seqId?: number): number | undefined;
/**
* Returns the attention mask
*/
getAttentionMask(): number[];
/**
* Returns the number of sequences
*/
getNSequences(): number;
/**
* Set the sequence id for this encoding
*/
setSequenceId(seqId: number): undefined;
/**
* Returns the tokenized ids
*/
getIds(): number[];
/**
* Returns the number of tokens
*/
getLength(): number;
/**
* Returns the offsets
*/
getOffsets(): [number, number][];
/**
* Returns the overflowing encodings, after truncation
*/
getOverflowing(): RawEncoding[];
/**
* Returns the special tokens mask
*/
getSpecialTokensMask(): number[];
/**
* Returns the tokenized string
*/
getTokens(): string[];
/**
* Returns the type ids
*/
getTypeIds(): number[];
/**
* The tokenized words indexes
* @since 0.6.0
*/
getWordIds(): (number | undefined)[];
/**
* The sequences indices
*/
getSequenceIds(): (number | undefined)[];
/**
* Pad the current Encoding at the given length
*
* @param length The length at which to pad
* @param [options] Padding options
*/
pad(length: number, options?: PaddingOptions): void;
/**
* Truncate the current Encoding at the given max_length
*
* @param length The maximum length to be kept
* @param [stride=0] The length of previous content to be included in each
* overflowing sequence
* @param [direction='right'] Truncate direction
*/
truncate(length: number, stride?: number, direction?: string): void;
}
interface PaddingOptions {
/**
* @default "right"
*/
direction?: PaddingDirection;
/**
* The index to be used when padding
* @default 0
*/
padId?: number;
/**
* The type index to be used when padding
* @default 0
*/
padTypeId?: number;
/**
* The pad token to be used when padding
* @default "[PAD]"
*/
padToken?: string;
}
| 0 |
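A sketch of working with a `RawEncoding` once one has been produced by a `Tokenizer` (see the test file that follows); the helper name and the padding values are illustrative:

```ts
import { PaddingDirection } from "./enums";
import { RawEncoding } from "./raw-encoding";

// Pad an encoding to a fixed length on the left and inspect a few views.
// `encoding` is assumed to come from `Tokenizer.encode`.
function padAndInspect(encoding: RawEncoding): void {
  encoding.pad(10, {
    direction: PaddingDirection.Left,
    padToken: "[PAD]",
    padId: 0,
    padTypeId: 0,
  });
  console.log(encoding.getTokens()); // 10 tokens, padding on the left
  console.log(encoding.getAttentionMask()); // 0 for padding, 1 elsewhere
  console.log(encoding.wordToTokens(0)); // token span of the first word, if any
  encoding.truncate(5); // keep at most the first 5 tokens
}
```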
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/raw-encoding.test.ts | import { promisify } from "util";
import { PaddingDirection } from "./enums";
import { Model, WordPiece, WordPieceOptions } from "./models";
import {
punctuationPreTokenizer,
sequencePreTokenizer,
whitespacePreTokenizer,
} from "./pre-tokenizers";
import { RawEncoding } from "./raw-encoding";
import { EncodeOptions, InputSequence, Tokenizer } from "./tokenizer";
const MOCKS_DIR = __dirname + "/__mocks__";
describe("Can modify pretokenizers on the fly", () => {
let encoding: RawEncoding;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let tokenizer: Tokenizer;
beforeAll(async () => {
const model = await promisify<string, WordPieceOptions, Model>(WordPiece.fromFile)(
`${MOCKS_DIR}/vocab.txt`,
{
continuingSubwordPrefix: "##",
}
);
tokenizer = new Tokenizer(model);
encode = promisify(tokenizer.encode.bind(tokenizer));
});
it("Can change pre tokenizer", async () => {
const input = "my name is john.!?";
tokenizer.setPreTokenizer(sequencePreTokenizer([whitespacePreTokenizer()]));
encoding = await encode(input, null);
expect(encoding.getIds()).toEqual([0, 1, 2, 3, 4, 8]);
// Change pre tokenizer
tokenizer.setPreTokenizer(
sequencePreTokenizer([whitespacePreTokenizer(), punctuationPreTokenizer()])
);
encoding = await encode(input, null);
expect(encoding.getIds()).toEqual([0, 1, 2, 3, 4, 8, 8, 8]);
});
});
describe("RawEncoding", () => {
const originalString = "my name is john";
const originalPairString = "what is yours?";
let encoding: RawEncoding;
let encodingDual: RawEncoding;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
beforeAll(async () => {
const model = await promisify<string, WordPieceOptions, Model>(WordPiece.fromFile)(
`${MOCKS_DIR}/vocab.txt`,
{
continuingSubwordPrefix: "##",
}
);
const tokenizer = new Tokenizer(model);
tokenizer.setPreTokenizer(whitespacePreTokenizer());
encode = promisify(tokenizer.encode.bind(tokenizer));
});
beforeEach(async () => {
encoding = await encode(originalString, null);
encodingDual = await encode(originalString, originalPairString);
});
it("has a list of defined methods", async () => {
expect(typeof encoding.wordToTokens).toBe("function");
expect(typeof encoding.wordToChars).toBe("function");
expect(typeof encoding.tokenToChars).toBe("function");
expect(typeof encoding.tokenToWord).toBe("function");
expect(typeof encoding.charToToken).toBe("function");
expect(typeof encoding.charToWord).toBe("function");
expect(typeof encoding.getAttentionMask).toBe("function");
expect(typeof encoding.getIds).toBe("function");
expect(typeof encoding.getLength).toBe("function");
expect(typeof encoding.getOffsets).toBe("function");
expect(typeof encoding.getOverflowing).toBe("function");
expect(typeof encoding.getSpecialTokensMask).toBe("function");
expect(typeof encoding.getTokens).toBe("function");
expect(typeof encoding.getTypeIds).toBe("function");
expect(typeof encoding.getWordIds).toBe("function");
expect(typeof encoding.getSequenceIds).toBe("function");
expect(typeof encoding.pad).toBe("function");
expect(typeof encoding.truncate).toBe("function");
});
describe("truncate", () => {
it("accepts `undefined` as second parameter", () => {
expect(encoding.truncate(10, undefined)).toBeUndefined();
});
it("should throw an Error on invalid direction", () => {
const t = () => encoding.truncate(10, 3, "not_valid");
expect(t).toThrow(`Invalid truncation direction value : not_valid`);
});
});
describe("getWordIds", () => {
it("returns the correct list of indexes", () => {
const indexes = encoding.getWordIds();
expect(indexes).toEqual([0, 1, 2, 3, 3]);
});
});
describe("getSequenceIds", () => {
it("returns the correct list of indexes", () => {
expect(encoding.getSequenceIds()).toEqual([0, 0, 0, 0, 0]);
expect(encodingDual.getSequenceIds()).toEqual([0, 0, 0, 0, 0, 1, 1, 1, 1]);
});
});
describe("wordToTokens", () => {
it("returns the correct indexes", () => {
const indexes = encoding.wordToTokens(3);
expect(indexes).toEqual([3, 5]);
});
it("returns the corrent indexes with pair sequences", () => {
expect(encodingDual.wordToTokens(3, 0)).toEqual([3, 5]);
expect(encodingDual.wordToTokens(3, 1)).toEqual([8, 9]);
});
it("returns undefined when out of range word", () => {
const index = encoding.wordToTokens(100);
expect(index).toBeUndefined();
});
});
describe("wordToChars", () => {
it("returns the correct offsets", () => {
const offsets = encoding.wordToChars(3);
expect(offsets).toEqual([11, 15]);
});
it("returns the correct offsets with pair sequences", () => {
expect(encodingDual.wordToChars(3, 0)).toEqual([11, 15]);
expect(encodingDual.wordToChars(3, 1)).toEqual([13, 14]);
});
it("returns undefined when out of range word", () => {
const offsets = encoding.wordToChars(100);
expect(offsets).toBeUndefined();
});
});
describe("tokenToSequence", () => {
it("returns the correct value", () => {
expect(encodingDual.tokenToSequence(4)).toEqual(0);
expect(encodingDual.tokenToSequence(6)).toEqual(1);
});
});
describe("tokenToChars", () => {
it("returns the correct offsets", () => {
const offsets = encoding.tokenToChars(3);
expect(offsets).toEqual([11, 13]);
});
it("returns the correct offsets with pair sequences", () => {
expect(encodingDual.tokenToChars(3)).toEqual([11, 13]);
expect(encodingDual.tokenToChars(7)).toEqual([8, 13]);
});
it("returns undefined when out of range token", () => {
const offsets = encoding.tokenToChars(100);
expect(offsets).toBeUndefined();
});
});
describe("tokenToWord", () => {
it("returns the correct index", () => {
const index = encoding.tokenToWord(3);
expect(index).toEqual(3);
});
it("returns the correct index with pair sequences", () => {
expect(encodingDual.tokenToWord(3)).toEqual(3);
expect(encodingDual.tokenToWord(7)).toEqual(2);
});
it("returns undefined when out of range token", () => {
const index = encoding.tokenToWord(100);
expect(index).toBeUndefined();
});
});
describe("charToToken", () => {
it("returns the correct index", () => {
const index = encoding.charToToken(3);
expect(index).toEqual(1);
});
it("returns the correct index with pair sequences", () => {
expect(encodingDual.charToToken(3, 0)).toEqual(1);
expect(encodingDual.charToToken(3, 1)).toEqual(5);
});
it("returns undefined when out of range char", () => {
const index = encoding.charToToken(100);
expect(index).toBeUndefined();
});
});
describe("charToWord", () => {
it("returns the correct index", () => {
const index = encoding.charToWord(3);
expect(index).toEqual(1);
});
it("returns the correct index with pair sequences", () => {
expect(encodingDual.charToWord(3, 0)).toEqual(1);
expect(encodingDual.charToWord(3, 1)).toEqual(0);
});
it("returns undefined when out of range char", () => {
const index = encoding.charToWord(100);
expect(index).toBeUndefined();
});
});
describe("pad", () => {
it("works correctly with only one parameter", () => {
encoding.pad(10);
expect(encoding.getTokens()).toHaveLength(10);
});
it("accepts `undefined` as second parameter", () => {
encoding.pad(10, undefined);
expect(encoding.getTokens()).toHaveLength(10);
});
it("accepts options as second parameter", () => {
encoding.pad(10, {
direction: PaddingDirection.Left,
padToken: "[PA]",
padTypeId: 10,
padId: 400,
});
const tokens = encoding.getTokens();
expect(tokens).toHaveLength(10);
expect(tokens[0]).toBe("[PA]");
expect(encoding.getTypeIds()[0]).toBe(10);
expect(encoding.getIds()[0]).toBe(400);
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/tokenizer.d.ts | import { Decoder } from "./decoders";
import { PaddingDirection, TruncationDirection, TruncationStrategy } from "./enums";
import { Model } from "./models";
import { Normalizer } from "./normalizers";
import { PostProcessor } from "./post-processors";
import { PreTokenizer } from "./pre-tokenizers";
import { RawEncoding } from "./raw-encoding";
import { Trainer } from "./trainers";
export interface FromPretrainedOptions {
/**
* The revision to download
* @default "main"
*/
revision?: string;
/**
* The auth token to use to access private repositories on the Hugging Face Hub
* @default undefined
*/
authToken?: string;
}
export interface TruncationOptions {
/**
* The length of previous content to be included in each overflowing sequence
* @default 0
*/
stride?: number;
/**
* Strategy to use:
* - `TruncationStrategy.LongestFirst` Iteratively reduce the input sequences until the total length is under max_length,
* removing a token from the longest sequence at each step (when there is a pair of input sequences).
* - `TruncationStrategy.OnlyFirst` Only truncate the first sequence.
* - `TruncationStrategy.OnlySecond` Only truncate the second sequence.
* @default TruncationStrategy.LongestFirst
*/
strategy?: TruncationStrategy;
/**
* Which side to truncate
* @default TruncationDirection.Left
*/
direction?: TruncationDirection;
}
export interface TruncationConfiguration extends Required<TruncationOptions> {
/**
* The maximum length at which to truncate
*/
maxLength: number;
}
export type PaddingConfiguration = Required<
Omit<PaddingOptions, "maxLength" | "padToMultipleOf">
> &
Pick<PaddingOptions, "maxLength" | "padToMultipleOf">;
export interface PaddingOptions {
/**
* @default PaddingDirection.Right
*/
direction?: PaddingDirection;
/**
* Padding length. If not provided:
* - Will default to the longest sequence when encoding in batch.
* - No padding will be applied when encoding a single sequence
*/
maxLength?: number;
/**
* If specified, the padding will snap to a multiple of the given value.
* @default undefined
*/
padToMultipleOf?: number;
/**
* The index to be used when padding
* @default 0
*/
padId?: number;
/**
* The type index to be used when padding
* @default 0
*/
padTypeId?: number;
/**
* The pad token to be used when padding
* @default "[PAD]"
*/
padToken?: string;
}
export type TextInputSequence = string;
export type PreTokenizedInputSequence = string[];
export type InputSequence = TextInputSequence | PreTokenizedInputSequence;
export type TextEncodeInput = TextInputSequence | [TextInputSequence, TextInputSequence];
export type PreTokenizedEncodeInput =
| PreTokenizedInputSequence
| [PreTokenizedInputSequence, PreTokenizedInputSequence];
export type EncodeInput = TextEncodeInput | PreTokenizedEncodeInput;
export interface EncodeOptions {
/**
* Whether the given sequence is pre-tokenized
* @default false
*/
isPretokenized?: boolean;
/**
* Whether we should add special tokens
* @default true
*/
addSpecialTokens?: boolean;
}
/**
* A Tokenizer works as a pipeline, it processes some raw text as input and outputs
* an `Encoding`.
* The various steps of the pipeline are:
* 1. The `Normalizer`: in charge of normalizing the text. Common examples of
* normalization are the unicode normalization standards, such as NFD or NFKC.
* 2. The `PreTokenizer`: in charge of creating initial words splits in the text.
* The most common way of splitting text is simply on whitespace.
* 3. The `Model`: in charge of doing the actual tokenization. An example of a
* `Model` would be `BPE` or `WordPiece`.
* 4. The `PostProcessor`: in charge of post-processing the `Encoding` to add anything
* relevant that, for example, a language model would need, such as special tokens.
*/
export class Tokenizer {
/**
* Instantiate a new Tokenizer using the given Model
*/
constructor(model: Model);
/**
* Instantiate a new Tokenizer from the given file
* @param path Path to a file containing a Tokenizer
*/
static fromFile(path: string): Tokenizer;
/**
* Instantiate a new Tokenizer from the given JSON string
* @param s A JSON string representation of the Tokenizer
*/
static fromString(s: string): Tokenizer;
/**
* Instantiate a new Tokenizer from an existing file on the
* Hugging Face Hub. Any model repo containing a `tokenizer.json`
* can be used here.
* @param identifier A model identifier on the Hub
* @param options Additional options
*/
static fromPretrained(identifier: string, options?: FromPretrainedOptions): Tokenizer;
/**
* Add the given tokens to the vocabulary
*
* @param tokens A list of tokens to add to the vocabulary.
* Each token can either be a string, or an instance of {@link AddedToken}.
* @returns The number of tokens that were added to the vocabulary
*/
addTokens(tokens: (string | AddedToken)[]): number;
/**
* Add the given special tokens to the vocabulary, and treat them as special tokens.
* The special tokens will never be processed by the model, and will be removed while decoding.
*
* @param tokens The list of special tokens to add.
* Each token can either be a string or an instance of {@link AddedToken}.
* @returns The number of tokens that were added to the vocabulary
*/
addSpecialTokens(tokens: (string | AddedToken)[]): number;
/**
* Encode the given sequence
*
* @param sequence The sequence to encode
* @param pair The optional pair sequence
* @param options Optional encoding options, such as `addSpecialTokens` and `isPretokenized`
* @param __callback Callback called when encoding is complete
*/
encode(
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null, // |(err: Error, encoding: RawEncoding) => void,
__callback?: (err: Error, encoding: RawEncoding) => void
): void;
/**
* Encode the given sequences or pair of sequences
*
* @param inputs A list of sequences or pairs of sequences. The list can contain both at the same time.
* @param options Optional encoding options, such as `addSpecialTokens` and `isPretokenized`
* @param __callback Callback called when encoding is complete
*/
encodeBatch(
inputs: EncodeInput[],
options?: EncodeOptions | null, // (err: Error, encodings: RawEncoding[]) => void,
__callback?: (err: Error, encodings: RawEncoding[]) => void
): void;
/**
* Decode the given list of ids to a string sequence
*
* @param ids A list of ids to be decoded
* @param skipSpecialTokens Whether to remove all the special tokens from the output string
* @param __callback Callback called with decoded string
*/
decode(
ids: number[],
skipSpecialTokens: boolean,
__callback: (err: Error, encodings: string) => void
): void;
/**
* Decode the list of sequences to a list of string sequences
*
* @param sequences A list of sequence of ids to be decoded
* @param skipSpecialTokens Whether to remove all the special tokens from the output strings
* @param __callback Callback called with decoded strings
*/
decodeBatch(
sequences: number[][],
skipSpecialTokens: boolean,
__callback: (err: Error, encodings: string[]) => void
): void[];
/**
* Convert the given token id to its corresponding string
*
* @param id The token id to convert
* @returns The corresponding string if it exists
*/
idToToken(id: number): string | undefined;
/**
* Convert the given token to its corresponding id
*
* @param token The token to convert
* @returns The corresponding id if it exists
*/
tokenToId(token: string): number | undefined;
/**
* Enable/change padding with specified options
* @param [options] Padding options
*/
setPadding(options?: PaddingOptions): PaddingConfiguration;
/**
* Disable padding
*/
disablePadding(): void;
/**
* Enable/change truncation with specified options
*
* @param maxLength The maximum length at which to truncate
* @param [options] Additional truncation options
*/
setTruncation(maxLength: number, options?: TruncationOptions): TruncationConfiguration;
/**
* Disable truncation
*/
disableTruncation(): void;
/**
* Train the model using the given files
*
* @param trainer Trainer to use
* @param files List of files to use
*/
train(trainer: Trainer, files: string[]): void;
/**
* Returns the vocabulary
*
* @param [withAddedTokens=true] Whether to include the added tokens in the vocabulary
*/
getVocab(withAddedTokens?: boolean): { [token: string]: number };
/**
* Returns the size of the vocabulary
*
* @param [withAddedTokens=true] Whether to include the added tokens in the vocabulary's size
*/
getVocabSize(withAddedTokens?: boolean): number;
/**
* Returns the number of encoding tasks running currently
*/
runningTasks(): number;
/**
* Returns the model in use
*/
getModel(): Model;
/**
* Change the model to use with this Tokenizer
* @param model New model to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the model is already used in another Tokenizer
*/
setModel(model: Model): void;
/**
* Returns the normalizer in use
*/
getNormalizer(): Normalizer | undefined;
/**
* Change the normalizer to use with this Tokenizer
* @param normalizer New normalizer to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the normalizer is already used in another Tokenizer
*/
setNormalizer(normalizer: Normalizer): void;
/**
* Returns the pre-tokenizer in use
*/
getPreTokenizer(): PreTokenizer | undefined;
/**
* Change the pre-tokenizer to use with this Tokenizer
* @param preTokenizer New pre-tokenizer to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the pre-tokenizer is already used in another Tokenizer
*/
setPreTokenizer(preTokenizer: PreTokenizer): void;
/**
* Returns the post-processor in use
*/
getPostProcessor(): PostProcessor | undefined;
/**
* Change the post-processor to use with this Tokenizer
* @param postProcessor New post-processor to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the post-processor is already used in another Tokenizer
*/
setPostProcessor(processor: PostProcessor): void;
/**
* Returns the decoder in use
*/
getDecoder(): Decoder | undefined;
/**
* Change the decoder to use with this Tokenizer
* @param decoder New decoder to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the decoder is already used in another Tokenizer
*/
setDecoder(decoder: Decoder): void;
/**
* Apply all the post-processing steps to the given encodings.
* The various steps are:
* 1. Truncate according to global params (@see setTruncation)
* 2. Apply the PostProcessor
* 3. Pad according to global params (@see setPadding)
* @param encoding The main Encoding to post process
* @param [pair] An optional pair Encoding
* @param [addSpecialTokens=true] Whether to add special tokens. Default to `true`.
* @since 0.6.0
*/
postProcess(
encoding: RawEncoding,
pair?: RawEncoding,
addSpecialTokens?: boolean
): RawEncoding;
/**
* Save the Tokenizer as JSON to the given path
* @param path Path to the JSON file to write
* @param [pretty=false] Whether the JSON string should be prettified
*/
save(path: string, pretty?: boolean): void;
/**
* Get a serialized JSON version of the Tokenizer as a string
* @param [pretty=false] Whether the JSON string should be prettified
*/
toString(pretty?: boolean): string;
}
/**
* Options used to construct an AddedToken
* @since 0.6.0
*/
export interface AddedTokenOptions {
/**
* Whether this token should strip all potential whitespaces on the left side.
* If True, this token will greedily match any whitespace on the left and then strip
* them out.
* @default False
*/
leftStrip?: boolean;
/**
* Whether this token should strip all potential whitespaces on the right side.
* If True, this token will greedily match any whitespace on the right and then strip
* them out.
* @default False
*/
rightStrip?: boolean;
/**
* Whether this token should only match against single word.
* If True, this token will never match inside of a word.
* @default False
*/
singleWord?: boolean;
/**
* Whether this token should match on the normalized version of the text. For example
* with the added token `yesterday` and a normalizer in charge of lowercasing the text,
* the input `I saw a lion Yesterday` would match the token.
* This is False for special tokens by default, true otherwise
* @default True
*/
normalized?: boolean;
}
/**
* AddedToken represents a token to be added to a Tokenizer.
* An AddedToken can have special options defining the way it should behave.
*
* @since 0.6.0
*/
export class AddedToken {
/**
* Instantiate a new AddedToken
* @param content The content of the token
* @param special Whether this is a special token
* @param [options] Options for the token
*/
constructor(content: string, special: boolean, options?: AddedTokenOptions);
/**
* Get the content of the AddedToken
*/
getContent(): string;
}
| 0 |
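A sketch that wires the pieces together: an empty `BPE` model, a whitespace pre-tokenizer, and the callback-based `encode`/`decode` methods wrapped with `promisify`, following the pattern used in the tests below. The added tokens and input string are illustrative:

```ts
import { promisify } from "util";
import { BPE } from "./models";
import { whitespacePreTokenizer } from "./pre-tokenizers";
import { RawEncoding } from "./raw-encoding";
import { AddedToken, EncodeOptions, InputSequence, Tokenizer } from "./tokenizer";

async function run(): Promise<void> {
  // Start from an empty BPE model and register a handful of tokens.
  const tokenizer = new Tokenizer(BPE.empty());
  tokenizer.addTokens(["hello", "world"]);
  tokenizer.addSpecialTokens([new AddedToken("[MASK]", true)]);
  tokenizer.setPreTokenizer(whitespacePreTokenizer());

  // encode/decode are callback-based, so promisify them for async/await.
  const encode: (
    sequence: InputSequence,
    pair?: InputSequence | null,
    options?: EncodeOptions | null
  ) => Promise<RawEncoding> = promisify(tokenizer.encode.bind(tokenizer));
  const decode = promisify(tokenizer.decode.bind(tokenizer));

  const encoding = await encode("hello world [MASK]", null, { addSpecialTokens: true });
  console.log(encoding.getTokens(), encoding.getIds());
  console.log(await decode(encoding.getIds(), true)); // special tokens skipped
}
```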
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/tokenizer.js | const native = require("./native");
class Tokenizer extends native.tokenizer_Tokenizer {
static fromString = native.tokenizer_Tokenizer_from_string;
static fromFile = native.tokenizer_Tokenizer_from_file;
static fromPretrained = native.tokenizer_Tokenizer_from_pretrained;
}
module.exports = {
AddedToken: native.tokenizer_AddedToken,
Tokenizer,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/tokenizer.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/no-empty-function */
import { promisify } from "util";
import { PaddingDirection, TruncationDirection, TruncationStrategy } from "./enums";
import { BPE } from "./models";
import { RawEncoding } from "./raw-encoding";
import {
AddedToken,
EncodeInput,
EncodeOptions,
InputSequence,
PaddingConfiguration,
Tokenizer,
TruncationConfiguration,
} from "./tokenizer";
// jest.mock('../bindings/tokenizer');
// jest.mock('../bindings/models', () => ({
// __esModule: true,
// Model: jest.fn()
// }));
// Or:
// jest.mock('../bindings/models', () => {
// return require('../bindings/__mocks__/models');
// });
// const TokenizerMock = mocked(Tokenizer);
describe("AddedToken", () => {
it("instantiates with only content", () => {
const addToken = new AddedToken("test", false);
expect(addToken.constructor.name).toEqual("AddedToken");
});
it("instantiates with empty options", () => {
const addToken = new AddedToken("test", false, {});
expect(addToken.constructor.name).toEqual("AddedToken");
});
it("instantiates with options", () => {
const addToken = new AddedToken("test", false, {
leftStrip: true,
rightStrip: true,
singleWord: true,
});
expect(addToken.constructor.name).toEqual("AddedToken");
});
describe("getContent", () => {
it("returns the string content of AddedToken", () => {
const addedToken = new AddedToken("test", false);
expect(addedToken.getContent()).toEqual("test");
});
});
});
describe("Tokenizer", () => {
it("has expected methods", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
expect(typeof Tokenizer.fromFile).toBe("function");
expect(typeof Tokenizer.fromString).toBe("function");
expect(typeof Tokenizer.fromPretrained).toBe("function");
expect(typeof tokenizer.addSpecialTokens).toBe("function");
expect(typeof tokenizer.addTokens).toBe("function");
expect(typeof tokenizer.decode).toBe("function");
expect(typeof tokenizer.decodeBatch).toBe("function");
expect(typeof tokenizer.disablePadding).toBe("function");
expect(typeof tokenizer.disableTruncation).toBe("function");
expect(typeof tokenizer.encode).toBe("function");
expect(typeof tokenizer.encodeBatch).toBe("function");
expect(typeof tokenizer.getDecoder).toBe("function");
expect(typeof tokenizer.getNormalizer).toBe("function");
expect(typeof tokenizer.getPostProcessor).toBe("function");
expect(typeof tokenizer.getPreTokenizer).toBe("function");
expect(typeof tokenizer.getVocab).toBe("function");
expect(typeof tokenizer.getVocabSize).toBe("function");
expect(typeof tokenizer.idToToken).toBe("function");
expect(typeof tokenizer.runningTasks).toBe("function");
expect(typeof tokenizer.save).toBe("function");
expect(typeof tokenizer.setDecoder).toBe("function");
expect(typeof tokenizer.setModel).toBe("function");
expect(typeof tokenizer.setNormalizer).toBe("function");
expect(typeof tokenizer.setPadding).toBe("function");
expect(typeof tokenizer.setPostProcessor).toBe("function");
expect(typeof tokenizer.setPreTokenizer).toBe("function");
expect(typeof tokenizer.setTruncation).toBe("function");
expect(typeof tokenizer.tokenToId).toBe("function");
expect(typeof tokenizer.toString).toBe("function");
expect(typeof tokenizer.train).toBe("function");
});
it("can be instantiated from the hub", async () => {
let tokenizer: Tokenizer;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let output: RawEncoding;
tokenizer = Tokenizer.fromPretrained("bert-base-cased");
encode = promisify(tokenizer.encode.bind(tokenizer));
output = await encode("Hey there dear friend!", null, { addSpecialTokens: false });
expect(output.getTokens()).toEqual(["Hey", "there", "dear", "friend", "!"]);
tokenizer = Tokenizer.fromPretrained("anthony/tokenizers-test");
encode = promisify(tokenizer.encode.bind(tokenizer));
output = await encode("Hey there dear friend!", null, { addSpecialTokens: false });
expect(output.getTokens()).toEqual(["hey", "there", "dear", "friend", "!"]);
tokenizer = Tokenizer.fromPretrained("anthony/tokenizers-test", {
revision: "gpt-2",
});
encode = promisify(tokenizer.encode.bind(tokenizer));
output = await encode("Hey there dear friend!", null, { addSpecialTokens: false });
expect(output.getTokens()).toEqual(["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]);
});
describe("addTokens", () => {
it("accepts a list of string as new tokens when initial model is empty", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const nbAdd = tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
expect(nbAdd).toBe(5);
});
it("accepts a list of AddedToken as new tokens when initial model is empty", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const addedToken = new AddedToken("test", false);
const nbAdd = tokenizer.addTokens([addedToken]);
expect(nbAdd).toBe(1);
});
});
describe("encode", () => {
let tokenizer: Tokenizer;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let encodeBatch: (
inputs: EncodeInput[],
options?: EncodeOptions | null
) => Promise<RawEncoding[]>;
beforeEach(() => {
// Clear all instances and calls to constructor and all methods:
// TokenizerMock.mockClear();
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", new AddedToken("pair", false)]);
encode = promisify(tokenizer.encode.bind(tokenizer));
encodeBatch = promisify(tokenizer.encodeBatch.bind(tokenizer));
});
it("accepts a pair of strings as parameters", async () => {
const encoding = await encode("my name is john", "pair");
expect(encoding).toBeDefined();
});
it("accepts a string with a null pair", async () => {
const encoding = await encode("my name is john", null);
expect(encoding).toBeDefined();
});
it("throws if we try to encode a pre-tokenized string without isPretokenized=true", async () => {
await expect((encode as any)(["my", "name", "is", "john"], null)).rejects.toThrow(
"encode with isPreTokenized=false expect string"
);
});
it("accepts a pre-tokenized string as parameter", async () => {
const encoding = await encode(["my", "name", "is", "john"], undefined, {
isPretokenized: true,
});
expect(encoding).toBeDefined();
});
it("throws if we try to encodeBatch pre-tokenized strings without isPretokenized=true", async () => {
await expect((encodeBatch as any)([["my", "name", "is", "john"]])).rejects.toThrow(
"encodeBatch with isPretokenized=false expects input to be `EncodeInput[]` " +
"with `EncodeInput = string | [string, string]`"
);
});
it("accepts a pre-tokenized input in encodeBatch", async () => {
const encoding = await encodeBatch([["my", "name", "is", "john"]], {
isPretokenized: true,
});
expect(encoding).toBeDefined();
});
it("Encodes correctly if called with only one argument", async () => {
const encoded = await encode("my name is john");
expect(encoded.getIds()).toEqual([0, 1, 2, 3]);
});
it("returns an Encoding", async () => {
const encoding = await encode("my name is john", "pair");
expect(encoding.getAttentionMask()).toEqual([1, 1, 1, 1, 1]);
const ids = encoding.getIds();
expect(Array.isArray(ids)).toBe(true);
expect(ids).toHaveLength(5);
for (const id of ids) {
expect(typeof id).toBe("number");
}
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[11, 15],
[0, 4],
]);
expect(encoding.getOverflowing()).toEqual([]);
expect(encoding.getSpecialTokensMask()).toEqual([0, 0, 0, 0, 0]);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john", "pair"]);
expect(encoding.getTypeIds()).toEqual([0, 0, 0, 0, 1]);
});
describe("when truncation is enabled", () => {
it("truncates with default if no truncation options provided", async () => {
tokenizer.setTruncation(2);
const singleEncoding = await encode("my name is john", null);
expect(singleEncoding.getTokens()).toEqual(["my", "name"]);
const pairEncoding = await encode("my name is john", "pair");
expect(pairEncoding.getTokens()).toEqual(["my", "pair"]);
});
it("throws an error with strategy `only_second` and no pair is encoded", async () => {
tokenizer.setTruncation(2, { strategy: TruncationStrategy.OnlySecond });
await expect(encode("my name is john", null)).rejects.toThrow();
});
});
describe("when padding is enabled", () => {
it("does not pad anything with default options", async () => {
tokenizer.setPadding();
const singleEncoding = await encode("my name", null);
expect(singleEncoding.getTokens()).toEqual(["my", "name"]);
const pairEncoding = await encode("my name", "pair");
expect(pairEncoding.getTokens()).toEqual(["my", "name", "pair"]);
});
it("pads to the right by default", async () => {
tokenizer.setPadding({ maxLength: 5 });
const singleEncoding = await encode("my name", null);
expect(singleEncoding.getTokens()).toEqual([
"my",
"name",
"[PAD]",
"[PAD]",
"[PAD]",
]);
const pairEncoding = await encode("my name", "pair");
expect(pairEncoding.getTokens()).toEqual([
"my",
"name",
"pair",
"[PAD]",
"[PAD]",
]);
});
it("pads to multiple of the given value", async () => {
tokenizer.setPadding({ padToMultipleOf: 8 });
const singleEncoding = await encode("my name", null);
expect(singleEncoding.getTokens()).toHaveLength(8);
const pairEncoding = await encode("my name", "pair");
expect(pairEncoding.getTokens()).toHaveLength(8);
});
});
});
describe("decode", () => {
let tokenizer: Tokenizer;
beforeEach(() => {
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
});
it("returns `undefined`", () => {
expect(tokenizer.decode([0, 1, 2, 3], true, () => {})).toBeUndefined();
});
it("has its callback called with the decoded string", async () => {
const decode = promisify(tokenizer.decode.bind(tokenizer));
await expect(decode([0, 1, 2, 3], true)).resolves.toEqual("my name is john");
});
});
describe("decodeBatch", () => {
let tokenizer: Tokenizer;
beforeEach(() => {
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
});
it("returns `undefined`", () => {
expect(tokenizer.decodeBatch([[0, 1, 2, 3], [4]], true, () => {})).toBeUndefined();
});
it("has its callback called with the decoded string", async () => {
const decodeBatch = promisify(tokenizer.decodeBatch.bind(tokenizer));
await expect(decodeBatch([[0, 1, 2, 3], [4]], true)).resolves.toEqual([
"my name is john",
"pair",
]);
});
});
describe("getVocab", () => {
it("accepts `undefined` as parameter", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
expect(tokenizer.getVocab(undefined)).toBeDefined();
});
it("returns the vocabulary", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john"]);
expect(tokenizer.getVocab(true)).toEqual({
my: 0,
name: 1,
is: 2,
john: 3,
});
});
});
describe("getVocabSize", () => {
it("accepts `undefined` as parameter", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
expect(tokenizer.getVocabSize(undefined)).toBeDefined();
});
});
describe("setTruncation", () => {
it("returns the full truncation configuration", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const truncation = tokenizer.setTruncation(2);
const expectedConfig: TruncationConfiguration = {
maxLength: 2,
strategy: TruncationStrategy.LongestFirst,
stride: 0,
direction: TruncationDirection.Right,
};
expect(truncation).toEqual(expectedConfig);
});
});
describe("setPadding", () => {
it("returns the full padding params", () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
const padding = tokenizer.setPadding();
const expectedConfig: PaddingConfiguration = {
direction: PaddingDirection.Right,
padId: 0,
padToken: "[PAD]",
padTypeId: 0,
};
expect(padding).toEqual(expectedConfig);
});
});
describe("postProcess", () => {
let tokenizer: Tokenizer;
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
let firstEncoding: RawEncoding;
let secondEncoding: RawEncoding;
beforeAll(() => {
const model = BPE.empty();
tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john", "pair"]);
encode = promisify(tokenizer.encode.bind(tokenizer));
});
beforeEach(async () => {
firstEncoding = await encode("my name is john", null);
secondEncoding = await encode("pair", null);
tokenizer.setTruncation(2);
tokenizer.setPadding({ maxLength: 5 });
});
it("returns correctly with a single Encoding param", () => {
const encoding = tokenizer.postProcess(firstEncoding);
expect(encoding.getTokens()).toEqual(["my", "name", "[PAD]", "[PAD]", "[PAD]"]);
});
it("returns correctly with `undefined` as second and third parameters", () => {
const encoding = tokenizer.postProcess(firstEncoding, undefined, undefined);
expect(encoding.getTokens()).toEqual(["my", "name", "[PAD]", "[PAD]", "[PAD]"]);
});
it("returns correctly with 2 encodings", () => {
const encoding = tokenizer.postProcess(firstEncoding, secondEncoding);
expect(encoding.getTokens()).toEqual(["my", "pair", "[PAD]", "[PAD]", "[PAD]"]);
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/trainers.d.ts | /**
* This class is not supposed to be instantiated directly. Instead, any implementation of a
* Trainer will return an instance of this class when instantiated.
*/
import { AddedToken } from "./tokenizer";
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface Trainer {}
export interface TrainerOptions {
/**
* A prefix to be used for every subword that is not a beginning-of-word.
*/
continuingSubwordPrefix?: string;
/**
   * A suffix to be used for every subword that is an end-of-word.
*/
endOfWordSuffix?: string;
/**
* A list of characters to include in the initial alphabet, even
* if not seen in the training dataset.
   * If a string contains more than one character, only the first one
   * is kept.
* @default []
*/
initialAlphabet?: string[];
/**
   * The maximum number of different characters to keep in the alphabet.
*/
limitAlphabet?: number;
/**
* The minimum frequency a pair should have in order to be merged.
* @default 2
*/
minFrequency?: number;
/**
* Whether to show progress bars while training.
* @default true
*/
showProgress?: boolean;
/**
* A list of special tokens the model should know of.
* @default []
*/
specialTokens?: (string | AddedToken)[];
/**
* The size of the final vocabulary, including all tokens and alphabet.
* @default 30000
*/
vocabSize?: number;
}
/**
* Instantiate a new BPE Trainer
* @param [options] BPE Trainer options
*/
export function bpeTrainer(options?: TrainerOptions): Trainer;
/**
* Instantiate a new WordPiece Trainer
* @param [options] WordPiece Trainer options
*/
export function wordPieceTrainer(options?: TrainerOptions): Trainer;
export interface WordLevelTrainerOptions {
/**
   * The minimum frequency a word should have in order to be kept in the vocabulary.
* @default 2
*/
minFrequency?: number;
/**
* Whether to show progress bars while training.
* @default true
*/
showProgress?: boolean;
/**
* A list of special tokens the model should know of.
* @default []
*/
specialTokens?: (string | AddedToken)[];
/**
* The size of the final vocabulary, including all tokens and alphabet.
* @default 30000
*/
vocabSize?: number;
}
/**
* Instantiate a new WordLevel Trainer
* @param [options] WordLevel Trainer options
*/
export function wordLevelTrainer(options?: WordLevelTrainerOptions): Trainer;
export interface UnigramTrainerOptions {
vocabSize?: number;
nSubIterations?: number;
shrinkingFactor?: number;
specialTokens?: string[];
initialAlphabet?: string[];
unkToken?: string;
maxPieceLength?: number;
seedSize?: number;
showProgress?: boolean;
}
/**
* Instantiate a new Unigram Trainer
* @param [options] Unigram Trainer options
*/
export function unigramTrainer(options?: UnigramTrainerOptions): Trainer;
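/**
 * Usage sketch (illustrative only): configuring a WordPiece trainer and training a raw
 * Tokenizer with it. The corpus path and option values are assumptions for the example;
 * `tokenizer` is assumed to be a Tokenizer instance backed by a WordPiece model.
 *
 * @example
 * const trainer = wordPieceTrainer({
 *   vocabSize: 30000,
 *   minFrequency: 2,
 *   specialTokens: ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
 * });
 * tokenizer.train(trainer, ["./my-corpus.txt"]);
 */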
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/trainers.js | const native = require("./native");
module.exports = {
bpeTrainer: native.trainers_BPETrainer,
wordPieceTrainer: native.trainers_WordPieceTrainer,
wordLevelTrainer: native.trainers_WordLevelTrainer,
unigramTrainer: native.trainers_UnigramTrainer,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/utils.d.ts | import { RawEncoding } from "./raw-encoding";
/**
* Returns a subpart of a string according to specified indexes, and respecting unicode characters
*
* @param text The text for which to return a subpart
 * @param [start] The index from which to start (can be negative).
 * @param [end] The index (excluded) at which to stop (can be negative).
 * Defaults to the end of the string if not provided.
 * @returns The full string if no start/end indexes are provided,
 * otherwise the original string between `start` (included) and `end` (excluded)
* @since 0.6.0
*/
export function slice(text: string, start?: number, end?: number): string;
/**
* Merge the list of RawEncoding into one final RawEncoding
* @param encodings The list of encodings to merge
* @param [growingOffsets=false] Whether the offsets should accumulate while merging
*/
export function mergeEncodings(
encodings: RawEncoding[],
growingOffsets?: boolean
): RawEncoding;
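/**
 * Usage sketch (illustrative only): unicode-aware slicing and merging of encodings.
 * The sample text is an assumption; `first` and `second` are assumed to be RawEncoding
 * values produced by `Tokenizer.encode`.
 *
 * @example
 * slice("My name is John 👋", -1);   // "👋"
 * slice("My name is John 👋", 3, 7); // "name"
 * const merged = mergeEncodings([first, second], true); // offsets keep growing across encodings
 */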
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/utils.js | const native = require("./native");
module.exports = {
mergeEncodings: native.utils_mergeEncodings,
slice: native.utils_slice,
};
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/bindings/utils.test.ts | import { promisify } from "util";
import { BPE } from "./models";
import { RawEncoding } from "./raw-encoding";
import { EncodeOptions, InputSequence, Tokenizer } from "./tokenizer";
import { mergeEncodings, slice } from "./utils";
describe("slice", () => {
const text = "My name is John 👋";
const sliceText = slice.bind({}, text);
it("returns the full text when no params", () => {
const sliced = sliceText();
expect(sliced).toEqual(text);
});
it("accepts `undefined` as second parameter", () => {
const original = sliceText(undefined);
expect(original).toEqual(text);
});
it("accepts `undefined` as third parameter", () => {
const original = sliceText(0, undefined);
expect(original).toEqual(text);
});
it("throws an error when `begin` is out of range", () => {
expect(() => sliceText(1000)).toThrow();
});
it("returns slice starting at the specified index", () => {
const original = sliceText(3);
expect(original).toEqual("name is John 👋");
});
it("throws an error when `end` is out of range", () => {
expect(() => sliceText(0, 1000)).toThrow();
});
it("returns the text between the two specified indexes", () => {
const original = sliceText(3, 7);
expect(original).toEqual("name");
});
describe("with only a negative `begin`", () => {
it("returns the original string counting from the end when in the range", () => {
const original = sliceText(-1);
expect(original).toEqual("👋");
});
it("throws an error when out of range", () => {
expect(() => sliceText(-1000)).toThrow();
});
});
describe("with a positive `begin` and a negative `end`", () => {
it("returns correct slice when resulting range is valid", () => {
const original = sliceText(3, -7);
expect(original).toEqual("name is");
});
it("throws an error when resulting `end` index is lower than `begin`", () => {
expect(() => sliceText(7, -12)).toThrow();
});
it("throws an error when `begin` is out of range", () => {
expect(() => sliceText(1000, -12)).toThrow();
});
it("throws an error when resulting `end` index is out of range", () => {
expect(() => sliceText(7, -1000)).toThrow();
});
});
describe("with a negative `begin` and a positive `end`", () => {
it("returns correct slice when resulting range is valid", () => {
const original = sliceText(-9, 10);
expect(original).toEqual("is");
});
it("throws an error when resulting `begin` index is upper than `end`", () => {
expect(() => sliceText(-3, 5)).toThrow();
});
it("throws an error when `end` is out of range", () => {
expect(() => sliceText(-5, 1000)).toThrow();
});
it("throws an error when resulting `begin` index is out of range", () => {
expect(() => sliceText(-1000, 10)).toThrow();
});
});
describe("with negatives `begin` and `end`", () => {
it("returns correct slice when resulting range is valid", () => {
const original = sliceText(-9, -7);
expect(original).toEqual("is");
});
it("throws an error when resulting `end` index is lower than `begin`", () => {
expect(() => sliceText(-5, -10)).toThrow();
});
it("throws an error when resulting `begin` index is out of range", () => {
expect(() => sliceText(-1000, -10)).toThrow();
});
it("throws an error when resulting `end` index is out of range", () => {
expect(() => sliceText(-10, -1000)).toThrow();
});
});
});
describe("mergeEncodings", () => {
let encode: (
sequence: InputSequence,
pair?: InputSequence | null,
options?: EncodeOptions | null
) => Promise<RawEncoding>;
beforeAll(async () => {
const model = BPE.empty();
const tokenizer = new Tokenizer(model);
tokenizer.addTokens(["my", "name", "is", "john"]);
encode = promisify(tokenizer.encode.bind(tokenizer));
});
it("accepts `undefined` as a second parameter", () => {
const encoding = mergeEncodings([], undefined);
expect(encoding.constructor.name).toEqual("Encoding");
});
it("returns correct result with `growingOffsets` not provided", async () => {
const firstEncoding = await encode("my name is", null);
const secondEncoding = await encode("john", null);
const encoding = mergeEncodings([firstEncoding, secondEncoding]);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john"]);
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[0, 4],
]);
});
it("returns correct result when `growingOffsets` is `false`", async () => {
const firstEncoding = await encode("my name is", null);
const secondEncoding = await encode("john", null);
const encoding = mergeEncodings([firstEncoding, secondEncoding], false);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john"]);
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[0, 4],
]);
});
it("returns correct result when `growingOffsets` is `true`", async () => {
const firstEncoding = await encode("my name is", null);
const secondEncoding = await encode("john", null);
const encoding = mergeEncodings([firstEncoding, secondEncoding], true);
expect(encoding.getTokens()).toEqual(["my", "name", "is", "john"]);
expect(encoding.getOffsets()).toEqual([
[0, 2],
[3, 7],
[8, 10],
[10, 14],
]);
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/bindings | hf_public_repos/tokenizers/bindings/node/lib/bindings/__mocks__/vocab.json | {} | 0 |
hf_public_repos/tokenizers/bindings/node/lib/bindings | hf_public_repos/tokenizers/bindings/node/lib/bindings/__mocks__/vocab.txt | my
name
is
jo
##hn
what
yours
pair
[UNK]
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/implementations/encoding.test.ts | /* eslint-disable @typescript-eslint/no-explicit-any */
import { RawEncoding } from "../bindings/raw-encoding";
import { Encoding } from "./encoding";
describe("Encoding", () => {
let encoding: Encoding;
const rawEncodingMock = jest.fn<Partial<RawEncoding>, any>();
describe("ids", () => {
const getIdsMock = jest.fn(() => [3]);
const m = rawEncodingMock.mockImplementation(() => ({
getIds: getIdsMock,
}));
encoding = new Encoding(m() as RawEncoding);
it("returns the ids from the raw encoding when not called before", () => {
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(1);
expect(ids).toEqual([3]);
});
it("returns the ids without using the raw encoding when already called before", () => {
getIdsMock.mockReset();
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(0);
expect(ids).toEqual([3]);
});
});
describe("pad", () => {
    it('resets internal "cache" properties', () => {
const getIdsMock = jest.fn(() => [4]);
const m = rawEncodingMock.mockImplementation(() => ({
getIds: getIdsMock,
pad: jest.fn(),
}));
encoding = new Encoding(m() as RawEncoding);
encoding["_ids"] = [3];
encoding.pad(10);
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(1);
expect(ids).toEqual([4]);
});
});
describe("truncate", () => {
    it('resets internal "cache" properties', () => {
const getIdsMock = jest.fn(() => [4]);
const m = rawEncodingMock.mockImplementation(() => ({
getIds: getIdsMock,
truncate: jest.fn(),
}));
encoding = new Encoding(m() as RawEncoding);
encoding["_ids"] = [3];
encoding.truncate(10);
const ids = encoding.ids;
expect(getIdsMock).toHaveBeenCalledTimes(1);
expect(ids).toEqual([4]);
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib | hf_public_repos/tokenizers/bindings/node/lib/implementations/encoding.ts | import { PaddingOptions, RawEncoding } from "../bindings/raw-encoding";
import { mergeEncodings } from "../bindings/utils";
export class Encoding {
private _attentionMask?: number[];
private _ids?: number[];
private _length?: number;
private _offsets?: [number, number][];
private _overflowing?: Encoding[];
private _specialTokensMask?: number[];
private _tokens?: string[];
private _typeIds?: number[];
private _wordIndexes?: (number | undefined)[];
private _sequenceIndexes?: (number | undefined)[];
constructor(private _rawEncoding: RawEncoding) {}
/**
* Merge a list of Encoding into one final Encoding
* @param encodings The list of encodings to merge
* @param [growingOffsets=false] Whether the offsets should accumulate while merging
*/
static merge(encodings: Encoding[], growingOffsets?: boolean): Encoding {
const mergedRaw = mergeEncodings(
encodings.map((e) => e.rawEncoding),
growingOffsets
);
return new Encoding(mergedRaw);
}
/**
* Number of sequences
*/
get nSequences(): number {
return this._rawEncoding.getNSequences();
}
setSequenceId(seqId: number): void {
return this._rawEncoding.setSequenceId(seqId);
}
/**
* Attention mask
*/
get attentionMask(): number[] {
if (this._attentionMask) {
return this._attentionMask;
}
return (this._attentionMask = this._rawEncoding.getAttentionMask());
}
/**
* Tokenized ids
*/
get ids(): number[] {
if (this._ids) {
return this._ids;
}
return (this._ids = this._rawEncoding.getIds());
}
/**
* Number of tokens
*/
get length(): number {
if (this._length !== undefined) {
return this._length;
}
return (this._length = this._rawEncoding.getLength());
}
/**
* Offsets
*/
get offsets(): [number, number][] {
if (this._offsets) {
return this._offsets;
}
return (this._offsets = this._rawEncoding.getOffsets());
}
/**
* Overflowing encodings, after truncation
*/
get overflowing(): Encoding[] {
if (this._overflowing) {
return this._overflowing;
}
return (this._overflowing = this._rawEncoding
.getOverflowing()
.map((e) => new Encoding(e)));
}
/**
* __⚠️ DANGER ZONE: do not touch unless you know what you're doing ⚠️__
* Access to the `rawEncoding` returned by the internal Rust code.
* @private
* @ignore
* @since 0.6.0
*/
get rawEncoding(): Readonly<RawEncoding> {
return this._rawEncoding;
}
/**
* Special tokens mask
*/
get specialTokensMask(): number[] {
if (this._specialTokensMask) {
return this._specialTokensMask;
}
return (this._specialTokensMask = this._rawEncoding.getSpecialTokensMask());
}
/**
* Tokenized string
*/
get tokens(): string[] {
if (this._tokens) {
return this._tokens;
}
return (this._tokens = this._rawEncoding.getTokens());
}
/**
* Type ids
*/
get typeIds(): number[] {
if (this._typeIds) {
return this._typeIds;
}
return (this._typeIds = this._rawEncoding.getTypeIds());
}
/**
* The tokenized words indexes
*/
get wordIndexes(): (number | undefined)[] {
if (this._wordIndexes) {
return this._wordIndexes;
}
return (this._wordIndexes = this._rawEncoding.getWordIds());
}
get sequenceIndexes(): (number | undefined)[] {
if (this._sequenceIndexes) {
return this._sequenceIndexes;
}
return (this._sequenceIndexes = this._rawEncoding.getSequenceIds());
}
/**
* Get the encoded tokens corresponding to the word at the given index in one of the input
* sequences, with the form [startToken, endToken+1]
* @param word The position of a word in one of the input sequences
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToTokens(word: number, seqId?: number): [number, number] | undefined {
return this._rawEncoding.wordToTokens(word, seqId);
}
/**
* Get the offsets of the word at the given index in the input sequence
* @param word The index of the word in the input sequence
* @param seqId The index of the input sequence that contains said word
* @since 0.7.0
*/
wordToChars(word: number, seqId?: number): [number, number] | undefined {
return this._rawEncoding.wordToChars(word, seqId);
}
/**
* Get the index of the sequence that contains the given token
* @param token The index of the token in the encoded sequence
*/
tokenToSequence(token: number): number | undefined {
return this._rawEncoding.tokenToSequence(token);
}
/**
* Get the offsets of the token at the given index
*
* The returned offsets are related to the input sequence that contains the
   * token. To determine which input sequence it belongs to, you
   * must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToChars(token: number): [number, number] | undefined {
return this._rawEncoding.tokenToChars(token);
}
/**
* Get the word that contains the token at the given index
*
* The returned index is related to the input sequence that contains the
   * token. To determine which input sequence it belongs to, you
   * must call `tokenToSequence`.
*
* @param token The index of the token in the encoded sequence
* @since 0.7.0
*/
tokenToWord(token: number): number | undefined {
return this._rawEncoding.tokenToWord(token);
}
/**
* Find the index of the token at the position of the given char
* @param pos The position of a char in one of the input strings
* @param seqId The index of the input sequence that contains said char
* @since 0.6.0
*/
charToToken(pos: number, seqId?: number): number | undefined {
return this._rawEncoding.charToToken(pos, seqId);
}
/**
* Get the word that contains the given char
* @param pos The position of a char in the input string
* @param seqId The index of the input sequence that contains said char
* @since 0.7.0
*/
charToWord(pos: number, seqId?: number): number | undefined {
return this._rawEncoding.charToWord(pos, seqId);
}
/**
* Pad the current Encoding at the given length
*
* @param length The length at which to pad
* @param [options] Padding options
*/
pad(length: number, options?: PaddingOptions): void {
this._rawEncoding.pad(length, options);
this.resetInternalProperties();
}
/**
* Truncate the current Encoding at the given max length
*
* @param length The maximum length to be kept
   * @param [stride=0] The length of previous content
   * to be included in each overflowing sequence
* @param [direction='right'] Truncate direction
*/
truncate(length: number, stride?: number, direction = "right"): void {
this._rawEncoding.truncate(length, stride, direction);
this.resetInternalProperties();
}
private resetInternalProperties(): void {
for (const prop of [
"_attentionMask",
"_ids",
"_length",
"_offsets",
"_overflowing",
"_specialTokensMask",
"_tokens",
"_typeIds",
"_wordIndexes",
]) {
delete this[prop as keyof this];
}
}
}
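/**
 * Usage sketch (illustrative only): typical interaction with the Encoding wrapper.
 * `encoding` and `otherEncoding` are assumed to come from one of the implementations
 * (e.g. `await tokenizer.encode("my name is john")`); the pad length is arbitrary.
 *
 * @example
 * encoding.tokens;        // computed once, then served from the internal cache
 * encoding.ids;
 * encoding.pad(8);        // pads in place and clears the internal caches
 * encoding.tokens.length; // 8
 * const merged = Encoding.merge([encoding, otherEncoding], true);
 */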
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/base.tokenizer.test.ts | import {
PaddingDirection,
TruncationDirection,
TruncationStrategy,
} from "../../bindings/enums";
import { BPE } from "../../bindings/models";
import {
PaddingConfiguration,
Tokenizer,
TruncationConfiguration,
} from "../../bindings/tokenizer";
import { BaseTokenizer } from "./base.tokenizer";
describe("BaseTokenizer", () => {
let tokenizer: BaseTokenizer<Record<string, unknown>>;
beforeEach(() => {
// Clear all instances and calls to constructor and all methods:
// TokenizerMock.mockClear();
const model = BPE.empty();
const t = new Tokenizer(model);
tokenizer = new BaseTokenizer(t, {});
});
describe("truncation", () => {
it("returns `null` if no truncation setted", () => {
expect(tokenizer.truncation).toBeNull();
});
it("returns configuration when `setTruncation` has been called", () => {
tokenizer.setTruncation(2);
const expectedConfig: TruncationConfiguration = {
maxLength: 2,
strategy: TruncationStrategy.LongestFirst,
direction: TruncationDirection.Right,
stride: 0,
};
expect(tokenizer.truncation).toEqual(expectedConfig);
});
it("returns null when `disableTruncation` has been called", () => {
tokenizer.setTruncation(2);
tokenizer.disableTruncation();
expect(tokenizer.truncation).toBeNull();
});
});
describe("padding", () => {
it("returns `null` if no padding setted", () => {
expect(tokenizer.padding).toBeNull();
});
it("returns configuration when `setPadding` has been called", () => {
tokenizer.setPadding();
const expectedConfig: PaddingConfiguration = {
direction: PaddingDirection.Right,
padId: 0,
padToken: "[PAD]",
padTypeId: 0,
};
expect(tokenizer.padding).toEqual(expectedConfig);
});
it("returns null when `disablePadding` has been called", () => {
tokenizer.setPadding();
tokenizer.disablePadding();
expect(tokenizer.padding).toBeNull();
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/base.tokenizer.ts | import { promisify } from "util";
import { PostProcessor } from "../../bindings/post-processors";
import {
AddedToken,
EncodeInput,
EncodeOptions,
InputSequence,
PaddingConfiguration,
PaddingOptions,
Tokenizer,
TruncationConfiguration,
TruncationOptions,
} from "../../bindings/tokenizer";
import { Encoding } from "../encoding";
export type Token = string | AddedToken;
// eslint-disable-next-line @typescript-eslint/ban-types
export class BaseTokenizer<TConfig extends object> {
private _truncation?: TruncationConfiguration;
private _padding?: PaddingConfiguration;
constructor(
protected tokenizer: Tokenizer,
/**
* @since 0.4.0
*/
readonly configuration: Readonly<TConfig>
) {}
/**
* Instantiate a new Tokenizer from the given file
* @param path Path to a file containing a Tokenizer
*/
static fromFile = Tokenizer.fromFile;
/**
* Instantiate a new Tokenizer from the given JSON string
* @param s A JSON string representation of the Tokenizer
*/
static fromString = Tokenizer.fromString;
/**
* Truncation configuration if enabled, `null` otherwise.
*
* @see {@link BaseTokenizer#setTruncation} to change truncation configuration
* @see {@link BaseTokenizer#disableTruncation} to disable truncation
* @since 0.4.0
*/
get truncation(): Readonly<TruncationConfiguration> | null {
return this._truncation ?? null;
}
/**
* Padding configuration if enabled, `null` otherwise
*
* @see {@link BaseTokenizer#setPadding} to change padding configuration
* @see {@link BaseTokenizer#disablePadding} to disable padding
* @since 0.4.0
*/
get padding(): Readonly<PaddingConfiguration> | null {
return this._padding ?? null;
}
/**
* Add the given tokens to the vocabulary
*
* @param tokens A list of tokens to add to the vocabulary.
* Each token can either be a string, or an instance of AddedToken.
*/
addTokens(tokens: Token[]): number {
return this.tokenizer.addTokens(tokens);
}
/**
* Add the given special tokens to the vocabulary, and treat them as special tokens.
* The special tokens will never be processed by the model, and will be removed while decoding.
*
* @param tokens The list of special tokens to add.
* Each token can either be a string, or an instance of AddedToken
* @returns The number of tokens that were added to the vocabulary
*/
addSpecialTokens(tokens: Token[]): number {
return this.tokenizer.addSpecialTokens(tokens);
}
/**
* Encode the given sequence
*
* @param sequence The sequence to encode
* @param [pair] The optional pair sequence
* @param [options] Some options to customize the encoding
*/
async encode(
sequence: InputSequence,
pair?: InputSequence,
options?: EncodeOptions
): Promise<Encoding> {
const encode = promisify(this.tokenizer.encode.bind(this.tokenizer));
const rawEncoding = await encode(sequence, pair ?? null, options ?? null);
return new Encoding(rawEncoding);
}
/**
* Encode the given sequences or pair of sequences
*
   * @param sequences A list of sequences or pairs of sequences.
   * The list can contain both at the same time.
   * @param [options] Some options to customize the encoding
*/
async encodeBatch(
sequences: EncodeInput[],
options?: EncodeOptions
): Promise<Encoding[]> {
const encodeBatch = promisify(this.tokenizer.encodeBatch.bind(this.tokenizer));
const rawEncodings = await encodeBatch(sequences, options);
return rawEncodings.map((e) => new Encoding(e));
}
/**
* Decode the given list of ids to a string sequence
*
* @param ids A list of ids to be decoded
* @param [skipSpecialTokens=true] Whether to remove all the special tokens from the output string
*/
decode(ids: number[], skipSpecialTokens = true): Promise<string> {
const decode = promisify(this.tokenizer.decode.bind(this.tokenizer));
return decode(ids, skipSpecialTokens);
}
/**
* Decode the list of sequences to a list of string sequences
*
   * @param ids A list of sequences of ids to be decoded
* @param [skipSpecialTokens=true] Whether to remove all the special tokens from the output strings
*/
decodeBatch(ids: number[][], skipSpecialTokens = true): Promise<string[]> {
const decodeBatch = promisify(this.tokenizer.decodeBatch.bind(this.tokenizer));
return decodeBatch(ids, skipSpecialTokens);
}
/**
* Enable/change truncation with specified options
*
* @param maxLength The maximum length at which to truncate
* @param [options] Additional truncation options
* @returns Full truncation configuration
*/
setTruncation(
maxLength: number,
options?: TruncationOptions
): Readonly<TruncationConfiguration> {
const result = this.tokenizer.setTruncation(maxLength, options);
return (this._truncation = result);
}
/**
* Disable truncation
*/
disableTruncation(): void {
this.tokenizer.disableTruncation();
delete this._truncation;
}
/**
* Enable/change padding with specified options
* @param [options] Padding options
* @returns Full padding configuration
*/
setPadding(options?: PaddingOptions): Readonly<PaddingConfiguration> {
const result = this.tokenizer.setPadding(options);
return (this._padding = result);
}
/**
* Disable padding
*/
disablePadding(): void {
this.tokenizer.disablePadding();
delete this._padding;
}
/**
* Convert the given token id to its corresponding string
*
* @param id The token id to convert
* @returns The corresponding string if it exists
*/
idToToken(id: number): string | undefined {
return this.tokenizer.idToToken(id);
}
/**
* Convert the given token to its corresponding id
*
* @param token The token to convert
* @returns The corresponding id if it exists
*/
tokenToId(token: string): number | undefined {
return this.tokenizer.tokenToId(token);
}
/**
* Apply all the post-processing steps to the given encodings.
* The various steps are:
* 1. Truncate according to global params (@see setTruncation)
* 2. Apply the PostProcessor
* 3. Pad according to global params (@see setPadding)
* @param encoding The main Encoding to post process
* @param [pair] An optional pair Encoding
   * @param [addSpecialTokens=true] Whether to add special tokens. Defaults to `true`.
* @since 0.6.0
*/
postProcess(encoding: Encoding, pair?: Encoding, addSpecialTokens?: boolean): Encoding {
const rawEncoding = this.tokenizer.postProcess(
encoding.rawEncoding,
pair?.rawEncoding,
addSpecialTokens
);
return new Encoding(rawEncoding);
}
/**
* Change the post-processor to use with this Tokenizer
* @param postProcessor New post-processor to use
* @throws Will throw an error if any task is running
* @throws Will throw an error if the post-processor is already used in another Tokenizer
*/
setPostProcessor(processor: PostProcessor): void {
return this.tokenizer.setPostProcessor(processor);
}
/**
* Save the Tokenizer as JSON to the given path
* @param path Path to the JSON file to write
* @param [pretty=false] Whether the JSON string should be prettified
*/
save(path: string, pretty?: boolean): void {
return this.tokenizer.save(path, pretty);
}
/**
* Get a serialized JSON version of the Tokenizer as a string
* @param [pretty=false] Whether the JSON string should be prettified
*/
toString(pretty?: boolean): string {
return this.tokenizer.toString(pretty);
}
}
/**
* Get the string content from a token, which can be a string or AddedToken
* @param token The token from which get the content
*/
export function getTokenContent(token: Token): string {
return typeof token === "string" ? token : token.getContent();
}
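/**
 * Usage sketch (illustrative only): wrapping a raw Tokenizer with BaseTokenizer to get
 * Promise-based encode/decode plus cached truncation/padding configuration. The token list
 * and padding length are assumptions; `BPE` is assumed to come from `../../bindings/models`.
 *
 * @example
 * const tokenizer = new BaseTokenizer(new Tokenizer(BPE.empty()), {});
 * tokenizer.addTokens(["my", "name", "is", "john"]);
 * tokenizer.setPadding({ maxLength: 8 });
 * const encoding = await tokenizer.encode("my name is john");
 * encoding.tokens; // ["my", "name", "is", "john", "[PAD]", "[PAD]", "[PAD]", "[PAD]"]
 */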
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/bert-wordpiece.tokenizer.test.ts | import { BertWordPieceOptions, BertWordPieceTokenizer } from "./bert-wordpiece.tokenizer";
const MOCKS_DIR = __dirname + "/__mocks__";
describe("BertWordPieceTokenizer", () => {
describe("fromOptions", () => {
it("does not throw any error if no vocabFile is provided", async () => {
const tokenizer = await BertWordPieceTokenizer.fromOptions();
expect(tokenizer).toBeDefined();
});
describe("when a vocabFile is provided and `addSpecialTokens === true`", () => {
it("throws a `sepToken error` if no `sepToken` is provided", async () => {
const options: BertWordPieceOptions = {
vocabFile: MOCKS_DIR + "/bert-vocab-empty.txt",
};
await expect(BertWordPieceTokenizer.fromOptions(options)).rejects.toThrow(
"sepToken not found in the vocabulary"
);
});
it("throws a `clsToken error` if no `clsToken` is provided", async () => {
const options: BertWordPieceOptions = {
vocabFile: MOCKS_DIR + "/bert-vocab-without-cls.txt",
};
await expect(BertWordPieceTokenizer.fromOptions(options)).rejects.toThrow(
"clsToken not found in the vocabulary"
);
});
});
});
});
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/bert-wordpiece.tokenizer.ts | import { promisify } from "util";
import { wordPieceDecoder } from "../../bindings/decoders";
import { Model, WordPiece, WordPieceOptions } from "../../bindings/models";
import { bertNormalizer } from "../../bindings/normalizers";
import { bertProcessing } from "../../bindings/post-processors";
import { bertPreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { wordPieceTrainer } from "../../bindings/trainers";
import { BaseTokenizer, getTokenContent, Token } from "./base.tokenizer";
export interface BertWordPieceOptions {
/**
* @default true
*/
cleanText?: boolean;
/**
* @default "[CLS]"
*/
clsToken?: Token;
/**
* @default true
*/
handleChineseChars?: boolean;
/**
* @default true
*/
lowercase?: boolean;
/**
* @default "[MASK]"
*/
maskToken?: Token;
/**
* @default "[PAD]"
*/
padToken?: Token;
/**
* @default "[SEP]"
*/
sepToken?: Token;
/**
* @default true
*/
stripAccents?: boolean;
/**
* @default "[UNK]"
*/
unkToken?: Token;
vocabFile?: string;
/**
* The prefix to attach to subword units that don't represent a beginning of word
* @default "##"
*/
wordpiecesPrefix?: string;
}
export interface BertWordPieceTrainOptions {
/**
* @default []
*/
initialAlphabet?: string[];
/**
* @default 1000
*/
limitAlphabet?: number;
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
*/
specialTokens?: Token[];
/**
* @default 30000
*/
vocabSize?: number;
/**
* The prefix to attach to subword units that don't represent a beginning of word
* @default "##"
*/
wordpiecesPrefix?: string;
}
type BertTokenizerConfig = Required<Omit<BertWordPieceOptions, "vocabFile">> & {
vocabFile?: string;
};
/**
* Bert WordPiece Tokenizer
*/
export class BertWordPieceTokenizer extends BaseTokenizer<BertTokenizerConfig> {
private static readonly defaultBertOptions: BertTokenizerConfig = {
cleanText: true,
clsToken: "[CLS]",
handleChineseChars: true,
lowercase: true,
maskToken: "[MASK]",
padToken: "[PAD]",
sepToken: "[SEP]",
stripAccents: true,
unkToken: "[UNK]",
wordpiecesPrefix: "##",
};
private readonly defaultTrainOptions: Required<BertWordPieceTrainOptions> = {
initialAlphabet: [],
limitAlphabet: 1000,
minFrequency: 2,
showProgress: true,
specialTokens: ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
vocabSize: 30000,
wordpiecesPrefix: "##",
};
private constructor(tokenizer: Tokenizer, configuration: BertTokenizerConfig) {
super(tokenizer, configuration);
}
/**
* Instantiate and returns a new Bert WordPiece tokenizer
* @param [options] Optional tokenizer options
*/
static async fromOptions(
options?: BertWordPieceOptions
): Promise<BertWordPieceTokenizer> {
const opts = { ...this.defaultBertOptions, ...options };
let model: Model;
if (opts.vocabFile) {
const fromFile = promisify<string, WordPieceOptions, Model>(WordPiece.fromFile);
model = await fromFile(opts.vocabFile, {
unkToken: getTokenContent(opts.unkToken),
continuingSubwordPrefix: opts.wordpiecesPrefix,
});
} else {
model = WordPiece.empty();
}
const tokenizer = new Tokenizer(model);
for (const token of [
opts.clsToken,
opts.sepToken,
opts.unkToken,
opts.padToken,
opts.maskToken,
]) {
if (tokenizer.tokenToId(getTokenContent(token)) !== undefined) {
tokenizer.addSpecialTokens([token]);
}
}
const normalizer = bertNormalizer(opts);
tokenizer.setNormalizer(normalizer);
tokenizer.setPreTokenizer(bertPreTokenizer());
if (opts.vocabFile) {
const sepTokenId = tokenizer.tokenToId(getTokenContent(opts.sepToken));
if (sepTokenId === undefined) {
throw new Error("sepToken not found in the vocabulary");
}
const clsTokenId = tokenizer.tokenToId(getTokenContent(opts.clsToken));
if (clsTokenId === undefined) {
throw new Error("clsToken not found in the vocabulary");
}
const processor = bertProcessing(
[getTokenContent(opts.sepToken), sepTokenId],
[getTokenContent(opts.clsToken), clsTokenId]
);
tokenizer.setPostProcessor(processor);
}
const decoder = wordPieceDecoder(opts.wordpiecesPrefix);
tokenizer.setDecoder(decoder);
return new BertWordPieceTokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: BertWordPieceTrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = wordPieceTrainer(mergedOptions);
this.tokenizer.train(trainer, files);
}
}
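/**
 * Usage sketch (illustrative only): loading a Bert WordPiece tokenizer from a vocabulary
 * file and encoding a sentence pair. The vocab path is an assumption, and the file is
 * assumed to contain the default special tokens ([CLS], [SEP], ...), otherwise
 * `fromOptions` throws.
 *
 * @example
 * const tokenizer = await BertWordPieceTokenizer.fromOptions({
 *   vocabFile: "./bert-base-uncased-vocab.txt",
 * });
 * const encoding = await tokenizer.encode("Hello, y'all!", "How are you?");
 * console.log(encoding.tokens);
 */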
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/bpe.tokenizer.ts | import { promisify } from "util";
import { bpeDecoder } from "../../bindings/decoders";
import { BPE, BPEOptions, Model } from "../../bindings/models";
import {
lowercaseNormalizer,
nfkcNormalizer,
sequenceNormalizer,
} from "../../bindings/normalizers";
import { whitespaceSplitPreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { bpeTrainer } from "../../bindings/trainers";
import { BaseTokenizer, getTokenContent, Token } from "./base.tokenizer";
export interface BPETokenizerOptions {
/**
   * The BPE dropout to use. Must be a float between 0 and 1
*/
dropout?: number;
/**
* @default false
*/
lowercase?: boolean;
mergesFile?: string;
/**
* @default "</w>"
*/
suffix?: string;
/**
* The unknown token to be used by the model
* @default "<unk>"
*/
unkToken?: Token;
vocabFile?: string;
}
export interface BPETokenizerTrainOptions {
/**
* @default []
*/
initialAlphabet?: string[];
/**
* @default 1000
*/
limitAlphabet?: number;
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default ["<unk>"]
*/
specialTokens?: Token[];
/**
* @default "</w>"
*/
suffix?: string;
/**
* @default 30000
*/
vocabSize?: number;
}
type BPETokenizerConfig = BPETokenizerOptions &
Required<Pick<BPETokenizerOptions, "unkToken" | "suffix">>;
/**
* Original BPE Tokenizer.
* Represents the BPE algorithm, as introduced by Rico Sennrich (https://arxiv.org/abs/1508.07909)
*/
export class BPETokenizer extends BaseTokenizer<BPETokenizerConfig> {
private static readonly defaultBPEOptions: BPETokenizerConfig = {
suffix: "</w>",
unkToken: "<unk>",
};
private readonly defaultTrainOptions: Required<BPETokenizerTrainOptions> = {
initialAlphabet: [],
limitAlphabet: 1000,
minFrequency: 2,
showProgress: true,
specialTokens: ["<unk>"],
suffix: "</w>",
vocabSize: 30000,
};
private constructor(tokenizer: Tokenizer, configuration: BPETokenizerConfig) {
super(tokenizer, configuration);
}
/**
* Instantiate and returns a new BPE tokenizer
* @param [options] Optional tokenizer options
*/
static async fromOptions(options?: BPETokenizerOptions): Promise<BPETokenizer> {
const opts = { ...this.defaultBPEOptions, ...options };
const unkToken = getTokenContent(opts.unkToken);
let model: Model;
if (opts.vocabFile && opts.mergesFile) {
const modelOptions: BPEOptions = {
dropout: opts.dropout,
endOfWordSuffix: opts.suffix,
unkToken: unkToken,
};
const fromFile = promisify<string, string, BPEOptions, Model>(BPE.fromFile);
model = await fromFile(opts.vocabFile, opts.mergesFile, modelOptions);
} else {
model = BPE.empty();
}
const tokenizer = new Tokenizer(model);
if (tokenizer.tokenToId(unkToken) !== undefined) {
tokenizer.addSpecialTokens([opts.unkToken]);
}
if (opts.lowercase) {
tokenizer.setNormalizer(
sequenceNormalizer([nfkcNormalizer(), lowercaseNormalizer()])
);
} else {
tokenizer.setNormalizer(nfkcNormalizer());
}
tokenizer.setPreTokenizer(whitespaceSplitPreTokenizer());
const decoder = bpeDecoder(opts.suffix);
tokenizer.setDecoder(decoder);
return new BPETokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: BPETokenizerTrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = bpeTrainer(mergedOptions);
this.tokenizer.train(trainer, files);
}
}
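/**
 * Usage sketch (illustrative only): training a BPE tokenizer from scratch and saving it.
 * The corpus path, vocab size and output path are assumptions chosen for the example.
 *
 * @example
 * const tokenizer = await BPETokenizer.fromOptions({ lowercase: true });
 * await tokenizer.train(["./corpus.txt"], { vocabSize: 5000 });
 * tokenizer.save("./bpe-tokenizer.json");
 */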
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/byte-level-bpe.tokenizer.ts | import { promisify } from "util";
import { byteLevelDecoder } from "../../bindings/decoders";
import { BPE, BPEOptions, Model } from "../../bindings/models";
import {
lowercaseNormalizer,
nfkcNormalizer,
sequenceNormalizer,
} from "../../bindings/normalizers";
import { byteLevelProcessing } from "../../bindings/post-processors";
import { byteLevelAlphabet, byteLevelPreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { bpeTrainer } from "../../bindings/trainers";
import { BaseTokenizer, Token } from "./base.tokenizer";
export interface ByteLevelBPETokenizerOptions {
/**
* @default false
*/
addPrefixSpace?: boolean;
/**
* The prefix to attach to subword units that don't represent a beginning of word
*/
continuingSubwordPrefix?: string;
/**
* @default false
*/
lowercase?: boolean;
/**
   * The BPE dropout to use. Must be a float between 0 and 1
*/
dropout?: number;
/**
* The suffix to attach to subword units that represent an end of word
*/
endOfWordSuffix?: string;
mergesFile?: string;
unicodeNormalizer?: string;
/**
   * Whether to trim whitespace from the produced offsets
* @default false
*/
trimOffsets?: boolean;
vocabFile?: string;
}
export interface ByteLevelBPETrainOptions {
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
   * @default ["<unk>"]
*/
specialTokens?: Token[];
/**
* @default 30000
*/
vocabSize?: number;
}
type ByteLevelBPETokenizerConfig = ByteLevelBPETokenizerOptions &
Required<Pick<ByteLevelBPETokenizerOptions, "addPrefixSpace">>;
/**
* Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
*/
export class ByteLevelBPETokenizer extends BaseTokenizer<ByteLevelBPETokenizerConfig> {
private static readonly defaultOptions: ByteLevelBPETokenizerConfig = {
addPrefixSpace: false,
trimOffsets: false,
};
private readonly defaultTrainOptions: Required<ByteLevelBPETrainOptions> = {
minFrequency: 2,
showProgress: true,
specialTokens: ["<unk>"],
vocabSize: 30000,
};
private constructor(tokenizer: Tokenizer, configuration: ByteLevelBPETokenizerConfig) {
super(tokenizer, configuration);
}
static async fromOptions(
options?: ByteLevelBPETokenizerOptions
): Promise<ByteLevelBPETokenizer> {
const opts = { ...this.defaultOptions, ...options };
let model: Model;
if (opts.vocabFile && opts.mergesFile) {
const fromFile = promisify<string, string, BPEOptions, Model>(BPE.fromFile);
model = await fromFile(opts.vocabFile, opts.mergesFile, opts);
} else {
model = BPE.empty();
}
const tokenizer = new Tokenizer(model);
if (opts.lowercase) {
tokenizer.setNormalizer(
sequenceNormalizer([nfkcNormalizer(), lowercaseNormalizer()])
);
} else {
tokenizer.setNormalizer(nfkcNormalizer());
}
const preTokenizer = byteLevelPreTokenizer(opts.addPrefixSpace);
tokenizer.setPreTokenizer(preTokenizer);
tokenizer.setDecoder(byteLevelDecoder());
tokenizer.setPostProcessor(byteLevelProcessing(opts.trimOffsets));
return new ByteLevelBPETokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: ByteLevelBPETrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = bpeTrainer({
...mergedOptions,
initialAlphabet: byteLevelAlphabet(),
});
this.tokenizer.train(trainer, files);
}
}
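/**
 * Usage sketch (illustrative only): a GPT-2 style byte-level BPE tokenizer loaded from
 * existing vocab/merges files. The file paths are assumptions chosen for the example.
 *
 * @example
 * const tokenizer = await ByteLevelBPETokenizer.fromOptions({
 *   vocabFile: "./gpt2-vocab.json",
 *   mergesFile: "./gpt2-merges.txt",
 *   addPrefixSpace: true,
 * });
 * const encoding = await tokenizer.encode("Hey there dear friend!");
 */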
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/index.ts | export * from "./bert-wordpiece.tokenizer";
export * from "./bpe.tokenizer";
export * from "./byte-level-bpe.tokenizer";
export * from "./sentence-piece-bpe.tokenizer";
export { getTokenContent, BaseTokenizer, Token } from "./base.tokenizer";
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/sentence-piece-bpe.tokenizer.ts | import { promisify } from "util";
import { metaspaceDecoder } from "../../bindings/decoders";
import { BPE, BPEOptions, Model } from "../../bindings/models";
import { nfkcNormalizer } from "../../bindings/normalizers";
import { metaspacePreTokenizer } from "../../bindings/pre-tokenizers";
import { Tokenizer } from "../../bindings/tokenizer";
import { bpeTrainer } from "../../bindings/trainers";
import { BaseTokenizer, getTokenContent, Token } from "./base.tokenizer";
export interface SentencePieceBPETokenizerOptions extends OptionsWithDefaults {
dropout?: number;
mergesFile?: string;
vocabFile?: string;
}
interface OptionsWithDefaults {
/**
* @default true
*/
addPrefixSpace?: boolean;
/**
* @default "▁"
*/
replacement?: string;
/**
* @default "<unk>"
*/
unkToken?: Token;
}
export interface SentencePieceBPETrainOptions {
/**
* @default []
*/
initialAlphabet?: string[];
/**
* @default 1000
*/
limitAlphabet?: number;
/**
* @default 2
*/
minFrequency?: number;
/**
* @default true
*/
showProgress?: boolean;
/**
* @default ["<unk>"]
*/
specialTokens?: Token[];
/**
* @default 30000
*/
vocabSize?: number;
}
type SentencePieceBPETokenizerConfig = SentencePieceBPETokenizerOptions &
Required<OptionsWithDefaults>;
/**
* Represents the BPE algorithm, with the pretokenization used by SentencePiece
*/
export class SentencePieceBPETokenizer extends BaseTokenizer<SentencePieceBPETokenizerConfig> {
private static readonly defaultOptions: SentencePieceBPETokenizerConfig = {
addPrefixSpace: true,
replacement: "▁",
unkToken: "<unk>",
};
private readonly defaultTrainOptions: Required<SentencePieceBPETrainOptions> = {
initialAlphabet: [],
limitAlphabet: 1000,
minFrequency: 2,
showProgress: true,
specialTokens: ["<unk>"],
vocabSize: 30000,
};
private constructor(
tokenizer: Tokenizer,
configuration: SentencePieceBPETokenizerConfig
) {
super(tokenizer, configuration);
}
static async fromOptions(
options?: SentencePieceBPETokenizerOptions
): Promise<SentencePieceBPETokenizer> {
const opts = { ...this.defaultOptions, ...options };
const unkToken = getTokenContent(opts.unkToken);
let model: Model;
if (opts.vocabFile && opts.mergesFile) {
const modelOptions: BPEOptions = {
dropout: opts.dropout,
unkToken: unkToken,
};
const fromFile = promisify<string, string, BPEOptions, Model>(BPE.fromFile);
model = await fromFile(opts.vocabFile, opts.mergesFile, modelOptions);
} else {
model = BPE.empty();
}
const tokenizer = new Tokenizer(model);
if (tokenizer.tokenToId(unkToken) !== undefined) {
tokenizer.addSpecialTokens([opts.unkToken]);
}
tokenizer.setNormalizer(nfkcNormalizer());
const preTokenizer = metaspacePreTokenizer(opts.replacement, opts.addPrefixSpace);
tokenizer.setPreTokenizer(preTokenizer);
const decoder = metaspaceDecoder(opts.replacement, opts.addPrefixSpace);
tokenizer.setDecoder(decoder);
return new SentencePieceBPETokenizer(tokenizer, opts);
}
/**
* Train the model using the given files
*
* @param files Files to use for training
* @param [options] Training options
*/
async train(files: string[], options?: SentencePieceBPETrainOptions): Promise<void> {
const mergedOptions = { ...this.defaultTrainOptions, ...options };
const trainer = bpeTrainer(mergedOptions);
this.tokenizer.train(trainer, files);
}
}
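// A rough usage sketch (not part of the original file); the file paths are
// illustrative and `encode` is assumed to be inherited from BaseTokenizer:
//
//   const tokenizer = await SentencePieceBPETokenizer.fromOptions({
//     vocabFile: "./vocab.json",
//     mergesFile: "./merges.txt",
//   });
//   const encoding = await tokenizer.encode("Hello world");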
| 0 |
hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers | hf_public_repos/tokenizers/bindings/node/lib/implementations/tokenizers/__mocks__/bert-vocab-without-cls.txt | [SEP]
| 0 |
hf_public_repos/tokenizers/bindings/node | hf_public_repos/tokenizers/bindings/node/native/Cargo.toml | [package]
name = "node"
version = "0.13.3"
authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
license = "Apache-2.0"
build = "build.rs"
exclude = ["artifacts.json", "index.node"]
[lib]
name = "node"
crate-type = ["cdylib"]
[build-dependencies]
neon-build = "0.3.3"
[dependencies]
neon = "0.3"
neon-runtime = "0.3"
neon-serde = "0.3"
serde = { version = "1.0", features = [ "rc", "derive" ] }
tokenizers = { path = "../../../tokenizers" }
serde_json = "1.0"
| 0 |
hf_public_repos/tokenizers/bindings/node | hf_public_repos/tokenizers/bindings/node/native/build.rs | extern crate neon_build;
fn main() {
neon_build::setup(); // must be called in build.rs
// add project-specific build logic here...
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/decoders.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use std::sync::Arc;
use tk::decoders::DecoderWrapper;
/// Decoder
#[derive(Clone, Serialize, Deserialize)]
pub struct Decoder {
#[serde(flatten)]
pub decoder: Option<Arc<DecoderWrapper>>,
}
impl tk::Decoder for Decoder {
fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> {
self.decoder
.as_ref()
.ok_or("Uninitialized Decoder")?
.decode_chain(tokens)
}
}
declare_types! {
pub class JsDecoder for Decoder {
init(_) {
// This should not be called from JS
Ok(Decoder { decoder: None })
}
method decode(mut cx) {
use tk::Decoder;
let tokens = cx.extract_vec::<String>(0)?;
let this = cx.this();
let guard = cx.lock();
let output = this.borrow(&guard)
.decoder.as_ref().unwrap()
.decode(tokens)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.string(output).upcast())
}
}
}
/// byte_level()
fn byte_level(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::byte_level::ByteLevel::default().into(),
));
Ok(decoder)
}
/// replace(pattern: string, content: string)
fn replace(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let pattern: String = cx.extract::<String>(0)?;
let content: String = cx.extract::<String>(1)?;
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::normalizers::replace::Replace::new(pattern, content)
.map_err(|e| Error(e.to_string()))?
.into(),
));
Ok(decoder)
}
/// wordpiece(prefix: String = "##", cleanup: bool = true)
fn wordpiece(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let prefix = cx
.extract_opt::<String>(0)?
.unwrap_or_else(|| String::from("##"));
let cleanup = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::wordpiece::WordPiece::new(prefix, cleanup).into(),
));
Ok(decoder)
}
/// byte_fallback()
fn byte_fallback(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::byte_fallback::ByteFallback::new().into(),
));
Ok(decoder)
}
/// fuse()
fn fuse(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(tk::decoders::fuse::Fuse::new().into()));
Ok(decoder)
}
/// strip(content: char, left: usize, right: usize)
fn strip(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let content: char = cx.extract(0)?;
let left: usize = cx.extract(1)?;
let right: usize = cx.extract(2)?;
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::strip::Strip::new(content, left, right).into(),
));
Ok(decoder)
}
/// metaspace(replacement: String = "▁", add_prefix_space: bool = true)
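/// Example (illustrative): with the defaults, decoding ["▁Hello", "▁there"] maps
/// "▁" back to spaces and strips the leading prefix space, yielding "Hello there".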
fn metaspace(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let replacement = cx.extract_opt::<char>(0)?.unwrap_or('▁');
let add_prefix_space = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::metaspace::Metaspace::new(replacement, add_prefix_space).into(),
));
Ok(decoder)
}
/// bpe_decoder(suffix: String = "</w>")
fn bpe_decoder(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let suffix = cx
.extract_opt::<String>(0)?
.unwrap_or_else(|| String::from("</w>"));
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder =
Some(Arc::new(tk::decoders::bpe::BPEDecoder::new(suffix).into()));
Ok(decoder)
}
/// ctc_decoder(pad_token: String = "<pad>", word_delimiter_token: String = "|", cleanup = true)
fn ctc_decoder(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let pad_token = cx
.extract_opt::<String>(0)?
.unwrap_or_else(|| String::from("<pad>"));
let word_delimiter_token = cx
.extract_opt::<String>(1)?
.unwrap_or_else(|| String::from("|"));
let cleanup = cx.extract_opt::<bool>(2)?.unwrap_or(true);
let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
decoder.borrow_mut(&guard).decoder = Some(Arc::new(
tk::decoders::ctc::CTC::new(pad_token, word_delimiter_token, cleanup).into(),
));
Ok(decoder)
}
/// sequence(decoders: Decoder[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsDecoder> {
let decoders = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(decoders.len());
decoders.into_iter().try_for_each(|decoder| {
match decoder.downcast::<JsDecoder>().or_throw(&mut cx) {
Ok(decoder) => {
let guard = cx.lock();
if let Some(decoder_arc) = &decoder.borrow(&guard).decoder {
let decoder: DecoderWrapper = (**decoder_arc).clone();
sequence.push(decoder);
}
Ok(())
}
Err(e) => Err(e),
}
})?;
    let mut decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
    let guard = cx.lock();
    decoder.borrow_mut(&guard).decoder = Some(Arc::new(tk::DecoderWrapper::Sequence(
        tk::decoders::sequence::Sequence::new(sequence),
    )));
    Ok(decoder)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_ByteLevel", prefix), byte_level)?;
m.export_function(&format!("{}_Replace", prefix), replace)?;
m.export_function(&format!("{}_WordPiece", prefix), wordpiece)?;
m.export_function(&format!("{}_ByteFallback", prefix), byte_fallback)?;
m.export_function(&format!("{}_Fuse", prefix), fuse)?;
m.export_function(&format!("{}_Strip", prefix), strip)?;
m.export_function(&format!("{}_Metaspace", prefix), metaspace)?;
m.export_function(&format!("{}_BPEDecoder", prefix), bpe_decoder)?;
m.export_function(&format!("{}_CTC", prefix), ctc_decoder)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/encoding.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use crate::tokenizer::PaddingParams;
use neon::prelude::*;
use tk::utils::truncation::TruncationDirection;
/// Encoding
pub struct Encoding {
pub encoding: Option<tk::tokenizer::Encoding>,
}
declare_types! {
pub class JsEncoding for Encoding {
init(_) {
// This should never be called from JavaScript
Ok(Encoding { encoding: None })
}
method getLength(mut cx) {
let this = cx.this();
let guard = cx.lock();
let length = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_ids()
.len();
Ok(cx.number(length as f64).upcast())
}
method getNSequences(mut cx) {
let this = cx.this();
let guard = cx.lock();
let n = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.n_sequences();
Ok(cx.number(n as f64).upcast())
}
method setSequenceId(mut cx) {
let seq_id = cx.extract::<usize>(0)?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.encoding.as_mut().expect("Uninitialized Encoding")
.set_sequence_id(seq_id);
Ok(cx.undefined().upcast())
}
method getIds(mut cx) {
// getIds(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_ids()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getTypeIds(mut cx) {
// getTypeIds(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_type_ids()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getAttentionMask(mut cx) {
// getAttentionMask(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_attention_mask()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getSpecialTokensMask(mut cx) {
// getSpecialTokensMask(): number[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_special_tokens_mask()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getTokens(mut cx) {
// getTokens(): string[]
let this = cx.this();
let guard = cx.lock();
let tokens = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_tokens()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &tokens)?)
}
method getWordIds(mut cx) {
// getWordIds(): (number | undefined)[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_word_ids()
.to_vec();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getSequenceIds(mut cx) {
// getSequenceIds(): (number | undefined)[]
let this = cx.this();
let guard = cx.lock();
let ids = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_sequence_ids();
Ok(neon_serde::to_value(&mut cx, &ids)?)
}
method getOffsets(mut cx) {
// getOffsets(): [number, number][]
let this = cx.this();
let guard = cx.lock();
let offsets = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_offsets()
.to_vec();
let js_offsets = neon_serde::to_value(&mut cx, &offsets)?;
Ok(js_offsets)
}
method getOverflowing(mut cx) {
// getOverflowing(): Encoding[]
let this = cx.this();
let guard = cx.lock();
let overflowings = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.get_overflowing()
.clone();
let js_overflowings = JsArray::new(&mut cx, overflowings.len() as u32);
for (index, overflowing) in overflowings.iter().enumerate() {
let mut js_overflowing = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
// Set the content
let guard = cx.lock();
js_overflowing.borrow_mut(&guard).encoding = Some(overflowing.clone());
js_overflowings.set(&mut cx, index as u32, js_overflowing)?;
}
Ok(js_overflowings.upcast())
}
method wordToTokens(mut cx) {
// wordToTokens(word: number, seqId: number = 0): [number, number] | undefined
let word = cx.extract::<u32>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.word_to_tokens(word, seq_id);
if let Some(tokens) = res {
Ok(neon_serde::to_value(&mut cx, &tokens)?)
} else {
Ok(cx.undefined().upcast())
}
}
method wordToChars(mut cx) {
// wordToChars(word: number, seqId: number = 0): [number, number] | undefined
let word = cx.extract::<u32>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.word_to_chars(word, seq_id);
if let Some(offsets) = res {
Ok(neon_serde::to_value(&mut cx, &offsets)?)
} else {
Ok(cx.undefined().upcast())
}
}
method tokenToSequence(mut cx) {
// tokenToSequence(token: number): number | undefined
let token = cx.extract::<usize>(0)?;
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.token_to_sequence(token);
if let Some(seq) = res {
Ok(neon_serde::to_value(&mut cx, &seq)?)
} else {
Ok(cx.undefined().upcast())
}
}
method tokenToChars(mut cx) {
            // tokenToChars(token: number): [number, number] | undefined
let token = cx.extract::<usize>(0)?;
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.token_to_chars(token);
if let Some((_, offsets)) = res {
Ok(neon_serde::to_value(&mut cx, &offsets)?)
} else {
Ok(cx.undefined().upcast())
}
}
method tokenToWord(mut cx) {
            // tokenToWord(token: number): number | undefined
let token = cx.argument::<JsNumber>(0)?.value() as usize;
let this = cx.this();
let guard = cx.lock();
let res = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.token_to_word(token);
if let Some((_, index)) = res {
Ok(cx.number(index as f64).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method charToToken(mut cx) {
// charToToken(pos: number, seqId: number = 0): number | undefined
let pos = cx.extract::<usize>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let index = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.char_to_token(pos, seq_id);
if let Some(index) = index {
Ok(cx.number(index as f64).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method charToWord(mut cx) {
// charToWord(pos: number, seqId: number = 0): number | undefined
let pos = cx.extract::<usize>(0)?;
let seq_id = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let this = cx.this();
let guard = cx.lock();
let index = this.borrow(&guard)
.encoding.as_ref().expect("Uninitialized Encoding")
.char_to_word(pos, seq_id);
if let Some(index) = index {
Ok(cx.number(index as f64).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method pad(mut cx) {
// pad(length: number, options?: {
// direction?: 'left' | 'right' = 'right',
// padId?: number = 0,
// padTypeId?: number = 0,
// padToken?: string = "[PAD]"
            // })
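            // Example (illustrative JS-side call, option names as documented above):
            //   encoding.pad(128, { direction: "right", padToken: "[PAD]" })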
let length = cx.extract::<usize>(0)?;
let params = cx.extract_opt::<PaddingParams>(1)?
.map_or_else(tk::PaddingParams::default, |p| p.0);
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.encoding.as_mut().expect("Uninitialized Encoding")
.pad(
length,
params.pad_id,
params.pad_type_id,
¶ms.pad_token,
params.direction
);
Ok(cx.undefined().upcast())
}
method truncate(mut cx) {
// truncate(length: number, stride: number = 0, direction: string = 'right')
let length = cx.extract::<usize>(0)?;
let stride = cx.extract_opt::<usize>(1)?.unwrap_or(0);
let direction = cx.extract_opt::<String>(2)?.unwrap_or_else(|| String::from("right"));
let tdir = match direction.as_str() {
"left" => Ok(TruncationDirection::Left),
"right" => Ok(TruncationDirection::Right),
_ => cx.throw_error(format!("Invalid truncation direction value : {}", direction)),
}?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.encoding.as_mut().expect("Uninitialized Encoding")
.truncate(length, stride, tdir);
Ok(cx.undefined().upcast())
}
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/extraction.rs | use neon::prelude::*;
use serde::de::DeserializeOwned;
/// Common Error that can be converted to a neon::result::Throw and put
/// the js engine in a throwing state. Makes it way easier to manage errors
pub struct Error(pub String);
impl<T> From<T> for Error
where
T: std::fmt::Display,
{
fn from(e: T) -> Self {
Self(format!("{}", e))
}
}
impl From<Error> for neon::result::Throw {
fn from(err: Error) -> Self {
let msg = err.0;
unsafe {
neon_runtime::error::throw_error_from_utf8(msg.as_ptr(), msg.len() as i32);
neon::result::Throw
}
}
}
pub type LibResult<T> = std::result::Result<T, Error>;
/// This trait is to be implemented for any type that we want to extract from
/// a JsValue.
pub trait FromJsValue: Sized {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self>;
}
/// Any type that implements DeserializeOwned from serde can easily be converted
impl<T> FromJsValue for T
where
T: DeserializeOwned,
{
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
let val: T = neon_serde::from_value(cx, from)?;
Ok(val)
}
}
/// This trait provides some extraction helpers, and we implement it for CallContext
/// so that we can easily extract any type that implements FromJsValue from the arguments.
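/// A minimal sketch (not part of the original code) of how a binding function can
/// use these helpers; `greet` and its arguments are hypothetical:
///
/// ```ignore
/// fn greet(mut cx: FunctionContext) -> JsResult<JsUndefined> {
///     let name = cx.extract::<String>(0)?;                // required string at 0
///     let times = cx.extract_opt::<u32>(1)?.unwrap_or(1); // optional number at 1
///     let tags = cx.extract_vec_opt::<String>(2)?;        // optional string[] at 2
///     println!("hello {} x{} (tags: {:?})", name, times, tags);
///     Ok(cx.undefined())
/// }
/// ```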
pub trait Extract {
fn extract<T: FromJsValue>(&mut self, pos: i32) -> LibResult<T>;
fn extract_opt<T: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<T>>;
fn extract_vec<T: FromJsValue>(&mut self, pos: i32) -> LibResult<Vec<T>>;
fn extract_vec_opt<T: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<Vec<T>>>;
}
impl<'c, T: neon::object::This> Extract for CallContext<'c, T> {
fn extract<E: FromJsValue>(&mut self, pos: i32) -> LibResult<E> {
let val = self
.argument_opt(pos)
.ok_or_else(|| Error(format!("Argument {} is missing", pos)))?;
let ext = E::from_value(val, self)?;
Ok(ext)
}
fn extract_opt<E: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<E>> {
let val = self.argument_opt(pos);
match val {
None => Ok(None),
Some(v) => {
// For any optional value, we accept both `undefined` and `null`
if v.downcast::<JsNull>().is_ok() || v.downcast::<JsUndefined>().is_ok() {
Ok(None)
} else if v.downcast::<JsFunction>().is_ok() {
// Could be parsed as an empty object, so we don't accept JsFunction here
Err(Error("Cannot extract from JsFunction".into()))
} else {
Ok(Some(E::from_value(v, self)?))
}
}
}
}
fn extract_vec<E: FromJsValue>(&mut self, pos: i32) -> LibResult<Vec<E>> {
let vec = self
.argument_opt(pos)
.ok_or_else(|| Error(format!("Argument {} is missing", pos)))?
.downcast::<JsArray>()?
.to_vec(self)?;
vec.into_iter().map(|v| E::from_value(v, self)).collect()
}
fn extract_vec_opt<E: FromJsValue>(&mut self, pos: i32) -> LibResult<Option<Vec<E>>> {
self.argument_opt(pos)
.map(|v| {
let vec = v.downcast::<JsArray>()?.to_vec(self)?;
vec.into_iter()
.map(|v| E::from_value(v, self))
.collect::<LibResult<Vec<_>>>()
})
.map_or(Ok(None), |v| v.map(Some))
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/lib.rs | #![warn(clippy::all)]
// We need to allow these to use !declare_types
#![allow(clippy::unnecessary_wraps)]
#![allow(clippy::upper_case_acronyms)]
extern crate neon;
extern crate neon_serde;
#[macro_use]
extern crate serde;
extern crate tokenizers as tk;
mod decoders;
mod encoding;
mod extraction;
mod models;
mod normalizers;
mod pre_tokenizers;
mod processors;
mod tasks;
mod tokenizer;
mod trainers;
mod utils;
use neon::prelude::*;
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
register_module!(mut m, {
// Tokenizer
tokenizer::register(&mut m, "tokenizer")?;
// Models
models::register(&mut m, "models")?;
// Decoders
decoders::register(&mut m, "decoders")?;
// Processors
processors::register(&mut m, "processors")?;
// Normalizers
normalizers::register(&mut m, "normalizers")?;
// PreTokenizers
pre_tokenizers::register(&mut m, "pre_tokenizers")?;
// Trainers
trainers::register(&mut m, "trainers")?;
// Utils
utils::register(&mut m, "utils")?;
Ok(())
});
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/models.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use crate::tasks::models::{BPEFromFilesTask, WordLevelFromFilesTask, WordPieceFromFilesTask};
use crate::trainers::Trainer;
use neon::prelude::*;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use tk::models::{
bpe::{BpeBuilder, Merges, Vocab},
wordlevel::WordLevelBuilder,
wordpiece::WordPieceBuilder,
ModelWrapper,
};
use tk::Model as ModelTrait;
use tk::Token;
/// Model
#[derive(Clone, Serialize, Deserialize)]
pub struct Model {
#[serde(flatten)]
pub model: Option<Arc<RwLock<ModelWrapper>>>,
}
impl<M> From<M> for Model
where
M: Into<ModelWrapper>,
{
fn from(wrapper: M) -> Self {
Self {
model: Some(Arc::new(RwLock::new(wrapper.into()))),
}
}
}
impl tk::Model for Model {
type Trainer = Trainer;
fn tokenize(&self, sequence: &str) -> tk::Result<Vec<Token>> {
self.model
.as_ref()
.ok_or("Uninitialized Model")?
.read()
.unwrap()
.tokenize(sequence)
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.model.as_ref()?.read().unwrap().token_to_id(token)
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.model.as_ref()?.read().unwrap().id_to_token(id)
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_vocab()
}
fn get_vocab_size(&self) -> usize {
self.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_vocab_size()
}
fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> {
self.model
.as_ref()
.ok_or("Uninitialized Model")?
.read()
.unwrap()
.save(folder, name)
}
fn get_trainer(&self) -> Self::Trainer {
self.model
.as_ref()
.expect("Uninitialized Model")
.read()
.unwrap()
.get_trainer()
.into()
}
}
declare_types! {
pub class JsModel for Model {
init(_) {
// This should not be called from JS
Ok(Model { model: None })
}
method save(mut cx) {
// save(folder: string, name?: string)
let folder = cx.extract::<String>(0)?;
let name = cx.extract_opt::<String>(1)?;
let this = cx.this();
let guard = cx.lock();
let files = this.borrow(&guard)
.model.as_ref().expect("Uninitialized Model")
.read().unwrap()
.save(
Path::new(&folder),
name.as_deref()
)
.map_err(|e| Error(format!("{}", e)))?;
Ok(neon_serde::to_value(&mut cx, &files)?)
}
}
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct BpeOptions {
cache_capacity: Option<usize>,
dropout: Option<f32>,
unk_token: Option<String>,
continuing_subword_prefix: Option<String>,
end_of_word_suffix: Option<String>,
fuse_unk: Option<bool>,
byte_fallback: Option<bool>,
}
impl BpeOptions {
fn apply_to_bpe_builder(self, mut builder: BpeBuilder) -> BpeBuilder {
if let Some(cache_capacity) = self.cache_capacity {
builder = builder.cache_capacity(cache_capacity);
}
if let Some(dropout) = self.dropout {
builder = builder.dropout(dropout);
}
if let Some(unk_token) = self.unk_token {
builder = builder.unk_token(unk_token);
}
if let Some(continuing_subword_prefix) = self.continuing_subword_prefix {
builder = builder.continuing_subword_prefix(continuing_subword_prefix);
}
if let Some(end_of_word_suffix) = self.end_of_word_suffix {
builder = builder.end_of_word_suffix(end_of_word_suffix);
}
if let Some(fuse_unk) = self.fuse_unk {
builder = builder.fuse_unk(fuse_unk);
}
if let Some(byte_fallback) = self.byte_fallback {
builder = builder.byte_fallback(byte_fallback);
}
builder
}
}
/// bpe_init(vocab: {[token: string]: number}, merges: [string, string][], options: {
/// cacheCapacity?: number,
/// dropout?: number,
/// unkToken?: string,
/// continuingSubwordPrefix?: string,
///   endOfWordSuffix?: string,
///   fuseUnk?: bool,
///   byteFallback?: bool
/// })
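/// Example (illustrative JS-side call through the generated native module):
///   native.models_BPE_init({ a: 0, b: 1, ab: 2 }, [["a", "b"]], { unkToken: "<unk>" })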
fn bpe_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<Vocab>(0)?;
let merges = cx.extract::<Merges>(1)?;
let options = cx.extract_opt::<BpeOptions>(2)?.unwrap_or_default();
let mut builder = tk::models::bpe::BPE::builder().vocab_and_merges(vocab, merges);
builder = options.apply_to_bpe_builder(builder);
let model = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(model.into())));
Ok(js_model)
}
/// bpe_from_file(vocab: string, merges: string, options: {
/// cacheCapacity?: number,
/// dropout?: number,
/// unkToken?: string,
/// continuingSubwordPrefix?: string,
///   endOfWordSuffix?: string,
///   fuseUnk?: bool,
///   byteFallback?: bool
/// }, callback)
fn bpe_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<BpeOptions>(2) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(3)?),
// Options were undefined or null
Ok(None) => (BpeOptions::default(), cx.argument::<JsFunction>(3)?),
// Options not specified, callback instead
Err(_) => (BpeOptions::default(), cx.argument::<JsFunction>(2)?),
};
let vocab = cx.extract::<String>(0)?;
let merges = cx.extract::<String>(1)?;
let mut builder = tk::models::bpe::BPE::from_file(&vocab, &merges);
builder = options.apply_to_bpe_builder(builder);
let task = BPEFromFilesTask::new(builder);
task.schedule(callback);
Ok(cx.undefined())
}
/// bpe_empty()
fn bpe_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let bpe = tk::models::bpe::BPE::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(bpe.into())));
Ok(model)
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct WordPieceOptions {
unk_token: Option<String>,
continuing_subword_prefix: Option<String>,
max_input_chars_per_word: Option<usize>,
}
impl WordPieceOptions {
fn apply_to_wordpiece_builder(self, mut builder: WordPieceBuilder) -> WordPieceBuilder {
if let Some(token) = self.unk_token {
builder = builder.unk_token(token);
}
if let Some(prefix) = self.continuing_subword_prefix {
builder = builder.continuing_subword_prefix(prefix);
}
if let Some(max) = self.max_input_chars_per_word {
builder = builder.max_input_chars_per_word(max);
}
builder
}
}
/// wordpiece_init(vocab: {[token: string]: number}, options: {
/// unkToken?: string = "[UNK]",
/// maxInputCharsPerWord?: number = 100,
///   continuingSubwordPrefix?: string = "##",
/// })
fn wordpiece_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<HashMap<String, u32>>(0)?;
let options = cx.extract_opt::<WordPieceOptions>(1)?.unwrap_or_default();
let mut builder = tk::models::wordpiece::WordPiece::builder().vocab(vocab);
builder = options.apply_to_wordpiece_builder(builder);
let model = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(model.into())));
Ok(js_model)
}
/// wordpiece_from_file(vocab: string, options: {
/// unkToken?: string = "[UNK]",
/// maxInputCharsPerWord?: number = 100,
///   continuingSubwordPrefix?: string = "##",
/// }, callback)
fn wordpiece_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<WordPieceOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(2)?),
// Options were undefined or null
Ok(None) => (WordPieceOptions::default(), cx.argument::<JsFunction>(2)?),
// Options not specified, callback instead
Err(_) => (WordPieceOptions::default(), cx.argument::<JsFunction>(1)?),
};
let vocab = cx.extract::<String>(0)?;
let mut builder = tk::models::wordpiece::WordPiece::from_file(&vocab);
builder = options.apply_to_wordpiece_builder(builder);
let task = WordPieceFromFilesTask::new(builder);
task.schedule(callback);
Ok(cx.undefined())
}
/// wordpiece_empty()
fn wordpiece_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let wordpiece = tk::models::wordpiece::WordPiece::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordpiece.into())));
Ok(model)
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct WordLevelOptions {
unk_token: Option<String>,
}
impl WordLevelOptions {
fn apply_to_wordlevel_builder(self, mut builder: WordLevelBuilder) -> WordLevelBuilder {
if let Some(token) = self.unk_token {
builder = builder.unk_token(token);
}
builder
}
}
/// wordlevel_init(vocab: {[token: string]: number}, options: {
/// unkToken?: String,
/// })
fn wordlevel_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<HashMap<String, u32>>(0)?;
let options = cx.extract_opt::<WordLevelOptions>(1)?.unwrap_or_default();
let mut builder = tk::models::wordlevel::WordLevel::builder().vocab(vocab);
builder = options.apply_to_wordlevel_builder(builder);
let model = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(model.into())));
Ok(js_model)
}
/// wordlevel_from_file(vocab: string, options: {
/// unkToken?: String,
/// }, callback)
fn wordlevel_from_file(mut cx: FunctionContext) -> JsResult<JsUndefined> {
let (options, callback) = match cx.extract_opt::<WordLevelOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => (options, cx.argument::<JsFunction>(2)?),
// Options were undefined or null
Ok(None) => (WordLevelOptions::default(), cx.argument::<JsFunction>(2)?),
// Options not specified, callback instead
Err(_) => (WordLevelOptions::default(), cx.argument::<JsFunction>(1)?),
};
let vocab = cx.extract::<String>(0)?;
let mut builder = tk::models::wordlevel::WordLevel::builder().files(vocab);
builder = options.apply_to_wordlevel_builder(builder);
let task = WordLevelFromFilesTask::new(builder);
task.schedule(callback);
Ok(cx.undefined())
}
/// wordlevel_empty()
fn wordlevel_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let wordlevel = tk::models::wordlevel::WordLevel::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordlevel.into())));
Ok(model)
}
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
struct UnigramOptions {
unk_id: Option<usize>,
byte_fallback: Option<bool>,
}
/// unigram_init(vocab: [string, number][], options?: {
///   unkId?: number,
///   byteFallback?: bool
/// })
fn unigram_init(mut cx: FunctionContext) -> JsResult<JsModel> {
let vocab = cx.extract::<Vec<(String, f64)>>(0)?;
let options = cx.extract_opt::<UnigramOptions>(1)?.unwrap_or_default();
let byte_fallback = options.byte_fallback.unwrap_or(false);
let unigram = tk::models::unigram::Unigram::from(vocab, options.unk_id, byte_fallback)
.map_err(|e| Error(e.to_string()))?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(unigram.into())));
Ok(js_model)
}
/// unigram_empty()
fn unigram_empty(mut cx: FunctionContext) -> JsResult<JsModel> {
let mut model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let unigram = tk::models::unigram::Unigram::default();
let guard = cx.lock();
model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(unigram.into())));
Ok(model)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BPE_init", prefix), bpe_init)?;
m.export_function(&format!("{}_BPE_from_file", prefix), bpe_from_file)?;
m.export_function(&format!("{}_BPE_empty", prefix), bpe_empty)?;
m.export_function(&format!("{}_WordPiece_init", prefix), wordpiece_init)?;
m.export_function(
&format!("{}_WordPiece_from_file", prefix),
wordpiece_from_file,
)?;
m.export_function(&format!("{}_WordPiece_empty", prefix), wordpiece_empty)?;
m.export_function(&format!("{}_WordLevel_init", prefix), wordlevel_init)?;
m.export_function(
&format!("{}_WordLevel_from_file", prefix),
wordlevel_from_file,
)?;
m.export_function(&format!("{}_WordLevel_empty", prefix), wordlevel_empty)?;
m.export_function(&format!("{}_Unigram_init", prefix), unigram_init)?;
m.export_function(&format!("{}_Unigram_empty", prefix), unigram_empty)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/normalizers.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use serde::{ser::SerializeStruct, Serialize, Serializer};
use std::sync::Arc;
use tk::normalizers::NormalizerWrapper;
use tk::NormalizedString;
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum JsNormalizerWrapper {
Sequence(Vec<Arc<NormalizerWrapper>>),
Wrapped(Arc<NormalizerWrapper>),
}
impl Serialize for JsNormalizerWrapper {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
S: Serializer,
{
match self {
JsNormalizerWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("normalizers", seq)?;
ser.end()
}
JsNormalizerWrapper::Wrapped(inner) => inner.serialize(serializer),
}
}
}
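// For reference (see the `serialize` test at the bottom of this file): a Sequence
// serializes roughly as {"type":"Sequence","normalizers":[{"type":"NFC"},{"type":"NFKC"}]},
// while a Wrapped normalizer serializes transparently as the inner wrapper itself.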
impl<I> From<I> for JsNormalizerWrapper
where
I: Into<NormalizerWrapper>,
{
fn from(norm: I) -> Self {
JsNormalizerWrapper::Wrapped(Arc::new(norm.into()))
}
}
/// Normalizer
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Normalizer {
#[serde(flatten)]
pub normalizer: Option<JsNormalizerWrapper>,
}
impl tk::Normalizer for Normalizer {
fn normalize(&self, normalized: &mut NormalizedString) -> tk::Result<()> {
match self.normalizer.as_ref().ok_or("Uninitialized Normalizer")? {
JsNormalizerWrapper::Sequence(seq) => {
for norm in seq {
norm.normalize(normalized)?;
}
}
JsNormalizerWrapper::Wrapped(norm) => norm.normalize(normalized)?,
};
Ok(())
}
}
declare_types! {
pub class JsNormalizer for Normalizer {
init(_) {
// This should not be called from JS
Ok(Normalizer { normalizer: None })
}
method normalizeString(mut cx) {
use tk::Normalizer;
let sequence = cx.extract::<String>(0)?;
let mut normalized = NormalizedString::from(sequence);
let this = cx.this();
let guard = cx.lock();
this.borrow(&guard)
.normalize(&mut normalized)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.string(normalized.get()).upcast())
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct BertNormalizerOptions {
clean_text: bool,
handle_chinese_chars: bool,
strip_accents: Option<bool>,
lowercase: bool,
}
impl Default for BertNormalizerOptions {
fn default() -> Self {
Self {
clean_text: true,
handle_chinese_chars: true,
strip_accents: None,
lowercase: true,
}
}
}
/// bert_normalizer(options?: {
/// cleanText?: bool = true,
/// handleChineseChars?: bool = true,
///   stripAccents?: bool (defaults to the value of lowercase),
/// lowercase?: bool = true
/// })
fn bert_normalizer(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let options = cx
.extract_opt::<BertNormalizerOptions>(0)?
.unwrap_or_default();
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(
tk::normalizers::bert::BertNormalizer::new(
options.clean_text,
options.handle_chinese_chars,
options.strip_accents,
options.lowercase,
)
.into(),
);
Ok(normalizer)
}
/// nfd()
fn nfd(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFD.into());
Ok(normalizer)
}
/// nfkd()
fn nfkd(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFKD.into());
Ok(normalizer)
}
/// nfc()
fn nfc(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFC.into());
Ok(normalizer)
}
/// nfkc()
fn nfkc(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::NFKC.into());
Ok(normalizer)
}
/// strip(left?: boolean, right?: boolean)
fn strip(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let left = cx.extract_opt::<bool>(0)?.unwrap_or(true);
let right = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer =
Some(tk::normalizers::strip::Strip::new(left, right).into());
Ok(normalizer)
}
/// prepend(prepend: string)
fn prepend(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let prepend: String = cx.extract::<String>(0)?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer =
Some(tk::normalizers::prepend::Prepend::new(prepend).into());
Ok(normalizer)
}
/// strip_accents()
fn strip_accents(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::strip::StripAccents.into());
Ok(normalizer)
}
/// sequence(normalizers: Normalizer[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let normalizers = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(normalizers.len());
normalizers.into_iter().try_for_each(|normalizer| {
match normalizer.downcast::<JsNormalizer>().or_throw(&mut cx) {
Ok(normalizer) => {
let guard = cx.lock();
let normalizer = normalizer.borrow(&guard).normalizer.clone();
if let Some(normalizer) = normalizer {
match normalizer {
JsNormalizerWrapper::Sequence(seq) => sequence.extend(seq),
JsNormalizerWrapper::Wrapped(inner) => sequence.push(inner),
}
Ok(())
} else {
cx.throw_error("Uninitialized Normalizer")
}
}
Err(e) => Err(e),
}
})?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(JsNormalizerWrapper::Sequence(sequence));
Ok(normalizer)
}
/// lowercase()
fn lowercase(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::utils::Lowercase.into());
Ok(normalizer)
}
/// replace(pattern: string, content: string)
fn replace(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let pattern: String = cx.extract::<String>(0)?;
let content: String = cx.extract::<String>(1)?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(
tk::normalizers::replace::Replace::new(pattern, content)
.map_err(|e| Error(e.to_string()))?
.into(),
);
Ok(normalizer)
}
/// nmt()
fn nmt(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(tk::normalizers::unicode::Nmt.into());
Ok(normalizer)
}
/// precompiled(precompiledCharsmap: number[])
fn precompiled(mut cx: FunctionContext) -> JsResult<JsNormalizer> {
let bytes = cx.extract::<Vec<u8>>(0)?;
let mut normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
normalizer.borrow_mut(&guard).normalizer = Some(
tk::normalizers::precompiled::Precompiled::from(&bytes)
.map_err(|e| Error(e.to_string()))?
.into(),
);
Ok(normalizer)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BertNormalizer", prefix), bert_normalizer)?;
m.export_function(&format!("{}_NFD", prefix), nfd)?;
m.export_function(&format!("{}_NFKD", prefix), nfkd)?;
m.export_function(&format!("{}_NFC", prefix), nfc)?;
m.export_function(&format!("{}_NFKC", prefix), nfkc)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
m.export_function(&format!("{}_Lowercase", prefix), lowercase)?;
m.export_function(&format!("{}_Strip", prefix), strip)?;
m.export_function(&format!("{}_Prepend", prefix), prepend)?;
m.export_function(&format!("{}_StripAccents", prefix), strip_accents)?;
m.export_function(&format!("{}_Nmt", prefix), nmt)?;
m.export_function(&format!("{}_Precompiled", prefix), precompiled)?;
m.export_function(&format!("{}_Replace", prefix), replace)?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use tk::normalizers::unicode::{NFC, NFKC};
use tk::normalizers::utils::Sequence;
use tk::normalizers::NormalizerWrapper;
#[test]
fn serialize() {
let js_wrapped: JsNormalizerWrapper = NFKC.into();
let js_ser = serde_json::to_string(&js_wrapped).unwrap();
let rs_wrapped = NormalizerWrapper::NFKC(NFKC);
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_ser, rs_ser);
let js_norm: Normalizer = serde_json::from_str(&rs_ser).unwrap();
match js_norm.normalizer.unwrap() {
JsNormalizerWrapper::Wrapped(nfc) => match nfc.as_ref() {
NormalizerWrapper::NFKC(_) => {}
_ => panic!("Expected NFKC"),
},
_ => panic!("Expected wrapped, not sequence."),
}
let js_seq: JsNormalizerWrapper = Sequence::new(vec![NFC.into(), NFKC.into()]).into();
let js_wrapper_ser = serde_json::to_string(&js_seq).unwrap();
let rs_wrapped = NormalizerWrapper::Sequence(Sequence::new(vec![NFC.into(), NFKC.into()]));
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
let js_seq = Normalizer {
normalizer: Some(js_seq),
};
let js_ser = serde_json::to_string(&js_seq).unwrap();
assert_eq!(js_wrapper_ser, js_ser);
let rs_seq = Sequence::new(vec![NFC.into(), NFKC.into()]);
let rs_ser = serde_json::to_string(&rs_seq).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/pre_tokenizers.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use std::sync::Arc;
use serde::{ser::SerializeStruct, Serialize, Serializer};
use tk::normalizer::SplitDelimiterBehavior;
use tk::pre_tokenizers::PreTokenizerWrapper;
use tk::PreTokenizedString;
#[derive(Clone)]
struct JsSplitDelimiterBehavior(SplitDelimiterBehavior);
impl FromJsValue for JsSplitDelimiterBehavior {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, _cx: &mut C) -> LibResult<Self> {
let s = from.downcast::<JsString>()?.value();
Ok(Self(match s.as_ref() {
"removed" => Ok(SplitDelimiterBehavior::Removed),
"isolated" => Ok(SplitDelimiterBehavior::Isolated),
"mergedWithPrevious" => Ok(SplitDelimiterBehavior::MergedWithPrevious),
"mergedWithNext" => Ok(SplitDelimiterBehavior::MergedWithNext),
"contiguous" => Ok(SplitDelimiterBehavior::Contiguous),
_ => Err(Error(
"Wrong value for SplitDelimiterBehavior, expected one of: \
`removed, isolated, mergedWithPrevious, mergedWithNext, contiguous`"
.into(),
)),
}?))
}
}
impl From<JsSplitDelimiterBehavior> for SplitDelimiterBehavior {
fn from(v: JsSplitDelimiterBehavior) -> Self {
v.0
}
}
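// Example (illustrative JS-side call): these are the behavior strings accepted by
// the `split` and `punctuation` pre-tokenizers below, e.g.
//   native.pre_tokenizers_Split(" ", "mergedWithPrevious", false)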
#[derive(Clone, Debug, Deserialize)]
#[serde(untagged)]
pub enum JsPreTokenizerWrapper {
Sequence(Vec<Arc<PreTokenizerWrapper>>),
Wrapped(Arc<PreTokenizerWrapper>),
}
impl Serialize for JsPreTokenizerWrapper {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where
S: Serializer,
{
match self {
JsPreTokenizerWrapper::Sequence(seq) => {
let mut ser = serializer.serialize_struct("Sequence", 2)?;
ser.serialize_field("type", "Sequence")?;
ser.serialize_field("pretokenizers", seq)?;
ser.end()
}
JsPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer),
}
}
}
impl<I> From<I> for JsPreTokenizerWrapper
where
I: Into<PreTokenizerWrapper>,
{
fn from(norm: I) -> Self {
JsPreTokenizerWrapper::Wrapped(Arc::new(norm.into()))
}
}
/// PreTokenizers
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct PreTokenizer {
#[serde(flatten)]
pub pretok: Option<JsPreTokenizerWrapper>,
}
impl tk::PreTokenizer for PreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> tk::Result<()> {
match self.pretok.as_ref().ok_or("Uninitialized PreTokenizer")? {
JsPreTokenizerWrapper::Sequence(seq) => {
for pretokenizer in seq {
pretokenizer.pre_tokenize(pretokenized)?;
}
}
JsPreTokenizerWrapper::Wrapped(pretokenizer) => {
pretokenizer.pre_tokenize(pretokenized)?
}
};
Ok(())
}
}
declare_types! {
pub class JsPreTokenizer for PreTokenizer {
init(_) {
// This should not be called from JS
Ok(PreTokenizer { pretok: None })
}
method preTokenizeString(mut cx) {
use tk::PreTokenizer;
let sequence = cx.extract::<String>(0)?;
let mut pretokenized = PreTokenizedString::from(sequence);
let this = cx.this();
let guard = cx.lock();
this.borrow(&guard)
.pre_tokenize(&mut pretokenized)
.map_err(|e| Error(format!("{}", e)))?;
let splits = pretokenized
.get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char)
.into_iter()
.map(|(s, o, _)| (s.to_owned(), o))
.collect::<Vec<_>>();
Ok(neon_serde::to_value(&mut cx, &splits)?.upcast())
}
}
}
/// byte_level(addPrefixSpace: bool = true, useRegex: bool = true)
fn byte_level(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut byte_level = tk::pre_tokenizers::byte_level::ByteLevel::default();
if let Some(add_prefix_space) = cx.extract_opt::<bool>(0)? {
byte_level = byte_level.add_prefix_space(add_prefix_space);
}
if let Some(use_regex) = cx.extract_opt::<bool>(1)? {
byte_level = byte_level.use_regex(use_regex);
}
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(byte_level.into());
Ok(pretok)
}
/// byte_level_alphabet()
fn byte_level_alphabet(mut cx: FunctionContext) -> JsResult<JsValue> {
let chars = tk::pre_tokenizers::byte_level::ByteLevel::alphabet()
.into_iter()
.map(|c| c.to_string())
.collect::<Vec<_>>();
Ok(neon_serde::to_value(&mut cx, &chars)?)
}
/// whitespace()
fn whitespace(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(tk::pre_tokenizers::whitespace::Whitespace {}.into());
Ok(pretok)
}
/// whitespace_split()
fn whitespace_split(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(tk::pre_tokenizers::whitespace::WhitespaceSplit.into());
Ok(pretok)
}
/// bert_pre_tokenizer()
fn bert_pre_tokenizer(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(tk::pre_tokenizers::bert::BertPreTokenizer.into());
Ok(pretok)
}
/// metaspace(replacement: string = '▁', addPrefixSpace: bool = true)
fn metaspace(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let replacement = cx.extract_opt::<char>(0)?.unwrap_or('▁');
let add_prefix_space = cx.extract_opt::<bool>(1)?.unwrap_or(true);
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::metaspace::Metaspace::new(replacement, add_prefix_space).into());
Ok(pretok)
}
/// split(pattern: string, behavior: string, invert: bool = false)
fn split(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let pattern: String = cx.extract::<String>(0)?;
let behavior: JsSplitDelimiterBehavior = cx.extract::<JsSplitDelimiterBehavior>(1)?;
let invert: bool = cx.extract_opt::<bool>(2)?.unwrap_or(false);
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(
tk::pre_tokenizers::split::Split::new(pattern, behavior.into(), invert)
.map_err(|e| Error(e.to_string()))?
.into(),
);
Ok(pretok)
}
/// punctuation()
fn punctuation(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let behavior: JsSplitDelimiterBehavior = cx
.extract_opt::<JsSplitDelimiterBehavior>(0)?
.unwrap_or(JsSplitDelimiterBehavior(SplitDelimiterBehavior::Isolated));
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::punctuation::Punctuation::new(behavior.into()).into());
Ok(pretok)
}
/// sequence(preTokenizers: PreTokenizer[])
fn sequence(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let pretokenizers = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(pretokenizers.len());
pretokenizers.into_iter().try_for_each(|pretokenizer| {
match pretokenizer.downcast::<JsPreTokenizer>().or_throw(&mut cx) {
Ok(pretokenizer) => {
let guard = cx.lock();
let pretok = pretokenizer.borrow(&guard).pretok.clone();
if let Some(pretokenizer) = pretok {
match pretokenizer {
JsPreTokenizerWrapper::Sequence(seq) => sequence.extend(seq),
JsPreTokenizerWrapper::Wrapped(inner) => sequence.push(inner),
}
Ok(())
} else {
cx.throw_error("Uninitialized PreTokenizer")
}
}
Err(e) => Err(e),
}
})?;
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok = Some(JsPreTokenizerWrapper::Sequence(sequence));
Ok(pretok)
}
/// char_delimiter_split(delimiter: string)
fn char_delimiter_split(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let delimiter = cx.extract::<char>(0)?;
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::delimiter::CharDelimiterSplit::new(delimiter).into());
Ok(pretok)
}
/// digits(individualDigits: bool = false)
fn digits(mut cx: FunctionContext) -> JsResult<JsPreTokenizer> {
let individual_digits = cx.extract_opt::<bool>(0)?.unwrap_or(false);
let mut pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
pretok.borrow_mut(&guard).pretok =
Some(tk::pre_tokenizers::digits::Digits::new(individual_digits).into());
Ok(pretok)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_ByteLevel", prefix), byte_level)?;
m.export_function(
&format!("{}_ByteLevel_Alphabet", prefix),
byte_level_alphabet,
)?;
m.export_function(&format!("{}_Whitespace", prefix), whitespace)?;
m.export_function(&format!("{}_WhitespaceSplit", prefix), whitespace_split)?;
m.export_function(&format!("{}_BertPreTokenizer", prefix), bert_pre_tokenizer)?;
m.export_function(&format!("{}_Metaspace", prefix), metaspace)?;
m.export_function(&format!("{}_Split", prefix), split)?;
m.export_function(
&format!("{}_CharDelimiterSplit", prefix),
char_delimiter_split,
)?;
m.export_function(&format!("{}_Punctuation", prefix), punctuation)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
m.export_function(&format!("{}_Digits", prefix), digits)?;
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
use tk::pre_tokenizers::sequence::Sequence;
use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use tk::pre_tokenizers::PreTokenizerWrapper;
#[test]
fn serialize() {
let js_wrapped: JsPreTokenizerWrapper = Whitespace {}.into();
let js_ser = serde_json::to_string(&js_wrapped).unwrap();
let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {});
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_ser, rs_ser);
let js_pretok: PreTokenizer = serde_json::from_str(&rs_ser).unwrap();
match js_pretok.pretok.unwrap() {
JsPreTokenizerWrapper::Wrapped(pretok) => match pretok.as_ref() {
PreTokenizerWrapper::Whitespace(_) => {}
_ => panic!("Expected Whitespace"),
},
_ => panic!("Expected wrapped, not sequence."),
}
let js_seq: JsPreTokenizerWrapper =
Sequence::new(vec![WhitespaceSplit.into(), Whitespace {}.into()]).into();
let js_wrapper_ser = serde_json::to_string(&js_seq).unwrap();
let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![
WhitespaceSplit.into(),
Whitespace {}.into(),
]));
let rs_ser = serde_json::to_string(&rs_wrapped).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
let js_seq = PreTokenizer {
pretok: Some(js_seq),
};
let js_ser = serde_json::to_string(&js_seq).unwrap();
assert_eq!(js_wrapper_ser, js_ser);
let rs_seq = Sequence::new(vec![WhitespaceSplit.into(), Whitespace {}.into()]);
let rs_ser = serde_json::to_string(&rs_seq).unwrap();
assert_eq!(js_wrapper_ser, rs_ser);
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/processors.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use neon::prelude::*;
use std::sync::Arc;
use tk::processors::PostProcessorWrapper;
use tk::Encoding;
/// Processor
#[derive(Clone, Serialize, Deserialize)]
pub struct Processor {
#[serde(flatten)]
pub processor: Option<Arc<PostProcessorWrapper>>,
}
impl tk::PostProcessor for Processor {
fn added_tokens(&self, is_pair: bool) -> usize {
self.processor
.as_ref()
.expect("Uninitialized PostProcessor")
.added_tokens(is_pair)
}
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> tk::Result<Vec<Encoding>> {
self.processor
.as_ref()
.ok_or("Uninitialized PostProcessor")?
.process_encodings(encodings, add_special_tokens)
}
}
declare_types! {
pub class JsPostProcessor for Processor {
init(_) {
// This should not be called from JS
Ok(Processor { processor: None })
}
}
}
/// bert_processing(sep: [String, number], cls: [String, number])
fn bert_processing(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let sep = cx.extract::<(String, u32)>(0)?;
let cls = cx.extract::<(String, u32)>(1)?;
let mut processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
processor.borrow_mut(&guard).processor = Some(Arc::new(
tk::processors::bert::BertProcessing::new(sep, cls).into(),
));
Ok(processor)
}
/// roberta_processing(
/// sep: [String, number],
/// cls: [String, number],
/// trimOffsets: boolean = true,
/// addPrefixSpace: boolean = true
/// )
fn roberta_processing(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let sep = cx.extract::<(String, u32)>(0)?;
let cls = cx.extract::<(String, u32)>(1)?;
let mut processor = tk::processors::roberta::RobertaProcessing::new(sep, cls);
if let Some(trim_offsets) = cx.extract_opt::<bool>(2)? {
processor = processor.trim_offsets(trim_offsets);
}
if let Some(add_prefix_space) = cx.extract_opt::<bool>(3)? {
processor = processor.add_prefix_space(add_prefix_space);
}
let mut js_processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_processor.borrow_mut(&guard).processor = Some(Arc::new(processor.into()));
Ok(js_processor)
}
/// bytelevel(trimOffsets?: boolean)
fn bytelevel(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let mut byte_level = tk::processors::byte_level::ByteLevel::default();
if let Some(trim_offsets) = cx.extract_opt::<bool>(0)? {
byte_level = byte_level.trim_offsets(trim_offsets);
}
let mut processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
processor.borrow_mut(&guard).processor = Some(Arc::new(byte_level.into()));
Ok(processor)
}
/// template_processing(
/// single: String,
/// pair?: String,
/// special_tokens?: [String, number][] = [],
/// )
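/// Example (illustrative JS-side call, using the template syntax of the underlying
/// `TemplateProcessing` builder, where $A/$B stand for the first/second sequence):
///   native.processors_TemplateProcessing(
///     "[CLS] $A [SEP]",
///     "[CLS] $A [SEP] $B:1 [SEP]",
///     [["[CLS]", 101], ["[SEP]", 102]]
///   )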
fn template_processing(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
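    // `pair` is optional, so the special tokens array may arrive as either the 2nd
    // or the 3rd argument; scan both positions before falling back to an empty list.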
let mut i = 1;
let special_tokens = loop {
if let Ok(Some(spe)) = cx.extract_opt::<Vec<(String, u32)>>(i) {
break spe;
}
i += 1;
if i == 3 {
break vec![];
}
};
let single = cx.extract::<String>(0)?;
let pair = cx.extract_opt::<String>(1)?;
let mut builder = tk::processors::template::TemplateProcessing::builder();
builder.try_single(single).map_err(Error)?;
builder.special_tokens(special_tokens);
if let Some(pair) = pair {
builder.try_pair(pair).map_err(Error)?;
}
let processor = builder.build().map_err(|e| Error(e.to_string()))?;
let mut js_processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_processor.borrow_mut(&guard).processor = Some(Arc::new(processor.into()));
Ok(js_processor)
}
/// sequence(processors: List[Processor])
fn sequence(mut cx: FunctionContext) -> JsResult<JsPostProcessor> {
let processors = cx.argument::<JsArray>(0)?.to_vec(&mut cx)?;
let mut sequence = Vec::with_capacity(processors.len());
processors.into_iter().try_for_each(|processor| {
match processor.downcast::<JsPostProcessor>().or_throw(&mut cx) {
Ok(processor) => {
let guard = cx.lock();
if let Some(processor_arc) = &processor.borrow(&guard).processor {
let processor: PostProcessorWrapper = (**processor_arc).clone();
sequence.push(processor);
}
Ok(())
}
Err(e) => Err(e),
}
})?;
    let mut js_processor = JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
    let guard = cx.lock();
    js_processor.borrow_mut(&guard).processor = Some(Arc::new(PostProcessorWrapper::Sequence(
        tk::processors::sequence::Sequence::new(sequence),
    )));
    Ok(js_processor)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BertProcessing", prefix), bert_processing)?;
m.export_function(&format!("{}_RobertaProcessing", prefix), roberta_processing)?;
m.export_function(&format!("{}_ByteLevel", prefix), bytelevel)?;
m.export_function(
&format!("{}_TemplateProcessing", prefix),
template_processing,
)?;
m.export_function(&format!("{}_Sequence", prefix), sequence)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/tokenizer.rs | extern crate tokenizers as tk;
use crate::decoders::{Decoder, JsDecoder};
use crate::encoding::JsEncoding;
use crate::extraction::*;
use crate::models::{JsModel, Model};
use crate::normalizers::{JsNormalizer, Normalizer};
use crate::pre_tokenizers::{JsPreTokenizer, PreTokenizer};
use crate::processors::{JsPostProcessor, Processor};
use crate::tasks::tokenizer::{DecodeTask, EncodeTask};
use crate::trainers::JsTrainer;
use neon::prelude::*;
use std::sync::{Arc, RwLock};
use tk::Model as ModelTrait;
use tk::TokenizerImpl;
// AddedToken
#[derive(Clone)]
pub struct AddedToken {
pub token: tk::AddedToken,
}
impl From<AddedToken> for tk::AddedToken {
fn from(v: AddedToken) -> Self {
v.token
}
}
#[allow(non_snake_case)]
#[derive(Debug, Default, Serialize, Deserialize)]
struct AddedTokenOptions {
singleWord: Option<bool>,
leftStrip: Option<bool>,
rightStrip: Option<bool>,
normalized: Option<bool>,
}
impl AddedTokenOptions {
fn into_added_token(self, content: String, special: bool) -> tk::AddedToken {
let mut token = tk::AddedToken::from(content, special);
if let Some(sw) = self.singleWord {
token = token.single_word(sw);
}
if let Some(ls) = self.leftStrip {
token = token.lstrip(ls);
}
if let Some(rs) = self.rightStrip {
token = token.rstrip(rs);
}
if let Some(n) = self.normalized {
token = token.normalized(n);
}
token
}
}
declare_types! {
pub class JsAddedToken for AddedToken {
init(mut cx) {
// init(
// content: string,
// special: boolean,
// options?: {
// singleWord?: boolean = false,
// leftStrip?: boolean = false,
// rightStrip?: boolean = false
// normalized?: boolean = true,
// }
// )
let content = cx.extract::<String>(0)?;
let special = cx.extract::<bool>(1)?;
let token = cx.extract_opt::<AddedTokenOptions>(2)?
.unwrap_or_default()
.into_added_token(content, special);
Ok(AddedToken { token })
}
method getContent(mut cx) {
// getContent()
let this = cx.this();
let content = {
let guard = cx.lock();
let token = this.borrow(&guard);
token.token.content.clone()
};
Ok(cx.string(content).upcast())
}
}
}
impl FromJsValue for AddedToken {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(token) = from.downcast::<JsString>() {
Ok(AddedToken {
token: tk::AddedToken::from(token.value(), false),
})
} else if let Ok(token) = from.downcast::<JsAddedToken>() {
let guard = cx.lock();
let token = token.borrow(&guard);
Ok(token.clone())
} else {
Err(Error("Expected `string | AddedToken`".into()))
}
}
}
struct SpecialToken(tk::AddedToken);
impl FromJsValue for SpecialToken {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(token) = from.downcast::<JsString>() {
Ok(SpecialToken(tk::AddedToken::from(token.value(), true)))
} else if let Ok(token) = from.downcast::<JsAddedToken>() {
let guard = cx.lock();
let token = token.borrow(&guard);
Ok(SpecialToken(token.token.clone()))
} else {
Err(Error("Expected `string | AddedToken`".into()))
}
}
}
// encode & encodeBatch types
struct TextInputSequence<'s>(tk::InputSequence<'s>);
struct PreTokenizedInputSequence<'s>(tk::InputSequence<'s>);
impl FromJsValue for PreTokenizedInputSequence<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
let sequence = from
.downcast::<JsArray>()?
.to_vec(cx)?
.into_iter()
.map(|v| Ok(v.downcast::<JsString>()?.value()))
.collect::<LibResult<Vec<_>>>()?;
Ok(Self(sequence.into()))
}
}
impl<'s> From<PreTokenizedInputSequence<'s>> for tk::InputSequence<'s> {
fn from(v: PreTokenizedInputSequence<'s>) -> Self {
v.0
}
}
impl FromJsValue for TextInputSequence<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, _cx: &mut C) -> LibResult<Self> {
Ok(Self(from.downcast::<JsString>()?.value().into()))
}
}
impl<'s> From<TextInputSequence<'s>> for tk::InputSequence<'s> {
fn from(v: TextInputSequence<'s>) -> Self {
v.0
}
}
struct TextEncodeInput<'s>(tk::EncodeInput<'s>);
struct PreTokenizedEncodeInput<'s>(tk::EncodeInput<'s>);
impl FromJsValue for PreTokenizedEncodeInput<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
// If array is of size 2, and the first element is also an array, we'll parse a pair
let array = from.downcast::<JsArray>()?;
let is_pair = array.len() == 2
&& array
.get(cx, 0)
.map_or(false, |a| a.downcast::<JsArray>().is_ok());
if is_pair {
let first_seq: tk::InputSequence =
PreTokenizedInputSequence::from_value(array.get(cx, 0)?, cx)?.into();
let pair_seq: tk::InputSequence =
PreTokenizedInputSequence::from_value(array.get(cx, 1)?, cx)?.into();
Ok(Self((first_seq, pair_seq).into()))
} else {
Ok(Self(
PreTokenizedInputSequence::from_value(from, cx)?.into(),
))
}
}
}
impl<'s> From<PreTokenizedEncodeInput<'s>> for tk::EncodeInput<'s> {
fn from(v: PreTokenizedEncodeInput<'s>) -> Self {
v.0
}
}
impl FromJsValue for TextEncodeInput<'_> {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
// If we get an array, it's a pair of sequences
if let Ok(array) = from.downcast::<JsArray>() {
if array.len() != 2 {
return Err(Error(
"TextEncodeInput should be \
`TextInputSequence | [TextInputSequence, TextInputSequence]`"
.into(),
));
}
let first_seq: tk::InputSequence =
TextInputSequence::from_value(array.get(cx, 0)?, cx)?.into();
let pair_seq: tk::InputSequence =
TextInputSequence::from_value(array.get(cx, 1)?, cx)?.into();
Ok(Self((first_seq, pair_seq).into()))
} else {
Ok(Self(TextInputSequence::from_value(from, cx)?.into()))
}
}
}
impl<'s> From<TextEncodeInput<'s>> for tk::EncodeInput<'s> {
fn from(v: TextEncodeInput<'s>) -> Self {
v.0
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EncodeOptions {
#[serde(default)]
is_pretokenized: bool,
#[serde(default)]
add_special_tokens: bool,
}
impl Default for EncodeOptions {
fn default() -> Self {
Self {
is_pretokenized: false,
add_special_tokens: true,
}
}
}
// Encoding
#[repr(transparent)]
pub struct Encoding(tk::Encoding);
impl FromJsValue for Encoding {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
from.downcast::<JsEncoding>()
.map(|e| {
let guard = cx.lock();
let enc = e.borrow(&guard).encoding.clone();
Self(enc.expect("Uninitialized Encoding"))
})
.map_err(|_| Error("Expected Encoding".into()))
}
}
impl From<Encoding> for tk::Encoding {
fn from(v: Encoding) -> Self {
v.0
}
}
// Truncation
#[derive(Serialize, Deserialize)]
#[serde(remote = "tk::TruncationStrategy", rename_all = "snake_case")]
pub enum TruncationStrategyDef {
LongestFirst,
OnlyFirst,
OnlySecond,
}
#[derive(Serialize, Deserialize)]
#[serde(remote = "tk::TruncationDirection", rename_all = "camelCase")]
pub enum TruncationDirectionDef {
Left,
Right,
}
#[derive(Serialize, Deserialize)]
#[serde(
remote = "tk::TruncationParams",
rename_all = "camelCase",
default = "tk::TruncationParams::default"
)]
pub struct TruncationParamsDef {
max_length: usize,
#[serde(with = "TruncationStrategyDef")]
strategy: tk::TruncationStrategy,
#[serde(with = "TruncationDirectionDef")]
direction: tk::TruncationDirection,
stride: usize,
}
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
pub struct TruncationParams(#[serde(with = "TruncationParamsDef")] pub tk::TruncationParams);
// Padding
#[derive(Serialize, Deserialize)]
#[serde(remote = "tk::PaddingDirection", rename_all = "camelCase")]
pub enum PaddingDirectionDef {
Left,
Right,
}
// Here we define a custom method of serializing and deserializing a PaddingStrategy because
// we want it to actually be very different from the classic representation.
// In Rust, we use an enum to define the strategy, but in JS, we just want to have an optional
// length number => If defined we use the Fixed(n) strategy and otherwise the BatchLongest.
pub mod padding_strategy_serde {
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Strategy {
#[serde(skip_serializing_if = "Option::is_none")]
max_length: Option<usize>,
}
pub fn serialize<S>(value: &tk::PaddingStrategy, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = Strategy {
max_length: match value {
tk::PaddingStrategy::BatchLongest => None,
tk::PaddingStrategy::Fixed(s) => Some(*s),
},
};
s.serialize(serializer)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<tk::PaddingStrategy, D::Error>
where
D: Deserializer<'de>,
{
let v = Strategy::deserialize(deserializer)?;
if let Some(length) = v.max_length {
Ok(tk::PaddingStrategy::Fixed(length))
} else {
Ok(tk::PaddingStrategy::BatchLongest)
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(
remote = "tk::PaddingParams",
rename_all = "camelCase",
default = "tk::PaddingParams::default"
)]
pub struct PaddingParamsDef {
#[serde(flatten, with = "padding_strategy_serde")]
strategy: tk::PaddingStrategy,
#[serde(with = "PaddingDirectionDef")]
direction: tk::PaddingDirection,
#[serde(skip_serializing_if = "Option::is_none")]
pad_to_multiple_of: Option<usize>,
pad_id: u32,
pad_type_id: u32,
pad_token: String,
}
#[derive(Serialize, Deserialize)]
#[serde(transparent)]
pub struct PaddingParams(#[serde(with = "PaddingParamsDef")] pub tk::PaddingParams);
type RsTokenizer = TokenizerImpl<Model, Normalizer, PreTokenizer, Processor, Decoder>;
/// Tokenizer
#[derive(Clone)]
pub struct Tokenizer {
pub(crate) tokenizer: Arc<RwLock<RsTokenizer>>,
}
declare_types! {
pub class JsTokenizer for Tokenizer {
init(mut cx) {
// init(model: JsModel)
let model = cx.argument::<JsModel>(0)?;
let guard = cx.lock();
let model = model.borrow(&guard).clone();
Ok(Tokenizer {
tokenizer: Arc::new(RwLock::new(TokenizerImpl::new(model)))
})
}
method toString(mut cx) {
// toString(pretty?: bool): string
let pretty = cx.extract_opt::<bool>(0)?.unwrap_or(false);
let this = cx.this();
let guard = cx.lock();
let s = this.borrow(&guard)
.tokenizer.read().unwrap()
.to_string(pretty)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.string(s).upcast())
}
method save(mut cx) {
            // save(path: string, pretty?: bool): undefined
let path = cx.extract::<String>(0)?;
let pretty = cx.extract_opt::<bool>(1)?.unwrap_or(false);
let this = cx.this();
let guard = cx.lock();
this.borrow(&guard)
.tokenizer.read().unwrap()
.save(&path, pretty)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.undefined().upcast())
}
method runningTasks(mut cx) {
// runningTasks(): number
let this = cx.this();
let guard = cx.lock();
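            // Every scheduled task holds a clone of the tokenizer's `Arc`, so the number of
            // in-flight tasks is the strong count minus this instance's own reference.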
let count = std::sync::Arc::strong_count(&this.borrow(&guard).tokenizer);
let running = if count > 0 { count - 1 } else { 0 };
Ok(cx.number(running as f64).upcast())
}
method getVocab(mut cx) {
// getVocab(withAddedTokens: bool = true)
let with_added_tokens = cx.extract_opt::<bool>(0)?.unwrap_or(true);
let this = cx.this();
let guard = cx.lock();
let vocab = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_vocab(with_added_tokens);
let js_vocab = JsObject::new(&mut cx);
for (token, id) in vocab {
let js_token = cx.string(token);
let js_id = cx.number(id as f64);
js_vocab.set(&mut cx, js_token, js_id)?;
}
Ok(js_vocab.upcast())
}
method getVocabSize(mut cx) {
// getVocabSize(withAddedTokens: bool = true)
let with_added_tokens = cx.extract_opt::<bool>(0)?.unwrap_or(true);
let this = cx.this();
let guard = cx.lock();
let size = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_vocab_size(with_added_tokens);
Ok(cx.number(size as f64).upcast())
}
method encode(mut cx) {
// type InputSequence = string | string[];
// encode(
// sentence: InputSequence,
// pair?: InputSequence,
// options?: {
// addSpecialTokens?: boolean,
// isPretokenized?: boolean,
// } | (err, encoding) -> void,
// __callback: (err, encoding) -> void
// )
            // Start by extracting options if they exist (options can be in slot 1 or 2)
let mut i = 1;
let (options, option_index) = loop {
if let Ok(Some(opts)) = cx.extract_opt::<EncodeOptions>(i){
break (opts, Some(i));
}
i += 1;
if i == 3{
break (EncodeOptions::default(), None)
}
};
// Then we extract the first input sentence
let sentence: tk::InputSequence = if options.is_pretokenized {
cx.extract::<PreTokenizedInputSequence>(0)
.map_err(|_| Error("encode with isPretokenized=true expect string[]".into()))?
.into()
} else {
cx.extract::<TextInputSequence>(0)
.map_err(|_| Error("encode with isPreTokenized=false expect string".into()))?
.into()
};
let (pair, has_pair_arg): (Option<tk::InputSequence>, bool) = if options.is_pretokenized {
if let Ok(second) = cx.extract_opt::<PreTokenizedInputSequence>(1){
(second.map(|v| v.into()), true)
}else{
(None, false)
}
} else if let Ok(second) = cx.extract_opt::<TextInputSequence>(1){
(second.map(|v| v.into()), true)
}else{
(None, false)
};
// Find the callback index.
let last_index = if let Some(option_index) = option_index{
option_index + 1
}else if has_pair_arg{
2
}else{
1
};
let callback = cx.argument::<JsFunction>(last_index)?;
let input: tk::EncodeInput = match pair {
Some(pair) => (sentence, pair).into(),
None => sentence.into()
};
let this = cx.this();
let guard = cx.lock();
let task = EncodeTask::Single(
this.borrow(&guard).clone(), Some(input), options.add_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method encodeBatch(mut cx) {
// type InputSequence = string | string[];
// type EncodeInput = (InputSequence | [InputSequence, InputSequence])[]
// encode_batch(
// inputs: EncodeInput[],
// options?: {
// addSpecialTokens?: boolean,
// isPretokenized?: boolean,
// } | (err, encodings) -> void,
// __callback: (err, encodings) -> void
// )
// Start by extracting options and callback
let (options, callback) = match cx.extract_opt::<EncodeOptions>(1) {
// Options were there, and extracted
Ok(Some(options)) => {
(options, cx.argument::<JsFunction>(2)?)
},
// Options were undefined or null
Ok(None) => {
(EncodeOptions::default(), cx.argument::<JsFunction>(2)?)
}
// Options not specified, callback instead
Err(_) => {
(EncodeOptions::default(), cx.argument::<JsFunction>(1)?)
}
};
let inputs: Vec<tk::EncodeInput> = if options.is_pretokenized {
cx.extract_vec::<PreTokenizedEncodeInput>(0)
.map_err(|_| Error(
"encodeBatch with isPretokenized=true expects input to be `EncodeInput[]` \
with `EncodeInput = string[] | [string[], string[]]`".into()))?
.into_iter().map(|v| v.into()).collect()
} else {
cx.extract_vec::<TextEncodeInput>(0)
.map_err(|_| Error(
"encodeBatch with isPretokenized=false expects input to be `EncodeInput[]` \
with `EncodeInput = string | [string, string]`".into()))?
.into_iter().map(|v| v.into()).collect()
};
let this = cx.this();
let guard = cx.lock();
let task = EncodeTask::Batch(
this.borrow(&guard).clone(), Some(inputs), options.add_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method decode(mut cx) {
// decode(ids: number[], skipSpecialTokens: bool, callback)
let ids = cx.extract_vec::<u32>(0)?;
let (skip_special_tokens, callback_index) = if let Ok(skip_special_tokens) = cx.extract::<bool>(1){
(skip_special_tokens, 2)
}else{
(false, 1)
};
let callback = cx.argument::<JsFunction>(callback_index)?;
let this = cx.this();
let guard = cx.lock();
let task = DecodeTask::Single(
this.borrow(&guard).clone(), ids, skip_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method decodeBatch(mut cx) {
// decodeBatch(sequences: number[][], skipSpecialTokens: bool, callback)
let sentences = cx.extract_vec::<Vec<u32>>(0)?;
let (skip_special_tokens, callback_index) = if let Ok(skip_special_tokens) = cx.extract::<bool>(1){
(skip_special_tokens, 2)
}else{
(false, 1)
};
let callback = cx.argument::<JsFunction>(callback_index)?;
let this = cx.this();
let guard = cx.lock();
let task = DecodeTask::Batch(
this.borrow(&guard).clone(), sentences, skip_special_tokens
);
task.schedule(callback);
Ok(cx.undefined().upcast())
}
method tokenToId(mut cx) {
// tokenToId(token: string): number | undefined
let token = cx.extract::<String>(0)?;
let this = cx.this();
let guard = cx.lock();
let id = this.borrow(&guard)
.tokenizer.read().unwrap()
.token_to_id(&token);
if let Some(id) = id {
Ok(cx.number(id).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method idToToken(mut cx) {
// idToToken(id: number): string | undefined
let id = cx.extract::<u32>(0)?;
let this = cx.this();
let guard = cx.lock();
let token = this.borrow(&guard)
.tokenizer.read().unwrap()
.id_to_token(id);
if let Some(token) = token {
Ok(cx.string(token).upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method addTokens(mut cx) {
// addTokens(tokens: (string | AddedToken)[]): number
let tokens = cx.extract_vec::<AddedToken>(0)?
.into_iter()
.map(|token| token.into())
.collect::<Vec<_>>();
let mut this = cx.this();
let guard = cx.lock();
let added = this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.add_tokens(&tokens);
Ok(cx.number(added as f64).upcast())
}
method addSpecialTokens(mut cx) {
// addSpecialTokens(tokens: (string | AddedToken)[]): number
let tokens = cx.extract_vec::<SpecialToken>(0)?
.into_iter()
.map(|token| token.0)
.collect::<Vec<_>>();
let mut this = cx.this();
let guard = cx.lock();
let added = this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.add_special_tokens(&tokens);
Ok(cx.number(added as f64).upcast())
}
method setTruncation(mut cx) {
// setTruncation(
// maxLength: number,
// options?: { stride?: number; strategy?: string }
// )
let max_length = cx.extract::<usize>(0)?;
let mut options = cx.extract_opt::<TruncationParams>(1)?
.map_or_else(tk::TruncationParams::default, |p| p.0);
options.max_length = max_length;
let params_obj = neon_serde::to_value(&mut cx, &TruncationParams(options.clone()))?;
let mut this = cx.this();
let guard = cx.lock();
let _ = this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_truncation(Some(options));
Ok(params_obj)
}
method disableTruncation(mut cx) {
// disableTruncation()
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_truncation(None);
Ok(cx.undefined().upcast())
}
method setPadding(mut cx) {
// setPadding(options?: {
// direction?: "left" | "right",
// padId?: number,
// padTypeId?: number,
// padToken?: string,
// maxLength?: number
// })
let options = cx.extract_opt::<PaddingParams>(0)?
.map_or_else(tk::PaddingParams::default, |p| p.0);
let params_obj = neon_serde::to_value(&mut cx, &PaddingParams(options.clone()))?;
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_padding(Some(options));
Ok(params_obj)
}
method disablePadding(mut cx) {
// disablePadding()
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_padding(None);
Ok(cx.undefined().upcast())
}
method train(mut cx) {
// train(files: string[], trainer?: Trainer)
let files = cx.extract::<Vec<String>>(0)?;
let mut trainer = if let Some(val) = cx.argument_opt(1) {
let js_trainer = val.downcast::<JsTrainer>().or_throw(&mut cx)?;
let guard = cx.lock();
let trainer = js_trainer.borrow(&guard).clone();
trainer
} else {
let this = cx.this();
let guard = cx.lock();
let trainer = this.borrow(&guard).tokenizer.read().unwrap().get_model().get_trainer();
trainer
};
let mut this = cx.this();
let guard = cx.lock();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.train_from_files(&mut trainer, files)
.map_err(|e| Error(format!("{}", e)))?;
Ok(cx.undefined().upcast())
}
method postProcess(mut cx) {
// postProcess(
// encoding: Encoding,
// pair?: Encoding,
// addSpecialTokens: boolean = true
// ): Encoding
let encoding = cx.extract::<Encoding>(0)?;
let pair = cx.extract_opt::<Encoding>(1)?;
let add_special_tokens = cx.extract_opt::<bool>(2)?.unwrap_or(true);
let this = cx.this();
let guard = cx.lock();
let encoding = this.borrow(&guard)
.tokenizer.read().unwrap()
.post_process(encoding.into(), pair.map(|p| p.into()), add_special_tokens)
.map_err(|e| Error(format!("{}", e)))?;
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(encoding);
Ok(js_encoding.upcast())
}
method getModel(mut cx) {
// getModel(): Model
let this = cx.this();
let guard = cx.lock();
let model = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_model()
.model
.clone();
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = model;
Ok(js_model.upcast())
}
method setModel(mut cx) {
// setModel(model: JsModel)
let model = cx.argument::<JsModel>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let model = model.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_model(model);
Ok(cx.undefined().upcast())
}
method getNormalizer(mut cx) {
// getNormalizer(): Normalizer | undefined
let this = cx.this();
let guard = cx.lock();
let normalizer = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_normalizer().cloned();
if let Some(normalizer) = normalizer {
let mut js_normalizer = JsNormalizer::new::<_, JsNormalizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_normalizer.borrow_mut(&guard).normalizer = normalizer.normalizer;
Ok(js_normalizer.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setNormalizer(mut cx) {
// setNormalizer(normalizer: Normalizer)
let normalizer = cx.argument::<JsNormalizer>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let normalizer = normalizer.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_normalizer(normalizer);
Ok(cx.undefined().upcast())
}
method getPreTokenizer(mut cx) {
// getPreTokenizer(): PreTokenizer | undefined
let this = cx.this();
let guard = cx.lock();
let pretok = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_pre_tokenizer().cloned();
if let Some(pretok) = pretok {
let mut js_pretok = JsPreTokenizer::new::<_, JsPreTokenizer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_pretok.borrow_mut(&guard).pretok = pretok.pretok;
Ok(js_pretok.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setPreTokenizer(mut cx) {
// setPreTokenizer(pretokenizer: PreTokenizer)
let pretok = cx.argument::<JsPreTokenizer>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let pretok = pretok.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_pre_tokenizer(pretok);
Ok(cx.undefined().upcast())
}
method getPostProcessor(mut cx) {
// getPostProcessor(): PostProcessor | undefined
let this = cx.this();
let guard = cx.lock();
let processor = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_post_processor().cloned();
if let Some(processor) = processor {
let mut js_processor =
JsPostProcessor::new::<_, JsPostProcessor, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_processor.borrow_mut(&guard).processor = processor.processor;
Ok(js_processor.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setPostProcessor(mut cx) {
// setPostProcessor(processor: PostProcessor)
let processor = cx.argument::<JsPostProcessor>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let processor = processor.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_post_processor(processor);
Ok(cx.undefined().upcast())
}
method getDecoder(mut cx) {
// getDecoder(): Decoder | undefined
let this = cx.this();
let guard = cx.lock();
let decoder = this.borrow(&guard)
.tokenizer.read().unwrap()
.get_decoder().cloned();
if let Some(decoder) = decoder {
let mut js_decoder = JsDecoder::new::<_, JsDecoder, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_decoder.borrow_mut(&guard).decoder = decoder.decoder;
Ok(js_decoder.upcast())
} else {
Ok(cx.undefined().upcast())
}
}
method setDecoder(mut cx) {
// setDecoder(decoder: Decoder)
let decoder = cx.argument::<JsDecoder>(0)?;
let mut this = cx.this();
let guard = cx.lock();
let decoder = decoder.borrow(&guard).clone();
this.borrow_mut(&guard)
.tokenizer.write().unwrap()
.with_decoder(decoder);
Ok(cx.undefined().upcast())
}
}
}
pub fn tokenizer_from_string(mut cx: FunctionContext) -> JsResult<JsTokenizer> {
let s = cx.extract::<String>(0)?;
let tokenizer: tk::tokenizer::TokenizerImpl<
Model,
Normalizer,
PreTokenizer,
Processor,
Decoder,
> = s.parse().map_err(|e| Error(format!("{}", e)))?;
let js_model: Handle<JsModel> = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let mut js_tokenizer = JsTokenizer::new(&mut cx, vec![js_model])?;
let guard = cx.lock();
js_tokenizer.borrow_mut(&guard).tokenizer = Arc::new(RwLock::new(tokenizer));
Ok(js_tokenizer)
}
pub fn tokenizer_from_file(mut cx: FunctionContext) -> JsResult<JsTokenizer> {
let s = cx.extract::<String>(0)?;
let tokenizer = tk::tokenizer::TokenizerImpl::from_file(s)
.map_err(|e| Error(format!("Error loading from file{}", e)))?;
let js_model: Handle<JsModel> = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let mut js_tokenizer = JsTokenizer::new(&mut cx, vec![js_model])?;
let guard = cx.lock();
js_tokenizer.borrow_mut(&guard).tokenizer = Arc::new(RwLock::new(tokenizer));
Ok(js_tokenizer)
}
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct FromPretrainedParametersJs {
#[serde(default)]
revision: Option<String>,
#[serde(default)]
auth_token: Option<String>,
}
impl From<FromPretrainedParametersJs> for tk::FromPretrainedParameters {
fn from(o: FromPretrainedParametersJs) -> Self {
let mut params = Self::default();
if let Some(revision) = o.revision {
params.revision = revision;
}
if let Some(auth_token) = o.auth_token {
params.auth_token = Some(auth_token);
}
params
}
}
pub fn tokenizer_from_pretrained(mut cx: FunctionContext) -> JsResult<JsTokenizer> {
let s = cx.extract::<String>(0)?;
let mut p: tk::FromPretrainedParameters = cx
.extract_opt::<FromPretrainedParametersJs>(1)?
.unwrap_or_default()
.into();
p.user_agent = [("bindings", "Node.js"), ("version", crate::VERSION)]
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
let tokenizer = tk::tokenizer::TokenizerImpl::from_pretrained(s, Some(p))
.map_err(|e| Error(format!("Error loading from pretrained {}", e)))?;
let js_model: Handle<JsModel> = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let mut js_tokenizer = JsTokenizer::new(&mut cx, vec![js_model])?;
let guard = cx.lock();
js_tokenizer.borrow_mut(&guard).tokenizer = Arc::new(RwLock::new(tokenizer));
Ok(js_tokenizer)
}
pub fn register(m: &mut ModuleContext, prefix: &str) -> Result<(), neon::result::Throw> {
m.export_class::<JsAddedToken>(&format!("{}_AddedToken", prefix))?;
m.export_class::<JsTokenizer>(&format!("{}_Tokenizer", prefix))?;
m.export_function(
&format!("{}_Tokenizer_from_string", prefix),
tokenizer_from_string,
)?;
m.export_function(
&format!("{}_Tokenizer_from_file", prefix),
tokenizer_from_file,
)?;
m.export_function(
&format!("{}_Tokenizer_from_pretrained", prefix),
tokenizer_from_pretrained,
)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/trainers.rs | extern crate tokenizers as tk;
use crate::extraction::*;
use crate::models::Model;
use crate::tokenizer::AddedToken;
use neon::prelude::*;
use std::sync::{Arc, RwLock};
use tk::models::{
bpe::BpeTrainer, unigram::UnigramTrainer, wordlevel::WordLevelTrainer,
wordpiece::WordPieceTrainer, TrainerWrapper,
};
/// Trainer
#[derive(Clone)]
pub struct Trainer {
pub trainer: Option<Arc<RwLock<TrainerWrapper>>>,
}
impl From<TrainerWrapper> for Trainer {
fn from(trainer: TrainerWrapper) -> Self {
Self {
trainer: Some(Arc::new(RwLock::new(trainer))),
}
}
}
impl tk::Trainer for Trainer {
type Model = Model;
fn should_show_progress(&self) -> bool {
self.trainer
.as_ref()
.expect("Uninitialized Trainer")
.read()
.unwrap()
.should_show_progress()
}
fn train(&self, model: &mut Self::Model) -> tk::Result<Vec<tk::AddedToken>> {
let special_tokens = self
.trainer
.as_ref()
.ok_or("Uninitialized Trainer")?
.read()
.unwrap()
.train(
&mut model
.model
.as_ref()
.ok_or("Uninitialized Model")?
.write()
.unwrap(),
)?;
Ok(special_tokens)
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> tk::Result<Vec<String>> + Sync,
{
self.trainer
.as_ref()
.ok_or("Uninitialized Trainer")?
.write()
.unwrap()
.feed(iterator, process)
}
}
declare_types! {
pub class JsTrainer for Trainer {
init(_) {
// This should not be called from JS
Ok(Trainer { trainer: None })
}
}
}
// BPE
struct BpeTrainerOptions(BpeTrainer);
impl From<BpeTrainerOptions> for BpeTrainer {
fn from(v: BpeTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for BpeTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = BpeTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder = builder.vocab_size(size);
}
}
if let Ok(freq) = options.get(cx, "minFrequency") {
if let Some(freq) = Option::from_value(freq, cx)? {
builder = builder.min_frequency(freq);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder = builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(limit) = options.get(cx, "limitAlphabet") {
if let Some(limit) = Option::from_value(limit, cx)? {
builder = builder.limit_alphabet(limit);
}
}
if let Ok(alphabet) = options.get(cx, "initialAlphabet") {
if let Some(alphabet) = Option::from_value(alphabet, cx)? {
builder = builder.initial_alphabet(alphabet);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder = builder.show_progress(show);
}
}
if let Ok(prefix) = options.get(cx, "continuingSubwordPrefix") {
if let Some(prefix) = Option::from_value(prefix, cx)? {
builder = builder.continuing_subword_prefix(prefix);
}
}
if let Ok(suffix) = options.get(cx, "endOfWordSuffix") {
if let Some(suffix) = Option::from_value(suffix, cx)? {
builder = builder.end_of_word_suffix(suffix);
}
}
Ok(Self(builder.build()))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// bpe_trainer(options?: {
/// vocabSize?: number = 30000,
/// minFrequency?: number = 2,
/// specialTokens?: (string | AddedToken)[] = [],
/// limitAlphabet?: number = undefined,
/// initialAlphabet?: string[] = [],
/// showProgress?: bool = true,
/// continuingSubwordPrefix?: string = undefined,
/// endOfWordSuffix?: string = undefined,
/// })
fn bpe_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx
.extract_opt::<BpeTrainerOptions>(0)?
.map_or_else(|| BpeTrainer::builder().build(), |o| o.into());
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
// WordPiece
struct WordPieceTrainerOptions(WordPieceTrainer);
impl From<WordPieceTrainerOptions> for WordPieceTrainer {
fn from(v: WordPieceTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for WordPieceTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = WordPieceTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder = builder.vocab_size(size);
}
}
if let Ok(freq) = options.get(cx, "minFrequency") {
if let Some(freq) = Option::from_value(freq, cx)? {
builder = builder.min_frequency(freq);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder = builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(limit) = options.get(cx, "limitAlphabet") {
if let Some(limit) = Option::from_value(limit, cx)? {
builder = builder.limit_alphabet(limit);
}
}
if let Ok(alphabet) = options.get(cx, "initialAlphabet") {
if let Some(alphabet) = Option::from_value(alphabet, cx)? {
builder = builder.initial_alphabet(alphabet);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder = builder.show_progress(show);
}
}
if let Ok(prefix) = options.get(cx, "continuingSubwordPrefix") {
if let Some(prefix) = Option::from_value(prefix, cx)? {
builder = builder.continuing_subword_prefix(prefix);
}
}
if let Ok(suffix) = options.get(cx, "endOfWordSuffix") {
if let Some(suffix) = Option::from_value(suffix, cx)? {
builder = builder.end_of_word_suffix(suffix);
}
}
Ok(Self(builder.build()))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// wordpiece_trainer(options?: {
/// vocabSize?: number = 30000,
/// minFrequency?: number = 2,
/// specialTokens?: string[] = [],
/// limitAlphabet?: number = undefined,
/// initialAlphabet?: string[] = [],
/// showProgress?: bool = true,
/// continuingSubwordPrefix?: string = undefined,
/// endOfWordSuffix?: string = undefined,
/// })
fn wordpiece_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx
.extract_opt::<WordPieceTrainerOptions>(0)?
.map_or_else(|| WordPieceTrainer::builder().build(), |o| o.into());
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
// WordLevel
struct WordLevelTrainerOptions(WordLevelTrainer);
impl From<WordLevelTrainerOptions> for WordLevelTrainer {
fn from(v: WordLevelTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for WordLevelTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = WordLevelTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder.vocab_size(size);
}
}
if let Ok(freq) = options.get(cx, "minFrequency") {
if let Some(freq) = Option::from_value(freq, cx)? {
builder.min_frequency(freq);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder.show_progress(show);
}
}
Ok(Self(
builder
.build()
.expect("WordLevelTrainerBuilder cannot fail"),
))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// wordlevel_trainer(options?: {
/// vocabSize?: number = 30000,
/// minFrequency?: number = 0,
/// specialTokens?: string[] = [],
/// showProgress?: bool = true,
/// })
fn wordlevel_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx.extract_opt::<WordLevelTrainerOptions>(0)?.map_or_else(
|| WordLevelTrainer::builder().build().unwrap(),
|o| o.into(),
);
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
// Unigram
struct UnigramTrainerOptions(UnigramTrainer);
impl From<UnigramTrainerOptions> for UnigramTrainer {
fn from(v: UnigramTrainerOptions) -> Self {
v.0
}
}
impl FromJsValue for UnigramTrainerOptions {
fn from_value<'c, C: Context<'c>>(from: Handle<'c, JsValue>, cx: &mut C) -> LibResult<Self> {
if let Ok(options) = from.downcast::<JsObject>() {
let mut builder = UnigramTrainer::builder();
if let Ok(size) = options.get(cx, "vocabSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder.vocab_size(size);
}
}
if let Ok(nsub) = options.get(cx, "nSubIterations") {
if let Some(nsub) = Option::from_value(nsub, cx)? {
builder.n_sub_iterations(nsub);
}
}
if let Ok(factor) = options.get(cx, "shrinkingFactor") {
if let Some(factor) = Option::from_value(factor, cx)? {
builder.shrinking_factor(factor);
}
}
if let Ok(tokens) = options.get(cx, "specialTokens") {
if tokens.downcast::<JsNull>().is_err() && tokens.downcast::<JsUndefined>().is_err()
{
builder.special_tokens(
tokens
.downcast::<JsArray>()
.map_err(|e| Error(format!("{}", e)))?
.to_vec(cx)?
.into_iter()
.map(|token| Ok(AddedToken::from_value(token, cx)?.into()))
.collect::<Result<Vec<_>, Error>>()?,
);
}
}
if let Ok(alphabet) = options.get(cx, "initialAlphabet") {
if let Some(alphabet) = Option::from_value(alphabet, cx)? {
builder.initial_alphabet(alphabet);
}
}
if let Ok(unk) = options.get(cx, "unkToken") {
let unk = Option::from_value(unk, cx)?;
builder.unk_token(unk);
}
if let Ok(max) = options.get(cx, "maxPieceLength") {
if let Some(max) = Option::from_value(max, cx)? {
builder.max_piece_length(max);
}
}
if let Ok(size) = options.get(cx, "seedSize") {
if let Some(size) = Option::from_value(size, cx)? {
builder.seed_size(size);
}
}
if let Ok(show) = options.get(cx, "showProgress") {
if let Some(show) = Option::from_value(show, cx)? {
builder.show_progress(show);
}
}
Ok(Self(builder.build()?))
} else {
Err(Error("Expected options type: object".into()))
}
}
}
/// unigram_trainer(options?: {
/// vocabSize?: number = 8000,
/// nSubIterations?: number = 2,
/// shrinkingFactor?: number = 0.75,
/// specialTokens?: string[] = [],
/// initialAlphabet?: string[] = [],
/// unkToken?: string = undefined,
/// maxPieceLength?: number = 16,
/// seedSize?: number = 1000000,
/// showProgress?: boolean = true,
/// })
fn unigram_trainer(mut cx: FunctionContext) -> JsResult<JsTrainer> {
let trainer = cx
.extract_opt::<UnigramTrainerOptions>(0)?
.map_or_else(|| UnigramTrainer::builder().build().unwrap(), |o| o.into());
let mut js_trainer = JsTrainer::new::<_, JsTrainer, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_trainer.borrow_mut(&guard).trainer = Some(Arc::new(RwLock::new(trainer.into())));
Ok(js_trainer)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_BPETrainer", prefix), bpe_trainer)?;
m.export_function(&format!("{}_WordPieceTrainer", prefix), wordpiece_trainer)?;
m.export_function(&format!("{}_WordLevelTrainer", prefix), wordlevel_trainer)?;
m.export_function(&format!("{}_UnigramTrainer", prefix), unigram_trainer)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native | hf_public_repos/tokenizers/bindings/node/native/src/utils.rs | extern crate tokenizers as tk;
use crate::encoding::JsEncoding;
use crate::extraction::*;
use crate::tokenizer::Encoding;
use neon::prelude::*;
/// slice(s: string, start?: number, end?: number)
fn slice(mut cx: FunctionContext) -> JsResult<JsString> {
let s = cx.extract::<String>(0)?;
let len = s.chars().count();
let get_index = |x: i32| -> usize {
if x >= 0 {
x as usize
} else {
(len as i32 + x) as usize
}
};
let begin_index = get_index(cx.extract_opt::<i32>(1)?.unwrap_or(0));
let end_index = get_index(cx.extract_opt::<i32>(2)?.unwrap_or(len as i32));
if let Some(slice) = tk::tokenizer::normalizer::get_range_of(&s, begin_index..end_index) {
Ok(cx.string(slice))
} else {
cx.throw_error("Error in offsets")
}
}
/// merge_encodings(encodings: Encoding[], growing_offsets: boolean = false): Encoding
fn merge_encodings(mut cx: FunctionContext) -> JsResult<JsEncoding> {
let encodings: Vec<tk::Encoding> = cx
.extract_vec::<Encoding>(0)?
.into_iter()
.map(|e| e.into())
.collect();
let growing_offsets = cx.extract_opt::<bool>(1)?.unwrap_or(false);
let new_encoding = tk::tokenizer::Encoding::merge(encodings, growing_offsets);
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(new_encoding);
Ok(js_encoding)
}
/// Register everything here
pub fn register(m: &mut ModuleContext, prefix: &str) -> NeonResult<()> {
m.export_function(&format!("{}_slice", prefix), slice)?;
m.export_function(&format!("{}_mergeEncodings", prefix), merge_encodings)?;
Ok(())
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native/src | hf_public_repos/tokenizers/bindings/node/native/src/tasks/mod.rs | pub mod models;
pub mod tokenizer;
| 0 |
hf_public_repos/tokenizers/bindings/node/native/src | hf_public_repos/tokenizers/bindings/node/native/src/tasks/models.rs | extern crate tokenizers as tk;
use crate::models::*;
use neon::prelude::*;
use std::sync::{Arc, RwLock};
use tk::models::bpe::{BpeBuilder, BPE};
use tk::models::wordlevel::{WordLevel, WordLevelBuilder};
use tk::models::wordpiece::{WordPiece, WordPieceBuilder};
pub struct WordPieceFromFilesTask(Option<WordPieceBuilder>);
impl WordPieceFromFilesTask {
pub fn new(builder: WordPieceBuilder) -> Self {
Self(Some(builder))
}
}
impl Task for WordPieceFromFilesTask {
type Output = WordPiece;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
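        // `perform` only receives `&self`, so the builder is moved out of the `Option` through a
        // raw pointer; this relies on the task being performed exactly once.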
let builder: Option<WordPieceBuilder> =
unsafe { std::ptr::replace(&self.0 as *const _ as *mut _, None) };
builder.unwrap().build().map_err(|e| format!("{}", e))
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
let wordpiece = result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordpiece.into())));
Ok(js_model.upcast())
}
}
pub struct WordLevelFromFilesTask(Option<WordLevelBuilder>);
impl WordLevelFromFilesTask {
pub fn new(builder: WordLevelBuilder) -> Self {
Self(Some(builder))
}
}
impl Task for WordLevelFromFilesTask {
type Output = WordLevel;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
let builder: Option<WordLevelBuilder> =
unsafe { std::ptr::replace(&self.0 as *const _ as *mut _, None) };
builder.unwrap().build().map_err(|e| format!("{}", e))
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
let wordlevel = result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(wordlevel.into())));
Ok(js_model.upcast())
}
}
pub struct BPEFromFilesTask(Option<BpeBuilder>);
impl BPEFromFilesTask {
pub fn new(builder: BpeBuilder) -> Self {
Self(Some(builder))
}
}
impl Task for BPEFromFilesTask {
type Output = BPE;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
let builder: Option<BpeBuilder> =
unsafe { std::ptr::replace(&self.0 as *const _ as *mut _, None) };
builder.unwrap().build().map_err(|e| format!("{}", e))
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
let bpe = result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())?;
let mut js_model = JsModel::new::<_, JsModel, _>(&mut cx, vec![])?;
let guard = cx.lock();
js_model.borrow_mut(&guard).model = Some(Arc::new(RwLock::new(bpe.into())));
Ok(js_model.upcast())
}
}
| 0 |
hf_public_repos/tokenizers/bindings/node/native/src | hf_public_repos/tokenizers/bindings/node/native/src/tasks/tokenizer.rs | extern crate tokenizers as tk;
use crate::encoding::*;
use crate::tokenizer::Tokenizer;
use neon::prelude::*;
use tk::tokenizer::{EncodeInput, Encoding};
pub enum EncodeTask<'s> {
Single(Tokenizer, Option<EncodeInput<'s>>, bool),
Batch(Tokenizer, Option<Vec<EncodeInput<'s>>>, bool),
}
pub enum EncodeOutput {
Single(Box<Encoding>),
Batch(Vec<Encoding>),
}
impl Task for EncodeTask<'static> {
type Output = EncodeOutput;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
match self {
EncodeTask::Single(worker, input, add_special_tokens) => {
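                // `perform` only receives `&self`, so the input is taken out of the `Option`
                // through a raw pointer; this relies on the task being performed exactly once.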
let mut input: Option<EncodeInput> =
unsafe { std::ptr::replace(input as *const _ as *mut _, None) };
worker
.tokenizer
.read()
.unwrap()
.encode_char_offsets(
input.take().ok_or("No provided input")?,
*add_special_tokens,
)
.map_err(|e| format!("{}", e))
.map(|item| EncodeOutput::Single(Box::new(item)))
}
EncodeTask::Batch(worker, input, add_special_tokens) => {
let mut input: Option<Vec<EncodeInput>> =
unsafe { std::ptr::replace(input as *const _ as *mut _, None) };
worker
.tokenizer
.read()
.unwrap()
.encode_batch_char_offsets(
input.take().ok_or("No provided input")?,
*add_special_tokens,
)
.map_err(|e| format!("{}", e))
.map(EncodeOutput::Batch)
}
}
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
match result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())? {
EncodeOutput::Single(encoding) => {
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
// Set the actual encoding
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(*encoding);
Ok(js_encoding.upcast())
}
EncodeOutput::Batch(encodings) => {
let result = JsArray::new(&mut cx, encodings.len() as u32);
for (i, encoding) in encodings.into_iter().enumerate() {
let mut js_encoding = JsEncoding::new::<_, JsEncoding, _>(&mut cx, vec![])?;
// Set the actual encoding
let guard = cx.lock();
js_encoding.borrow_mut(&guard).encoding = Some(encoding);
result.set(&mut cx, i as u32, js_encoding)?;
}
Ok(result.upcast())
}
}
}
}
pub enum DecodeTask {
Single(Tokenizer, Vec<u32>, bool),
Batch(Tokenizer, Vec<Vec<u32>>, bool),
}
pub enum DecodeOutput {
Single(String),
Batch(Vec<String>),
}
impl Task for DecodeTask {
type Output = DecodeOutput;
type Error = String;
type JsEvent = JsValue;
fn perform(&self) -> Result<Self::Output, Self::Error> {
match self {
DecodeTask::Single(worker, ids, skip_special_tokens) => worker
.tokenizer
.read()
.unwrap()
.decode(ids.as_slice(), *skip_special_tokens)
.map_err(|e| format!("{}", e))
.map(DecodeOutput::Single),
DecodeTask::Batch(worker, ids, skip_special_tokens) => worker
.tokenizer
.read()
.unwrap()
.decode_batch(
&ids.iter().map(|v| v.as_slice()).collect::<Vec<&[u32]>>(),
*skip_special_tokens,
)
.map_err(|e| format!("{}", e))
.map(DecodeOutput::Batch),
}
}
fn complete(
self,
mut cx: TaskContext,
result: Result<Self::Output, Self::Error>,
) -> JsResult<Self::JsEvent> {
match result.map_err(|e| cx.throw_error::<_, ()>(e).unwrap_err())? {
DecodeOutput::Single(string) => Ok(cx.string(string).upcast()),
DecodeOutput::Batch(strings) => {
let result = JsArray::new(&mut cx, strings.len() as u32);
for (i, string) in strings.into_iter().enumerate() {
let js_string = cx.string(string);
result.set(&mut cx, i as u32, js_string)?;
}
Ok(result.upcast())
}
}
}
}
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- [#1096] Python 3.11 support
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#956] PyO3 version upgrade
- [#1055] M1 automated builds
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible (a short
example of composing decoders follows below)
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible
Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.
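As an illustration of what "composable" means here, decoders can now be chained from the Python
bindings. This is a minimal sketch using recent versions of the bindings; the particular decoder
mix is illustrative, not prescriptive:

```python
from tokenizers import decoders

# Chain decoders: each one post-processes the output of the previous one.
decoder = decoders.Sequence([
    decoders.Replace("_", " "),  # map a placeholder character back to spaces
    decoders.Fuse(),             # fuse all tokens into a single string
])
print(decoder.decode(["Hello", "_world"]))  # expected: "Hello world"
```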
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#962] Fix tests for python 3.10
- [#961] Added link for Ruby port of `tokenizers`
## [0.11.6]
- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.
## [0.11.5]
- [#895] Build `python 3.10` wheels.
## [0.11.4]
- [#884] Fixing bad deserialization following inclusion of a default for Punctuation
## [0.11.3]
- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams
## [0.11.0]
### Fixed
- [#585] Conda version should now work on old CentOS
- [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`.
- [#851] Doc links
### Added
- [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor
- [#845]: Documentation for `Decoders`.
### Changed
- [#850]: Added a feature gate to enable disabling `http` features
- [#718]: Fix `WordLevel` tokenizer determinism during training
- [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer`
- [#770]: Improved documentation for `UnigramTrainer`
- [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub (see the
short example below)
- [#793]: Saving a pretty JSON file by default when saving a tokenizer
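For reference, loading from the Hub as added in [#780] looks roughly like this (a minimal sketch;
`bert-base-uncased` is just an example of a repository that ships a `tokenizer.json`):

```python
from tokenizers import Tokenizer

# Downloads and loads the tokenizer.json hosted in the given Hub repository.
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
print(tokenizer.encode("Hello, world!").tokens)
```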
## [0.10.3]
### Fixed
- [#686]: Fix SPM conversion process for whitespace deduplication
- [#707]: Fix stripping strings containing Unicode characters
### Added
- [#693]: Add a CTC Decoder for Wave2Vec models
### Removed
- [#714]: Removed support for Python 3.5
## [0.10.2]
### Fixed
- [#652]: Fix offsets for `Precompiled` corner case
- [#656]: Fix BPE `continuing_subword_prefix`
- [#674]: Fix `Metaspace` serialization problems
## [0.10.1]
### Fixed
- [#616]: Fix SentencePiece tokenizers conversion
- [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM)
- [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut`
- [#620]: Fix serialization/deserialization for overlapping models
- [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`)
## [0.10.0]
### Added
- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
- [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
- [#533]: Add support for conda builds
- [#542]: Add Split pre-tokenizer to easily split using a pattern
- [#544]: Ability to train from memory. This also improves the integration with `datasets` (a
short sketch follows this list)
- [#590]: Add getters/setters for components on BaseTokenizer
- [#574]: Add `fuse_unk` option to SentencePieceBPETokenizer
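A minimal sketch of training from memory as introduced by [#544] (the corpus and trainer settings
are placeholders, not recommendations):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
trainer = BpeTrainer(vocab_size=1000, special_tokens=["[UNK]"])

# Any iterator of strings works: a list, a generator, or a `datasets` column.
corpus = ["first training sentence", "second training sentence"]
tokenizer.train_from_iterator(corpus, trainer=trainer)
```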
### Changed
- [#509]: Automatically stubbing the `.pyi` files
- [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()`
- [#530]: The various attributes on each component can be get/set (ie.
`tokenizer.model.dropout = 0.1`)
- [#538]: The API Reference has been improved and is now up-to-date.
### Fixed
- [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that were
forcing to reload the `Model` after a training.
- [#539]: Fix `BaseTokenizer` enable_truncation docstring
## [0.9.4]
### Fixed
- [#492]: Fix `from_file` on `BertWordPieceTokenizer`
- [#498]: Fix the link to download `sentencepiece_model_pb2.py`
- [#500]: Fix a typo in the docs quicktour
### Changed
- [#506]: Improve Encoding mappings for pairs of sequence
## [0.9.3]
### Fixed
- [#470]: Fix hanging error when training with custom component
- [#476]: TemplateProcessing serialization is now deterministic
- [#481]: Fix SentencePieceBPETokenizer.from_files
### Added
- [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts
- [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly
## [0.9.2]
### Fixed
- [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing
## [0.9.1]
### Fixed
- [#459]: Fix a problem with deserialization
## [0.9.0]
### Fixed
- [#362]: Fix training deadlock with Python components.
- [#363]: Fix a crash when calling `.train` with some non-existent files
- [#355]: Remove a lot of possible crashes
- [#389]: Improve truncation (crash and consistency)
### Added
- [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays
- [#292]: Support for the Unigram algorithm
- [#378], [#394], [#416], [#417]: Many new Normalizer and PreTokenizer
- [#403]: Add `TemplateProcessing` `PostProcessor` (see the example below).
- [#420]: Ability to fuse the "unk" token in BPE.
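The `TemplateProcessing` post-processor added in [#403] is configured with templates such as the
following (a sketch; the special token ids must match your own vocabulary):

```python
from tokenizers.processors import TemplateProcessing

post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # ids here are placeholders
)
```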
### Changed
- [#360]: Lots of improvements related to words/alignment tracking
- [#426]: Improvements on error messages thanks to PyO3 0.12
## [0.8.1]
### Fixed
- [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly
### Changed
- [#329]: Improved warning and behavior when we detect a fork
- [#330]: BertNormalizer now keeps the same behavior than the original implementation when
`strip_accents` is not specified.
## [0.8.0]
### Highlights of this release
- We can now encode both pre-tokenized inputs and raw strings. This is especially useful when
processing datasets that are already pre-tokenized, like for NER (Named Entity Recognition), and
helps when applying labels to each word.
- Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later
load it back with just one line of code. That's what sharing a Tokenizer means now: one line of
code (a short sketch follows this list).
- With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components,
Encodings, everything can be pickled!
- Training a tokenizer is now even faster (up to 5-10x) than before!
- Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library
makes heavy use of the multithreading capacities of our computers to allow very fast tokenization,
this led to problems (deadlocks) when used with `multiprocessing`. This version now makes it
possible to disable the parallelism, and will warn you if this is necessary.
- And a lot of other improvements, and fixes.
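A minimal sketch of the one-line save/load and the pickling mentioned above (the file names are
arbitrary assumptions):

```python
import pickle

from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file("tokenizer.json")   # load a previously saved tokenizer
tokenizer.save("tokenizer-copy.json", pretty=True)  # save everything to a single JSON file

# The Tokenizer and all of its components can be pickled as well.
restored = pickle.loads(pickle.dumps(tokenizer))
```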
### Fixed
- [#286]: Fix various crash when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
### Added
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...).
This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`).
- [#273]: `Tokenizer` and its parts are now pickable
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure
activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with
`enable_padding(pad_to_multiple_of=8)` for example.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment
variable. This is especially useful when using `multiprocessing` capabilities, with the `fork`
start method, which happens to be the default on Linux systems. Without disabling the parallelism,
the process deadlocks while encoding. (Cf [#187] for more information)
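A minimal sketch combining the padding and parallelism additions above (the file name is only a
placeholder):
```
import os
# Disable the parallelism to avoid deadlocks with the `fork` start method
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("tokenizer.json")
# Pad each batch to a multiple of 8 to help activate the Tensor Cores
tokenizer.enable_padding(pad_to_multiple_of=8)
```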
### Changed
- Improved errors generated during truncation: cases where the provided max length is too low are
now handled properly.
- [#249]: `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized,
the argument `is_pretokenized=True` must be specified (see the sketch after this list).
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the
processing of each file
- [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original
implementation from GPT-2
- [#309]: Improved the management of the additional vocabulary. This introduces an option
`normalized`, controlling whether a token should be extracted from the normalized version of the
input text.
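As an illustration (a minimal sketch; `tokenizer` stands for any existing `Tokenizer` instance):
```
# The inputs below are already split into words
output = tokenizer.encode(["Hello", "there", "my", "friend"], is_pretokenized=True)
outputs = tokenizer.encode_batch(
    [["Hello", "there"], ["my", "friend"]], is_pretokenized=True
)
```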
## [0.7.0]
### Changed
- Only one progress bar while reading files during training. This is better for use cases with
a high number of files, as it avoids having too many progress bars on screen. It also avoids reading the
size of each file before actually starting to read these files, as this process could take a really
long time.
- [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we
should add the special tokens. This is activated by default.
- [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by
`encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint.
- [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the
normalized one anymore.
- The added tokens given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using
`train(special_tokens=...)`, can now be instances of `AddedToken` to provide more control over these
tokens.
- [#136]: Updated Pyo3 version
- [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using
constructors.
- [#239]: `CharBPETokenizer` now corresponds to the OpenAI GPT BPE implementation by default.
### Added
- [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated.
This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these
whitespaces are part of the actual token.
It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`).
- [#236]: `RobertaProcessing` also handles trimming the offsets.
- [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char`
or `word` (input space) and `token` (output space).
- `post_process` can be called on the `Tokenizer`
- [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with
`get_vocab(with_added_tokens: bool)`
- [#136] Models can now be instantiated through object constructors.
### Fixed
- [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE:
- when `add_prefix_space=True`
- [#156]: when a Unicode character gets split up into multiple byte-level characters
- Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded.
- [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if
not advised, but that's not the question).
- [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer`
### How to migrate
- Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are
using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`).
- `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or
`encode_batch`
- Access to the `original_str` on the `Encoding` has been removed. The original string is the input
of `encode` so it didn't make sense to keep it here.
- No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. They
are now relative to the original string by default.
- Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling
`normalize(sequence)` on the `Tokenizer`
- Change `Model.from_files` and `Model.empty` to use the constructor. The model constructor should take
the same arguments as the old methods (ie `BPE(vocab, merges)` or `BPE()`), as in the sketch after this list.
- If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set
`bert_normalizer=False` and `split_on_whitespace_only=True`.
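A minimal sketch of the model instantiation part of this migration (here `vocab` and `merges` stand
for the same arguments the old static methods used to take):
```
from tokenizers.models import BPE
# Before 0.7.0
# model = BPE.from_files(vocab, merges)
# From 0.7.0 on, use the constructor directly
model = BPE(vocab, merges)
```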
## [0.6.0]
### Changed
- [#165]: Big improvements in speed for BPE (Both training and tokenization)
### Fixed
- [#160]: Some default tokens were missing from `BertWordPieceTokenizer`
- [#156]: There was a bug in the ByteLevel PreTokenizer that caused offsets to be wrong if a char got
split up into multiple bytes.
- [#174]: The `longest_first` truncation strategy had a bug
## [0.5.2]
- [#163]: Do not open all files directly while training
### Fixed
- We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file
was named `vocab.json`. This is now fixed.
- The `WordLevel` model was also saving its vocabulary to the wrong format.
## [0.5.1]
### Changed
- `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not
specified, the files get a more generic naming, like `vocab.json` or `merges.txt`.
## [0.5.0]
### Changed
- [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding
- [#149]: `ByteLevelBPETokenizer` now has `dropout`.
- `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers.
(Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`)
- [#139]: Expose `__len__` on `Encoding`
- Improved padding performance.
### Added
- Added a new `Strip` normalizer
### Fixed
- [#145]: Decoding was buggy on `BertWordPieceTokenizer`.
- [#152]: Some documentation and examples were still using the old `BPETokenizer`
### How to migrate
- Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of
`do_lowercase`, as in the sketch below.
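For example, a minimal sketch of the change:
```
from tokenizers import ByteLevelBPETokenizer
# Before 0.5.0: ByteLevelBPETokenizer(do_lowercase=True)
tokenizer = ByteLevelBPETokenizer(lowercase=True)
```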
## [0.4.2]
### Fixed
- [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from
being trained.
## [0.4.1]
### Fixed
- [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer
## [0.4.0]
### Changed
- [#131]: Replaced all `.new()` class methods by a proper `__new__` implementation
- Improved typings
### How to migrate
- Remove all `.new` calls on all class instantiations
## [0.3.0]
### Changed
- BPETokenizer has been renamed to CharBPETokenizer for clarity.
- Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets
truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language
model, just as the main `Encoding`.
- Provide mapping to the original string offsets using:
```
output = tokenizer.encode(...)
print(output.original_str.offsets(output.offsets[3]))
```
- [#99]: Exposed the vocabulary size on all tokenizers
### Added
- Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given
delimiter (Works like `.split(delimiter)`)
- Added `WordLevel`: a new model that simply maps `tokens` to their `ids`.
### Fixed
- Fix a bug with IndexableString
- Fix a bug with truncation
### How to migrate
- Rename `BPETokenizer` to `CharBPETokenizer`
- `Encoding.overflowing` is now a List instead of an `Optional[Encoding]`
## [0.2.1]
### Fixed
- Fix a bug with the IDs associated with added tokens.
- Fix a bug that was causing crashes in Python 3.5
[#1096]: https://github.com/huggingface/tokenizers/pull/1096
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#962]: https://github.com/huggingface/tokenizers/pull/962
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
[#919]: https://github.com/huggingface/tokenizers/pull/919
[#916]: https://github.com/huggingface/tokenizers/pull/916
[#895]: https://github.com/huggingface/tokenizers/pull/895
[#884]: https://github.com/huggingface/tokenizers/pull/884
[#882]: https://github.com/huggingface/tokenizers/pull/882
[#868]: https://github.com/huggingface/tokenizers/pull/868
[#860]: https://github.com/huggingface/tokenizers/pull/860
[#850]: https://github.com/huggingface/tokenizers/pull/850
[#844]: https://github.com/huggingface/tokenizers/pull/844
[#845]: https://github.com/huggingface/tokenizers/pull/845
[#851]: https://github.com/huggingface/tokenizers/pull/851
[#585]: https://github.com/huggingface/tokenizers/pull/585
[#793]: https://github.com/huggingface/tokenizers/pull/793
[#780]: https://github.com/huggingface/tokenizers/pull/780
[#770]: https://github.com/huggingface/tokenizers/pull/770
[#762]: https://github.com/huggingface/tokenizers/pull/762
[#718]: https://github.com/huggingface/tokenizers/pull/718
[#714]: https://github.com/huggingface/tokenizers/pull/714
[#707]: https://github.com/huggingface/tokenizers/pull/707
[#693]: https://github.com/huggingface/tokenizers/pull/693
[#686]: https://github.com/huggingface/tokenizers/pull/686
[#674]: https://github.com/huggingface/tokenizers/pull/674
[#657]: https://github.com/huggingface/tokenizers/pull/657
[#656]: https://github.com/huggingface/tokenizers/pull/656
[#652]: https://github.com/huggingface/tokenizers/pull/652
[#621]: https://github.com/huggingface/tokenizers/pull/621
[#620]: https://github.com/huggingface/tokenizers/pull/620
[#618]: https://github.com/huggingface/tokenizers/pull/618
[#617]: https://github.com/huggingface/tokenizers/pull/617
[#616]: https://github.com/huggingface/tokenizers/pull/616
[#590]: https://github.com/huggingface/tokenizers/pull/590
[#574]: https://github.com/huggingface/tokenizers/pull/574
[#544]: https://github.com/huggingface/tokenizers/pull/544
[#542]: https://github.com/huggingface/tokenizers/pull/542
[#539]: https://github.com/huggingface/tokenizers/pull/539
[#538]: https://github.com/huggingface/tokenizers/pull/538
[#533]: https://github.com/huggingface/tokenizers/pull/533
[#530]: https://github.com/huggingface/tokenizers/pull/530
[#519]: https://github.com/huggingface/tokenizers/pull/519
[#509]: https://github.com/huggingface/tokenizers/pull/509
[#508]: https://github.com/huggingface/tokenizers/pull/508
[#506]: https://github.com/huggingface/tokenizers/pull/506
[#500]: https://github.com/huggingface/tokenizers/pull/500
[#498]: https://github.com/huggingface/tokenizers/pull/498
[#492]: https://github.com/huggingface/tokenizers/pull/492
[#481]: https://github.com/huggingface/tokenizers/pull/481
[#480]: https://github.com/huggingface/tokenizers/pull/480
[#477]: https://github.com/huggingface/tokenizers/pull/477
[#476]: https://github.com/huggingface/tokenizers/pull/476
[#470]: https://github.com/huggingface/tokenizers/pull/470
[#464]: https://github.com/huggingface/tokenizers/pull/464
[#459]: https://github.com/huggingface/tokenizers/pull/459
[#420]: https://github.com/huggingface/tokenizers/pull/420
[#417]: https://github.com/huggingface/tokenizers/pull/417
[#416]: https://github.com/huggingface/tokenizers/pull/416
[#403]: https://github.com/huggingface/tokenizers/pull/403
[#394]: https://github.com/huggingface/tokenizers/pull/394
[#389]: https://github.com/huggingface/tokenizers/pull/389
[#379]: https://github.com/huggingface/tokenizers/pull/379
[#378]: https://github.com/huggingface/tokenizers/pull/378
[#363]: https://github.com/huggingface/tokenizers/pull/363
[#362]: https://github.com/huggingface/tokenizers/pull/362
[#360]: https://github.com/huggingface/tokenizers/pull/360
[#355]: https://github.com/huggingface/tokenizers/pull/355
[#333]: https://github.com/huggingface/tokenizers/pull/333
[#330]: https://github.com/huggingface/tokenizers/pull/330
[#329]: https://github.com/huggingface/tokenizers/pull/329
[#311]: https://github.com/huggingface/tokenizers/pull/311
[#309]: https://github.com/huggingface/tokenizers/pull/309
[#292]: https://github.com/huggingface/tokenizers/pull/292
[#289]: https://github.com/huggingface/tokenizers/pull/289
[#286]: https://github.com/huggingface/tokenizers/pull/286
[#280]: https://github.com/huggingface/tokenizers/pull/280
[#276]: https://github.com/huggingface/tokenizers/pull/276
[#273]: https://github.com/huggingface/tokenizers/pull/273
[#272]: https://github.com/huggingface/tokenizers/pull/272
[#249]: https://github.com/huggingface/tokenizers/pull/249
[#239]: https://github.com/huggingface/tokenizers/pull/239
[#236]: https://github.com/huggingface/tokenizers/pull/236
[#234]: https://github.com/huggingface/tokenizers/pull/234
[#208]: https://github.com/huggingface/tokenizers/pull/208
[#205]: https://github.com/huggingface/tokenizers/issues/205
[#197]: https://github.com/huggingface/tokenizers/pull/197
[#193]: https://github.com/huggingface/tokenizers/pull/193
[#190]: https://github.com/huggingface/tokenizers/pull/190
[#188]: https://github.com/huggingface/tokenizers/pull/188
[#187]: https://github.com/huggingface/tokenizers/issues/187
[#175]: https://github.com/huggingface/tokenizers/issues/175
[#174]: https://github.com/huggingface/tokenizers/issues/174
[#165]: https://github.com/huggingface/tokenizers/pull/165
[#163]: https://github.com/huggingface/tokenizers/issues/163
[#160]: https://github.com/huggingface/tokenizers/issues/160
[#156]: https://github.com/huggingface/tokenizers/pull/156
[#152]: https://github.com/huggingface/tokenizers/issues/152
[#149]: https://github.com/huggingface/tokenizers/issues/149
[#145]: https://github.com/huggingface/tokenizers/issues/145
[#139]: https://github.com/huggingface/tokenizers/issues/139
[#137]: https://github.com/huggingface/tokenizers/issues/137
[#134]: https://github.com/huggingface/tokenizers/issues/134
[#131]: https://github.com/huggingface/tokenizers/issues/131
[#99]: https://github.com/huggingface/tokenizers/pull/99
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Cargo.toml | [package]
name = "tokenizers-python"
version = "0.13.3"
authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
edition = "2021"
[lib]
name = "tokenizers"
crate-type = ["cdylib"]
[dependencies]
rayon = "1.3"
serde = { version = "1.0", features = [ "rc", "derive" ]}
serde_json = "1.0"
libc = "0.2"
env_logger = "0.7.1"
pyo3 = "0.18.1"
numpy = "0.18.0"
ndarray = "0.13"
onig = { version = "6.0", default-features = false }
itertools = "0.9"
[dependencies.tokenizers]
version = "*"
path = "../../tokenizers"
[dev-dependencies]
tempfile = "3.1"
pyo3 = { version = "0.18.1", features = ["auto-initialize"] }
[features]
default = ["pyo3/extension-module"]
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/MANIFEST.in | include Cargo.toml
include pyproject.toml
include rust-toolchain
include ../../LICENSE
recursive-include src *
recursive-include tokenizers-lib *
recursive-exclude tokenizers-lib/target *
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Makefile | .PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
check_dirs := examples py_src/tokenizers tests
# Format source code automatically
style:
python stub.py
black --line-length 119 --target-version py35 $(check_dirs)
# Check the source code is formatted correctly
check-style:
python stub.py --check
black --check --line-length 119 --target-version py35 examples py_src/tokenizers tests
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json
# Launch the test suite
test: $(TESTS_RESOURCES)
pip install pytest requests setuptools_rust numpy pyarrow datasets
python -m pytest -s -v tests
cargo test --no-default-features
$(DATA_DIR)/big.txt :
$(dir_guard)
wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
$(dir_guard)
wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/py/tokenizers">
<img alt="Build" src="https://badge.fury.io/py/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
# Tokenizers
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the High-level design, you can go check it there.
Otherwise, let's dive in!
## Main features:
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignments tracking. It's always possible to get the part of the
original sentence that corresponds to a given token.
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
### Installation
#### With pip:
```bash
pip install tokenizers
```
#### From sources:
To use this method, you need to have Rust installed:
```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```
Once Rust is installed, you can compile by doing the following
```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate
# Install `tokenizers` in the current virtual env
pip install setuptools_rust
python setup.py install
```
### Load a pretrained tokenizer from the Hub
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```
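You can then use it right away, for example (the exact tokens you get depend on the tokenizer you
loaded):
```python
output = tokenizer.encode("Hello, y'all! How are you?")
print(output.tokens)
print(output.ids)
```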
### Using the provided Tokenizers
We provide some pre-built tokenizers to cover the most common cases. You can easily load one of
these using some `vocab.json` and `merges.txt` files:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)
# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```
And you can train them just as simply:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
tokenizer = CharBPETokenizer()
# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])
# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")
# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```
#### Provided Tokenizers
- `CharBPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
All of these can be used and trained as explained above!
### Build your own
Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer,
by putting all the different parts you need together.
You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE
Here is an example showing how to build your own byte-level BPE by putting all the different pieces
together, and then saving it to a single file:
```python
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
tokenizer = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
trainer = trainers.BpeTrainer(
vocab_size=20000,
min_frequency=2,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
)
tokenizer.train([
"./path/to/dataset/1.txt",
"./path/to/dataset/2.txt",
"./path/to/dataset/3.txt"
], trainer=trainer)
# And Save it
tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True)
```
Now, when you want to use this tokenizer, this is as simple as:
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json")
encoded = tokenizer.encode("I can feel the magic, can you?")
```
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/build-sdist.sh | #!/bin/bash
set -ex
# Create a symlink for tokenizers-lib
ln -sf ../../tokenizers tokenizers-lib
# Modify cargo.toml to include this symlink
sed -i 's/\.\.\/\.\.\/tokenizers/\.\/tokenizers-lib/' Cargo.toml
# Build the source distribution
python setup.py sdist
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/build-wheels.sh | #!/bin/bash
set -ex
if ! command -v cargo &> /dev/null
then
curl https://sh.rustup.rs -sSf | sh -s -- -y
fi
export PATH="$HOME/.cargo/bin:$PATH"
# https://users.rust-lang.org/t/cargo-uses-too-much-memory-being-run-in-qemu/76531
echo -e "[net]\ngit-fetch-with-cli = true" > "$HOME/.cargo/config"
for PYBIN in /opt/python/cp{37,38,39,310,311}*/bin; do
export PYTHON_SYS_EXECUTABLE="$PYBIN/python"
"${PYBIN}/pip" install -U setuptools-rust setuptools wheel
"${PYBIN}/python" setup.py bdist_wheel
rm -rf build/*
done
for whl in ./dist/*.whl; do
auditwheel repair "$whl" -w dist/
done
# Keep only manylinux wheels
rm ./dist/*-linux_*
# Upload wheels
/opt/python/cp37-cp37m/bin/pip install -U awscli
/opt/python/cp37-cp37m/bin/python -m awscli s3 sync --exact-timestamps ./dist "s3://tokenizers-releases/python/$DIST_DIR"
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/conftest.py | import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/pyproject.toml | [build-system]
requires = ["setuptools", "wheel", "setuptools-rust"]
build-backend = "setuptools.build_meta"
[tool.black]
target-version = ['py35']
line-length = 119
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/rust-toolchain | stable
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/setup.cfg | [isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
absl
conllu
datasets
elasticsearch
fairseq
faiss-cpu
fastprogress
fire
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
PIL
psutil
pytest
pytorch_lightning
rouge_score
sacrebleu
seqeval
sklearn
streamlit
tensorboardX
tensorflow
tensorflow_datasets
timeout_decorator
torch
torchaudio
torchtext
torchvision
torch_xla
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/setup.py | from setuptools import setup
from setuptools_rust import Binding, RustExtension
extras = {}
extras["testing"] = ["pytest", "requests", "numpy", "datasets", "black==22.3"]
extras["docs"] = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
extras["dev"] = extras["testing"]
setup(
name="tokenizers",
version="0.13.3",
description="Fast and Customizable Tokenizers",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="NLP tokenizer BPE transformer deep learning",
author="Anthony MOI",
author_email="anthony@huggingface.co",
url="https://github.com/huggingface/tokenizers",
license="Apache License 2.0",
rust_extensions=[RustExtension("tokenizers.tokenizers", binding=Binding.PyO3, debug=False)],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
package_dir={"": "py_src"},
packages=[
"tokenizers",
"tokenizers.models",
"tokenizers.decoders",
"tokenizers.normalizers",
"tokenizers.pre_tokenizers",
"tokenizers.processors",
"tokenizers.trainers",
"tokenizers.implementations",
"tokenizers.tools",
],
package_data={
"tokenizers": ["py.typed", "__init__.pyi"],
"tokenizers.models": ["py.typed", "__init__.pyi"],
"tokenizers.decoders": ["py.typed", "__init__.pyi"],
"tokenizers.normalizers": ["py.typed", "__init__.pyi"],
"tokenizers.pre_tokenizers": ["py.typed", "__init__.pyi"],
"tokenizers.processors": ["py.typed", "__init__.pyi"],
"tokenizers.trainers": ["py.typed", "__init__.pyi"],
"tokenizers.implementations": ["py.typed"],
"tokenizers.tools": ["py.typed", "visualizer-styles.css"],
},
zip_safe=False,
)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/stub.py | import argparse
import inspect
import os
from pathlib import Path
import black
INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"
def do_indent(text: str, indent: str):
return text.replace("\n", f"\n{indent}")
def function(obj, indent, text_signature=None):
if text_signature is None:
text_signature = obj.__text_signature__
string = ""
string += f"{indent}def {obj.__name__}{text_signature}:\n"
indent += INDENT
string += f'{indent}"""\n'
string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
string += f'{indent}"""\n'
string += f"{indent}pass\n"
string += "\n"
string += "\n"
return string
def member_sort(member):
if inspect.isclass(member):
value = 10 + len(inspect.getmro(member))
else:
value = 1
return value
def fn_predicate(obj):
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
if value:
return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
if inspect.isgetsetdescriptor(obj):
return obj.__doc__ and not obj.__name__.startswith("_")
return False
def get_module_members(module):
members = [
member
for name, member in inspect.getmembers(module)
if not name.startswith("_") and not inspect.ismodule(member)
]
members.sort(key=member_sort)
return members
def pyi_file(obj, indent=""):
string = ""
if inspect.ismodule(obj):
string += GENERATED_COMMENT
members = get_module_members(obj)
for member in members:
string += pyi_file(member, indent)
elif inspect.isclass(obj):
indent += INDENT
mro = inspect.getmro(obj)
if len(mro) > 2:
inherit = f"({mro[1].__name__})"
else:
inherit = ""
string += f"class {obj.__name__}{inherit}:\n"
body = ""
if obj.__doc__:
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
fns = inspect.getmembers(obj, fn_predicate)
# Init
if obj.__text_signature__:
body += f"{indent}def __init__{obj.__text_signature__}:\n"
body += f"{indent+INDENT}pass\n"
body += "\n"
for (name, fn) in fns:
body += pyi_file(fn, indent=indent)
if not body:
body += f"{indent}pass\n"
string += body
string += "\n\n"
elif inspect.isbuiltin(obj):
string += f"{indent}@staticmethod\n"
string += function(obj, indent)
elif inspect.ismethoddescriptor(obj):
string += function(obj, indent)
elif inspect.isgetsetdescriptor(obj):
# TODO it would be interesting to add the setter maybe?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")
else:
raise Exception(f"Object {obj} is not supported")
return string
def py_file(module, origin):
members = get_module_members(module)
string = GENERATED_COMMENT
string += f"from .. import {origin}\n"
string += "\n"
for member in members:
name = member.__name__
string += f"{name} = {origin}.{name}\n"
return string
def do_black(content, is_pyi):
mode = black.Mode(
target_versions={black.TargetVersion.PY35},
line_length=119,
is_pyi=is_pyi,
string_normalization=True,
experimental_string_processing=False,
)
try:
return black.format_file_contents(content, fast=True, mode=mode)
except black.NothingChanged:
return content
def write(module, directory, origin, check=False):
submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]
filename = os.path.join(directory, "__init__.pyi")
pyi_content = pyi_file(module)
pyi_content = do_black(pyi_content, is_pyi=True)
os.makedirs(directory, exist_ok=True)
if check:
with open(filename, "r") as f:
data = f.read()
assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(pyi_content)
filename = os.path.join(directory, "__init__.py")
py_content = py_file(module, origin)
py_content = do_black(py_content, is_pyi=False)
os.makedirs(directory, exist_ok=True)
is_auto = False
if not os.path.exists(filename):
is_auto = True
else:
with open(filename, "r") as f:
line = f.readline()
if line == GENERATED_COMMENT:
is_auto = True
if is_auto:
if check:
with open(filename, "r") as f:
data = f.read()
assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(py_content)
for name, submodule in submodules:
write(submodule, os.path.join(directory, name), f"{name}", check=check)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
import tokenizers
write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/test.txt | <DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/tokenizers_manual_m1_build.sh | #! /bin/bash
for VARIABLE in "3.7.12" "3.8.12" "3.9.10" "3.10.2"
do
MACOSX_DEPLOYMENT_TARGET=10.11 SDKROOT="/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk" CFLAGS="-I/usr/include/openssl -I/usr/local/opt/readline/include -I/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include" CPPFLAGS="-I/usr/local/opt/zlib/include" LDFLAGS="-L/usr/lib -L/usr/local/opt/readline/lib" pyenv install $VARIABLE
~/.pyenv/versions/$VARIABLE/bin/pip install setuptools wheel setuptools-rust==0.11.3 --ignore-installed --force-reinstall
MACOSX_DEPLOYMENT_TARGET=10.11 ~/.pyenv/versions/$VARIABLE/bin/python setup.py bdist_wheel
done
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/.cargo/config.toml | [target.x86_64-apple-darwin]
rustflags = [
"-C", "link-arg=-undefined",
"-C", "link-arg=dynamic_lookup",
"-C", "link-arg=-mmacosx-version-min=10.11",
]
[target.aarch64-apple-darwin]
rustflags = [
"-C", "link-arg=-undefined",
"-C", "link-arg=dynamic_lookup",
"-C", "link-arg=-mmacosx-version-min=10.11",
]
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/custom_components.py | from typing import List
import jieba
from tokenizers import NormalizedString, PreTokenizedString, Regex, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import BPE
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
class JiebaPreTokenizer:
def jieba_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
splits = []
# we need to call `str(normalized_string)` because jieba expects a str,
# not a NormalizedString
for token, start, stop in jieba.tokenize(str(normalized_string)):
splits.append(normalized_string[start:stop])
return splits
# We can also easily do it in one line:
# return [normalized_string[w[1] : w[2]] for w in jieba.tokenize(str(normalized_string))]
def odd_number_split(self, i: int, normalized_string: NormalizedString) -> List[NormalizedString]:
# Just an odd example...
splits = []
last = 0
for i, char in enumerate(str(normalized_string)):
if char.isnumeric() and int(char) % 2 == 1:
splits.append(normalized_string[last:i])
last = i
# Don't forget the last one
splits.append(normalized_string[last:])
return splits
def pre_tokenize(self, pretok: PreTokenizedString):
# Let's call split on the PreTokenizedString to split using `self.jieba_split`
pretok.split(self.jieba_split)
# Here we can call `pretok.split` multiple times if we want to apply
# different algorithms, but we generally just need to call it once.
pretok.split(self.odd_number_split)
class CustomDecoder:
def decode(self, tokens: List[str]) -> str:
return "".join(tokens)
class CustomNormalizer:
def normalize(self, normalized: NormalizedString):
# Most of these can be replaced by a `Sequence` combining some provided Normalizer,
# (ie Sequence([ NFKC(), Replace(Regex("\s+"), " "), Lowercase() ])
# and it should be the preferred way. That being said, here is an example of the kind
# of things that can be done here:
normalized.nfkc()
normalized.filter(lambda char: not char.isnumeric())
normalized.replace(Regex("\s+"), " ")
normalized.lowercase()
# This section shows how to attach these custom components to the Tokenizer
tok = Tokenizer(BPE())
tok.normalizer = Normalizer.custom(CustomNormalizer())
tok.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer())
tok.decoder = Decoder.custom(CustomDecoder())
input = "永和服装饰品有限公司"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('永和', (0, 2)), ('服装', (2, 4)), ('饰品', (4, 6)), ('有限公司', (6, 10))]
input = "112233"
print("PreTokenize:", input)
print(tok.pre_tokenizer.pre_tokenize_str(input))
# [('1', (0, 1)), ('122', (1, 4)), ('3', (4, 5)), ('3', (5, 6))]
input = "1234 ℌ𝔢𝔩𝔩𝔬 𝔱𝔥𝔢𝔯𝔢 𝓂𝓎 𝒹ℯ𝒶𝓇 𝕕𝕖𝕒𝕣 𝕗𝕣𝕚𝕖𝕟𝕕!"
print("Normalize:", input)
print(tok.normalizer.normalize_str(input))
# " hello there my dear dear friend!"
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/example.py | import argparse
import logging
import time
from tqdm import tqdm
logging.getLogger("transformers").disabled = True
logging.getLogger("transformers.tokenization_utils").disabled = True
from tokenizers import Tokenizer, decoders, pre_tokenizers
from tokenizers.models import BPE, WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.processors import BertProcessing
from transformers import BertTokenizer, GPT2Tokenizer
parser = argparse.ArgumentParser()
parser.add_argument("--type", default="gpt2", type=str, help="The type of tokenizer (bert|gpt2)")
parser.add_argument("--file", default=None, type=str, help="The file to encode")
parser.add_argument("--vocab", default=None, type=str, required=True, help="The vocab file")
parser.add_argument("--merges", default=None, type=str, help="The merges.txt file")
parser.add_argument("--debug", action="store_true", help="Verbose output")
args = parser.parse_args()
if args.type == "gpt2" and args.merges is None:
raise Exception("Expected merges.txt file")
if args.file is not None:
with open(args.file, "r") as fp:
text = [line.strip() for line in fp]
else:
text = """
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
""".split(
"\n"
)
if args.type == "gpt2":
print("Running GPT-2 tokenizer")
tok_p = GPT2Tokenizer.from_pretrained("gpt2")
# Create a Tokenizer using BPE
tok_r = Tokenizer(BPE(args.vocab, args.merges))
# Use ByteLevel PreTokenizer
tok_r.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
# Use ByteLevel Decoder
tok_r.decoder = decoders.ByteLevel()
elif args.type == "bert":
print("Running Bert tokenizer")
tok_p = BertTokenizer.from_pretrained(args.vocab)
tok_r = Tokenizer(WordPiece(args.vocab, unk_token="[UNK]", max_input_chars_per_word=100))
tok_r.normalizer = BertNormalizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# tok_r.pre_tokenizer = pre_tokenizers.Whitespace()
tok_r.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tok_r.decoder = decoders.WordPiece()
tok_r.post_processor = BertProcessing(
("[SEP]", tok_r.token_to_id("[SEP]")),
("[CLS]", tok_r.token_to_id("[CLS]")),
)
else:
raise Exception(f"Unknown type {args.type}")
def tokenize_r():
return tok_r.encode_batch(text)
def tokenize_p():
return [tok_p.encode(sentence, add_special_tokens=True) for sentence in tqdm(text)]
print(f"Tokenizing {len(text)} lines")
# Rust version
start = time.time()
encoded_r = tokenize_r()
end = time.time()
time_r = end - start
print(f"Rust tokenizer took: {time_r} sec")
# Python version
start = time.time()
encoded_p = tokenize_p()
end = time.time()
time_p = end - start
print(f"Transformer tokenizer took: {time_p} sec")
print(f"SpeedUp Ratio: {time_p / time_r}")
ids_r = [sentence.ids for sentence in encoded_r]
diff_ids = 0
for i in range(0, len(encoded_r)):
if encoded_r[i].ids != encoded_p[i]:
diff_ids += 1
if args.debug:
print(encoded_r[i].ids)
print(encoded_p[i])
print(encoded_r[i].tokens)
print(tok_p.tokenize(text[i]))
print(text[i])
print("")
print(f"Ids differences: {diff_ids}")
decoded_r = tok_r.decode_batch([sentence.ids for sentence in encoded_r], False)
decoded_p = [tok_p.decode(en) for en in encoded_p]
diff_decoded = 0
for i in range(0, len(text)):
if decoded_r[i] != decoded_p[i]:
diff_decoded += 1
if args.debug:
print(f"Original: {text[i]}")
print(f"Rust: {decoded_r[i]}")
print(f"Python: {decoded_p[i]}")
print("")
print(f"Decoding differences: {diff_decoded}")
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/train_bert_wordpiece.py | import argparse
import glob
from tokenizers import BertWordPieceTokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
help="The files to use as training; accept '**/*.txt' type of patterns \
if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"File does not exist: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = BertWordPieceTokenizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"],
limit_alphabet=1000,
wordpieces_prefix="##",
)
# Save the files
tokenizer.save_model(args.out, args.name)
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/train_bytelevel_bpe.py | import argparse
import glob
from os.path import join
from tokenizers import ByteLevelBPETokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
help="The files to use as training; accept '**/*.txt' type of patterns \
if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bpe-bytelevel", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"File does not exist: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["<s>", "<pad>", "</s>"],
)
# Save the files
tokenizer.save_model(args.out, args.name)
# Restoring model from learned vocab/merges
tokenizer = ByteLevelBPETokenizer(
join(args.out, "{}-vocab.json".format(args.name)),
join(args.out, "{}-merges.txt".format(args.name)),
add_prefix_space=True,
)
# Test encoding
print(tokenizer.encode("Training ByteLevel BPE is very easy").tokens)
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/train_with_datasets.py | import datasets
from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, trainers
# Build a tokenizer
bpe_tokenizer = Tokenizer(models.BPE())
bpe_tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
bpe_tokenizer.normalizer = normalizers.Lowercase()
# Initialize a dataset
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train")
# Build an iterator over this dataset
def batch_iterator():
batch_size = 1000
for batch in dataset.iter(batch_size=batch_size):
yield batch["text"]
# And finally train
bpe_tokenizer.train_from_iterator(batch_iterator(), length=len(dataset))
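# Optionally, persist the trained tokenizer to a single JSON file
# (the file name below is only an example)
# bpe_tokenizer.save("bpe-wikitext.tokenizer.json")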
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/examples/using_the_visualizer.ipynb | from tokenizers import BertWordPieceTokenizer
from tokenizers.tools import EncodingVisualizer
EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa")
text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it."""
tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True)
visualizer = EncodingVisualizer(tokenizer=tokenizer)
visualizer(text)
from tokenizers.tools import Annotation
anno1 = Annotation(start=0, end=2, label="foo")
anno2 = Annotation(start=2, end=4, label="bar")
anno3 = Annotation(start=6, end=8, label="poo")
anno4 = Annotation(start=9, end=12, label="shoe")
annotations=[
anno1,
anno2,
anno3,
anno4,
Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"),
Annotation(start=63, end=70, label="foo"),
Annotation(start=80, end=95, label="bar"),
Annotation(start=120, end=128, label="bar"),
Annotation(start=152, end=155, label="poo"),
]
visualizer(text,annotations=annotations)
funnyAnnotations = [dict(startPlace=i,endPlace=i+3,theTag=str(i)) for i in range(0,20,4)]
funnyAnnotations
converter = lambda funny: Annotation(start=funny['startPlace'], end=funny['endPlace'], label=funny['theTag'])
visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter)
visualizer(text, annotations=funnyAnnotations)
from tokenizers import ByteLevelBPETokenizer
roberta_tokenizer = ByteLevelBPETokenizer.from_file('/tmp/roberta-base-vocab.json', '/tmp/roberta-base-merges.txt')
roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True)
roberta_visualizer(text, annotations=annotations) | 0 |
hf_public_repos/tokenizers/bindings/python/py_src | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.py | __version__ = "0.13.3"
from enum import Enum
from typing import List, Tuple, Union
Offsets = Tuple[int, int]
TextInputSequence = str
"""A :obj:`str` that represents an input sequence """
PreTokenizedInputSequence = Union[List[str], Tuple[str]]
"""A pre-tokenized input sequence. Can be one of:
- A :obj:`List` of :obj:`str`
- A :obj:`Tuple` of :obj:`str`
"""
TextEncodeInput = Union[
TextInputSequence,
Tuple[TextInputSequence, TextInputSequence],
List[TextInputSequence],
]
"""Represents a textual input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.TextInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
"""
PreTokenizedEncodeInput = Union[
PreTokenizedInputSequence,
Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
List[PreTokenizedInputSequence],
]
"""Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
"""
InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
"""Represents all the possible types of input sequences for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextInputSequence`
- When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
"""
EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
"""Represents all the possible types of input for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextEncodeInput`
- When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
"""
class OffsetReferential(Enum):
ORIGINAL = "original"
NORMALIZED = "normalized"
class OffsetType(Enum):
BYTE = "byte"
CHAR = "char"
class SplitDelimiterBehavior(Enum):
REMOVED = "removed"
ISOLATED = "isolated"
MERGED_WITH_PREVIOUS = "merged_with_previous"
MERGED_WITH_NEXT = "merged_with_next"
CONTIGUOUS = "contiguous"
from .tokenizers import (
AddedToken,
Encoding,
NormalizedString,
PreTokenizedString,
Regex,
Token,
Tokenizer,
decoders,
models,
normalizers,
pre_tokenizers,
processors,
trainers,
)
from .implementations import (
BertWordPieceTokenizer,
ByteLevelBPETokenizer,
CharBPETokenizer,
SentencePieceBPETokenizer,
SentencePieceUnigramTokenizer,
)
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.pyi | # Generated content DO NOT EDIT
class AddedToken:
"""
Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
It can have special options that defines the way it should behave.
Args:
content (:obj:`str`): The content of the token
single_word (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should only match single words. If :obj:`True`, this
token will never match inside of a word. For example the token ``ing`` would match
on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
The notion of "`inside of a word`" is defined by the word boundaries pattern in
regular expressions (ie. the token should start and end with word boundaries).
lstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its left side.
If :obj:`True`, this token will greedily match any whitespace on its left. For
example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
rstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its right
side. If :obj:`True`, this token will greedily match any whitespace on its right.
It works just like :obj:`lstrip` but on the right.
normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should match against the normalized version of the input
text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
lowercasing the text, the token could be extracted from the input ``"I saw a lion
Yesterday"``.
"""
def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True):
pass
@property
def content(self):
"""
Get the content of this :obj:`AddedToken`
"""
pass
@property
def lstrip(self):
"""
Get the value of the :obj:`lstrip` option
"""
pass
@property
def normalized(self):
"""
Get the value of the :obj:`normalized` option
"""
pass
@property
def rstrip(self):
"""
Get the value of the :obj:`rstrip` option
"""
pass
@property
def single_word(self):
"""
Get the value of the :obj:`single_word` option
"""
pass
class Encoding:
"""
The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
"""
@property
def attention_mask(self):
"""
The attention mask
This indicates to the LM which tokens should be attended to, and which should not.
This is especially important when batching sequences, where we need to apply
padding.
Returns:
:obj:`List[int]`: The attention mask
"""
pass
def char_to_token(self, char_pos, sequence_index=0):
"""
Get the token that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the token that contains this char in the encoded sequence
"""
pass
def char_to_word(self, char_pos, sequence_index=0):
"""
Get the word that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the word that contains this char in the input sequence
"""
pass
@property
def ids(self):
"""
The generated IDs
The IDs are the main input to a Language Model. They are the token indices,
the numerical representations that a LM understands.
Returns:
:obj:`List[int]`: The list of IDs
"""
pass
@staticmethod
def merge(encodings, growing_offsets=True):
"""
Merge the list of encodings into one final :class:`~tokenizers.Encoding`
Args:
encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
The list of encodings that should be merged in one
growing_offsets (:obj:`bool`, defaults to :obj:`True`):
Whether the offsets should accumulate while merging
Returns:
:class:`~tokenizers.Encoding`: The resulting Encoding
"""
pass
@property
def n_sequences(self):
"""
The number of sequences represented
Returns:
:obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
"""
pass
@property
def offsets(self):
"""
The offsets associated to each token
These offsets let you slice the input string, and thus retrieve the original
part that led to producing the corresponding token.
Returns:
A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
"""
pass
@property
def overflowing(self):
"""
A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
the output into as many pieces as required to match the specified maximum length.
This field lets you retrieve all the subsequent pieces.
When you use pairs of sequences, the overflowing pieces will contain enough
variations to cover all the possible combinations, while respecting the provided
maximum length.
"""
pass
def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
"""
Pad the :class:`~tokenizers.Encoding` at the given length
Args:
length (:obj:`int`):
The desired length
direction: (:obj:`str`, defaults to :obj:`right`):
The expected padding direction. Can be either :obj:`right` or :obj:`left`
pad_id (:obj:`int`, defaults to :obj:`0`):
The ID corresponding to the padding token
pad_type_id (:obj:`int`, defaults to :obj:`0`):
The type ID corresponding to the padding token
pad_token (:obj:`str`, defaults to `[PAD]`):
The pad token to use
"""
pass
@property
def sequence_ids(self):
"""
The generated sequence indices.
They represent the index of the input sequence associated to each token.
The sequence id can be None if the token is not related to any input sequence,
like for example with special tokens.
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
"""
pass
def set_sequence_id(self, sequence_id):
"""
Set the given sequence index
Set the given sequence index for the whole range of tokens contained in this
:class:`~tokenizers.Encoding`.
"""
pass
@property
def special_tokens_mask(self):
"""
The special token mask
This indicates which tokens are special tokens, and which are not.
Returns:
:obj:`List[int]`: The special tokens mask
"""
pass
def token_to_chars(self, token_index):
"""
Get the offsets of the token at the given index.
The returned offsets are related to the input sequence that contains the
token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
"""
pass
def token_to_sequence(self, token_index):
"""
Get the index of the sequence represented by the given token.
In the general use case, this method returns :obj:`0` for a single sequence or
the first sequence of a pair, and :obj:`1` for the second sequence of a pair
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The sequence id of the given token
"""
pass
def token_to_word(self, token_index):
"""
Get the index of the word that contains the token in one of the input sequences.
The returned word index is related to the input sequence that contains
the token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The index of the word in the relevant input sequence.
"""
pass
@property
def tokens(self):
"""
The generated tokens
They are the string representation of the IDs.
Returns:
:obj:`List[str]`: The list of tokens
"""
pass
def truncate(self, max_length, stride=0, direction="right"):
"""
Truncate the :class:`~tokenizers.Encoding` at the given length
If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
this information is lost. It will be considered as representing a single sequence.
Args:
max_length (:obj:`int`):
The desired length
stride (:obj:`int`, defaults to :obj:`0`):
The length of previous content to be included in each overflowing piece
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
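Example:
An illustrative sketch (the values are arbitrary)::
encoding.truncate(128, stride=16, direction="right")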
"""
pass
@property
def type_ids(self):
"""
The generated type IDs
Generally used for tasks like sequence classification or question answering,
these tokens let the LM know which input sequence corresponds to each token.
Returns:
:obj:`List[int]`: The list of type ids
"""
pass
@property
def word_ids(self):
"""
The generated word indices.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the word indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
def word_to_chars(self, word_index, sequence_index=0):
"""
Get the offsets of the word at the given index in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
"""
pass
def word_to_tokens(self, word_index, sequence_index=0):
"""
Get the encoded tokens corresponding to the word at the given index
in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
"""
pass
@property
def words(self):
"""
The generated word indices.
.. warning::
This is deprecated and will be removed in a future version.
Please use :obj:`~tokenizers.Encoding.word_ids` instead.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the word indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
class NormalizedString:
"""
NormalizedString
A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
While making all the requested modifications, it keeps track of the alignment information
between the two versions of the string.
Args:
sequence: str:
The string sequence used to initialize this NormalizedString
"""
def append(self, s):
"""
Append the given sequence to the string
"""
pass
def clear(self):
"""
Clears the string
"""
pass
def filter(self, func):
"""
Filter each character of the string using the given func
"""
pass
def for_each(self, func):
"""
Calls the given function for each character of the string
"""
pass
def lowercase(self):
"""
Lowercase the string
"""
pass
def lstrip(self):
"""
Strip the left of the string
"""
pass
def map(self, func):
"""
Calls the given function for each character of the string
Replaces each character of the string using the returned value. Each
returned value **must** be a str of length 1 (i.e. a character).
"""
pass
def nfc(self):
"""
Runs the NFC normalization
"""
pass
def nfd(self):
"""
Runs the NFD normalization
"""
pass
def nfkc(self):
"""
Runs the NFKC normalization
"""
pass
def nfkd(self):
"""
Runs the NFKD normalization
"""
pass
@property
def normalized(self):
"""
The normalized part of the string
"""
pass
def prepend(self, s):
"""
Prepend the given sequence to the string
"""
pass
def replace(self, pattern, content):
"""
Replace the content of the given pattern with the provided content
Args:
pattern: Pattern:
A pattern used to match the string. Usually a string or a Regex
content: str:
The content to be used as replacement
"""
pass
def rstrip(self):
"""
Strip the right of the string
"""
pass
def slice(self, range):
"""
Slice the string using the given range
"""
pass
def split(self, pattern, behavior):
"""
Split the NormalizedString using the given pattern and the specified behavior
Args:
pattern: Pattern:
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
behavior: SplitDelimiterBehavior:
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
Returns:
A list of NormalizedString, representing each split
"""
pass
def strip(self):
"""
Strip both ends of the string
"""
pass
def uppercase(self):
"""
Uppercase the string
"""
pass
class PreTokenizedString:
"""
PreTokenizedString
Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
underlying string, while keeping track of the alignment information (offsets).
The PreTokenizedString manages what we call `splits`. Each split represents a substring
which is a subpart of the original string, with the relevant offsets and tokens.
When calling one of the methods used to modify the PreTokenizedString (namely one of
`split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
tokens will get modified.
Args:
sequence: str:
The string sequence used to initialize this PreTokenizedString
"""
def __init__(self, sequence):
pass
def get_splits(self, offset_referential="original", offset_type="char"):
"""
Get the splits currently managed by the PreTokenizedString
Args:
offset_referential: :obj:`str`
Whether the returned splits should have offsets expressed relative
to the original string, or the normalized one. choices: "original", "normalized".
offset_type: :obj:`str`
Whether the returned splits should have offsets expressed in bytes or chars.
When slicing a str, we usually want to use chars, which is the default value.
Now in some cases it might be interesting to get these offsets expressed in bytes,
so it is possible to change this here.
choices: "char", "bytes"
Returns:
A list of splits
"""
pass
def normalize(self, func):
"""
Normalize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[NormalizedString], None]:
The function used to normalize each underlying split. This function
does not need to return anything; just calling the methods on the provided
NormalizedString allows its modification.
"""
pass
def split(self, func):
"""
Split the PreTokenizedString using the given `func`
Args:
func: Callable[[index, NormalizedString], List[NormalizedString]]:
The function used to split each underlying split.
It is expected to return a list of `NormalizedString`, that represent the new
splits. If the given `NormalizedString` does not need any splitting, we can
just return it directly.
In order for the offsets to be tracked accurately, any returned `NormalizedString`
should come from calling either `.split` or `.slice` on the received one.
"""
pass
def to_encoding(self, type_id=0, word_idx=None):
"""
Return an Encoding generated from this PreTokenizedString
Args:
type_id: int = 0:
The type_id to be used on the generated Encoding.
word_idx: Optional[int] = None:
An optional word index to be used for each token of this Encoding. If provided,
all the word indices in the generated Encoding will use this value, instead
of the one automatically tracked during pre-tokenization.
Returns:
An Encoding
"""
pass
def tokenize(self, func):
"""
Tokenize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[str], List[Token]]:
The function used to tokenize each underlying split. This function must return
a list of Token generated from the input str.
"""
pass
class Regex:
"""
Instantiate a new Regex with the given pattern
"""
def __init__(self, pattern):
pass
class Token:
pass
class Tokenizer:
"""
A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
and outputs an :class:`~tokenizers.Encoding`.
Args:
model (:class:`~tokenizers.models.Model`):
The core algorithm that this :obj:`Tokenizer` should be using.
"""
def __init__(self, model):
pass
def add_special_tokens(self, tokens):
"""
Add the given special tokens to the Tokenizer.
If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
them. If they don't exist, the Tokenizer creates them, giving them a new id.
These special tokens will never be processed by the model (i.e. won't be split into
multiple tokens), and they can be removed from the output when decoding.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of special tokens we want to add to the vocabulary. Each token can either
be a string or an instance of :class:`~tokenizers.AddedToken` for more
customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def add_tokens(self, tokens):
"""
Add the given tokens to the vocabulary
The given tokens are added only if they don't already exist in the vocabulary.
Each token is then attributed a new id.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of tokens we want to add to the vocabulary. Each token can be either a
string or an instance of :class:`~tokenizers.AddedToken` for more customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def decode(self, ids, skip_special_tokens=True):
"""
Decode the given list of ids back to a string
This is used to decode anything coming back from a Language Model
Args:
ids (A :obj:`List/Tuple` of :obj:`int`):
The list of ids that we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded string
Returns:
:obj:`str`: The decoded string
"""
pass
def decode_batch(self, sequences, skip_special_tokens=True):
"""
Decode a batch of ids back to their corresponding string
Args:
sequences (:obj:`List` of :obj:`List[int]`):
The batch of sequences we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded strings
Returns:
:obj:`List[str]`: A list of decoded strings
"""
pass
@property
def decoder(self):
"""
The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
"""
pass
def enable_padding(
self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
):
"""
Enable the padding
Args:
direction (:obj:`str`, `optional`, defaults to :obj:`right`):
The direction in which to pad. Can be either ``right`` or ``left``
pad_to_multiple_of (:obj:`int`, `optional`):
If specified, the padding length should always snap to the next multiple of the
given value. For example, if we were going to pad with a length of 250 but
``pad_to_multiple_of=8`` then we will pad to 256.
pad_id (:obj:`int`, defaults to 0):
The id to be used when padding
pad_type_id (:obj:`int`, defaults to 0):
The type id to be used when padding
pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
The pad token to be used when padding
length (:obj:`int`, `optional`):
If specified, the length at which to pad. If not specified we pad using the size of
the longest sequence in a batch.
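Example:
An illustrative sketch (token and length values are arbitrary)::
tokenizer.enable_padding(pad_id=0, pad_token="[PAD]", length=512, pad_to_multiple_of=8)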
"""
pass
def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
"""
Enable truncation
Args:
max_length (:obj:`int`):
The max length at which to truncate
stride (:obj:`int`, `optional`):
The length of the previous first sequence to be included in the overflowing
sequence
strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
The strategy to use for truncation. Can be one of ``longest_first``, ``only_first`` or
``only_second``.
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
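Example:
An illustrative sketch (the values are arbitrary)::
tokenizer.enable_truncation(512, stride=32, strategy="longest_first")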
"""
pass
def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given sequence and pair. This method can process raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
encode("A single sequence")`
encode("A sequence", "And its pair")`
encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
encode(
[ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
is_pretokenized=True
)
Args:
sequence (:obj:`~tokenizers.InputSequence`):
The main input sequence we want to encode. This sequence can be either raw
text or pre-tokenized, according to the ``is_pretokenized`` argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
pair (:obj:`~tokenizers.InputSequence`, `optional`):
An optional input sequence. The expected format is the same as for ``sequence``.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The encoded result
"""
pass
def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given batch of inputs. This method accepts raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
encode_batch([
"A single sequence",
("A tuple with a sequence", "And its pair"),
[ "A", "pre", "tokenized", "sequence" ],
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
])
Args:
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
A list of single sequences or pair sequences to encode. Each sequence
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
"""
pass
@staticmethod
def from_buffer(buffer):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
Args:
buffer (:obj:`bytes`):
A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_file(path):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
Args:
path (:obj:`str`):
A path to a local JSON file representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_pretrained(identifier, revision="main", auth_token=None):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
Hugging Face Hub.
Args:
identifier (:obj:`str`):
The identifier of a Model on the Hugging Face Hub, that contains
a tokenizer.json file
revision (:obj:`str`, defaults to :obj:`main`):
A branch or commit id
auth_token (:obj:`str`, `optional`, defaults to :obj:`None`):
An optional auth token used to access private repositories on the
Hugging Face Hub
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
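Example:
An illustrative sketch; the identifier below is just an example of a Hub repository that hosts a tokenizer.json file::
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")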
"""
pass
@staticmethod
def from_str(json):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
Args:
json (:obj:`str`):
A valid JSON string representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
def get_vocab(self, with_added_tokens=True):
"""
Get the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`Dict[str, int]`: The vocabulary
"""
pass
def get_vocab_size(self, with_added_tokens=True):
"""
Get the size of the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`int`: The size of the vocabulary
"""
pass
def id_to_token(self, id):
"""
Convert the given id to its corresponding token if it exists
Args:
id (:obj:`int`):
The id to convert
Returns:
:obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
"""
pass
@property
def model(self):
"""
The :class:`~tokenizers.models.Model` in use by the Tokenizer
"""
pass
def no_padding(self):
"""
Disable padding
"""
pass
def no_truncation(self):
"""
Disable truncation
"""
pass
@property
def normalizer(self):
"""
The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
"""
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of special tokens that would be added
"""
pass
@property
def padding(self):
"""
Get the current padding parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current padding parameters if padding is enabled
"""
pass
def post_process(self, encoding, pair=None, add_special_tokens=True):
"""
Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to the set truncation params (provided with
:meth:`~tokenizers.Tokenizer.enable_truncation`)
2. Apply the :class:`~tokenizers.processors.PostProcessor`
3. Pad according to the set padding params (provided with
:meth:`~tokenizers.Tokenizer.enable_padding`)
Args:
encoding (:class:`~tokenizers.Encoding`):
The :class:`~tokenizers.Encoding` corresponding to the main sequence.
pair (:class:`~tokenizers.Encoding`, `optional`):
An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The final post-processed encoding
"""
pass
@property
def post_processor(self):
"""
The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
"""
pass
@property
def pre_tokenizer(self):
"""
The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
"""
pass
def save(self, path, pretty=True):
"""
Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
Args:
path (:obj:`str`):
A path to a file in which to save the serialized tokenizer.
pretty (:obj:`bool`, defaults to :obj:`True`):
Whether the JSON file should be pretty formatted.
"""
pass
def to_str(self, pretty=False):
"""
Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
Args:
pretty (:obj:`bool`, defaults to :obj:`False`):
Whether the JSON string should be pretty formatted.
Returns:
:obj:`str`: A string representing the serialized Tokenizer
"""
pass
def token_to_id(self, token):
"""
Convert the given token to its corresponding id if it exists
Args:
token (:obj:`str`):
The token to convert
Returns:
:obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
"""
pass
def train(self, files, trainer=None):
"""
Train the Tokenizer using the given files.
Reads the files line by line, while keeping all the whitespace, even new lines.
If you want to train from data stored in memory, you can check
:meth:`~tokenizers.Tokenizer.train_from_iterator`
Args:
files (:obj:`List[str]`):
A list of path to the files that we should use for training
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
"""
pass
def train_from_iterator(self, iterator, trainer=None, length=None):
"""
Train the Tokenizer using the provided iterator.
You can provide anything that is a Python Iterator
* A list of sequences :obj:`List[str]`
* A generator that yields :obj:`str` or :obj:`List[str]`
* A Numpy array of strings
* ...
Args:
iterator (:obj:`Iterator`):
Any iterator over strings or list of strings
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
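Example:
An illustrative sketch, assuming ``trainer`` is an already configured trainer (the data is made up)::
data = ["A first sequence", "A second sequence", "And one more"]
tokenizer.train_from_iterator(data, trainer=trainer, length=len(data))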
"""
pass
@property
def truncation(self):
"""
Get the currently set truncation parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current truncation parameters if truncation is enabled
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py | from .. import decoders
Decoder = decoders.Decoder
ByteLevel = decoders.ByteLevel
Replace = decoders.Replace
WordPiece = decoders.WordPiece
ByteFallback = decoders.ByteFallback
Fuse = decoders.Fuse
Strip = decoders.Strip
Metaspace = decoders.Metaspace
BPEDecoder = decoders.BPEDecoder
CTC = decoders.CTC
Sequence = decoders.Sequence
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi | # Generated content DO NOT EDIT
class Decoder:
"""
Base class for all decoders
This class is not supposed to be instantiated directly. Instead, any implementation of
a Decoder will return an instance of this class when instantiated.
"""
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class BPEDecoder(Decoder):
"""
BPEDecoder Decoder
Args:
suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
The suffix that was used to characterize an end-of-word. This suffix will
be replaced by whitespaces during the decoding
"""
def __init__(self, suffix="</w>"):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class ByteFallback(Decoder):
"""
ByteFallback Decoder
ByteFallback is a simple trick which converts tokens looking like `<0x61>`
to pure bytes, and attempts to make them into a string. If the tokens
cannot be decoded, you will get � instead for each inconvertible byte token.
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class ByteLevel(Decoder):
"""
ByteLevel Decoder
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class CTC(Decoder):
"""
CTC Decoder
Args:
pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
The pad token used by CTC to delimit a new token.
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
The word delimiter token. It will be replaced by a <space>
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to cleanup some tokenization artifacts.
Mainly spaces before punctuation, and some abbreviated English forms.
"""
def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Fuse(Decoder):
"""
Fuse Decoder
Fuse simply fuses every token into a single string.
This is the last step of decoding; this decoder exists only if
there is a need to add other decoders *after* the fusion.
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Metaspace(Decoder):
"""
Metaspace Decoder
Args:
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
The replacement character. Must be exactly one character. By default we
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
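Example:
An illustrative sketch (the tokens are made up)::
Metaspace(replacement="▁").decode(["▁Hello", "▁my", "▁friend"])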
"""
def __init__(self, replacement="▁", add_prefix_space=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Replace(Decoder):
"""
Replace Decoder
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
"""
def __init__(self, pattern, content):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Sequence(Decoder):
"""
Sequence Decoder
Args:
decoders (:obj:`List[Decoder]`):
The decoders that need to be chained
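Example:
An illustrative sketch chaining two decoders (the particular choice is arbitrary)::
Sequence([Replace("_", " "), Fuse()])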
"""
def __init__(self, decoders):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Strip(Decoder):
"""
Strip Decoder
Strips n left characters of each token, or n right characters of each token.
"""
def __init__(self, content, left=0, right=0):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class WordPiece(Decoder):
"""
WordPiece Decoder
Args:
prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
The prefix to use for subwords that are not a beginning-of-word
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to cleanup some tokenization artifacts. Mainly spaces before punctuation,
and some abbreviated English forms.
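Example:
An illustrative sketch; the subwords below should be reassembled into ``tokenizer``::
WordPiece(prefix="##").decode(["tok", "##eni", "##zer"])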
"""
def __init__(self, prefix="##", cleanup=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py | from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py | from typing import Dict, List, Optional, Tuple, Union
from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import Model
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
from tokenizers.processors import PostProcessor
Offsets = Tuple[int, int]
class BaseTokenizer:
def __init__(self, tokenizer: Tokenizer, parameters=None):
self._tokenizer = tokenizer
self._parameters = parameters if parameters is not None else {}
def __repr__(self):
return "Tokenizer(vocabulary_size={}, {})".format(
self._tokenizer.get_vocab_size(),
", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
)
def num_special_tokens_to_add(self, is_pair: bool) -> int:
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair: bool:
Whether the input would be a pair of sequences
Returns:
The number of special tokens that would be added
"""
return self._tokenizer.num_special_tokens_to_add(is_pair)
def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
"""Returns the vocabulary
Args:
with_added_tokens: boolean:
Whether to include the added tokens in the vocabulary
Returns:
The vocabulary
"""
return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
def get_vocab_size(self, with_added_tokens: bool = True) -> int:
"""Return the size of vocabulary, with or without added tokens.
Args:
with_added_tokens: (`optional`) bool:
Whether to count in added special tokens or not
Returns:
Size of vocabulary
"""
return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
def enable_padding(
self,
direction: Optional[str] = "right",
pad_to_multiple_of: Optional[int] = None,
pad_id: Optional[int] = 0,
pad_type_id: Optional[int] = 0,
pad_token: Optional[str] = "[PAD]",
length: Optional[int] = None,
):
"""Change the padding strategy
Args:
direction: (`optional`) str:
Can be one of: `right` or `left`
pad_to_multiple_of: (`optional`) unsigned int:
If specified, the padding length should always snap to the next multiple of
the given value. For example if we were going to pad with a length of 250 but
`pad_to_multiple_of=8` then we will pad to 256.
pad_id: (`optional`) unsigned int:
The id to be used when padding
pad_type_id: (`optional`) unsigned int:
The type id to be used when padding
pad_token: (`optional`) str:
The pad token to be used when padding
length: (`optional`) unsigned int:
If specified, the length at which to pad. If not specified
we pad using the size of the longest sequence in a batch
"""
return self._tokenizer.enable_padding(
direction=direction,
pad_to_multiple_of=pad_to_multiple_of,
pad_id=pad_id,
pad_type_id=pad_type_id,
pad_token=pad_token,
length=length,
)
def no_padding(self):
"""Disable padding"""
return self._tokenizer.no_padding()
@property
def padding(self) -> Optional[dict]:
"""Get the current padding parameters
Returns:
None if padding is disabled, a dict with the currently set parameters
if the padding is enabled.
"""
return self._tokenizer.padding
def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
"""Change the truncation options
Args:
max_length: unsigned int:
The maximum length at which to truncate
stride: (`optional`) unsigned int:
The length of the previous first sequence to be included
in the overflowing sequence
strategy: (`optional`) str:
Can be one of `longest_first`, `only_first` or `only_second`
"""
return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
def no_truncation(self):
"""Disable truncation"""
return self._tokenizer.no_truncation()
@property
def truncation(self) -> Optional[dict]:
"""Get the current truncation parameters
Returns:
None if truncation is disabled, a dict with the current truncation parameters if
truncation is enabled
"""
return self._tokenizer.truncation
def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given tokens to the vocabulary
Args:
tokens: List[Union[str, AddedToken]]:
A list of tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_tokens(tokens)
def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given special tokens to the vocabulary, and treat them as special tokens.
The special tokens will never be processed by the model, and will be
removed while decoding.
Args:
special_tokens: List[Union[str, AddedToken]]:
A list of special tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_special_tokens(special_tokens)
def normalize(self, sequence: str) -> str:
"""Normalize the given sequence
Args:
sequence: str:
The sequence to normalize
Returns:
The normalized string
"""
return self._tokenizer.normalize(sequence)
def encode(
self,
sequence: InputSequence,
pair: Optional[InputSequence] = None,
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> Encoding:
"""Encode the given sequence and pair. This method can process raw text sequences as well
as already pre-tokenized sequences.
Args:
sequence: InputSequence:
The sequence we want to encode. This sequence can be either raw text or
pre-tokenized, according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
An Encoding
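Example:
An illustrative sketch (the input text is made up)::
encoding = tokenizer.encode("A sentence to encode", "An optional pair")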
"""
if sequence is None:
raise ValueError("encode: `sequence` can't be `None`")
return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
def encode_batch(
self,
inputs: List[EncodeInput],
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> List[Encoding]:
"""Encode the given inputs. This method accept both raw text sequences as well as already
pre-tokenized sequences.
Args:
inputs: List[EncodeInput]:
A list of single sequences or pair sequences to encode. Each `EncodeInput` is
expected to be of the following form:
`Union[InputSequence, Tuple[InputSequence, InputSequence]]`
Each `InputSequence` can either be raw text or pre-tokenized,
according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
A list of Encoding
"""
if inputs is None:
raise ValueError("encode_batch: `inputs` can't be `None`")
return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
"""Decode the given list of ids to a string sequence
Args:
ids: List[unsigned int]:
A list of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output string
Returns:
The decoded string
"""
if ids is None:
raise ValueError("None input is not valid. Should be a list of integers.")
return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
"""Decode the list of sequences to a list of string sequences
Args:
sequences: List[List[unsigned int]]:
A list of sequence of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output strings
Returns:
A list of decoded strings
"""
if sequences is None:
raise ValueError("None input is not valid. Should be list of list of integers.")
return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
def token_to_id(self, token: str) -> Optional[int]:
"""Convert the given token to its corresponding id
Args:
token: str:
The token to convert
Returns:
The corresponding id if it exists, None otherwise
"""
return self._tokenizer.token_to_id(token)
def id_to_token(self, id: int) -> Optional[str]:
"""Convert the given token id to its corresponding string
Args:
id: int:
The token id to convert
Returns:
The corresponding string if it exists, None otherwise
"""
return self._tokenizer.id_to_token(id)
def save_model(self, directory: str, prefix: Optional[str] = None):
"""Save the current model to the given directory
Args:
directory: str:
A path to the destination directory
prefix: (Optional) str:
An optional prefix, used to prefix each file name
"""
return self._tokenizer.model.save(directory, prefix=prefix)
def save(self, path: str, pretty: bool = True):
"""Save the current Tokenizer at the given path
Args:
path: str:
A path to the destination Tokenizer file
"""
return self._tokenizer.save(path, pretty)
def to_str(self, pretty: bool = False):
"""Get a serialized JSON version of the Tokenizer as a str
Args:
pretty: bool:
Whether the JSON string should be prettified
Returns:
str
"""
return self._tokenizer.to_str(pretty)
def post_process(
self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
) -> Encoding:
"""Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to global params (provided to `enable_truncation`)
2. Apply the PostProcessor
3. Pad according to global params (provided to `enable_padding`)
Args:
encoding: Encoding:
The main Encoding to post process
pair: Optional[Encoding]:
An optional pair Encoding
add_special_tokens: bool:
Whether to add special tokens
Returns:
The resulting Encoding
"""
return self._tokenizer.post_process(encoding, pair, add_special_tokens)
@property
def model(self) -> Model:
return self._tokenizer.model
@model.setter
def model(self, model: Model):
self._tokenizer.model = model
@property
def normalizer(self) -> Normalizer:
return self._tokenizer.normalizer
@normalizer.setter
def normalizer(self, normalizer: Normalizer):
self._tokenizer.normalizer = normalizer
@property
def pre_tokenizer(self) -> PreTokenizer:
return self._tokenizer.pre_tokenizer
@pre_tokenizer.setter
def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
self._tokenizer.pre_tokenizer = pre_tokenizer
@property
def post_processor(self) -> PostProcessor:
return self._tokenizer.post_processor
@post_processor.setter
def post_processor(self, post_processor: PostProcessor):
self._tokenizer.post_processor = post_processor
@property
def decoder(self) -> Decoder:
return self._tokenizer.decoder
@decoder.setter
def decoder(self, decoder: Decoder):
self._tokenizer.decoder = decoder
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py | from typing import Dict, Iterator, List, Optional, Union
from tokenizers import AddedToken, Tokenizer, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer
class BertWordPieceTokenizer(BaseTokenizer):
"""Bert WordPiece Tokenizer"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
unk_token: Union[str, AddedToken] = "[UNK]",
sep_token: Union[str, AddedToken] = "[SEP]",
cls_token: Union[str, AddedToken] = "[CLS]",
pad_token: Union[str, AddedToken] = "[PAD]",
mask_token: Union[str, AddedToken] = "[MASK]",
clean_text: bool = True,
handle_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
lowercase: bool = True,
wordpieces_prefix: str = "##",
):
if vocab is not None:
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
else:
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
if tokenizer.token_to_id(str(mask_token)) is not None:
tokenizer.add_special_tokens([str(mask_token)])
tokenizer.normalizer = BertNormalizer(
clean_text=clean_text,
handle_chinese_chars=handle_chinese_chars,
strip_accents=strip_accents,
lowercase=lowercase,
)
tokenizer.pre_tokenizer = BertPreTokenizer()
if vocab is not None:
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
parameters = {
"model": "BertWordPiece",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"mask_token": mask_token,
"clean_text": clean_text,
"handle_chinese_chars": handle_chinese_chars,
"strip_accents": strip_accents,
"lowercase": lowercase,
"wordpieces_prefix": wordpieces_prefix,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab: str, **kwargs):
vocab = WordPiece.read_file(vocab)
return BertWordPieceTokenizer(vocab, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
):
"""Train the model using the given files"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
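# Example usage (an illustrative sketch; the vocabulary path below is hypothetical):
#
#   tokenizer = BertWordPieceTokenizer.from_file("path/to/vocab.txt", lowercase=True)
#   encoding = tokenizer.encode("A sentence to encode")
#   print(encoding.tokens)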
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py | from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class ByteLevelBPETokenizer(BaseTokenizer):
"""ByteLevelBPETokenizer
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
add_prefix_space: bool = False,
lowercase: bool = False,
dropout: Optional[float] = None,
unicode_normalizer: Optional[str] = None,
continuing_subword_prefix: Optional[str] = None,
end_of_word_suffix: Optional[str] = None,
trim_offsets: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
continuing_subword_prefix=continuing_subword_prefix or "",
end_of_word_suffix=end_of_word_suffix or "",
)
)
else:
tokenizer = Tokenizer(BPE())
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
parameters = {
"model": "ByteLevelBPE",
"add_prefix_space": add_prefix_space,
"lowercase": lowercase,
"dropout": dropout,
"unicode_normalizer": unicode_normalizer,
"continuing_subword_prefix": continuing_subword_prefix,
"end_of_word_suffix": end_of_word_suffix,
"trim_offsets": trim_offsets,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
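# Example usage (an illustrative sketch; the training file path is hypothetical):
#
#   tokenizer = ByteLevelBPETokenizer()
#   tokenizer.train(["data.txt"], vocab_size=30000, special_tokens=["<s>", "</s>"])
#   encoding = tokenizer.encode("A sentence to encode")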
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py | from typing import Dict, Iterator, List, Optional, Tuple, Union
from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from ..models import BPE
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class CharBPETokenizer(BaseTokenizer):
"""Original BPE Tokenizer
Represents the BPE algorithm, as introduced by Rico Sennrich
(https://arxiv.org/abs/1508.07909)
The default settings correspond to OpenAI GPT BPE tokenizers and differ from the original
Sennrich subword-nmt implementation by the following options that you can deactivate:
- adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
* removing any control characters and replacing all whitespaces by the classic one.
* handling Chinese chars by putting spaces around them.
* stripping all accents.
- splitting on punctuation in addition to whitespaces (deactivate it with
`split_on_whitespace_only=True`)
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
suffix: str = "</w>",
dropout: Optional[float] = None,
lowercase: bool = False,
unicode_normalizer: Optional[str] = None,
bert_normalizer: bool = True,
split_on_whitespace_only: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
unk_token=str(unk_token),
end_of_word_suffix=suffix,
)
)
else:
tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if bert_normalizer:
normalizers += [BertNormalizer(lowercase=False)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
if split_on_whitespace_only:
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
else:
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
parameters = {
"model": "BPE",
"unk_token": unk_token,
"suffix": suffix,
"dropout": dropout,
"lowercase": lowercase,
"unicode_normalizer": unicode_normalizer,
"bert_normalizer": bert_normalizer,
"split_on_whitespace_only": split_on_whitespace_only,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return CharBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
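# Example usage (an illustrative sketch; the training file path is hypothetical):
#
#   tokenizer = CharBPETokenizer(lowercase=True)
#   tokenizer.train(["data.txt"], vocab_size=10000)
#   encoding = tokenizer.encode("A sentence to encode")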
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py | from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import NFKC
from .base_tokenizer import BaseTokenizer
class SentencePieceBPETokenizer(BaseTokenizer):
"""SentencePiece BPE Tokenizer
Represents the BPE algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
replacement: str = "▁",
add_prefix_space: bool = True,
dropout: Optional[float] = None,
fuse_unk: Optional[bool] = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
else:
tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
tokenizer.normalizer = NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceBPE",
"unk_token": unk_token,
"replacement": replacement,
"add_prefix_space": add_prefix_space,
"dropout": dropout,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return SentencePieceBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
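# Example usage (an illustrative sketch; the file paths are hypothetical):
#
#   tokenizer = SentencePieceBPETokenizer.from_file("vocab.json", "merges.txt")
#   encoding = tokenizer.encode("A sentence to encode")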
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py | import json
import os
from typing import Iterator, List, Optional, Union, Tuple
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram
from .base_tokenizer import BaseTokenizer
class SentencePieceUnigramTokenizer(BaseTokenizer):
"""SentencePiece Unigram Tokenizer
Represents the Unigram algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[List[Tuple[str, float]]] = None,
replacement: str = "▁",
add_prefix_space: bool = True,
):
if vocab is not None:
# Let Unigram(..) fail if only one of them is None
tokenizer = Tokenizer(Unigram(vocab))
else:
tokenizer = Tokenizer(Unigram())
tokenizer.normalizer = normalizers.Sequence(
[normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
)
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(tokenizer, parameters)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
):
"""
Train the model using the given files
Args:
files (:obj:`List[str]`):
A list of path to the files that we should use for training
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
length: Optional[int] = None,
):
"""
Train the model using the given iterator
Args:
iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
Any iterator over strings or list of strings
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
@staticmethod
def from_spm(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
vocab = [(piece.piece, piece.score) for piece in m.pieces]
unk_id = m.trainer_spec.unk_id
model_type = m.trainer_spec.model_type
byte_fallback = m.trainer_spec.byte_fallback
if model_type != 1:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
replacement = "▁"
add_prefix_space = True
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
if precompiled_charsmap:
tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Precompiled(precompiled_charsmap),
normalizers.Replace(Regex(" {2,}"), " "),
]
)
else:
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceUnigram",
}
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
BaseTokenizer.__init__(obj, tokenizer, parameters)
return obj
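# A minimal usage sketch: "corpus.txt" and "spiece.model" are placeholder
# paths assumed to exist.
if __name__ == "__main__":
    unigram_tok = SentencePieceUnigramTokenizer()
    unigram_tok.train(["corpus.txt"], vocab_size=8000, special_tokens=["<unk>"], unk_token="<unk>")
    print(unigram_tok.encode("Hello world").tokens)
    # An existing SentencePiece Unigram model can also be converted directly,
    # provided the protobuf module described in `from_spm` is importable:
    #   converted = SentencePieceUnigramTokenizer.from_spm("spiece.model")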
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.py | # Generated content DO NOT EDIT
from .. import models
Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi | # Generated content DO NOT EDIT
class Model:
"""
Base class for all models
The model represents the actual tokenization algorithm. This is the part that
will contain and manage the learned vocabulary.
This class cannot be constructed directly. Please use one of the concrete models.
"""
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class BPE(Model):
"""
An implementation of the BPE (Byte-Pair Encoding) algorithm
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
A dictionary of string keys and their ids :obj:`{"am": 0,...}`
merges (:obj:`List[Tuple[str, str]]`, `optional`):
A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
cache_capacity (:obj:`int`, `optional`):
The number of words that the BPE cache can contain. The cache allows
to speed-up the process by keeping the result of the merge operations
for a number of words.
dropout (:obj:`float`, `optional`):
A float between 0 and 1 that represents the BPE dropout to use.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
continuing_subword_prefix (:obj:`str`, `optional`):
The prefix to attach to subword units that don't represent a beginning of word.
end_of_word_suffix (:obj:`str`, `optional`):
The suffix to attach to subword units that represent an end of word.
fuse_unk (:obj:`bool`, `optional`):
Whether to fuse any subsequent unknown tokens into a single one
byte_fallback (:obj:`bool`, `optional`):
Whether to use spm byte-fallback trick (defaults to False)
"""
def __init__(
self,
vocab=None,
merges=None,
cache_capacity=None,
dropout=None,
unk_token=None,
continuing_subword_prefix=None,
end_of_word_suffix=None,
fuse_unk=None,
byte_fallback=False,
):
pass
@staticmethod
def from_file(vocab, merges, **kwargs):
"""
Instantiate a BPE model from the given files.
This method is roughly equivalent to doing::
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
bpe = BPE(vocab, merges)
If you don't need to keep the :obj:`vocab, merges` values lying around,
this method is more optimized than manually calling
:meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
merges (:obj:`str`):
The path to a :obj:`merges.txt` file
Returns:
:class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab, merges):
"""
Read a :obj:`vocab.json` and a :obj:`merges.txt` files
This method provides a way to read and parse the content of these files,
returning the relevant data structures. If you want to instantiate some BPE models
from memory, this method gives you the expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
merges (:obj:`str`):
The path to a :obj:`merges.txt` file
Returns:
A :obj:`Tuple` with the vocab and the merges:
The vocabulary and merges loaded into memory
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
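# Usage sketch (illustrative only, shown as a comment since this generated
# stub holds no runtime code); the tiny vocabulary below is an assumption:
#
#     vocab = {"<unk>": 0, "a": 1, "b": 2, "ab": 3}
#     merges = [("a", "b")]
#     bpe = BPE(vocab, merges, unk_token="<unk>")
#     bpe.token_to_id("ab")  # 3
#     bpe.id_to_token(1)     # "a"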
class Unigram(Model):
"""
An implementation of the Unigram algorithm
Args:
vocab (:obj:`List[Tuple[str, float]]`, `optional`):
A list of vocabulary items and their relative score [("am", -0.2442),...]
"""
def __init__(self, vocab, unk_id, byte_fallback):
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
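# Usage sketch (illustrative only); the vocabulary and scores are made up:
#
#     vocab = [("<unk>", 0.0), ("▁Hello", -1.5), ("▁world", -2.0)]
#     unigram = Unigram(vocab, 0, False)  # unk_id=0, byte_fallback=False
#     unigram.token_to_id("▁Hello")  # 1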
class WordLevel(Model):
"""
An implementation of the WordLevel algorithm
Most simple tokenizer model based on mapping tokens to their corresponding id.
Args:
vocab (:obj:`str`, `optional`):
A dictionary of string keys and their ids :obj:`{"am": 0,...}`
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
def __init__(self, vocab, unk_token):
pass
@staticmethod
def from_file(vocab, unk_token):
"""
Instantiate a WordLevel model from the given file
This method is roughly equivalent to doing::
vocab = WordLevel.read_file(vocab_filename)
wordlevel = WordLevel(vocab)
If you don't need to keep the :obj:`vocab` values lying around, this method is
more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
initialize a :class:`~tokenizers.models.WordLevel`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
Returns:
:class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab):
"""
Read a :obj:`vocab.json`
This method provides a way to read and parse the content of a vocabulary file,
returning the relevant data structures. If you want to instantiate some WordLevel models
from memory, this method gives you the expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
Returns:
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
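# Usage sketch (illustrative only):
#
#     wordlevel = WordLevel({"hello": 0, "world": 1, "<unk>": 2}, unk_token="<unk>")
#     wordlevel.token_to_id("hello")  # 0
#     wordlevel.id_to_token(1)        # "world"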
class WordPiece(Model):
"""
An implementation of the WordPiece algorithm
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
A dictionary of string keys and their ids :obj:`{"am": 0,...}`
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
max_input_chars_per_word (:obj:`int`, `optional`):
The maximum number of characters to authorize in a single word.
"""
def __init__(self, vocab, unk_token, max_input_chars_per_word):
pass
@staticmethod
def from_file(vocab, **kwargs):
"""
Instantiate a WordPiece model from the given file
This method is roughly equivalent to doing::
vocab = WordPiece.read_file(vocab_filename)
wordpiece = WordPiece(vocab)
If you don't need to keep the :obj:`vocab` values lying around, this method is
more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
initialize a :class:`~tokenizers.models.WordPiece`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.txt` file
Returns:
:class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab):
"""
Read a :obj:`vocab.txt` file
This method provides a way to read and parse the content of a standard `vocab.txt`
file as used by the WordPiece Model, returning the relevant data structures. If you
want to instantiate some WordPiece models from memory, this method gives you the
expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.txt` file
Returns:
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
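# Usage sketch (illustrative only); the vocabulary is an assumption:
#
#     wordpiece = WordPiece({"[UNK]": 0, "hell": 1, "##o": 2}, unk_token="[UNK]", max_input_chars_per_word=100)
#     wordpiece.token_to_id("##o")  # 2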
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py | from .. import normalizers
Normalizer = normalizers.Normalizer
BertNormalizer = normalizers.BertNormalizer
NFD = normalizers.NFD
NFKD = normalizers.NFKD
NFC = normalizers.NFC
NFKC = normalizers.NFKC
Sequence = normalizers.Sequence
Lowercase = normalizers.Lowercase
Prepend = normalizers.Prepend
Strip = normalizers.Strip
StripAccents = normalizers.StripAccents
Nmt = normalizers.Nmt
Precompiled = normalizers.Precompiled
Replace = normalizers.Replace
NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
if normalizer not in NORMALIZERS:
raise ValueError(
"{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
)
return NORMALIZERS[normalizer]()
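# A minimal usage sketch of the helper above:
if __name__ == "__main__":
    nfkc = unicode_normalizer_from_str("nfkc")
    # NFKC folds compatibility characters, e.g. "ℌ" -> "H" and "…" -> "..."
    print(nfkc.normalize_str("ℌello…"))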
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi | # Generated content DO NOT EDIT
class Normalizer:
"""
Base class for all normalizers
This class is not supposed to be instantiated directly. Instead, any implementation of a
Normalizer will return an instance of this class when instantiated.
"""
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class BertNormalizer(Normalizer):
"""
BertNormalizer
Takes care of normalizing raw text before giving it to a Bert model.
This includes cleaning the text, handling accents, Chinese chars and lowercasing
Args:
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to clean the text, by removing any control characters
and replacing all whitespaces by the classic one.
handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to handle Chinese chars by putting spaces around them.
strip_accents (:obj:`bool`, `optional`):
Whether to strip all accents. If this option is not specified (ie == None),
then it will be determined by the value for `lowercase` (as in the original Bert).
lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase.
"""
def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
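# Usage sketch (illustrative only):
#
#     normalizer = BertNormalizer(lowercase=True, strip_accents=True)
#     normalizer.normalize_str("Héllo Wörld")  # "hello world"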
class Lowercase(Normalizer):
"""
Lowercase Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFC(Normalizer):
"""
NFC Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFD(Normalizer):
"""
NFD Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFKC(Normalizer):
"""
NFKC Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFKD(Normalizer):
"""
NFKD Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Nmt(Normalizer):
"""
Nmt normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Precompiled(Normalizer):
"""
Precompiled normalizer
Don't use manually; it is used for compatibility with SentencePiece.
"""
def __init__(self, precompiled_charsmap):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Prepend(Normalizer):
"""
Prepend normalizer
"""
def __init__(self, prepend):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Replace(Normalizer):
"""
Replace normalizer
"""
def __init__(self, pattern, content):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
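# Usage sketch (illustrative only): collapse runs of spaces, as the
# SentencePiece-style tokenizers in this repository do.
#
#     from tokenizers import Regex
#     collapse = Replace(Regex(" {2,}"), " ")
#     collapse.normalize_str("Hello    world")  # "Hello world"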
class Sequence(Normalizer):
"""
Allows concatenating multiple other Normalizers as a Sequence.
All the normalizers run in sequence in the given order
Args:
normalizers (:obj:`List[Normalizer]`):
A list of Normalizer to be run as a sequence
"""
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
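# Usage sketch (illustrative only):
#
#     seq = Sequence([NFKC(), Lowercase(), Strip()])
#     seq.normalize_str("  ℌELLO  ")  # "hello"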
class Strip(Normalizer):
"""
Strip normalizer
"""
def __init__(self, left=True, right=True):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class StripAccents(Normalizer):
"""
StripAccents normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py | # Generated content DO NOT EDIT
from .. import pre_tokenizers
PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi | # Generated content DO NOT EDIT
class PreTokenizer:
"""
Base class for all pre-tokenizers
This class is not supposed to be instantiated directly. Instead, any implementation of a
PreTokenizer will return an instance of this class when instantiated.
"""
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class BertPreTokenizer(PreTokenizer):
"""
BertPreTokenizer
This pre-tokenizer splits tokens on spaces, and also on punctuation.
Each occurrence of a punctuation character will be treated separately.
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class ByteLevel(PreTokenizer):
"""
ByteLevel PreTokenizer
This pre-tokenizer takes care of replacing all bytes of the given string
with a corresponding representation, as well as splitting into words.
Args:
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
Set this to :obj:`False` to prevent this `pre_tokenizer` from using
the GPT2 specific regexp for splitting on whitespace.
"""
def __init__(self, add_prefix_space=True, use_regex=True):
pass
@staticmethod
def alphabet():
"""
Returns the alphabet used by this PreTokenizer.
Since the ByteLevel works as its name suggests, at the byte level, it
encodes each byte value to a unique visible character. This means that there is a
total of 256 different characters composing this alphabet.
Returns:
:obj:`List[str]`: A list of characters that compose the alphabet
"""
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
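# Usage sketch (illustrative only); the exact output is indicative:
#
#     ByteLevel(add_prefix_space=True).pre_tokenize_str("Hello world")
#     # [("ĠHello", (0, 5)), ("Ġworld", (5, 11))]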
class CharDelimiterSplit(PreTokenizer):
"""
This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
Args:
delimiter: str:
The delimiter char that will be used to split input
"""
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class Digits(PreTokenizer):
"""
This pre-tokenizer simply splits using the digits in separate tokens
Args:
individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, digits will each be separated as follows::
"Call 123 please" -> "Call ", "1", "2", "3", " please"
If set to False, digits will be grouped as follows::
"Call 123 please" -> "Call ", "123", " please"
"""
def __init__(self, individual_digits=False):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
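# Usage sketch mirroring the docstring above (offsets are indicative):
#
#     Digits(individual_digits=True).pre_tokenize_str("Call 123 please")
#     # [("Call ", (0, 5)), ("1", (5, 6)), ("2", (6, 7)), ("3", (7, 8)), (" please", (8, 15))]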
class Metaspace(PreTokenizer):
"""
Metaspace pre-tokenizer
This pre-tokenizer replaces any whitespace by the provided replacement character.
It then tries to split on these spaces.
Args:
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
The replacement character. Must be exactly one character. By default we
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
"""
def __init__(self, replacement="_", add_prefix_space=True):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
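# Usage sketch (illustrative only; offsets are indicative):
#
#     Metaspace(replacement="▁", add_prefix_space=True).pre_tokenize_str("Hello there")
#     # [("▁Hello", (0, 5)), ("▁there", (5, 11))]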
class Punctuation(PreTokenizer):
"""
This pre-tokenizer simply splits on punctuation as individual characters.
Args:
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
The behavior to use when splitting.
Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
"contiguous"
"""
def __init__(self, behavior="isolated"):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class Sequence(PreTokenizer):
"""
This pre-tokenizer composes other pre_tokenizers and applies them in sequence
"""
def __init__(self, pretokenizers):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class Split(PreTokenizer):
"""
Split PreTokenizer
This versatile pre-tokenizer splits using the provided pattern and
according to the provided behavior. The pattern can be inverted by
making use of the invert flag.
Args:
pattern (:obj:`str` or :class:`~tokenizers.Regex`):
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to invert the pattern.
"""
def __init__(self, pattern, behavior, invert=False):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class UnicodeScripts(PreTokenizer):
"""
This pre-tokenizer splits on characters that belong to different language families
It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
This mimics the SentencePiece Unigram implementation.
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
class Whitespace(PreTokenizer):
"""
This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
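# Usage sketch (illustrative only; offsets are indicative):
#
#     Whitespace().pre_tokenize_str("Hello there!")
#     # [("Hello", (0, 5)), ("there", (6, 11)), ("!", (11, 12))]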
class WhitespaceSplit(PreTokenizer):
"""
This pre-tokenizer simply splits on the whitespace. Works like `.split()`
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place
This method allows to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
A list of tuple with the pre-tokenized parts and their offsets
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py | # Generated content DO NOT EDIT
from .. import processors
PostProcessor = processors.PostProcessor
BertProcessing = processors.BertProcessing
ByteLevel = processors.ByteLevel
RobertaProcessing = processors.RobertaProcessing
Sequence = processors.Sequence
TemplateProcessing = processors.TemplateProcessing
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi | # Generated content DO NOT EDIT
class PostProcessor:
"""
Base class for all post-processors
This class is not supposed to be instantiated directly. Instead, any implementation of
a PostProcessor will return an instance of this class when instantiated.
"""
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class BertProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Bert model:
- a SEP token
- a CLS token
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
"""
def __init__(self, sep, cls):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
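# Usage sketch (illustrative only): this processor is normally attached to a
# Tokenizer rather than called directly, and the ids below assume a typical
# BERT vocabulary.
#
#     tokenizer.post_processor = BertProcessing(("[SEP]", 102), ("[CLS]", 101))
#     # tokenizer.encode("Hello") now yields "[CLS] ... [SEP]" with token type ids set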
class ByteLevel(PostProcessor):
"""
This post-processor takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor must be used.
Args:
trim_offsets (:obj:`bool`):
Whether to trim the whitespaces from the produced offsets.
"""
def __init__(self, trim_offsets=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
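# Usage sketch: a byte-level setup where this post-processor trims the
# whitespace out of the offsets. The BPE model and pre-tokenizer settings are
# assumptions chosen for illustration.
from tokenizers import Tokenizer, pre_tokenizers, processors
from tokenizers.models import BPE
tok = Tokenizer(BPE())
tok.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
tok.post_processor = processors.ByteLevel(trim_offsets=True)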
class RobertaProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Roberta model:
- a SEP token
- a CLS token
It also takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor should be initialized
with :obj:`trim_offsets=True`
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to trim the whitespaces from the produced offsets.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether the add_prefix_space option was enabled during pre-tokenization. This
is relevant because it defines the way the offsets are trimmed out.
"""
def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
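# Usage sketch: the "<s>"/"</s>" strings and ids 0/2 follow the usual RoBERTa
# vocabulary, but they are assumptions here; look them up in your own vocab.
from tokenizers.processors import RobertaProcessing
post = RobertaProcessing(("</s>", 2), ("<s>", 0), trim_offsets=True)  # (sep, cls)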
class Sequence(PostProcessor):
"""
Sequence Processor
Args:
        processors (:obj:`List[PostProcessor]`):
The processors that need to be chained
"""
def __init__(self, processors):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
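# Usage sketch: chaining two post-processors, ByteLevel offset trimming
# followed by RoBERTa-style special tokens (token ids are assumptions).
from tokenizers import processors
post = processors.Sequence(
    [
        processors.ByteLevel(trim_offsets=True),
        processors.RobertaProcessing(("</s>", 2), ("<s>", 0)),
    ]
)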
class TemplateProcessing(PostProcessor):
"""
Provides a way to specify templates in order to add the special tokens to each
input sequence as relevant.
Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first
sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
sequences. The final result looks like this:
- Single sequence: :obj:`[CLS] Hello there [SEP]`
- Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
With the type ids as following::
[CLS] ... [SEP] ... [SEP]
0 0 0 1 1
You can achieve such behavior using a TemplateProcessing::
TemplateProcessing(
single="[CLS] $0 [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
In this example, each input sequence is identified using a ``$`` construct. This identifier
lets us specify each input sequence, and the type_id to use. When nothing is specified,
it uses the default values. Here are the different ways to specify it:
- Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
- Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
- Specifying both: ``$A:0``, ``$B:1``, ...
The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
**Warning**: You must ensure that you are giving the correct tokens/ids as these
will be added to the Encoding without any further check. If the given ids correspond
to something totally different in a `Tokenizer` using this `PostProcessor`, it
might lead to unexpected results.
Args:
single (:obj:`Template`):
The template used for single sequences
pair (:obj:`Template`):
The template used when both sequences are specified
special_tokens (:obj:`Tokens`):
            The list of special tokens used in each sequence
Types:
Template (:obj:`str` or :obj:`List`):
            - If a :obj:`str` is provided, whitespace is used as the delimiter between tokens
            - If a :obj:`List[str]` is provided, each element is taken as a token of the template
Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
- A :obj:`Tuple` with both a token and its associated ID, in any order
- A :obj:`dict` with the following keys:
- "id": :obj:`str` => The special token id, as specified in the Template
- "ids": :obj:`List[int]` => The associated IDs
- "tokens": :obj:`List[str]` => The associated tokens
The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
the same length.
"""
def __init__(self, single, pair, special_tokens):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
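# Usage sketch reproducing the BERT-style template described in the docstring
# above. The ids 101/102 are assumptions; use the ids of [CLS]/[SEP] in your
# own vocabulary.
from tokenizers.processors import TemplateProcessing
post = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 101), ("[SEP]", 102)],
)
# [CLS] ... [SEP] for a single sequence, [CLS] ... [SEP] ... [SEP] for a pair.
assert post.num_special_tokens_to_add(True) == 3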
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py | from .visualizer import Annotation, EncodingVisualizer
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css | .tokenized-text {
width:100%;
padding:2rem;
max-height: 400px;
overflow-y: auto;
box-sizing:border-box;
line-height:4rem; /* Lots of space between lines */
font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
background-color: rgba(0,0,0,0.01);
letter-spacing:2px; /* Give some extra separation between chars */
}
.non-token{
/* White space and other things the tokenizer ignores*/
white-space: pre;
letter-spacing:4px;
    border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious */
border-bottom:1px solid #A0A0A0;
line-height: 1rem;
height: calc(100% - 2px);
}
.token {
white-space: pre;
position:relative;
color:black;
letter-spacing:2px;
}
.annotation{
white-space:nowrap; /* Important - ensures that annotations appears even if the annotated text wraps a line */
border-radius:4px;
position:relative;
width:fit-content;
}
.annotation:before {
/*The before holds the text and the after holds the background*/
z-index:1000; /* Make sure this is above the background */
content:attr(data-label); /* The annotations label is on a data attribute */
color:white;
position:absolute;
font-size:1rem;
text-align:center;
font-weight:bold;
top:1.75rem;
line-height:0;
left:0;
width:100%;
padding:0.5rem 0;
/* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
overflow: hidden;
white-space: nowrap;
text-overflow:ellipsis;
}
.annotation:after {
content:attr(data-label); /* The content defines the width of the annotation*/
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
padding:0.5rem 0;
    /* Nasty hack below:
We set the annotations color in code because we don't know the colors at css time.
But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
So to get around that, annotations have the color set on them with a style attribute and then we
can get the color with currentColor.
Annotations wrap tokens and tokens set the color back to black
*/
background-color: currentColor;
}
.annotation:hover::after, .annotation:hover::before{
/* When the user hovers over an annotation expand the label to display in full
*/
min-width: fit-content;
}
.annotation:hover{
    /* Emphasize the annotation start and end with a border on hover */
border-color: currentColor;
border: 2px solid;
}
.special-token:not(:empty){
/*
    A non-empty special token is one like UNK (as opposed to CLS, which has no representation in the text)
*/
position:relative;
}
.special-token:empty::before{
    /* Special tokens that don't have text are displayed as pseudo elements so we don't select them with the mouse */
content:attr(data-stok);
background:#202020;
font-size:0.75rem;
color:white;
margin: 0 0.25rem;
padding: 0.25rem;
border-radius:4px
}
.special-token:not(:empty):before {
/* Special tokens that have text (UNK) are displayed above the actual text*/
content:attr(data-stok);
position:absolute;
bottom:1.75rem;
min-width:100%;
width:100%;
height:1rem;
line-height:1rem;
font-size:1rem;
text-align:center;
color:white;
font-weight:bold;
background:#202020;
border-radius:10%;
}
/*
We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations
instead we apply even and odd class at generation time and color them that way
*/
.even-token{
background:#DCDCDC ;
border: 1px solid #DCDCDC;
}
.odd-token{
background:#A0A0A0;
border: 1px solid #A0A0A0;
}
.even-token.multi-token,.odd-token.multi-token{
background: repeating-linear-gradient(
45deg,
transparent,
transparent 1px,
#ccc 1px,
#ccc 1px
),
/* on "bottom" */
linear-gradient(
to bottom,
#FFB6C1,
#999
);
}
.multi-token:hover::after {
content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
color:white;
background-color: black;
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
    width:fit-content; /* Fit the tooltip to its own text rather than the parent's width */
padding:0.5rem 0;
}
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py | import itertools
import os
import re
from string import Template
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
from tokenizers import Encoding, Tokenizer
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
css = f.read()
class Annotation:
start: int
end: int
    label: str
def __init__(self, start: int, end: int, label: str):
self.start = start
self.end = end
self.label = label
AnnotationList = List[Annotation]
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
token_ix: Optional[int]
anno_ix: Optional[int]
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
pass
class EncodingVisualizer:
"""
Build an EncodingVisualizer
Args:
tokenizer (:class:`~tokenizers.Tokenizer`):
A tokenizer instance
default_to_notebook (:obj:`bool`):
Whether to render html output in a notebook by default
annotation_converter (:obj:`Callable`, `optional`):
An optional (lambda) function that takes an annotation in any format and returns
an Annotation object
"""
    unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)
def __init__(
self,
tokenizer: Tokenizer,
default_to_notebook: bool = True,
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
):
if default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?
You can also pass `default_to_notebook=False` to get back raw HTML
"""
)
self.tokenizer = tokenizer
self.default_to_notebook = default_to_notebook
        self.annotation_converter = annotation_converter
pass
def __call__(
self,
text: str,
annotations: AnnotationList = [],
default_to_notebook: Optional[bool] = None,
) -> Optional[str]:
"""
Build a visualization of the given text
Args:
text (:obj:`str`):
The text to tokenize
annotations (:obj:`List[Annotation]`, `optional`):
                An optional list of annotations of the text. They can either be an Annotation class
or anything else if you instantiated the visualizer with a converter function
            default_to_notebook (:obj:`bool`, `optional`):
                If provided, overrides the value set at construction: when True, renders the html
                in a notebook, otherwise returns an html string.
Returns:
The HTML string if default_to_notebook is False, otherwise (default) returns None and
renders the HTML in the notebook
"""
final_default_to_notebook = self.default_to_notebook
if default_to_notebook is not None:
final_default_to_notebook = default_to_notebook
if final_default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?"""
)
        if self.annotation_converter is not None:
            annotations = list(map(self.annotation_converter, annotations))
encoding = self.tokenizer.encode(text)
html = EncodingVisualizer.__make_html(text, encoding, annotations)
if final_default_to_notebook:
display(HTML(html))
else:
return html
@staticmethod
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
"""
Generates a color palette for all the labels in a given set of annotations
Args:
annotations (:obj:`Annotation`):
A list of annotations
Returns:
:obj:`dict`: A dictionary mapping labels to colors in HSL format
"""
if len(annotations) == 0:
return {}
labels = set(map(lambda x: x.label, annotations))
num_labels = len(labels)
h_step = int(255 / num_labels)
if h_step < 20:
h_step = 20
s = 32
l = 64
h = 10
colors = {}
for label in sorted(labels): # sort so we always get the same colors for a given set of labels
            colors[label] = f"hsl({h},{s}%,{l}%)"
h += h_step
return colors
@staticmethod
def consecutive_chars_to_html(
consecutive_chars_list: List[CharState],
text: str,
encoding: Encoding,
):
"""
Converts a list of "consecutive chars" into a single HTML element.
Chars are consecutive if they fall under the same word, token and annotation.
        The CharState class has a "partition_key" method, returning a CharStateKey named tuple,
        that makes it easy to compare if two chars are consecutive.
Args:
consecutive_chars_list (:obj:`List[CharState]`):
A list of CharStates that have been grouped together
text (:obj:`str`):
The original text being processed
encoding (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`str`: The HTML span for a set of consecutive chars
"""
first = consecutive_chars_list[0]
if first.char_ix is None:
            # it's a special token
stoken = encoding.tokens[first.token_ix]
# special tokens are represented as empty spans. We use the data attribute and css
# magic to display it
            return f'<span class="special-token" data-stok="{stoken}"></span>'
# We're not in a special token so this group has a start and end.
last = consecutive_chars_list[-1]
start = first.char_ix
end = last.char_ix + 1
span_text = text[start:end]
css_classes = [] # What css classes will we apply on the resulting span
data_items = {} # What data attributes will we apply on the result span
if first.token_ix is not None:
# We can either be in a token or not (e.g. in white space)
css_classes.append("token")
if first.is_multitoken:
css_classes.append("multi-token")
if first.token_ix % 2:
# We use this to color alternating tokens.
# A token might be split by an annotation that ends in the middle of it, so this
# lets us visually indicate a consecutive token despite its possible splitting in
# the html markup
css_classes.append("odd-token")
else:
# Like above, but a different color so we can see the tokens alternate
css_classes.append("even-token")
if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
# This is a special token that is in the text. probably UNK
css_classes.append("special-token")
# TODO is this the right name for the data attribute ?
data_items["stok"] = encoding.tokens[first.token_ix]
else:
# In this case we are looking at a group/single char that is not tokenized.
# e.g. white space
css_classes.append("non-token")
css = f'''class="{' '.join(css_classes)}"'''
data = ""
for key, val in data_items.items():
data += f' data-{key}="{val}"'
return f"<span {css} {data} >{span_text}</span>"
@staticmethod
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
current_consecutive_chars = [char_states[0]]
prev_anno_ix = char_states[0].anno_ix
spans = []
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
cur_anno_ix = char_states[0].anno_ix
if cur_anno_ix is not None:
# If we started in an annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
for cs in char_states[1:]:
cur_anno_ix = cs.anno_ix
if cur_anno_ix != prev_anno_ix:
# If we've transitioned in or out of an annotation
spans.append(
# Create a span from the current consecutive characters
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
current_consecutive_chars = [cs]
if prev_anno_ix is not None:
# if we transitioned out of an annotation close it's span
spans.append("</span>")
if cur_anno_ix is not None:
# If we entered a new annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
prev_anno_ix = cur_anno_ix
if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current character is in the same "group" as the previous one
current_consecutive_chars.append(cs)
else:
# Otherwise we make a span for the previous group
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
                # And reset the consecutive_char_list to form a new group
current_consecutive_chars = [cs]
# All that's left is to fill out the final span
# TODO I think there is an edge case here where an annotation's span might not close
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
res = HTMLBody(spans) # Send the list of spans to the body of our html
return res
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
"""
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`AnnotationList`):
A (possibly empty) list of annotations
Returns:
            A list of length len(text) whose entry at index i is None if there is no annotation on
            character i, or k, the index of the annotation that covers character i (where k is an
            index into the list of annotations)
"""
annotation_map = [None] * len(text)
for anno_ix, a in enumerate(annotations):
for i in range(a.start, a.end):
annotation_map[i] = anno_ix
return annotation_map
@staticmethod
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
"""
        For each character in the original text, we emit a tuple representing its "state":
* which token_ix it corresponds to
* which word_ix it corresponds to
* which annotation_ix it corresponds to
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`List[Annotation]`):
A (possibly empty) list of annotations
encoding: (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
            its state is
"""
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
# Todo make this a dataclass or named tuple
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
for token_ix, token in enumerate(encoding.tokens):
offsets = encoding.token_to_chars(token_ix)
if offsets is not None:
start, end = offsets
for i in range(start, end):
char_states[i].tokens.append(token_ix)
for char_ix, anno_ix in enumerate(annotation_map):
char_states[char_ix].anno_ix = anno_ix
return char_states
def HTMLBody(children: List[str], css_styles=css) -> str:
"""
Generates the full html with css from a list of html spans
Args:
children (:obj:`List[str]`):
A list of strings, assumed to be html elements
css_styles (:obj:`str`, `optional`):
Optional alternative implementation of the css
Returns:
:obj:`str`: An HTML string with style markup
"""
children_text = "".join(children)
return f"""
<html>
<head>
<style>
{css_styles}
</style>
</head>
<body>
<div class="tokenized-text" dir=auto>
{children_text}
</div>
</body>
</html>
"""
| 0 |