pereki committed on
Commit 220d29d
Parent: 0bf202a
Files changed (7)
  1. config.json +32 -31
  2. handler.py +22 -29
  3. model.onnx +3 -0
  4. requirements.txt +1 -0
  5. test.py +7 -0
  6. tokenizer.json +0 -0
  7. tokenizer_config.json +1 -42
config.json CHANGED
@@ -1,33 +1,34 @@
  {
-   "alpha_pattern": {},
-   "auto_mapping": null,
-   "base_model_name_or_path": "Pereki/llama-2-7b-chat-hf",
-   "bias": "none",
-   "fan_in_fan_out": false,
-   "inference_mode": true,
-   "init_lora_weights": true,
-   "layers_pattern": null,
-   "layers_to_transform": null,
-   "loftq_config": {},
-   "lora_alpha": 32,
-   "lora_dropout": 0.05,
-   "megatron_config": null,
-   "megatron_core": "megatron.core",
-   "modules_to_save": null,
-   "peft_type": "LORA",
-   "r": 16,
-   "rank_pattern": {},
-   "revision": null,
-   "target_modules": [
-     "up_proj",
-     "o_proj",
-     "down_proj",
-     "k_proj",
-     "gate_proj",
-     "q_proj",
-     "v_proj"
+   "_name_or_path": "/home/alexandre/research/distilbert/pruned80_vnni/zoomodels/framework",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
    ],
-   "task_type": "CAUSAL_LM",
-   "use_dora": false,
-   "use_rslora": false
- }
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "finetuning_task": "sst2",
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "negative",
+     "1": "positive"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "negative": 0,
+     "positive": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.18.0.dev0",
+   "vocab_size": 30522
+ }
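The id2label / label2id mapping in the new config is what turns the classifier's output indices into the "negative"/"positive" strings that handler.py returns. As a quick sanity check, a minimal sketch along these lines (assuming it is run from the repository root, next to the committed config.json) would confirm the mapping loads as expected:

```python
from transformers import DistilBertConfig

# Load the committed config from the current directory and inspect
# the SST-2 label mapping (transformers parses the keys back to ints).
config = DistilBertConfig.from_pretrained(".")
print(config.id2label)              # {0: 'negative', 1: 'positive'}
print(config.n_layers, config.dim)  # 6, 768
```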
handler.py CHANGED
@@ -1,38 +1,31 @@
- from typing import Dict, List, Any
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ from typing import Dict, Any
+ from deepsparse import Pipeline
+ from time import perf_counter

- class EndpointHandler():
+ class EndpointHandler:
+
      def __init__(self, path=""):
-         quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
-         # load the optimized model
-         tokenizer = AutoTokenizer.from_pretrained(path)
-         model = AutoModelForCausalLM.from_pretrained(
-             path,
-             quantization_config=quantization_config,
-             device_map="auto",
-             torch_dtype='auto'
-         ).eval()
-         # create inference pipeline
-         self.pipeline = pipeline("text-classification", model=model, tokenizer=tokenizer)
-
-     def __call__(self, data: Any) -> List[List[Dict[str, float]]]:
+         self.pipeline = Pipeline.create(
+             task="text-classification",
+             model_path=path,
+             scheduler="sync"
+         )
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
          """
          Args:
-             data (:obj:):
-                 includes the input data and the parameters for the inference.
-         Return:
-             A :obj:`list`. The object returned should be a list of one list like [[{"label": 0.9939950108528137}]] containing:
-             - "label": A string representing what the label/class is. There can be multiple labels.
-             - "score": A score between 0 and 1 describing how confident the model is for this label/class.
+             data (:obj:): prediction input text
          """
          inputs = data.pop("inputs", data)
-         parameters = data.pop("parameters", None)

-         # pass inputs with all kwargs in data
-         if parameters is not None:
-             prediction = self.pipeline(inputs, **parameters)
-         else:
-             prediction = self.pipeline(inputs)
-         # postprocess the prediction
-         return prediction
+         start = perf_counter()
+         prediction = self.pipeline(inputs)
+         end = perf_counter()
+         latency = end - start
+
+         return {
+             "labels": prediction.labels,
+             "scores": prediction.scores,
+             "latency (secs.)": latency
+         }
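For a local smoke test of the new handler, a minimal sketch like the following should work (assuming the deployment files model.onnx, config.json, and the tokenizer files sit in the repository root, which is what this commit sets up; the "inputs" key matches what __call__ pops from the payload):

```python
from handler import EndpointHandler

# Assumes model.onnx, config.json and the tokenizer files live in the
# repository root, as laid out by this commit.
handler = EndpointHandler(path=".")

# The handler pops "inputs" from the request payload.
result = handler({"inputs": "I really enjoyed this movie!"})
print(result["labels"], result["scores"], result["latency (secs.)"])
```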
model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8f814a1a6b4f818e07d1183e2204eedd0fb8c8fdd708326e5d97ce4ee44c3e5
+ size 67197076
requirements.txt ADDED
@@ -0,0 +1 @@
+ deepsparse>=1.2.0
test.py ADDED
@@ -0,0 +1,7 @@
+ from sparsezoo import Model
+
+ stub = "zoo:nlp/sentiment_analysis/distilbert-none/pytorch/huggingface/sst2/pruned80_quant-none-vnni"
+ model = Model(stub, download_path="./deep")
+
+ # Downloads and prints the download path of the model
+ print(model.deployment.path)
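test.py stops at downloading the deployment. As an illustrative follow-up (the stub and ./deep path come from the script above; the rest is a sketch, not part of this commit), the downloaded directory can be fed straight into the same DeepSparse pipeline that handler.py constructs:

```python
from deepsparse import Pipeline
from sparsezoo import Model

stub = "zoo:nlp/sentiment_analysis/distilbert-none/pytorch/huggingface/sst2/pruned80_quant-none-vnni"
model = Model(stub, download_path="./deep")

# Point the pipeline at the freshly downloaded deployment directory
# instead of the repository root that handler.py uses.
pipeline = Pipeline.create(
    task="text-classification",
    model_path=model.deployment.path,
)
print(pipeline("The plot was thin, but the acting was wonderful."))
```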
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,42 +1 @@
- {
-   "add_bos_token": true,
-   "add_eos_token": false,
-   "added_tokens_decoder": {
-     "0": {
-       "content": "<unk>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "1": {
-       "content": "<s>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "2": {
-       "content": "</s>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "bos_token": "<s>",
-   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "</s>",
-   "legacy": false,
-   "model_max_length": 2048,
-   "pad_token": "</s>",
-   "padding_side": "right",
-   "sp_model_kwargs": {},
-   "tokenizer_class": "LlamaTokenizer",
-   "unk_token": "<unk>",
-   "use_default_system_prompt": false
- }
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "/home/alexandre/research/bert_base/sst2/framework", "tokenizer_class": "BertTokenizer"}