---
language:
- en
library_name: transformers
tags:
- art
datasets:
- gokaygokay/prompt_description_stable_diffusion_3k
pipeline_tag: text2text-generation
---
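
Example usage with the `transformers` library: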

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
import torch

model_id = "gokaygokay/tiny_llama_chat_description_to_prompt"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)

def generate_response(user_input):
    # Wrap the description in the ChatML-style template used at training time.
    prompt = f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant:"

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    generation_config = GenerationConfig(
        penalty_alpha=0.6,
        do_sample=True,
        top_k=5,
        temperature=0.9,
        repetition_penalty=1.2,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
    )

    outputs = model.generate(**inputs, generation_config=generation_config)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
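
To try it, call the helper with a plain-language image description. The description below is made up for illustration; any natural-language description works:

```python
# Hypothetical sample input: the model turns it into a prompt-style output.
generate_response("A cozy wooden cabin in a snowy forest at dusk, warm light glowing in the windows.")
```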