Chat2Find committed on
Commit
31adec0
·
verified ·
1 Parent(s): 643196c

Reverted to standard text completion code for Base/CPT model representation

Browse files
Files changed (1) hide show
  1. README.md +5 -8
README.md CHANGED
@@ -68,19 +68,16 @@ model, tokenizer = FastLanguageModel.from_pretrained(
68
  )
69
  FastLanguageModel.for_inference(model)
70
 
71
- messages = [
72
- {"role": "system", "content": "You are Chat2Find-CPT, a helpful assistant."},
73
- {"role": "user", "content": "ශ්‍රී ලංකාව ගැන කෙටි විස්තරයක්:"}
74
- ]
75
- prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
76
 
77
  inputs = tokenizer(
78
  text=[prompt],
79
- return_tensors = "pt"
80
  ).to("cuda")
81
 
82
- outputs = model.generate(**inputs, max_new_tokens = 256)
83
- response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
 
84
  print(response)
85
  ```
86
 
 
68
  )
69
  FastLanguageModel.for_inference(model)
70
 
71
+ prompt = "ශ්‍රී ලංකාව ගැන කෙටි විස්තරයක්:"
 
 
 
 
72
 
73
  inputs = tokenizer(
74
  text=[prompt],
75
+ return_tensors="pt"
76
  ).to("cuda")
77
 
78
+ outputs = model.generate(**inputs, max_new_tokens=256)
79
+ # Decode the generated text
80
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
81
  print(response)
82
  ```
83