Text Generation
Transformers
PyTorch
English
llama
Eval Results
Inference Endpoints
text-generation-inference
Pankaj Mathur committed on
Commit
94cd0cd
1 Parent(s): 53bd1d1

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -59,9 +59,9 @@ model = LlamaForCausalLM.from_pretrained(
59
  def generate_text(system, instruction, input=None):
60
 
61
  if input:
62
- prompt = f"### System:\n{system}\n\n#\n\n### User:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
63
  else:
64
- prompt = f"### System:\n{system}\n\n#\n\n### User:\n{instruction}\n\n### Response:\n"
65
 
66
  tokens = tokenizer.encode(prompt)
67
  tokens = torch.LongTensor(tokens).unsqueeze(0)
 
59
  def generate_text(system, instruction, input=None):
60
 
61
  if input:
62
+ prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
63
  else:
64
+ prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Response:\n"
65
 
66
  tokens = tokenizer.encode(prompt)
67
  tokens = torch.LongTensor(tokens).unsqueeze(0)