ranpox committed
Commit: b857f5e
Parent: 278bd0b

Update README.md

Files changed (1):
  1. README.md +14 -2
README.md CHANGED
@@ -48,14 +48,26 @@ tokenizer = AutoTokenizer.from_pretrained("OpenLemur/lemur-70b-chat-v1")
 model = AutoModelForCausalLM.from_pretrained("OpenLemur/lemur-70b-chat-v1", device_map="auto", load_in_8bit=True)
 
 # Text Generation Example
-prompt = "What's lemur's favorite fruit?"
+prompt = """<|im_start|>system
+You are a helpful, respectful, and honest assistant.
+<|im_end|>
+<|im_start|>user
+What's a lemur's favorite fruit?<|im_end|>
+<|im_start|>assistant
+"""
 input = tokenizer(prompt, return_tensors="pt")
 output = model.generate(**input, max_length=50, num_return_sequences=1)
 generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
 print(generated_text)
 
 # Code Generation Example
-prompt = "Write a Python function to merge two sorted lists into one sorted list without using any built-in sort functions."
+prompt = """<|im_start|>system
+Below is an instruction that describes a task. Write a response that appropriately completes the request.
+<|im_end|>
+<|im_start|>user
+Write a Python function to merge two sorted lists into one sorted list without using any built-in sort functions.<|im_end|>
+<|im_start|>assistant
+"""
 input = tokenizer(prompt, return_tensors="pt")
 output = model.generate(**input, max_length=200, num_return_sequences=1)
 generated_code = tokenizer.decode(output[0], skip_special_tokens=True)
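
Both prompts added in this commit follow the same single-turn chat template: a system block, a user block, and an opening <|im_start|>assistant tag that the model completes. A minimal sketch of a helper that assembles this format is below; the build_chat_prompt name is our own illustration, not part of the README.

# Sketch only: build_chat_prompt is a hypothetical helper, not defined in the
# README; it just reproduces the tag layout shown in the diff above.
def build_chat_prompt(system_message: str, user_message: str) -> str:
    """Assemble a single-turn prompt in the <|im_start|>/<|im_end|> format."""
    return (
        f"<|im_start|>system\n{system_message}\n<|im_end|>\n"
        f"<|im_start|>user\n{user_message}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

# Reproduces the text-generation prompt added in this commit:
prompt = build_chat_prompt(
    "You are a helpful, respectful, and honest assistant.",
    "What's a lemur's favorite fruit?",
)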
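
For reference, one straightforward answer to the code-generation prompt is a two-pointer merge. The listing below is an illustrative solution to that task, not actual output from the model.

# Illustrative solution to the prompt's task, not output from lemur-70b-chat-v1.
def merge_sorted_lists(a, b):
    """Merge two already-sorted lists into one sorted list, without built-in sorting."""
    merged, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        # Take the smaller head element, advancing that list's pointer.
        if a[i] <= b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    # One list is exhausted; the other's remainder is already sorted.
    merged.extend(a[i:])
    merged.extend(b[j:])
    return merged

print(merge_sorted_lists([1, 3, 5], [2, 4, 6]))  # [1, 2, 3, 4, 5, 6]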