Ichsan2895 commited on
Commit
c4454c1
1 Parent(s): 109bb76

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -7,7 +7,7 @@ language:
7
  pipeline_tag: text-generation
8
  ---
9
 
10
- # THIS IS 5th PROTOTYPE OF MERAK-7B-v2!
11
 
12
  Merak-7B is the Large Language Model of the Indonesian language
13
 
@@ -53,7 +53,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id,
53
  tokenizer = LlamaTokenizer.from_pretrained(model_id)
54
 
55
  def generate_response(question: str) -> str:
56
- prompt = f"<|prompt|>{question}<|answer|>".strip()
57
 
58
  encoding = tokenizer(prompt, return_tensors='pt').to("cuda")
59
  with torch.inference_mode():
@@ -93,7 +93,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id,
93
  tokenizer = LlamaTokenizer.from_pretrained(model_id)
94
 
95
  def generate_response(question: str) -> str:
96
- prompt = f"<|prompt|>{question}<|answer|>".strip()
97
 
98
  encoding = tokenizer(prompt, return_tensors='pt').to("cuda")
99
  with torch.inference_mode():
 
7
  pipeline_tag: text-generation
8
  ---
9
 
10
+ # Happy to announce the release of our first model, Merak-7B!
11
 
12
  Merak-7B is the Large Language Model of the Indonesian language
13
 
 
53
  tokenizer = LlamaTokenizer.from_pretrained(model_id)
54
 
55
  def generate_response(question: str) -> str:
56
+ prompt = f"<|prompt|>{question}\n<|answer|>".strip()
57
 
58
  encoding = tokenizer(prompt, return_tensors='pt').to("cuda")
59
  with torch.inference_mode():
 
93
  tokenizer = LlamaTokenizer.from_pretrained(model_id)
94
 
95
  def generate_response(question: str) -> str:
96
+ prompt = f"<|prompt|>{question}\n<|answer|>".strip()
97
 
98
  encoding = tokenizer(prompt, return_tensors='pt').to("cuda")
99
  with torch.inference_mode():