pseudotensor committed
Commit f1c9bac
1 Parent(s): 12f5733

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
```diff
@@ -36,7 +36,7 @@ pip install accelerate==0.18.0
 import torch
 from transformers import pipeline
 
-generate_text = pipeline(model="h2oai/h2ogpt-oig-oasst1-256-6_9b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto")
+generate_text = pipeline(model="h2oai/h2ogpt-oig-oasst1-256-6_9b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", prompt_type='human_bot')
 
 res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
 print(res[0]["generated_text"])
@@ -52,7 +52,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-oig-oasst1-256-6_9b", padding_side="left")
 model = AutoModelForCausalLM.from_pretrained("h2oai/h2ogpt-oig-oasst1-256-6_9b", torch_dtype=torch.bfloat16, device_map="auto")
-generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer)
+generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer, prompt_type='human_bot')
 
 res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
 print(res[0]["generated_text"])
```
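For reference, a minimal sketch of the README usage after this commit, combining both updated snippets. It assumes a CUDA-capable GPU with bfloat16 support and the package versions pinned earlier in the README; the `from h2oai_pipeline import H2OTextGenerationPipeline` line is an assumption about where that class is importable from and should be adjusted to match the README's actual instructions.

```python
# Sketch of the updated usage; the h2oai_pipeline import below is an assumption,
# not shown in this diff.
import torch
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Option 1: let transformers build the model's custom pipeline (trust_remote_code),
# forwarding the new prompt_type argument added by this commit.
generate_text = pipeline(
    model="h2oai/h2ogpt-oig-oasst1-256-6_9b",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
    prompt_type='human_bot',  # argument added by this commit
)
res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
print(res[0]["generated_text"])

# Option 2: construct the pipeline explicitly from a tokenizer and model.
from h2oai_pipeline import H2OTextGenerationPipeline  # assumed module name

tokenizer = AutoTokenizer.from_pretrained(
    "h2oai/h2ogpt-oig-oasst1-256-6_9b", padding_side="left"
)
model = AutoModelForCausalLM.from_pretrained(
    "h2oai/h2ogpt-oig-oasst1-256-6_9b", torch_dtype=torch.bfloat16, device_map="auto"
)
generate_text = H2OTextGenerationPipeline(
    model=model,
    tokenizer=tokenizer,
    prompt_type='human_bot',  # argument added by this commit
)
res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
print(res[0]["generated_text"])
```

In both snippets the new `prompt_type='human_bot'` argument is passed through to the custom text-generation pipeline so that prompts are formatted in the conversational style the model expects.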