arnocandel committed
Commit a6ccb26
1 Parent(s): 4593e56

commit files to HF hub

Files changed (1)
  1. README.md +6 -55
README.md CHANGED
@@ -28,7 +28,8 @@ import torch
from transformers import pipeline

generate_text = pipeline(model="h2oai/h2ogpt-oasst1-512-12b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto")
- res = generate_text("Why is drinking water so healthy?")
+
+ res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
print(res[0]["generated_text"])
```

@@ -39,63 +40,13 @@ store it alongside your notebook, and construct the pipeline yourself from the l
import torch
from h2oai_pipeline import H2OTextGenerationPipeline
from transformers import AutoModelForCausalLM, AutoTokenizer
- tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-oasst1-512-12b", padding_side="left")
- model = AutoModelForCausalLM.from_pretrained("h2oai/h2ogpt-oasst1-512-12b", device_map="auto", torch_dtype=torch.bfloat16)

+ tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-oasst1-512-12b", padding_side="left")
+ model = AutoModelForCausalLM.from_pretrained("h2oai/h2ogpt-oasst1-512-12b", torch_dtype=torch.bfloat16, device_map="auto")
generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer)
- ```
-
- ### LangChain Usage
-
- To use the pipeline with LangChain, you must set `return_full_text=True`, as LangChain expects the full text to be returned
- and the default for the pipeline is to only return the new text.
-
- ```
- import torch
- from transformers import pipeline
-
- generate_text = pipeline(model="h2oai/h2ogpt-oasst1-512-12b", torch_dtype=torch.bfloat16,
-                          trust_remote_code=True, device_map="auto", return_full_text=True)
- ```
-
- You can create a prompt that either has only an instruction or has an instruction with context:
-
- ```
- from langchain import PromptTemplate, LLMChain
- from langchain.llms import HuggingFacePipeline
-
- # template for an instruction with no input
- prompt = PromptTemplate(
-     input_variables=["instruction"],
-     template="{instruction}")
-
- # template for an instruction with input
- prompt_with_context = PromptTemplate(
-     input_variables=["instruction", "context"],
-     template="{instruction}\n\nInput:\n{context}")
-
- hf_pipeline = HuggingFacePipeline(pipeline=generate_text)
-
- llm_chain = LLMChain(llm=hf_pipeline, prompt=prompt)
- llm_context_chain = LLMChain(llm=hf_pipeline, prompt=prompt_with_context)
- ```
-
- Example predicting using a simple instruction:
-
- ```
- print(llm_chain.predict(instruction="Why is drinking water so healthy?").lstrip())
- ```
-
- Example predicting using an instruction with context:
-
- ```
- context = """Model A: AUC=0.8
- Model from Driverless AI: AUC=0.95
- Model C: AUC=0.6
- Model D: AUC=0.7
- """
-
- print(llm_context_chain.predict(instruction="Which model performs best?", context=context).lstrip())
+
+ res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
+ print(res[0]["generated_text"])
```

## Model Architecture
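
For reference, the usage this commit settles on assembles into the following self-contained script. This is a convenience sketch built from the lines added above; it assumes the `transformers` and `accelerate` packages are installed (the latter is needed for `device_map="auto"`) and that enough GPU memory is available for a 12B-parameter model in bfloat16.

```
import torch
from transformers import pipeline

# Same call as the updated README: trust_remote_code=True allows the custom
# pipeline code shipped in the model repo (h2oai_pipeline.py) to be used.
generate_text = pipeline(
    model="h2oai/h2ogpt-oasst1-512-12b",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)

# max_new_tokens=100 is the generation cap this commit adds to both examples.
res = generate_text("Why is drinking water so healthy?", max_new_tokens=100)
print(res[0]["generated_text"])
```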