Chris4K committed
Commit d68106a
1 Parent(s): 33a6b68

Update app.py

Files changed (1)
  1. app.py +7 -7
app.py CHANGED
@@ -59,11 +59,11 @@ login(token=hf_token)
 ###################
 
 
-llm = HuggingFacePipeline.from_model_id(
-    model_id="bigscience/bloom-1b7",
-    task="text-generation",
-    model_kwargs={"temperature": 0, "max_length":1200, "do_sample":True},
-)
+#llm = HuggingFacePipeline.from_model_id(
+#    model_id="bigscience/bloom-1b7",
+#    task="text-generation",
+#    model_kwargs={"temperature": 0, "max_length":1200, "do_sample":True},
+#)
 
 
 ##### Alternative
@@ -96,9 +96,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 #####
 from huggingface_hub import InferenceClient
 
-#repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
+repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
 
-#llm_client = InferenceClient(model=repo_id, timeout=120)
+llm = InferenceClient(model=repo_id, timeout=120)
 
 # Test your LLM client
 #llm_client.text_generation(prompt="How are you today?", max_new_tokens=20)
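
For reference, a minimal sketch of exercising the client this commit switches to. This is illustrative, not part of the diff: it assumes the hf_token passed to login() above has access to the gated meta-llama repo, and note the new code binds the client to llm, so the still-commented test call would need to use that name rather than the old llm_client:

from huggingface_hub import InferenceClient

# As configured in the new code: a serverless Inference API client
# for Meta-Llama-3-8B-Instruct with a 120-second request timeout.
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
llm = InferenceClient(model=repo_id, timeout=120)

# text_generation() sends the prompt to the hosted model and returns
# the generated continuation as a string.
print(llm.text_generation(prompt="How are you today?", max_new_tokens=20))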