Chris4K committed
Commit 1c27be8
1 Parent(s): 051b302

Update app.py

Files changed (1)
  1. app.py +20 -10
app.py CHANGED
@@ -45,24 +45,34 @@ hf_token = os.getenv("hf_token")
 login(token=hf_token)
 
 # Define the model pipeline with additional generation parameters
-model_pipeline = pipeline(
-    # model="meta-llama/Llama-3.2-1B",
-    model="dunzhang/stella_en_1.5B_v5",
-    #pad_token_id=50256,
-    #use_auth_token=hf_token,
-    #max_length=1000,  # You can increase this if needed
-    max_new_tokens=900  # Limit how many tokens are generated
-)
+#model_pipeline = pipeline(
+#    # model="meta-llama/Llama-3.2-1B",
+#    model="dunzhang/stella_en_1.5B_v5",
+#    #pad_token_id=50256,
+#    #use_auth_token=hf_token,
+#    #max_length=1000,  # You can increase this if needed
+#    max_new_tokens=900  # Limit how many tokens are generated
+#)
 
 # Use the pipeline in HuggingFacePipeline
-llm = HuggingFacePipeline(pipeline=model_pipeline)
+#llm = HuggingFacePipeline(pipeline=model_pipeline)
+###################
+
+
+llm = HuggingFacePipeline.from_model_id(
+    model_id="bigscience/bloom-1b7",
+    task="text-generation",
+    model_kwargs={"temperature": 0, "max_length": 64},
+)
+
 
 ##### Alternative
 from transformers import pipeline
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-#READER_MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
+#READER_MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"  # model="dunzhang/stella_en_1.5B_v5",
+
 
 #bnb_config = BitsAndBytesConfig(
 #    load_in_4bit=True,
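
Usage note: the llm produced by the added lines is a standard LangChain LLM and can be invoked directly. Below is a minimal sketch that reuses the constructor arguments from this commit; the langchain_community import path and the example prompt are assumptions, since the diff does not show the file's import section.

# Minimal sketch, assuming the langchain_community import path for
# HuggingFacePipeline (the diff does not show this file's imports).
from langchain_community.llms import HuggingFacePipeline

# Constructor arguments copied verbatim from the added lines of this commit.
llm = HuggingFacePipeline.from_model_id(
    model_id="bigscience/bloom-1b7",
    task="text-generation",
    model_kwargs={"temperature": 0, "max_length": 64},
)

# The wrapper behaves like any other LangChain LLM: pass a prompt string
# and get the generated continuation back as a string.
print(llm.invoke("What is Hugging Face?"))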