Chris4K committed
Commit 9e6c18e
1 Parent(s): b66acba

Update app.py

Files changed (1)
app.py +8 -2
app.py CHANGED
@@ -13,10 +13,16 @@ model_name = "bn22/Mistral-7B-Instruct-v0.1-sharded"
 
 # function for loading 4-bit quantized model
 def load_quantized_model(model_name: str):
+
+    model = HuggingFaceHub(
+        repo_id="google/flan-ul2",
+        model_kwargs={"temperature":0.1,
+                      "max_new_tokens":256})
+
     """
     :param model_name: Name or path of the model to be loaded.
     :return: Loaded quantized model.
-    """
+
     bnb_config = BitsAndBytesConfig(
         load_in_4bit=True,
         bnb_4bit_use_double_quant=True,
@@ -29,7 +35,7 @@ def load_quantized_model(model_name: str):
         load_in_4bit=True,
         torch_dtype=torch.bfloat16,
         quantization_config=bnb_config
-    )
+    )"""
     return model
 
 ##################################################
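
Taken together, the two hunks short-circuit load_quantized_model: the function now returns a hosted HuggingFaceHub LLM immediately, and the old 4-bit BitsAndBytesConfig path survives only inside the triple-quoted string that used to be the docstring. The following is a minimal sketch of the function after this commit, not the literal file: the import is an assumption (the diff never shows imports; HuggingFaceHub is presumed to be langchain's Hugging Face Hub wrapper, which reads a HUGGINGFACEHUB_API_TOKEN environment variable), and the lines elided between the two hunks are marked rather than guessed.

# Sketch of app.py after commit 9e6c18e. The import below is assumed,
# since the diff does not show it; HuggingFaceHub is presumed to be
# langchain's Hugging Face Hub wrapper.
from langchain.llms import HuggingFaceHub

# function for loading 4-bit quantized model
def load_quantized_model(model_name: str):

    # new path: return a hosted flan-ul2 endpoint instead of a local model
    model = HuggingFaceHub(
        repo_id="google/flan-ul2",
        model_kwargs={"temperature": 0.1,
                      "max_new_tokens": 256})

    """
    :param model_name: Name or path of the model to be loaded.
    :return: Loaded quantized model.

    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        ... new lines 29-34, elided in the diff ...
        load_in_4bit=True,
        torch_dtype=torch.bfloat16,
        quantization_config=bnb_config
    )"""
    return model

Two side effects are worth noting. Because the triple-quoted string now follows the model = ... assignment, it is no longer a docstring but an unevaluated string expression acting as a block comment, so the old quantization code is dead rather than deleted. And the model_name parameter is now ignored: despite its name and the comment above it, the function no longer loads or quantizes "bn22/Mistral-7B-Instruct-v0.1-sharded"; it returns a remote google/flan-ul2 endpoint instead.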