rameshmoorthy committed on
Commit
676f4e3
1 Parent(s): 3362322

Update backend/query_llm.py

Browse files
Files changed (1) hide show
  1. backend/query_llm.py +9 -9
backend/query_llm.py CHANGED
@@ -10,7 +10,7 @@ from huggingface_hub import InferenceClient
10
  from transformers import AutoTokenizer
11
 
12
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
13
- #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
14
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x22B-Instruct-v0.1")
15
  temperature = 0.5
16
  top_p = 0.7
@@ -19,16 +19,16 @@ repetition_penalty = 1.2
19
  OPENAI_KEY = getenv("OPENAI_API_KEY")
20
  HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
21
 
22
- hf_client = InferenceClient(
23
- "mistralai/Mistral-7B-Instruct-v0.1",
24
- token=HF_TOKEN
25
- )
26
 
27
 
28
- # hf_client = InferenceClient(
29
- # "mistralai/Mixtral-8x7B-Instruct-v0.1",
30
- # token=HF_TOKEN
31
- # )
32
  def format_prompt(message: str, api_kind: str):
33
  """
34
  Formats the given message using a chat template.
 
10
  from transformers import AutoTokenizer
11
 
12
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
13
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
14
  #tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x22B-Instruct-v0.1")
15
  temperature = 0.5
16
  top_p = 0.7
 
19
  OPENAI_KEY = getenv("OPENAI_API_KEY")
20
  HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
21
 
22
+ # hf_client = InferenceClient(
23
+ # "mistralai/Mistral-7B-Instruct-v0.1",
24
+ # token=HF_TOKEN
25
+ # )
26
 
27
 
28
+ hf_client = InferenceClient(
29
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
30
+ token=HF_TOKEN
31
+ )
32
  def format_prompt(message: str, api_kind: str):
33
  """
34
  Formats the given message using a chat template.