kenghuoxiong committed
Commit 8eb2972 · verified · 1 parent: f99d7c3

Update app.py

Files changed (1): app.py (+6 -5)
app.py CHANGED

@@ -48,8 +48,8 @@ def qwen_api(user_message, top_p=0.9,temperature=0.7, system_message='', max_tok
 
     response = ""
     for message in client.chat.completions.create(
-        # model="meta-llama/Meta-Llama-3-8B-Instruct",
-        model="Qwen/Qwen1.5-4B-Chat",
+        model="meta-llama/Meta-Llama-3-8B-Instruct",
+        # model="Qwen/Qwen1.5-4B-Chat",
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
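This change appears twice in the commit, here in qwen_api and again below in respond(); in both places it simply flips which model= line is commented out, switching the Space from Qwen1.5-4B-Chat to Meta-Llama-3-8B-Instruct. For orientation, a minimal sketch of the streaming loop these diffed lines sit in, assuming an OpenAI-compatible client; the endpoint, default arguments, and message assembly are assumptions, and only the model= lines come from this commit:

```python
# Minimal sketch, assuming an OpenAI-compatible client; only the model=
# lines are taken from the commit, everything else is illustrative.
from openai import OpenAI

client = OpenAI(base_url="https://example-endpoint/v1", api_key="...")  # assumed endpoint

def qwen_api(user_message, top_p=0.9, temperature=0.7, system_message='', max_tokens=512):
    response = ""
    for message in client.chat.completions.create(
        model="meta-llama/Meta-Llama-3-8B-Instruct",   # enabled by this commit
        # model="Qwen/Qwen1.5-4B-Chat",                # disabled by this commit
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message},
        ],
    ):
        # with stream=True the client yields chunks whose delta carries the
        # next piece of text; a chunk's delta.content may be None, hence or ""
        response += message.choices[0].delta.content or ""
    return response
```

Consuming the stream chunk by chunk keeps time-to-first-token low, which is what makes the commented-out model swap a one-line change: both models sit behind the same chat-completions interface.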
@@ -70,8 +70,9 @@ embedding = load_embedding_mode()
 db = Chroma(persist_directory='./VecterStore2_512_txt/VecterStore2_512_txt', embedding_function=embedding)
 prompt_template = """
 {context}
-The above content is a form of biological background knowledge. Please answer the questions according to the above content. Please be sure to answer the questions according to the background knowledge and attach the doi number of the information source when answering.
+The above content is a form of biological background knowledge. Please answer the questions according to the above content.
 Question: {question}
+Please be sure to answer the questions according to the background knowledge and attach the doi number of the information source when answering.
 Answer in English:"""
 PROMPT = PromptTemplate(
     template=prompt_template, input_variables=["context", "question"]
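The prompt change splits one long instruction in two: the background framing stays above the question, while the cite-the-DOI requirement moves after it, so it is the last instruction the model reads before answering. A runnable sketch of the new template's output, using the committed template verbatim; the sample context and question are invented:

```python
# Runnable sketch: the template text is exactly the committed version;
# the context/question values are invented for illustration.
from langchain.prompts import PromptTemplate

prompt_template = """
{context}
The above content is a form of biological background knowledge. Please answer the questions according to the above content.
Question: {question}
Please be sure to answer the questions according to the background knowledge and attach the doi number of the information source when answering.
Answer in English:"""

PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
)

# The DOI instruction now sits between the question and "Answer in English:",
# i.e. it is the final instruction before generation starts.
print(PROMPT.format(
    context="(retrieved chunks from the Chroma store go here)",
    question="Which pathway regulates the process described above?",
))
```

Elsewhere in app.py this PROMPT would typically be wired to retrieval over db, e.g. via RetrievalQA's chain_type_kwargs={"prompt": PROMPT} together with db.as_retriever(), but that wiring is untouched by this commit.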
@@ -134,8 +135,8 @@ def respond(
     response = ""
 
     for message in client.chat.completions.create(
-        # model="meta-llama/Meta-Llama-3-8B-Instruct",
-        model="Qwen/Qwen1.5-4B-Chat",
+        model="meta-llama/Meta-Llama-3-8B-Instruct",
+        # model="Qwen/Qwen1.5-4B-Chat",
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
 