silk-road committed on
Commit
7ad8a43
1 Parent(s): 730647b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -16
app.py CHANGED
@@ -59,6 +59,8 @@ for novel in tqdm(novel_list):
59
  from ChatHaruhi import ChatHaruhi
60
  from ChatHaruhi.response_openai import get_response as get_response_openai
61
  from ChatHaruhi.response_zhipu import get_response as get_response_zhipu
 
 
62
 
63
  get_response = get_response_zhipu
64
 
@@ -133,21 +135,21 @@ openai太慢了 今天试试GLM的
133
 
134
  """
135
 
136
- from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
137
- tokenizer = AutoTokenizer.from_pretrained("silk-road/Haruhi-Zero-1_8B", trust_remote_code=True)
138
- model = AutoModelForCausalLM.from_pretrained("silk-road/Haruhi-Zero-1_8B", device_map="auto", trust_remote_code=True)
139
- model = model.eval()
140
-
141
- def get_response_qwen18(message):
142
- from ChatHaruhi.utils import normalize2uaua
143
- message_ua = normalize2uaua(message, if_replace_system = True)
144
- import json
145
- message_tuples = []
146
- for i in range(0, len(message_ua)-1, 2):
147
- message_tuple = (message_ua[i]["content"], message_ua[i+1]["content"])
148
- message_tuples.append(message_tuple)
149
- response, _ = model.chat(tokenizer, message_ua[-1]["content"], history=message_tuples)
150
- return response
151
 
152
  from ChatHaruhi.response_openai import get_response, async_get_response
153
  import gradio as gr
@@ -209,7 +211,7 @@ async def submit_chat( novel, role, user_name, user_text, chat_history, persona_
209
  elif model_sel == "Zhipu":
210
  chatbot.llm = get_response_zhipu
211
  else:
212
- chatbot.llm = get_response_qwen18
213
 
214
  history = []
215
 
 
59
  from ChatHaruhi import ChatHaruhi
60
  from ChatHaruhi.response_openai import get_response as get_response_openai
61
  from ChatHaruhi.response_zhipu import get_response as get_response_zhipu
62
+ from ChatHaruhi.response_qwen_base import get_response as get_response_qwen_base
63
+
64
 
65
  get_response = get_response_zhipu
66
 
 
135
 
136
  """
137
 
138
+ # from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM
139
+ # tokenizer = AutoTokenizer.from_pretrained("silk-road/Haruhi-Zero-1_8B", trust_remote_code=True)
140
+ # model = AutoModelForCausalLM.from_pretrained("silk-road/Haruhi-Zero-1_8B", device_map="auto", trust_remote_code=True)
141
+ # model = model.eval()
142
+
143
+ # def get_response_qwen18(message):
144
+ # from ChatHaruhi.utils import normalize2uaua
145
+ # message_ua = normalize2uaua(message, if_replace_system = True)
146
+ # import json
147
+ # message_tuples = []
148
+ # for i in range(0, len(message_ua)-1, 2):
149
+ # message_tuple = (message_ua[i]["content"], message_ua[i+1]["content"])
150
+ # message_tuples.append(message_tuple)
151
+ # response, _ = model.chat(tokenizer, message_ua[-1]["content"], history=message_tuples)
152
+ # return response
153
 
154
  from ChatHaruhi.response_openai import get_response, async_get_response
155
  import gradio as gr
 
211
  elif model_sel == "Zhipu":
212
  chatbot.llm = get_response_zhipu
213
  else:
214
+ chatbot.llm = get_response_qwen_base
215
 
216
  history = []
217