seiching committed on
Commit
40ae977
1 Parent(s): 083e8d5

update gpt model

Browse files
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -25,7 +25,7 @@ import tiktoken
25
 
26
  usemodelname='gpt-4-0125-preview'
27
 
28
- def call_openai_api(openaiobj,transcription):
29
 
30
  response = openaiobj.chat.completions.create(
31
  #model="gpt-3.5-turbo",
@@ -61,7 +61,7 @@ def call_openai_summary(openaiobj,transcription):
61
  ]
62
  )
63
  return response.choices[0].message.content
64
- def call_openai_summaryall(openaiobj,transcription):
65
 
66
  response = openaiobj.chat.completions.create(
67
  #model="gpt-3.5-turbo",
@@ -92,7 +92,7 @@ def split_into_chunks(text, tokens=15900):
92
  chunks.append(' '.join(encoding.decode(words[i:i + tokens])))
93
  return chunks
94
 
95
- def process_chunks(openaikeystr,inputtext):
96
  # openaiobj = OpenAI(
97
  # # This is the default and can be omitted
98
 
@@ -115,14 +115,14 @@ def process_chunks(openaikeystr,inputtext):
115
 
116
  for chunk in chunks:
117
 
118
- response=response+'第' +str(i)+'段\n'+call_openai_api(openaiobj,chunk)+'\n\n'
119
  i=i+1
120
- finalresponse=response+'\n\n 這是根據以上分段會議紀錄彙編如下 \n\n' +call_openai_api(openaiobj,response)
121
  # response=response+call_openai_summary(openaiobj,chunk)
122
 
123
 
124
  else:
125
- finalresponse=call_openai_api(openaiobj,chunk[0])
126
  return finalresponse
127
  # # Processes chunks in parallel
128
  # with ThreadPoolExecutor() as executor:
@@ -234,7 +234,7 @@ file_transcribe = gr.Interface(
234
  allow_flagging="never",
235
  )
236
  import google.generativeai as genai
237
- def gpt4write(openaikeystr,transcribe_text):
238
  # openaiobj = OpenAI(
239
  # # This is the default and can be omitted
240
 
@@ -247,13 +247,13 @@ def gpt4write(openaikeystr,transcribe_text):
247
 
248
  #openaiojb =OpenAI(base_url="http://localhost:1234/v1", api_key="not-needed")
249
  openaiobj =OpenAI( api_key=realkey)
250
- text = inputtext
251
  #openaikey.set_key(openaikeystr)
252
  #print('process_chunk',openaikey.get_key())
253
  #chunks = split_into_chunks(text)
254
  #response='這是分段會議紀錄結果\n\n'
255
 
256
- finalresponse=call_openai_api(openaiobj,transcribe_text)
257
  # response=response+call_openai_summary(openaiobj,chunk)
258
  return finalresponse
259
 
@@ -274,9 +274,9 @@ def writenotes( LLMmodel,apikeystr,inputscript):
274
  if len(inputscript)>10: #有資料表示不是來自語音辨識結果
275
  transcribe_text=inputscript
276
  if LLMmodel=="gpt-3.5-turbo":
277
- ainotestext=process_chunks(apikeystr,transcribe_text)
278
  elif LLMmodel=="gpt-4-0125-preview":
279
- ainotestext=gpt4write(apikeystr,transcribe_text)
280
  elif LLMmodel=='gemini':
281
  ainotestext=gewritenote(inputscript)
282
 
@@ -294,7 +294,7 @@ def writenotes( LLMmodel,apikeystr,inputscript):
294
  return ainotestext
295
  ainotes = gr.Interface(
296
  fn=writenotes,
297
- inputs=[ gr.inputs.Radio(["gpt-3.5-turbo", "gpt-4-0125-preview","gemini"], label="LLMmodel", default="gpt-3.5-turbo"),gr.Textbox(label="使用GPT請輸入OPEN AI API KEY",placeholder="請輸入sk..."),gr.Textbox(label="逐字稿",placeholder="若沒有做語音辨識,請輸入逐字稿")],
298
  outputs="text",
299
  layout="horizontal",
300
  theme="huggingface",
 
25
 
26
  usemodelname='gpt-4-0125-preview'
27
 
28
+ def call_openai_api(openaiobj,transcription,usemodelname):
29
 
30
  response = openaiobj.chat.completions.create(
31
  #model="gpt-3.5-turbo",
 
61
  ]
62
  )
63
  return response.choices[0].message.content
64
+ def call_openai_summaryall(openaiobj,transcription,usemodelname):
65
 
66
  response = openaiobj.chat.completions.create(
67
  #model="gpt-3.5-turbo",
 
92
  chunks.append(' '.join(encoding.decode(words[i:i + tokens])))
93
  return chunks
94
 
95
+ def process_chunks(openaikeystr,inputtext,LLMmodel):
96
  # openaiobj = OpenAI(
97
  # # This is the default and can be omitted
98
 
 
115
 
116
  for chunk in chunks:
117
 
118
+ response=response+'第' +str(i)+'段\n'+call_openai_api(openaiobj,chunk,LLMmodel)+'\n\n'
119
  i=i+1
120
+ finalresponse=response+'\n\n 這是根據以上分段會議紀錄彙編如下 \n\n' +call_openai_api(openaiobj,response,LLMmodel)
121
  # response=response+call_openai_summary(openaiobj,chunk)
122
 
123
 
124
  else:
125
+ finalresponse=call_openai_api(openaiobj,chunk[0],LLMmodel)
126
  return finalresponse
127
  # # Processes chunks in parallel
128
  # with ThreadPoolExecutor() as executor:
 
234
  allow_flagging="never",
235
  )
236
  import google.generativeai as genai
237
+ def gpt4write(openaikeystr,transcribe_text,LLMmodel):
238
  # openaiobj = OpenAI(
239
  # # This is the default and can be omitted
240
 
 
247
 
248
  #openaiojb =OpenAI(base_url="http://localhost:1234/v1", api_key="not-needed")
249
  openaiobj =OpenAI( api_key=realkey)
250
+ #text = inputtext
251
  #openaikey.set_key(openaikeystr)
252
  #print('process_chunk',openaikey.get_key())
253
  #chunks = split_into_chunks(text)
254
  #response='這是分段會議紀錄結果\n\n'
255
 
256
+ finalresponse=call_openai_api(openaiobj,transcribe_text,LLMmodel)
257
  # response=response+call_openai_summary(openaiobj,chunk)
258
  return finalresponse
259
 
 
274
  if len(inputscript)>10: #有資料表示不是來自語音辨識結果
275
  transcribe_text=inputscript
276
  if LLMmodel=="gpt-3.5-turbo":
277
+ ainotestext=process_chunks(apikeystr,transcribe_text,LLMmodel)
278
  elif LLMmodel=="gpt-4-0125-preview":
279
+ ainotestext=gpt4write(apikeystr,transcribe_text,LLMmodel)
280
  elif LLMmodel=='gemini':
281
  ainotestext=gewritenote(inputscript)
282
 
 
294
  return ainotestext
295
  ainotes = gr.Interface(
296
  fn=writenotes,
297
+ inputs=[ gr.inputs.Radio(["gemini","gpt-3.5-turbo", "gpt-4-0125-preview"], label="LLMmodel", default="gemini"),gr.Textbox(label="使用GPT請輸入OPEN AI API KEY",placeholder="請輸入sk..."),gr.Textbox(label="逐字稿",placeholder="若沒有做語音辨識,請輸入逐字稿")],
298
  outputs="text",
299
  layout="horizontal",
300
  theme="huggingface",