update gpt3.5
app.py CHANGED
@@ -61,10 +61,29 @@ def call_openai_summary(openaiobj,transcription):
         ]
     )
     return response.choices[0].message.content
+def call_openai_summaryall(openaiobj,transcription):
+
+    response = openaiobj.chat.completions.create(
+        #model="gpt-3.5-turbo",
+        model=usemodelname,
+        temperature=0,
+        messages=[
+            {
+                "role": "system",
+                "content": "你是專業的會議紀錄製作員,請根據分段的會議決證,彙整成正式會議紀錄"
+            },
+            {
+                "role": "user",
+                "content": transcription
+            }
+        ]
+    )
+    return response.choices[0].message.content
+
 
 
 
-def split_into_chunks(text, tokens=
+def split_into_chunks(text, tokens=15900):
     #encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')
     encoding = tiktoken.encoding_for_model(usemodelname)
     words = encoding.encode(text)
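The new call_openai_summaryall is a near-copy of call_openai_summary; its system prompt says, roughly, "You are a professional minute-taker: compile the segmented meeting minutes into formal minutes" (the committed string's 會議決證 looks like a typo for 會議紀錄). A minimal standalone sketch of the same call against the OpenAI v1 Python SDK; the client construction, key, and default model name here are placeholders, not part of the commit:

from openai import OpenAI

client = OpenAI(api_key="sk-...")  # placeholder key, not from the commit

def call_openai_summaryall(client, transcription, model="gpt-3.5-turbo"):
    # One chat-completion call: the system prompt fixes the minute-taker
    # role, the user message carries the text to be merged.
    response = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[
            {"role": "system",
             "content": "你是專業的會議紀錄製作員,請根據分段的會議紀錄,彙整成正式會議紀錄"},
            {"role": "user", "content": transcription},
        ],
    )
    return response.choices[0].message.content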
@@ -92,15 +111,18 @@ def process_chunks(openaikeystr,inputtext):
     chunks = split_into_chunks(text)
     response='這是分段會議紀錄結果\n\n'
     i=1
-
+    if len(chunks)>1:
+
+        for chunk in chunks:
 
-
-
-    # response=response+call_openai_summary(openaiobj,chunk)
-    if i>2:
+            response=response+'第' +str(i)+'段\n'+call_openai_api(openaiobj,chunk)+'\n\n'
+            i=i+1
     finalresponse=response+'\n\n 這是根據以上分段會議紀錄彙編如下 \n\n' +call_openai_api(openaiobj,response)
+    # response=response+call_openai_summary(openaiobj,chunk)
+
+
     else:
-        finalresponse=
+        finalresponse=call_openai_api(openaiobj,chunk[0])
     return finalresponse
     # # Processes chunks in parallel
     # with ThreadPoolExecutor() as executor:
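For context, the two helpers combine into a simple map-reduce: split_into_chunks (truncated above; tokens=15900 fits just under the 16k context of gpt-3.5-turbo) slices the transcript on token boundaries, and process_chunks summarizes each slice, then summarizes the concatenated summaries into a final record. A sketch of that flow under those assumptions, reusing the hypothetical call_openai_summaryall above; note the single-chunk branch in the diff reads chunk[0], which is presumably meant to be chunks[0]:

import tiktoken

def split_into_chunks(text, model="gpt-3.5-turbo", tokens=15900):
    # Encode once, slice the token ids into fixed-size windows,
    # and decode each window back into text.
    encoding = tiktoken.encoding_for_model(model)
    ids = encoding.encode(text)
    return [encoding.decode(ids[i:i + tokens]) for i in range(0, len(ids), tokens)]

def process_chunks(client, text):
    # Map: summarize each chunk; reduce: summarize the joined summaries.
    chunks = split_into_chunks(text)
    if len(chunks) == 1:
        return call_openai_summaryall(client, chunks[0])
    parts = ['第' + str(i) + '段\n' + call_openai_summaryall(client, chunk)
             for i, chunk in enumerate(chunks, start=1)]
    combined = '這是分段會議紀錄結果\n\n' + '\n\n'.join(parts)
    return combined + '\n\n 這是根據以上分段會議紀錄彙編如下 \n\n' + call_openai_summaryall(client, combined)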
@@ -212,7 +234,7 @@ file_transcribe = gr.Interface(
     allow_flagging="never",
 )
 import google.generativeai as genai
-def gpt4write(
+def gpt4write(openaikeystr,transcribe_text):
 # openaiobj = OpenAI(
 # # This is the default and can be omitted
 
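The import google.generativeai as genai alongside the re-signed gpt4write suggests a Gemini-backed variant of the same writer is planned. A hypothetical equivalent using that SDK; the function name, prompt wording, and model name are assumptions, not from the commit:

import google.generativeai as genai

def gemini_write(api_key, transcribe_text):
    # Hypothetical Gemini counterpart to gpt4write: configure the SDK,
    # then make one model call that compiles the minutes.
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel("gemini-pro")
    result = model.generate_content(
        "請根據以下分段會議紀錄,彙整成正式會議紀錄:\n\n" + transcribe_text)
    return result.text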