change model to gpt-4 turbo
app.py
CHANGED
@@ -22,12 +22,13 @@ from openai import OpenAI
 from concurrent.futures import ThreadPoolExecutor
 import tiktoken
 
-
+usemodelname='gpt-4-0125-preview'
 
 def call_openai_api(openaiobj,transcription):
 
     response = openaiobj.chat.completions.create(
-        model="gpt-3.5-turbo",
+        #model="gpt-3.5-turbo",
+        model=usemodelname,
         temperature=0,
         messages=[
             {
@@ -44,7 +45,8 @@ def call_openai_api(openaiobj,transcription):
 def call_openai_summary(openaiobj,transcription):
 
     response = openaiobj.chat.completions.create(
-        model="gpt-3.5-turbo",
+        #model="gpt-3.5-turbo",
+        model=usemodelname,
         temperature=0,
         messages=[
             {
@@ -61,8 +63,9 @@ def call_openai_summary(openaiobj,transcription):
 
 
 
-def split_into_chunks(text, tokens=
-    encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')
+def split_into_chunks(text, tokens=127900):
+    #encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')
+    encoding = tiktoken.encoding_for_model(usemodelname)
     words = encoding.encode(text)
     chunks = []
     for i in range(0, len(words), tokens):