Update functions.py
functions.py: +3 -4
@@ -2,6 +2,7 @@ import whisper
 import os
 import random
 import openai
+from openai import OpenAI
 import yt_dlp
 from pytube import YouTube, extract
 import pandas as pd
@@ -21,8 +22,6 @@ from bs4 import BeautifulSoup
 import base64, time
 from annotated_text import annotated_text
 import pickle, math
-import wikipedia
-from pyvis.network import Network
 import torch
 from pydub import AudioSegment
 from langchain.docstore.document import Document
@@ -57,7 +56,7 @@ from langchain.prompts import PromptTemplate
 from langsmith import Client

 client = Client()
-
+openai_audio = OpenAI()
 nltk.download('punkt')


@@ -153,7 +152,7 @@ def load_whisper_api(audio):

    '''Transcribe YT audio to text using Open AI API'''
    file = open(audio, "rb")
-    transcript =
+    transcript = openai_audio.audio.transcriptions.create("whisper-2", file)

    return transcript

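For reference, a minimal sketch of the same helper written against the documented openai-python v1 client is shown below. It assumes OPENAI_API_KEY is set in the environment and uses the keyword-argument form with the published "whisper-1" model name; the commit above passes "whisper-2" positionally, which may not match the SDK's documented signature.

# Sketch only: documented keyword-argument form of the v1 transcription call.
# "whisper-1" is the model name published by the OpenAI API; adjust if your
# account exposes a different model.
from openai import OpenAI

openai_audio = OpenAI()  # reads OPENAI_API_KEY from the environment

def load_whisper_api(audio):
    '''Transcribe YT audio to text using the OpenAI API'''
    with open(audio, "rb") as file:
        transcript = openai_audio.audio.transcriptions.create(
            model="whisper-1",
            file=file,
        )
    return transcript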