|
from openai import OpenAI as OPENAI |
|
import os |
|
import gradio as gr |
|
import json |
|
|
|
# Shared OpenAI client for the whole app. OpenAI() also reads
# OPENAI_API_KEY from the environment by default; the explicit
# assignment below makes that dependency visible.
client = OPENAI()

client.api_key = os.environ.get("OPENAI_API_KEY")




# Global chat transcript: a flat list of {"role": ..., "content": ...}
# message dicts shared by initial_prompt() and handle_input().
conversation_history=[]
|
|
|
def predict(input):
    """Send a single user message to GPT-4 and return the reply text.

    Args:
        input: The user's message content.

    Returns:
        The assistant's reply as a string.
    """
    # Bug fix: the v1 OpenAI client exposes chat.completions.create
    # (there is no client.completion attribute), and the choice field
    # is .message, not .messag.
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": input}],
    )
    response = completion.choices[0].message.content
    return response
|
|
|
|
|
|
|
def initial_prompt():
    """Seed the conversation with the CRM-assistant system prompt.

    Loads the CRM record templates from ``templates/record_types.json``,
    appends the (Turkish) system instructions to the global
    ``conversation_history``, asks GPT-4 for its opening message, records
    it in the history, and prints it.

    Side effects:
        Mutates the global ``conversation_history`` and prints the model's
        opening reply.
    """
    # Bug fix: use a context manager so the template file is always closed
    # (the original leaked the file handle).
    with open('templates/record_types.json') as f:
        crm_template_json = json.load(f)

    global conversation_history
    default_message = (f"""
    Microsoft Dynamic CRM için kayıt olusturmada yardımcı bir asistan gibi davranmanı istiyorum.
    Senin isin yalnizca CRM kayit olsuturma ve yonetme konusunda yardimci olmak, bu sebeple kullanici
    CRM haricinde baska bir konuda konusmak isterse onca kibarca gorevini hatirlatarak istedigi islemi
    yapamayacagini belirtiyorsun. Kullanici seninle iletisime gectiginde kendini CRM Asistan Bot olarak
    tanitarak nasil yardimci olabilecegini soracaksin.
    Kullanicinin yapmak istedigi isleme gore {crm_template_json} json verisi icinden 'Contact' yada 'Aktivite'
    templatelerine gore kullanicin girmesi gereken input verilerini isteyeceksin.
    Kullanıcı seninle iletişime geçtiğinde amaçlarını ögrenip, amaclanan kayit icin ihtiyac olabilecek
    alan bilgilerini sirasiyla, adim adim kullanicidan isteyecek ve islemler bittiginda bu bilgileri
    json olarak doneceksin. Kullanici kayit olusturman icin onaylamadigi surece
    ek bilgi ekleyip eklemek istemedigini soracaksin. Ne olursa olsun kullanicinin belirtmesi gereken bilgileri kendin girmeyeceksin.
    Kullanici ek bilgi eklemek isterse sirasiyla o bilgileri isteyerek olusturacagin kayit bilgisine ekleyeceksin.
    Ornek: [Kullanici:"Merhaba", Asistan:"Merhaba! Size nasil yardimci olabilirim?",
    Kullanici: "Kontakt kaydi olusturmak istiyorum", ...]. Ilk kayit ile baslayalim
    """)

    conversation_history.append({"role": "system", "content": default_message})

    # Consistency fix: pass the structured message list directly, as
    # handle_input() does, instead of stringifying the dicts into one blob.
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=conversation_history,
        temperature=0.3,
        max_tokens=7500,
    )

    message = completion.choices[0].message.content
    # Bug fix: record the assistant's opening message so later turns in
    # handle_input() see a coherent history (the original dropped it).
    conversation_history.append({"role": "assistant", "content": message})
    print(message)
|
|
|
initial_prompt() |
|
|
|
def handle_input(input_str: str):
    """Append a user turn, query GPT-4, record and return the reply.

    Args:
        input_str: The user's message.

    Returns:
        The assistant's reply text.

    Side effects:
        Mutates the global ``conversation_history`` (trims it when long,
        appends the user and assistant turns).
    """
    global conversation_history

    # Keep the request inside the context window: retain the system prompt
    # (first entry) plus the 10 most recent turns once history grows.
    if len(conversation_history) >= 20:
        conversation_history = conversation_history[:1] + conversation_history[-10:]

    conversation_history.append({"role": "user", "content": input_str})

    completion = client.chat.completions.create(
        model="gpt-4",
        messages=conversation_history,
    )

    message = completion.choices[0].message.content

    conversation_history.append({"role": "assistant", "content": message})
    # Bug fix: the original never returned, so get_model_reply() received
    # None and the chatbot showed empty assistant replies.
    return message
|
|
|
def get_response_again(content):
    """Re-query GPT-4 with assistant content until the reply is clean.

    Repeatedly resends ``content`` as an assistant message and returns the
    first reply that does not contain the word "Müşteri".

    Args:
        content: Assistant message content to resend.

    Returns:
        The first generated reply not containing "Müşteri".
    """
    while True:
        # Bug fix: the attribute is chat.completions (plural), not
        # chat.completion — the original raised AttributeError at runtime.
        completion = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "assistant", "content": content}],
        )

        message = completion.choices[0].message.content

        if "Müşteri" not in message:
            # Returning here also removes the original's unreachable
            # duplicate `return message` after the loop.
            return message
|
|
|
def get_model_reply(query, context=None):
    """Gradio submit callback: run one chat turn and build display pairs.

    Args:
        query: The user's text input.
        context: Flat ``[user, assistant, user, assistant, ...]`` history
            list carried in the Gradio ``State``.

    Returns:
        A tuple ``(responses, context)`` where ``responses`` is a list of
        ``(user, assistant)`` pairs for the Chatbot widget and ``context``
        is the updated flat history.
    """
    # Bug fix: the original used a mutable default argument (context=[]),
    # which is shared across all calls that omit the argument.
    if context is None:
        context = []

    context += [query]

    response = handle_input(query)

    context += [response]

    # Pair even-indexed (user) with odd-indexed (assistant) entries.
    responses = [(u, b) for u, b in zip(context[::2], context[1::2])]

    return responses, context
|
|
|
|
|
|
|
def speech_2_text(audio, api_key, context=None):
    """Transcribe audio with Whisper, send the transcript to GPT-4.

    Args:
        audio: Filesystem path to the recorded audio file.
        api_key: OpenAI API key to use for these requests.
        context: Flat ``[user, assistant, ...]`` history list.

    Returns:
        A tuple ``(responses, context)``: ``(user, assistant)`` pairs for
        the Chatbot widget and the updated flat history.
    """
    # Bug fix: mutable default argument replaced with None sentinel.
    if context is None:
        context = []

    client.api_key = api_key

    # Bug fix: close the audio file deterministically; and the v1 client
    # API is client.audio.transcriptions.create — the legacy
    # client.Audio.transcribe used originally no longer exists.
    with open(audio, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )

    prompt = transcript.text
    context += [prompt]

    # Bug fix: chat.completions (plural), not chat.completion.
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=2500,
    )

    response = completion.choices[0].message.content
    context += [response]

    responses = [(u, b) for u, b in zip(context[::2], context[1::2])]

    return responses, context
    # NOTE(review): the original contained ~50 lines of unreachable code
    # after this return — a yt_dlp download/transcribe flow referencing
    # undefined names (url, yt_dlp). It was dead code and has been removed.
|
|
|
|
|
# Build the Gradio UI: one "ChatBot" tab containing the scrollable chat
# window, a hidden per-session State list, and a text box that submits
# on Enter. The CSS pins the chat window to a 400px scrollable area.
with gr.Blocks(css="#chatbot {overflow-y:auto; height:400px;}") as dialog_app:

    with gr.Tab("ChatBot"):
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                chatbot = gr.Chatbot(elem_id="chatbot")
                state = gr.State([])

                txt = gr.Textbox(
                    container=False,
                    show_label=False,
                    placeholder="Enter text and press enter",
                )

                # On Enter: run the model callback with (text, history),
                # update the chat display and the stored history, then
                # clear the input box.
                txt.submit(get_model_reply, [txt, state], [chatbot, state])
                txt.submit(lambda: "", None, txt)

# Start the web server (blocking call).
dialog_app.launch()