import os

import streamlit as st
import openai
from openai.error import OpenAIError, RateLimitError, APIConnectionError
# Read the OpenAI API key from the environment; never hard-code it in the source.
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
    st.error("OPENAI_API_KEY is not set. Add it to the environment before running the app.")
# If a request fails later, the calling code reports the error to the user; you might
# also retry the request, notify an administrator, or fall back to a default response.
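# The note above mentions retrying failed requests. A minimal sketch of that idea,
# assuming the legacy (pre-1.0) openai SDK used throughout this file; the helper
# name and retry/backoff values are illustrative and the app below does not call it.
import time

def create_chat_completion_with_retry(messages, model="gpt-4", retries=3, backoff=2.0):
    """Call the ChatCompletion endpoint, retrying rate-limited requests with exponential backoff."""
    for attempt in range(retries):
        try:
            return openai.ChatCompletion.create(model=model, messages=messages)
        except RateLimitError:
            if attempt == retries - 1:
                raise
            time.sleep(backoff * (2 ** attempt))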
KNOWN_MODELS = [
    # General ML models
    "Neural Networks", "Decision Trees", "Support Vector Machines",
    "Random Forests", "Linear Regression", "Reinforcement Learning",
    "Logistic Regression", "k-Nearest Neighbors", "Naive Bayes",
    "Gradient Boosting Machines", "Regularization Techniques",
    "Ensemble Methods", "Time Series Analysis",

    # Deep Learning models
    "Deep Learning", "Convolutional Neural Networks",
    "Recurrent Neural Networks", "Transformer Models",
    "Generative Adversarial Networks", "Autoencoders",
    "Bidirectional LSTM", "Residual Networks (ResNets)",
    "Variational Autoencoders",

    # Computer Vision models and techniques
    "Object Detection (e.g., YOLO, SSD)", "Semantic Segmentation",
    "Image Classification", "Face Recognition", "Optical Character Recognition (OCR)",
    "Pose Estimation", "Style Transfer", "Image-to-Image Translation",
    "Image Generation", "Capsule Networks",

    # NLP models and techniques
    "BERT", "GPT", "ELMo", "T5", "Word2Vec", "Doc2Vec",
    "Topic Modeling", "Sentiment Analysis", "Text Classification",
    "Machine Translation", "Speech Recognition", "Sequence-to-Sequence Models",
    "Attention Mechanisms", "Named Entity Recognition", "Text Summarization"
]
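# The validation step further down is commented out because the GPT reply is free-form
# text, so an exact membership test against KNOWN_MODELS would almost always fail.
# A looser check could look like this sketch; the helper name is illustrative and the
# UI below does not call it.
def mentions_known_model(recommendation):
    """Return True if any KNOWN_MODELS entry appears in the recommendation text."""
    text = recommendation.lower()
    return any(model.lower() in text for model in KNOWN_MODELS)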
def recommend_ai_model_via_gpt(query):
    # The caller already phrases the full question, so pass it through unchanged.
    messages = [
        {"role": "user", "content": query}
    ]
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages
        )
        recommendation = response['choices'][0]['message']['content'].strip()
        return recommendation
    except RateLimitError as e:
        return f"Rate limit exceeded: {e}"
    except APIConnectionError as e:
        return f"Connection error: {e}"
    except OpenAIError as e:
        return f"Error: {e}"
def explain_recommendation(model_name):
    messages = [
        {"role": "user", "content": f"Why would {model_name} be a suitable choice for the application?"}
    ]
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages
        )
        explanation = response['choices'][0]['message']['content'].strip()
        return explanation
    except RateLimitError as e:
        return f"Rate limit exceeded: {e}"
    except APIConnectionError as e:
        return f"Connection error: {e}"
    except OpenAIError as e:
        return f"Error: {e}"
# Streamlit UI
st.image("./A8title2.png")
st.title('Find the best AI stack for your app')
description = st.text_area("Describe your application:", "")
recommendation_type = st.radio("What type of recommendation are you looking for?", ["Recommend Open-Source Model", "Recommend API Service"])
if "rec_model_pressed" not in st.session_state:
st.session_state.rec_model_pressed = False
if "feedback_submitted" not in st.session_state:
st.session_state.feedback_submitted = False
if st.button("Recommend AI Model"):
st.session_state.rec_model_pressed = True
if st.session_state.rec_model_pressed:
    if description:
        # Phrase the query according to the requested recommendation type.
        if recommendation_type == "Recommend Open-Source Model":
            query = f"Given the application described as: '{description}', which open-source AI model would be most suitable?"
        else:  # Recommend API Service
            query = f"Given the application described as: '{description}', which AI service API would be best?"

        recommended_model = recommend_ai_model_via_gpt(query)

        # Model validation is commented out for this example; see the
        # mentions_known_model sketch above for a looser check.
        # if recommended_model not in KNOWN_MODELS:
        #     st.warning("The recommendation is ambiguous. Please refine your description or consult an expert.")
        st.subheader(f"Recommended: {recommended_model}")

        explanation = explain_recommendation(recommended_model)
        st.write("Reason:", explanation)

        # Collect a rating and feedback through Streamlit
        rating = st.slider("Rate the explanation from 1 (worst) to 5 (best):", 1, 5)
        feedback = st.text_input("Any additional feedback?")

        if st.button("Submit Feedback"):
            st.session_state.feedback_submitted = True

        if st.session_state.feedback_submitted:
            st.success("Thank you for your feedback!")
            st.write("Contact team@autumn8.ai or call (857) 600-0180 to learn how we can fine-tune and host this app for you.")
    else:
        st.warning("Please provide a description.")