Update app.py
app.py CHANGED
@@ -1,56 +1,21 @@
-# Import necessary libraries
 import streamlit as st
-from
-from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
-from mistral_common.protocol.instruct.messages import UserMessage
-from mistral_common.protocol.instruct.request import ChatCompletionRequest
-import torch
+from clarifai.client.model import Model
 
-#
-
-
-
-
-
-
-model = AutoModelForCausalLM.from_pretrained("mistralai/Codestral-22B-v0.1")
-model.to("cuda")
-
-# Function to generate text
-def generate_text(prompt):
-    # Encode the prompt
-    completion_request = ChatCompletionRequest(messages=[UserMessage(content=prompt)])
-    tokens = tokenizer.encode_chat_completion(completion_request).tokens
-
-    # Generate text using the model
-    with torch.no_grad():
-        generated_ids = model.generate(torch.tensor([tokens]).to(model.device), max_new_tokens=1000, do_sample=True)
-
-    # Decode the generated text
-    result = tokenizer.decode(generated_ids[0].tolist())
-    return result
+# Function to get prediction from the model
+def get_model_prediction(prompt):
+    model_url = "https://clarifai.com/mistralai/completion/models/codestral-22b-instruct"
+    pat = "c366cdccefa24e5eb7ae481a612c1b81"
+    model = Model(url=model_url, pat=pat)
+    model_prediction = model.predict_by_bytes(prompt.encode(), input_type="text")
+    return model_prediction.outputs[0].data.text.raw
 
 # Streamlit interface
-st.title("
-
-st.write("""
-This is a text generation application using the Codestral model from Mistral AI.
-Enter your prompt below and generate text.
-""")
-
-# User input
-user_input = st.text_area("Enter your prompt here:", "")
+st.title("AI Model Prediction with Clarifai")
+st.write("Enter a prompt and get a prediction from the Clarifai model.")
 
-
-if user_input:
-    with st.spinner("Generating text..."):
-        # Generate text using the model
-        generated_text = generate_text(user_input)
-        st.write("### Generated Text")
-        st.write(generated_text)
-else:
-    st.warning("Please enter a prompt to generate text.")
+prompt = st.text_input("Enter your prompt:", "What's the future of AI?")
 
-
-
-st.
+if st.button("Get Prediction"):
+    prediction = get_model_prediction(prompt)
+    st.write("Model Prediction:")
+    st.write(prediction)
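Note on the new version: the Clarifai personal access token is hardcoded in app.py, so it now lives in the repository history. Below is a minimal sketch of the same app with the token read from the environment instead. The `CLARIFAI_PAT` variable name is this sketch's assumption; the `Model` constructor and `predict_by_bytes` call are taken verbatim from the committed code.

```python
import os

import streamlit as st
from clarifai.client.model import Model

# Model URL as committed in app.py.
MODEL_URL = "https://clarifai.com/mistralai/completion/models/codestral-22b-instruct"


def get_model_prediction(prompt: str) -> str:
    # Read the personal access token from the environment (the variable name
    # CLARIFAI_PAT is an assumption of this sketch) rather than hardcoding it.
    pat = os.environ["CLARIFAI_PAT"]
    model = Model(url=MODEL_URL, pat=pat)
    # Same call as the committed code: the prompt is sent as raw text bytes.
    prediction = model.predict_by_bytes(prompt.encode(), input_type="text")
    return prediction.outputs[0].data.text.raw


st.title("AI Model Prediction with Clarifai")
prompt = st.text_input("Enter your prompt:", "What's the future of AI?")

if st.button("Get Prediction"):
    st.write("Model Prediction:")
    st.write(get_model_prediction(prompt))
```

With this layout the token is supplied at launch time, e.g. `CLARIFAI_PAT=<your token> streamlit run app.py`, and app.py itself can be committed without exposing the credential.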