Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,71 +1,54 @@
|
|
1 |
import random
|
2 |
-
import gradio as gr
|
3 |
-
import ollama
|
4 |
-
from huggingface_hub import InferenceClient
|
5 |
import os
|
|
|
|
|
6 |
|
7 |
|
8 |
def chat_short_story(length, genre, theme, tone, writing_style):
|
9 |
"""
|
10 |
-
Generates a creative short story using
|
11 |
Parameters:
|
12 |
-
length (int):
|
13 |
-
genre (str):
|
14 |
-
theme (str):
|
15 |
-
tone (str):
|
16 |
-
writing_style (str):
|
17 |
Returns:
|
18 |
str: The generated short story, or an error message if unsuccessful.
|
19 |
"""
|
20 |
-
#
|
21 |
-
|
22 |
-
|
23 |
-
"
|
24 |
-
)
|
25 |
|
26 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
prompt = (
|
28 |
f"Write a creative short story of approximately {length} words in the {genre} genre. "
|
29 |
f"Use a {writing_style} writing style with a {tone} tone. "
|
30 |
f"The story should revolve around the theme of {theme}. "
|
31 |
-
f"Ensure
|
32 |
)
|
33 |
|
34 |
-
#
|
35 |
-
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
|
36 |
-
if not HUGGINGFACE_API_KEY:
|
37 |
-
return "Error: Hugging Face API key not found. Please set the HUGGINGFACE_API_KEY environment variable."
|
38 |
-
|
39 |
-
# Initialize the Hugging Face Inference Client
|
40 |
try:
|
41 |
-
|
|
|
|
|
|
|
|
|
42 |
except Exception as e:
|
43 |
-
return f"Error: Failed to
|
44 |
-
|
45 |
-
# Interact with the model
|
46 |
-
try:
|
47 |
-
result = client.chat.completions.create(
|
48 |
-
model="meta-llama/Llama-3.2-3B-Instruct",
|
49 |
-
messages=[
|
50 |
-
{"role": "system", "content": system_message},
|
51 |
-
{"role": "user", "content": prompt},
|
52 |
-
]
|
53 |
-
)
|
54 |
-
except Exception as e:
|
55 |
-
return f"Error: Failed to interact with the model. Details: {e}"
|
56 |
-
|
57 |
-
# Extract the story content from the response
|
58 |
-
if "choices" in result and len(result["choices"]) > 0:
|
59 |
-
return result["choices"][0]["message"]["content"]
|
60 |
-
else:
|
61 |
-
return "Error: No story generated. Please check your prompt or model configuration."
|
62 |
-
|
63 |
|
64 |
|
65 |
# Predefined options
|
66 |
Length = [100, 250, 750]
|
67 |
-
|
68 |
-
r_length = random.choice(Length)
|
69 |
|
70 |
Genre = [
|
71 |
"Fiction", "Nonfiction", "Drama", "Poetry", "Fantasy", "Horror", "Mystery",
|
@@ -86,19 +69,19 @@ r_Style = random.choice(Writing_Styles)
|
|
86 |
Tones = ["Formal", "Optimistic", "Worried", "Friendly", "Curious", "Assertive", "Encouraging"]
|
87 |
r_tones = random.choice(Tones)
|
88 |
|
89 |
-
|
90 |
# Gradio Interface setup
|
91 |
iface = gr.Interface(
|
92 |
fn=chat_short_story,
|
93 |
inputs=[
|
94 |
-
gr.Slider(value=100, label="
|
95 |
-
gr.Dropdown(label="
|
96 |
-
gr.Dropdown(label="
|
97 |
-
gr.Dropdown(label="
|
98 |
-
gr.Dropdown(label="
|
99 |
],
|
100 |
-
outputs=gr.
|
101 |
-
title="
|
|
|
102 |
)
|
103 |
|
104 |
iface.launch()
|
|
|
1 |
import random
|
|
|
|
|
|
|
2 |
import os
|
3 |
+
import gradio as gr
|
4 |
+
import google.generativeai as genai # Importing the Gemini API module
|
5 |
|
6 |
|
7 |
def chat_short_story(length, genre, theme, tone, writing_style):
|
8 |
"""
|
9 |
+
Generates a creative short story using Gemini API.
|
10 |
Parameters:
|
11 |
+
length (int): Approximate word count for the story.
|
12 |
+
genre (str): Genre of the story.
|
13 |
+
theme (str): Central theme of the story.
|
14 |
+
tone (str): Tone of the story.
|
15 |
+
writing_style (str): Writing style for the story.
|
16 |
Returns:
|
17 |
str: The generated short story, or an error message if unsuccessful.
|
18 |
"""
|
19 |
+
# Retrieve the Gemini API key from the environment
|
20 |
+
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
|
21 |
+
if not GEMINI_API_KEY:
|
22 |
+
return "Error: Gemini API key not found. Please set the GEMINI_API_KEY environment variable."
|
|
|
23 |
|
24 |
+
# Configure the Gemini API
|
25 |
+
try:
|
26 |
+
genai.configure(api_key=GEMINI_API_KEY)
|
27 |
+
except Exception as e:
|
28 |
+
return f"Error: Failed to configure the Gemini API. Details: {e}"
|
29 |
+
|
30 |
+
# Construct the prompt
|
31 |
prompt = (
|
32 |
f"Write a creative short story of approximately {length} words in the {genre} genre. "
|
33 |
f"Use a {writing_style} writing style with a {tone} tone. "
|
34 |
f"The story should revolve around the theme of {theme}. "
|
35 |
+
f"Ensure it is engaging and includes a suitable title."
|
36 |
)
|
37 |
|
38 |
+
# Generate a response using the Gemini model
|
|
|
|
|
|
|
|
|
|
|
39 |
try:
|
40 |
+
response = genai.generate_text(prompt=prompt, model="gemini-1.5-flash")
|
41 |
+
if "candidates" in response and len(response["candidates"]) > 0:
|
42 |
+
return response["candidates"][0]["output"] # Extract the story content
|
43 |
+
else:
|
44 |
+
return "Error: No story generated. Please check your input or try again."
|
45 |
except Exception as e:
|
46 |
+
return f"Error: Failed to generate story. Details: {e}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
|
48 |
|
49 |
# Predefined options
Length = [100, 250, 750]
# Every entry in Length is already >= 100, so the previous filter
# `[l for l in Length if l >= 100]` was a no-op; choose directly.
r_length = random.choice(Length)
|
|
52 |
|
53 |
Genre = [
|
54 |
"Fiction", "Nonfiction", "Drama", "Poetry", "Fantasy", "Horror", "Mystery",
|
|
|
69 |
# Tones the user can pick for the story's voice, plus one pre-selected
# at random (uniform over the list).
Tones = [
    "Formal",
    "Optimistic",
    "Worried",
    "Friendly",
    "Curious",
    "Assertive",
    "Encouraging",
]
r_tones = Tones[random.randrange(len(Tones))]
71 |
|
|
|
72 |
# Gradio Interface setup: one control per chat_short_story parameter,
# in the same order as the function signature.
_story_controls = [
    gr.Slider(value=100, label="Story Length", minimum=100, maximum=2500, step=50),
    gr.Dropdown(label="Story Genre", choices=Genre),
    gr.Dropdown(label="Story Theme", choices=Themes),
    gr.Dropdown(label="Writing Style", choices=Writing_Styles),
    gr.Dropdown(label="Story Tone", choices=Tones),
]

iface = gr.Interface(
    fn=chat_short_story,
    inputs=_story_controls,
    outputs=gr.Textbox(label="Generated Story"),
    title="Patrick's Story Generator",
    description="Generate creative short stories tailored to your preferences.",
)

iface.launch()
|