Spaces:
Sleeping
Sleeping
ChandimaPrabath
committed on
Commit
·
af3518b
1
Parent(s):
cea7309
init
Browse files- app.py +142 -0
- config.yaml +18 -0
- llm.py +35 -0
- requirements.txt +4 -0
- sd.py +76 -0
- utils.py +7 -0
app.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import threading
|
3 |
+
import requests
|
4 |
+
from flask import Flask, request, jsonify
|
5 |
+
|
6 |
+
from llm import generate_llm
|
7 |
+
from sd import generate_sd
|
8 |
+
|
9 |
+
# --- Environment configuration --------------------------------------------
# Green API (WhatsApp gateway) endpoints and credentials.
GREEN_API_URL = os.getenv("GREEN_API_URL")
GREEN_API_MEDIA_URL = os.getenv("GREEN_API_MEDIA_URL", "https://api.green-api.com")
GREEN_API_TOKEN = os.getenv("GREEN_API_TOKEN")
GREEN_API_ID_INSTANCE = os.getenv("GREEN_API_ID_INSTANCE")
# Shared secret the webhook caller must present in the Authorization header.
WEBHOOK_AUTH_TOKEN = os.getenv("WEBHOOK_AUTH_TOKEN")

# Fail fast with a clear message when PORT is missing, instead of the opaque
# TypeError that int(None) would raise.
_port_env = os.getenv("PORT")
if _port_env is None:
    raise ValueError("Environment variable PORT is not set")
PORT = int(_port_env)

if not all([GREEN_API_URL, GREEN_API_TOKEN, GREEN_API_ID_INSTANCE, WEBHOOK_AUTH_TOKEN]):
    raise ValueError("Environment variables are not set properly")

app = Flask(__name__)
|
20 |
+
|
21 |
+
def send_message(message_id, to_number, message, retries=3):
    """Send a text message via Green API, quoting the original message.

    Retries up to *retries* times on any request failure. On final failure
    returns {"error": ...} instead of raising, so worker threads never crash.
    """
    # Green API accepts both user (@c.us) and group (@g.us) chat ids as-is;
    # the original's if/else set the same value on both branches.
    chat_id = to_number

    url = f"{GREEN_API_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendMessage/{GREEN_API_TOKEN}"
    payload = {
        "chatId": chat_id,
        "message": message,
        "quotedMessageId": message_id,
    }

    last_error = None
    for _ in range(retries):
        try:
            # Timeout so a stalled gateway cannot hang a worker thread forever.
            response = requests.post(url, json=payload, timeout=30)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            last_error = e
    return {"error": str(last_error)}
|
46 |
+
|
47 |
+
def send_image(message_id, to_number, image_path, retries=3):
    """Upload and send an image file via Green API, quoting the original message.

    Retries up to *retries* times; returns the API response dict, or
    {"error": ...} on final failure.
    """
    # User and group chat ids are both passed through unchanged.
    chat_id = to_number

    url = f"{GREEN_API_MEDIA_URL}/waInstance{GREEN_API_ID_INSTANCE}/sendFileByUpload/{GREEN_API_TOKEN}"
    payload = {'chatId': chat_id, 'caption': 'Here you go!', 'quotedMessageId': message_id}

    last_error = None
    for _ in range(retries):
        try:
            # Reopen the file on every attempt: the original opened it once,
            # never closed it (handle leak), and a retry would re-send an
            # already-consumed stream (empty upload). `with` guarantees close.
            with open(image_path, 'rb') as fh:
                files = [('file', ('image.jpg', fh, 'image/jpeg'))]
                response = requests.post(url, data=payload, files=files, timeout=60)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            last_error = e
    return {"error": str(last_error)}
|
69 |
+
|
70 |
+
def response_text(message_id, chat_id, prompt):
    """Generate an LLM reply to *prompt* and send it back to the chat.

    Runs in a worker thread; never raises — failures are logged and a
    generic error message is sent to the user instead.
    """
    try:
        msg = generate_llm(prompt)
        send_message(message_id, chat_id, msg)
    except Exception as e:
        # Log before replying; the original swallowed the error silently.
        print(f"LLM response failed: {e}")
        send_message(message_id, chat_id, "There was an error processing your request.")
|
79 |
+
|
80 |
+
def handle_image_generation(message_id, chat_id, prompt):
    """Generate an image for *prompt* via Stable Diffusion and send it.

    Runs in a worker thread; never raises — failures are logged and a
    user-facing error message is sent instead.
    """
    try:
        image_data, image_path = generate_sd(prompt)
        if image_data:
            send_image(message_id, chat_id, image_path)
        else:
            # generate_sd returns (None, None) on any failure.
            send_message(message_id, chat_id, "Failed to generate image. Please try again later.")
    except Exception as e:
        # Log before replying; the original swallowed the error silently.
        print(f"Image generation failed: {e}")
        send_message(message_id, chat_id, "There was an error generating the image. Please try again later.")
|
92 |
+
|
93 |
+
@app.route('/', methods=['GET'])
def index():
    """
    Liveness probe: responds with a fixed string while the app is up.
    """
    status_text = "Server is running!"
    return status_text
|
99 |
+
|
100 |
+
@app.route('/whatsapp', methods=['POST'])
def whatsapp_webhook():
    """Handle incoming WhatsApp webhooks from Green API.

    Flow: authorize -> filter to incoming messages -> extract the text body
    -> dispatch to a worker thread (/imagine -> image generation, anything
    else -> LLM chat). Returns quickly with 200 in all handled cases so the
    gateway does not keep re-delivering.
    """
    # Authorize before touching the (untrusted) payload at all.
    auth_header = request.headers.get('Authorization', '').strip()
    if auth_header != f"Bearer {WEBHOOK_AUTH_TOKEN}":
        return jsonify({"error": "Unauthorized"}), 403

    # silent=True: a malformed or non-JSON body yields None instead of
    # raising; the original would crash with AttributeError on data.get.
    data = request.get_json(silent=True) or {}

    if data.get('typeWebhook') != 'incomingMessageReceived':
        return jsonify(success=True)

    try:
        chat_id = data['senderData']['chatId']
        message_id = data['idMessage']
        message_data = data.get('messageData', {})

        if 'textMessageData' in message_data:
            body = message_data['textMessageData']['textMessage'].strip()
        elif 'extendedTextMessageData' in message_data:
            body = message_data['extendedTextMessageData']['text'].strip()
        else:
            # Non-text message (media, reaction, ...): acknowledge and ignore.
            return jsonify(success=True)
    except KeyError as e:
        # 200 on purpose: a non-2xx status would make the gateway re-deliver
        # the same malformed payload forever.
        return jsonify({"error": f"Missing key in data: {e}"}), 200

    if body.lower().startswith('/imagine'):
        prompt = body.replace('/imagine', '').strip()
        if not prompt:
            send_message(message_id, chat_id, "Please provide a prompt after /imagine.")
        else:
            send_message(message_id, chat_id, "Generating...")
            # Generation is slow; run it off-thread so the webhook returns fast.
            threading.Thread(target=handle_image_generation, args=(message_id, chat_id, prompt)).start()
    else:
        threading.Thread(target=response_text, args=(message_id, chat_id, body)).start()

    return jsonify(success=True)
|
140 |
+
|
141 |
+
if __name__ == '__main__':
    # Bind on all interfaces so the container/Space can route traffic in.
    # NOTE(review): debug=True enables the Werkzeug reloader/debugger — not
    # suitable for production exposure; confirm this is intentional here.
    app.run(debug=True, port=PORT, host="0.0.0.0")
|
config.yaml
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
config:
|
2 |
+
llm:
|
3 |
+
model: koboldcpp/HF_SPACE_Tiefighter-13B
|
4 |
+
system_prompt: |
|
5 |
+
Your name is {char}. You are an AI capable of creating images. Users can access you through the Telegram bot https://t.me/eve_uni_bot and the WhatsApp group https://chat.whatsapp.com/ETDeb3AOywhCI59gbau8wU. In WhatsApp, just join the group and send the command to the group chat; the bot will generate the image and send it back through the group chat, so don't send commands to the WhatsApp bot account directly. If users want to generate images privately, tell them to use the Telegram bot instead of the WhatsApp group, because in Telegram each user talks to the bot privately. Users can only create images when the /imagine command is provided. Example: /imagine a white cat, realistic, bright colors. If someone uses the command incorrectly, help them with instructions on how to generate an image using the /imagine command. If a user struggles with the prompt, help them with the prompt. If the user asks questions or starts a conversation, chat with them. If the command is misspelled, tell the user about it.
|
6 |
+
char: Eve
|
7 |
+
SD:
|
8 |
+
steps: 30
|
9 |
+
negative_prompt: nsfw, nude, low quality, lowres, low details, oversaturated, undersaturated, underexposed,blurry, grainy, morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, jpeg artifacts, out of focus, glitch, duplicate,bad body parts, bad anatomy, bad hands, bad face, bad eyes, bad mouth, bad ears, bad legs, ugly face, ugly eyes, watermark, text, error, missing fingers
|
10 |
+
width: 1024
|
11 |
+
height: 1024
|
12 |
+
sampler_name: DPM++ 2M
|
13 |
+
cfg_scale: 7.0
|
14 |
+
app:
|
15 |
+
whatsapp_bot_enabled: true
|
16 |
+
telegram_bot_enabled: true
|
17 |
+
version: "0.3 Beta"
|
18 |
+
debug: true
|
llm.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from openai import OpenAI
|
2 |
+
from dotenv import load_dotenv
|
3 |
+
import os
|
4 |
+
from utils import read_config
|
5 |
+
|
6 |
+
# Load environment variables from a local .env file (no-op if absent).
load_dotenv()
# Base URL of the OpenAI-compatible chat endpoint (a koboldcpp server here).
BASE_URL = os.getenv("LLM_BASE_URL")

# Initialize the OpenAI-compatible client. koboldcpp does not validate the
# API key, but the client library requires a non-empty value.
client = OpenAI(
    api_key="koboldcpp",
    base_url=BASE_URL
)
|
14 |
+
|
15 |
+
def pre_process():
    """Build the system prompt with the character name substituted in."""
    # Re-read the config on every call so edits take effect without restart.
    cfg = read_config()
    llm_cfg = cfg['llm']
    return llm_cfg['system_prompt'].replace("{char}", llm_cfg['char'])
|
23 |
+
|
24 |
+
def generate_llm(prompt):
    """Send *prompt* to the LLM with the configured system prompt.

    Returns the assistant's reply text.
    """
    system_prompt = pre_process()
    # Take the model name from config.yaml instead of hard-coding it, so the
    # config's llm.model setting actually takes effect.
    model = read_config()['llm']['model']
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt}
        ]
    )
    return response.choices[0].message.content
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
uvicorn[standard]
openai
flask
requests
python-dotenv
pyyaml
|
sd.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
import base64
|
3 |
+
import os
|
4 |
+
from datetime import datetime
|
5 |
+
import io
|
6 |
+
import yaml
|
7 |
+
|
8 |
+
# txt2img endpoint; generate_sd expects its JSON response to carry base64
# images under "images". Presumably an Automatic1111-compatible API — confirm.
IMAGE_GENERATION_URL = os.getenv("SD_API_URL")
CACHE_DIR = '/tmp/cache'  # Use /tmp directory which is writable

# Ensure the cache directory exists
os.makedirs(CACHE_DIR, exist_ok=True)
|
13 |
+
|
14 |
+
def load_config():
    """Read and parse config.yaml from the current working directory."""
    # NOTE(review): utils.read_config does nearly the same thing; consider
    # consolidating the two.
    with open('config.yaml', 'r') as fh:
        parsed = yaml.safe_load(fh)
    return parsed
|
17 |
+
|
18 |
+
def generate_sd(prompt):
    """
    Generate an image using the specified prompt and save it to the cache directory.

    Returns:
        image_data (bytes): Raw image data as bytes, or None on failure.
        image_path (str): Absolute path to the saved image file using forward
            slashes, or None on failure.
    """
    config = load_config()['config']
    # Generation settings come from config.yaml so they can be tuned without
    # code changes.
    sd_cfg = config['SD']
    payload = {
        "prompt": prompt,
        "steps": sd_cfg['steps'],
        "width": sd_cfg['width'],
        "height": sd_cfg['height'],
        "sampler_name": sd_cfg['sampler_name'],
        "seed": -1,  # -1 = random seed per request
        "cfg_scale": sd_cfg['cfg_scale'],
        "negative_prompt": sd_cfg['negative_prompt'],
    }

    try:
        # Timeout so a hung SD backend cannot block a worker thread forever;
        # generation is slow, hence the generous limit.
        response = requests.post(IMAGE_GENERATION_URL, json=payload, timeout=300)
        response.raise_for_status()  # Raise an exception for HTTP errors
        r = response.json()

        if r.get('images'):
            # The API returns images as base64 strings; take the first one.
            image_data = base64.b64decode(r['images'][0])
            timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
            # NOTE(review): the bytes are saved under a .jpg name, but the
            # API's actual encoding (PNG vs JPEG) is never checked — confirm.
            image_path = os.path.abspath(os.path.join(CACHE_DIR, f"image_{timestamp}.jpg"))
            prompt_path = os.path.abspath(os.path.join(CACHE_DIR, f"image_{timestamp}.yaml"))

            # Save the generation parameters alongside the image for later
            # reference.
            with open(prompt_path, 'w') as f:
                yaml.dump(payload, f)

            # Write the decoded bytes directly; the original took a pointless
            # detour through io.BytesIO.
            with open(image_path, 'wb') as f:
                f.write(image_data)

            # Convert the path to use forward slashes
            return image_data, image_path.replace('\\', '/')

        print("No images found in the response.")
        return None, None
    except requests.RequestException as e:
        print(f"HTTP request error: {e}")
        return None, None
    except Exception as e:
        # Best-effort: callers treat (None, None) as "generation failed".
        print(f"Error generating image: {e}")
        return None, None
|
utils.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
def read_config():
    """Load config.yaml and return its top-level 'config' mapping."""
    import yaml

    # safe_load: a plain settings file needs no arbitrary-object construction,
    # so the broader FullLoader was unnecessary exposure.
    with open("config.yaml", "r") as yamlfile:
        config = yaml.safe_load(yamlfile)

    return config['config']
|