import gradio as gr
from anthropic import Anthropic
from openai import OpenAI
import json
import uuid
import os
import base64
from PIL import Image
from PIL.PngImagePlugin import PngInfo
from io import BytesIO

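# Official API base URLs; when one of these is used, the provider is inferred from the API key prefix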
default_urls = ["https://api.anthropic.com", "https://api.openai.com/v1"]

# List of available Claude models
claude_models = ["claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"]

# List of available OpenAI models
openai_models = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo", "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-0613"]

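# Prompt styles for the image-prompt step; each name matches a <name>.txt system prompt next to this script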
image_prompter = ["SDXL", "midjourney"]

both_models = claude_models + openai_models

def generate_response(endpoint, api_key, model, user_prompt):
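    """Generate a character card with the selected model.

    Chooses Anthropic or OpenAI based on the endpoint, key prefix, or model,
    sends the json.txt system prompt plus the user prompt, and returns
    (raw model output, extracted JSON string or an error note, path to the
    saved .json file or None).
    """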
    print(endpoint)
    # The card-generation system prompt lives next to this script as json.txt
    system_prompt_path = __file__.replace("app.py", "json.txt")

    if endpoint in default_urls:
        # Official endpoint: infer the provider from the API key prefix
        if api_key.startswith("sk-ant-"):
            client = Anthropic(api_key=api_key, base_url=endpoint)
        elif api_key.startswith("sk-"):
            client = OpenAI(api_key=api_key, base_url=endpoint)
        else:
            # Avoid echoing the key itself to the console
            print("Invalid API key format")
            return "Invalid API key", "Invalid API key", None
    else:
        # Custom endpoint (e.g. a proxy): infer the provider from the selected model
        if model in claude_models:
            client = Anthropic(api_key=api_key, base_url=endpoint)
        else:
            client = OpenAI(api_key=api_key, base_url=endpoint)

    # Read the system prompt from a text file
    with open(system_prompt_path, "r") as file:
        system_prompt = file.read()

    if model in claude_models:
        # Generate a response using the selected Anthropic model
        try:
            response = client.messages.create(
                system=system_prompt,
                messages=[{"role": "user", "content": user_prompt}],
                model=model,
                max_tokens=4096
            )
            response_text = response.content[0].text
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"
    else:
        try:
            # Generate a response using the selected OpenAI model
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                max_tokens=4096
            )
            response_text = response.choices[0].message.content
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"

    json_string, json_json = extract_json(response_text)
    create_unique_id = str(uuid.uuid4())

    # Save the extracted card JSON next to this script under outputs/
    json_folder = __file__.replace("app.py", "outputs/")
    os.makedirs(json_folder, exist_ok=True)
    path = None
    if json_string:
        path = f"{json_folder}{json_json['name']}_{create_unique_id}.json"
        with open(path, "w") as file:
            file.write(json_string)
    else:
        json_string = "No JSON data was found, or the JSON data was incomplete."
    return response_text, json_string, path

def extract_json(generated_output):
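    """Pull the first {...} block out of the model output and parse it.

    Returns (json_string, json_dict) when all required keys are present,
    otherwise (None, None).
    """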
    try:
        generated_output = generated_output.replace("```json", "").replace("```", "").strip()
        # Find the JSON string in the generated output
        json_start = generated_output.find("{")
        json_end = generated_output.rfind("}") + 1
        json_string = generated_output[json_start:json_end]
        print(json_string)

        # Parse the JSON string
        json_data = json.loads(json_string)
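        # Duplicate the pygmalion-style keys into the SillyTavern card field names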
        json_data['name'] = json_data['char_name']
        json_data['personality'] = json_data['char_persona']
        json_data['scenario'] = json_data['world_scenario']
        json_data['first_mes'] = json_data['char_greeting']
        # Check if all the required keys are present
        required_keys = ["char_name", "char_persona", "world_scenario", "char_greeting", "example_dialogue", "description"]
        if all(key in json_data for key in required_keys):
            return json.dumps(json_data), json_data
        else:
            return None, None
    except Exception as e:
        print(e)
        return None, None

def generate_second_response(endpoint, api_key, model, generated_output, image_model):
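    """Turn a previously generated character card into an image prompt.

    Uses the <image_model>.txt system prompt (e.g. SDXL.txt) and returns the
    model's response text.
    """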
    # The image-prompt system prompt lives next to this script as <image_model>.txt (e.g. SDXL.txt)
    system_prompt_path = __file__.replace("app.py", f"{image_model}.txt")

    if endpoint in default_urls:
        # Official endpoint: infer the provider from the API key prefix
        if api_key.startswith("sk-ant-"):
            client = Anthropic(api_key=api_key, base_url=endpoint)
        elif api_key.startswith("sk-"):
            client = OpenAI(api_key=api_key, base_url=endpoint)
        else:
            # This handler feeds a single output textbox, so return one value
            print("Invalid API key format")
            return "Invalid API key"
    else:
        # Custom endpoint (e.g. a proxy): infer the provider from the selected model
        if model in claude_models:
            client = Anthropic(api_key=api_key, base_url=endpoint)
        else:
            client = OpenAI(api_key=api_key, base_url=endpoint)

    # Read the system prompt from a text file
    with open(system_prompt_path, "r") as file:
        system_prompt = file.read()

    if model in claude_models:
        try:
            # Generate a second response using the selected Anthropic model and the previously generated output
            response = client.messages.create(
                system=system_prompt,
                messages=[{"role": "user", "content": generated_output}],
                model=model,
                max_tokens=4096
            )
            response_text = response.content[0].text
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"
    else:
        try:
            # Generate a response using the selected OpenAI model
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": generated_output}
                ],
                max_tokens=4096
            )
            response_text = response.choices[0].message.content
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"

    return response_text

def inject_json_to_png(image, json_data):
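    """Embed character JSON into a PNG as a base64-encoded 'chara' tEXt chunk.

    The image is center-cropped and resized to 400x600, the JSON is written
    into the PNG metadata, and the path to the saved file is returned.
    """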
    if isinstance(json_data, str):
        json_data = json.loads(json_data)

    img = Image.open(image)
    
    # Calculate the aspect ratio of the original image
    width, height = img.size
    aspect_ratio = width / height
    
    # Calculate the cropping dimensions based on the aspect ratio
    if aspect_ratio > 400 / 600:
        # Image is wider than 400x600, crop the sides
        new_width = int(height * 400 / 600)
        left = (width - new_width) // 2
        right = left + new_width
        top = 0
        bottom = height
    else:
        # Image is taller than 400x600, crop the top and bottom
        new_height = int(width * 600 / 400)
        left = 0
        right = width
        top = (height - new_height) // 2
        bottom = top + new_height
    
    # Perform cropping
    img = img.crop((left, top, right, bottom))
    
    # Resize the cropped image to 400x600 pixels
    img = img.resize((400, 600), Image.LANCZOS)
    
    # Encode the JSON data as the base64 'chara' tEXt chunk that card readers expect
    json_bytes = json.dumps(json_data).encode('utf-8')

    # Build the PNG metadata with the tag 'chara'
    metadata = PngInfo()
    metadata.add_text("chara", base64.b64encode(json_bytes).decode("ascii"))

    # Save the modified PNG into the outputs folder next to this script
    create_unique_id = str(uuid.uuid4())
    char_name = json_data.get('name') or "character"
    filename = f"{char_name}_{create_unique_id}.png"
    img_folder = __file__.replace("app.py", "outputs/")
    os.makedirs(img_folder, exist_ok=True)
    output_path = f"{img_folder}{filename}"
    img.save(output_path, format='PNG', pnginfo=metadata)

    return output_path
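
# Standalone usage sketch (hypothetical file names, not part of the Gradio flow):
#   card_json = '{"name": "Goo Panther", "description": "A translucent pastel goo panther."}'
#   print(inject_json_to_png("panther.png", card_json))  # e.g. .../outputs/Goo Panther_<uuid>.png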

# Set up the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# SillyTavern Character Generator")

    # Explain that either an Anthropic or an OpenAI API key can be used
    gr.Markdown("You can use an API key from either the Anthropic API or the OpenAI API. Anthropic keys start with 'sk-ant-'; OpenAI keys start with 'sk-'.")
    gr.Markdown("Please note: if you use a proxy, it must support the standard OpenAI or Anthropic API calls. khanon does; OpenRouter-based ones usually do not.")
    gr.Markdown("Generating images locally and want to use the prompts from here in your workflow? See https://github.com/AppleBotzz/ComfyUI_LLMVISION")
    with gr.Tab("JSON Generate"):
        with gr.Row():
            with gr.Column():
                endpoint = gr.Textbox(label="Endpoint", value="https://api.anthropic.com")
                api_key = gr.Textbox(label="API Key", type="password", placeholder="sk-ant-api03-... or sk-...")
                model_dropdown = gr.Dropdown(choices=[], label="Select a model")
                user_prompt = gr.Textbox(label="User Prompt", value="Make me a card for a panther made of translucent pastel colored goo. Its color never changes once it exists but each 'copy' has a different color. The creature comes out of a small jar, seemingly defying physics with its size. It is the size of a real panther, and as strong as one too. By default it's female but is able to change gender. It can even split into multiple copies of itself if needed with no change in its own size or mass. Its outside is normally lightly squishy but solid, but on command it can become viscous like non-Newtonian fluids. Be descriptive when describing this character, and make sure to describe all of its features in char_persona just like you do in description. Make sure to describe commonly used features in detail (visual, smell, taste, touch, etc).")
                generate_button = gr.Button("Generate JSON")

            with gr.Column():
                generated_output = gr.Textbox(label="Generated Output")
                json_output = gr.Textbox(label="JSON Output")
                json_download = gr.File(label="Download JSON")

        with gr.Row():
            with gr.Column():
                image_model = gr.Dropdown(choices=image_prompter, label="Image Model to prompt for", value="SDXL")
                generate_button_2 = gr.Button("Generate Image Prompt")

            with gr.Column():
                generated_output_2 = gr.Textbox(label="Generated Image Prompt")

        def update_models(api_key):
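            """Swap the model list and default endpoint to match the API key prefix."""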
            if api_key.startswith("sk-ant-"):
                return gr.Dropdown(choices=claude_models), gr.Textbox(label="Endpoint", value="https://api.anthropic.com")
            elif api_key.startswith("sk-"):
                return gr.Dropdown(choices=openai_models), gr.Textbox(label="Endpoint", value="https://api.openai.com/v1")
            else:
                return gr.Dropdown(choices=both_models), gr.Textbox(label="Endpoint", value="https://api.anthropic.com")

        api_key.change(update_models, inputs=api_key, outputs=[model_dropdown, endpoint])

        generate_button.click(generate_response, inputs=[endpoint, api_key, model_dropdown, user_prompt], outputs=[generated_output, json_output, json_download])
        generate_button_2.click(generate_second_response, inputs=[endpoint, api_key, model_dropdown, generated_output, image_model], outputs=generated_output_2)
    with gr.Tab("PNG Inject"):
        gr.Markdown("# PNG Inject")
        gr.Markdown("Upload a PNG image and inject JSON content into the PNG. PNG gets resized to 400x600 Center Crop.")

        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type="filepath", label="Upload PNG Image")
                json_input = gr.Textbox(label="JSON Data")
                json_file_input = gr.File(label="Or Upload JSON File", file_types=[".json"])
                inject_button = gr.Button("Inject JSON and Download PNG")

            with gr.Column():
                injected_image_output = gr.File(label="Download Injected PNG")

        def inject_json(image, json_data, json_file):
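            """Gradio handler: load JSON from the uploaded file (if provided) and inject it into the PNG."""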
            # Prefer an uploaded JSON file over the textbox contents
            if json_file:
                with open(json_file) as jsonc:
                    json_data = json.load(jsonc)
            if image is None or not json_data:
                return None
            return inject_json_to_png(image, json_data)

        inject_button.click(inject_json, inputs=[image_input, json_input, json_file_input], outputs=injected_image_output)

demo.launch()