unknown committed on
Commit
9ec4c84
β€’
1 Parent(s): 5062a21

Your commit message

Browse files
Files changed (3) hide show
  1. README.md +5 -5
  2. app.py +56 -80
  3. name.txt +0 -0
README.md CHANGED
@@ -1,10 +1,10 @@
1
  ---
2
- title: Promptgenerador1
3
- emoji: πŸ“ˆ
4
- colorFrom: blue
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 4.26.0
8
  app_file: app.py
9
  pinned: false
10
  ---
 
1
  ---
2
+ title: Midjourney Prompt Generator
3
+ emoji: 🌍
4
+ colorFrom: pink
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 3.28.1
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py CHANGED
@@ -1,80 +1,56 @@
1
- import gradio as gr
2
- import os
3
- import requests
4
-
5
- SYSTEM_PROMPT = "As an LLM, your job is to generate detailed prompts that start with generate the image, for image generation models based on user input. Be descriptive and specific, but also make sure your prompts are clear and concise."
6
- TITLE = "Image Prompter"
7
- EXAMPLE_INPUT = "A Man Riding A Horse in Space"
8
- import gradio as gr
9
- import os
10
- import requests
11
-
12
- zephyr_7b_beta = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta/"
13
-
14
- HF_TOKEN = os.getenv("HF_TOKEN")
15
- HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
16
-
17
- def build_input_prompt(message, chatbot, system_prompt):
18
- """
19
- Constructs the input prompt string from the chatbot interactions and the current message.
20
- """
21
- input_prompt = "<|system|>\n" + system_prompt + "</s>\n<|user|>\n"
22
- for interaction in chatbot:
23
- input_prompt = input_prompt + str(interaction[0]) + "</s>\n<|assistant|>\n" + str(interaction[1]) + "\n</s>\n<|user|>\n"
24
-
25
- input_prompt = input_prompt + str(message) + "</s>\n<|assistant|>"
26
- return input_prompt
27
-
28
-
29
- def post_request_beta(payload):
30
- """
31
- Sends a POST request to the predefined Zephyr-7b-Beta URL and returns the JSON response.
32
- """
33
- response = requests.post(zephyr_7b_beta, headers=HEADERS, json=payload)
34
- response.raise_for_status() # Will raise an HTTPError if the HTTP request returned an unsuccessful status code
35
- return response.json()
36
-
37
-
38
- def predict_beta(message, chatbot=[], system_prompt=""):
39
- input_prompt = build_input_prompt(message, chatbot, system_prompt)
40
- data = {
41
- "inputs": input_prompt
42
- }
43
-
44
- try:
45
- response_data = post_request_beta(data)
46
- json_obj = response_data[0]
47
-
48
- if 'generated_text' in json_obj and len(json_obj['generated_text']) > 0:
49
- bot_message = json_obj['generated_text']
50
- return bot_message
51
- elif 'error' in json_obj:
52
- raise gr.Error(json_obj['error'] + ' Please refresh and try again with smaller input prompt')
53
- else:
54
- warning_msg = f"Unexpected response: {json_obj}"
55
- raise gr.Error(warning_msg)
56
- except requests.HTTPError as e:
57
- error_msg = f"Request failed with status code {e.response.status_code}"
58
- raise gr.Error(error_msg)
59
- except json.JSONDecodeError as e:
60
- error_msg = f"Failed to decode response as JSON: {str(e)}"
61
- raise gr.Error(error_msg)
62
-
63
- def test_preview_chatbot(message, history):
64
- response = predict_beta(message, history, SYSTEM_PROMPT)
65
- text_start = response.rfind("<|assistant|>", ) + len("<|assistant|>")
66
- response = response[text_start:]
67
- return response
68
-
69
-
70
- welcome_preview_message = f"""
71
- Expand your imagination and broaden your horizons with LLM. Welcome to **{TITLE}**!:\nThis is a chatbot that can generate detailed prompts for image generation models based on simple and short user input.\nSay something like:
72
-
73
- "{EXAMPLE_INPUT}"
74
- """
75
-
76
- chatbot_preview = gr.Chatbot(layout="panel", value=[(None, welcome_preview_message)])
77
- textbox_preview = gr.Textbox(scale=7, container=False, value=EXAMPLE_INPUT)
78
-
79
- demo = gr.ChatInterface(test_preview_chatbot, chatbot=chatbot_preview, textbox=textbox_preview)
80
- demo.launch(share=True)
 
1
from transformers import pipeline, set_seed
import gradio as grad
import random
import re

# Text-generation pipeline that expands a short phrase into
# Midjourney-style image prompts.
gpt2_pipe = pipeline('text-generation', model='succinctly/text2image-prompt-generator')

# Seed phrases used as fallback input when the user submits an empty field.
# NOTE(review): kept the original (singular) name `line` because generate()
# reads this module-level list.
with open("name.txt", "r") as f:
    line = list(f)
12
def generate(starting_text):
    """Expand *starting_text* into detailed image-generation prompts.

    Makes up to six attempts.  Each attempt re-seeds the RNG, samples 8
    candidate continuations from the GPT-2 pipeline, filters degenerate
    ones, and returns the survivors joined by newlines.  When the input
    field is empty, a random seed phrase from name.txt is substituted.

    Returns the newline-joined prompt string (may be empty if all six
    attempts produced nothing usable).
    """
    response_end = ""
    for _attempt in range(6):
        set_seed(random.randint(100, 1000000))

        # Fall back to a random line from name.txt when the field is empty.
        if starting_text == "":
            starting_text = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
            starting_text = re.sub(r"[,:\-–.!;?_]", '', starting_text)
            print(starting_text)

        response = gpt2_pipe(starting_text, max_length=random.randint(60, 90), num_return_sequences=8)
        response_list = []
        for candidate in response:
            resp = candidate['generated_text'].strip()
            # Drop echoes of the input, too-short continuations, and
            # prompts that end mid-clause.
            if resp != starting_text and len(resp) > (len(starting_text) + 4) and not resp.endswith((":", "-", "—")):
                response_list.append(resp)

        response_end = "\n".join(response_list)
        # Raw string: the original pattern '[^ ]+\.[^ ]+' relied on the
        # invalid escape '\.', which raises a SyntaxWarning on modern Python.
        response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
        response_end = response_end.replace("<", "").replace(">", "")
        if response_end != "":
            return response_end
    # All six attempts filtered down to nothing; return the last (empty) result.
    return response_end
37
+
38
+
39
# --- Gradio UI wiring ------------------------------------------------------
txt = grad.Textbox(lines=1, label="English", placeholder="English Text here")
out = grad.Textbox(lines=6, label="Generated Text")
examples = [
    ["mythology of the Slavs"],
    ["All-seeing eye monitors these world"],
    ["astronaut dog"],
    ["A monochrome forest of ebony trees"],
    ["sad view of worker in office,"],
    ["Headshot photo portrait of John Lennon"],
    ["wide field with thousands of blue nemophila,"],
]
title = "Midjourney Prompt Generator"
description = "This is an unofficial demo for Midjourney Prompt Generator. To use it, simply send your text, or click one of the examples to load them. Read more at the links below.<br>Model: https://huggingface.co/succinctly/text2image-prompt-generator<br>Telegram bot: https://t.me/prompt_generator_bot<br>[![](https://img.shields.io/twitter/follow/DoEvent?label=@DoEvent&style=social)](https://twitter.com/DoEvent)"
article = "<div><center><img src='https://visitor-badge.glitch.me/badge?page_id=max_skobeev_prompt_generator_public' alt='visitor badge'></center></div>"

# Build the interface, enable the request queue (single worker, private API),
# then launch without exposing the API surface.
demo = grad.Interface(
    fn=generate,
    inputs=txt,
    outputs=out,
    examples=examples,
    title=title,
    description=description,
    article=article,
    allow_flagging='never',
    cache_examples=False,
)
demo.queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
name.txt ADDED
The diff for this file is too large to render. See raw diff