johnsu6616 jmourad committed on
Commit
53e0942
0 Parent(s):

Duplicate from jmourad/TXT2IMG-MJ-Desc

Browse files

Co-authored-by: Rafael Mourad <jmourad@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +77 -0
  4. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: TXT2IMG MJ Desc
3
+ emoji: 🔥
4
+ colorFrom: gray
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 3.24.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: artistic-2.0
11
+ duplicated_from: jmourad/TXT2IMG-MJ-Desc
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Importar bibliotecas
2
+ import torch
3
+ import re
4
+ import random
5
+ import requests
6
+ import shutil
7
+ from clip_interrogator import Config, Interrogator
8
+ from transformers import pipeline, set_seed, AutoTokenizer, AutoModelForSeq2SeqLM
9
+ from PIL import Image
10
+ import gradio as gr
11
+
12
+ # Configurar CLIP
13
+ config = Config()
14
+ config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
15
+ config.blip_offload = False if torch.cuda.is_available() else True
16
+ config.chunk_size = 2048
17
+ config.flavor_intermediate_count = 512
18
+ config.blip_num_beams = 64
19
+ config.clip_model_name = "ViT-H-14/laion2b_s32b_b79k"
20
+ ci = Interrogator(config)
21
+
22
+ # Función para generar prompt desde imagen
23
+ def get_prompt_from_image(image, mode):
24
+ image = image.convert('RGB')
25
+ if mode == 'best':
26
+ prompt = ci.interrogate(image)
27
+ elif mode == 'classic':
28
+ prompt = ci.interrogate_classic(image)
29
+ elif mode == 'fast':
30
+ prompt = ci.interrogate_fast(image)
31
+ elif mode == 'negative':
32
+ prompt = ci.interrogate_negative(image)
33
+ return prompt
34
+
35
+ # Función para generar texto
36
+ text_pipe = pipeline('text-generation', model='succinctly/text2image-prompt-generator')
37
+
38
+ def text_generate(input):
39
+ seed = random.randint(100, 1000000)
40
+ set_seed(seed)
41
+ for count in range(6):
42
+ sequences = text_pipe(input, max_length=random.randint(60, 90), num_return_sequences=8)
43
+ list = []
44
+ for sequence in sequences:
45
+ line = sequence['generated_text'].strip()
46
+ if line != input and len(line) > (len(input) + 4) and line.endswith((':', '-', '—')) is False:
47
+ list.append(line)
48
+
49
+ result = "\n".join(list)
50
+ result = re.sub('[^ ]+\.[^ ]+','', result)
51
+ result = result.replace('<', '').replace('>', '')
52
+ if result != '':
53
+ return result
54
+ if count == 5:
55
+ return result
56
+
57
+ # Crear interfaz gradio
58
+ with gr.Blocks() as block:
59
+ with gr.Column():
60
+ gr.HTML('<h1>MidJourney / SD2 Helper Tool</h1>')
61
+ with gr.Tab('Generate from Image'):
62
+ with gr.Row():
63
+ input_image = gr.Image(type='pil')
64
+ with gr.Column():
65
+ input_mode = gr.Radio(['best', 'fast', 'classic', 'negative'], value='best', label='Mode')
66
+ img_btn = gr.Button('Discover Image Prompt')
67
+ output_image = gr.Textbox(lines=6, label='Generated Prompt')
68
+
69
+ with gr.Tab('Generate from Text'):
70
+ input_text = gr.Textbox(lines=6, label='Your Idea', placeholder='Enter your content here...')
71
+ output_text = gr.Textbox(lines=6, label='Generated Prompt')
72
+ text_btn = gr.Button('Generate Prompt')
73
+
74
+ img_btn.click(fn=get_prompt_from_image, inputs=[input_image, input_mode], outputs=output_image)
75
+ text_btn.click(fn=text_generate, inputs=input_text, outputs=output_text)
76
+
77
+ block.queue(max_size=64).launch(show_api=False, enable_queue=True, debug=True, share=False, server_name='0.0.0.0')
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers
2
+ gradio
3
+ clip_interrogator
4
+ torch