import gradio as gr
from huggingface_hub import HfApi, model_info
import tempfile
import zipfile
import os

api = HfApi()
def analyze_model(model_name):
    """Analyze a Hugging Face model and decide whether a demo can be generated."""
    if not model_name:
        return "Please enter a model name.", None, None

    try:
        # Fetch model metadata from the Hub
        info = model_info(model_name)

        # Pipeline task
        pipeline_tag = getattr(info, 'pipeline_tag', None)

        # Model type (from the config, when available)
        model_type = "unknown"
        if hasattr(info, 'config') and info.config:
            model_type = info.config.get('model_type', 'unknown')

        # Library
        library = getattr(info, 'library_name', 'transformers')

        # Languages (may be a single string or a list)
        languages = getattr(info, 'language', None) or ['en']
        if isinstance(languages, str):
            languages = [languages]

        analysis_result = f"""
## Model Analysis Results

### Basic Information
- **Model**: {model_name}
- **Task**: {pipeline_tag or 'Unknown'}
- **Model type**: {model_type}
- **Library**: {library}
- **Languages**: {', '.join(languages[:3])}
- **Downloads**: {getattr(info, 'downloads', 0):,}

### Demo Generation Support
"""

        # Check whether the task has a demo template
        supported_tasks = {
            'text-classification': '✅ A text classification demo can be generated',
            'question-answering': '✅ A question answering demo can be generated',
            'text-generation': '✅ A text generation demo can be generated',
            'summarization': '✅ A summarization demo can be generated',
            'translation': '✅ A translation demo can be generated',
            'fill-mask': '✅ A fill-mask demo can be generated',
            'token-classification': '✅ A named entity recognition demo can be generated'
        }

        if pipeline_tag in supported_tasks:
            analysis_result += supported_tasks[pipeline_tag]
            demo_possible = True
        else:
            analysis_result += f"⚠️ The '{pipeline_tag}' task is not supported yet."
            demo_possible = False

        return analysis_result, info, demo_possible

    except Exception as e:
        return f"❌ Model analysis failed: {str(e)}\n\nPlease check that the model name is correct.", None, False
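# Illustrative call (the model name is just an example already used as the UI
# placeholder): analyze_model("klue/bert-base") returns the Markdown analysis
# text, the ModelInfo object, and a flag telling the UI whether a demo template
# exists for the model's pipeline task.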
def generate_demo_code(model_info, model_name, demo_title, demo_description):
    """Generate demo code for the model's pipeline task."""
    if not model_info:
        return "Please analyze the model first.", None

    pipeline_tag = getattr(model_info, 'pipeline_tag', None)

    if not demo_title:
        demo_title = f"{model_name.split('/')[-1]} Demo"
    if not demo_description:
        demo_description = f"Demo for {model_name}"

    # Dispatch to the task-specific code generator
    if pipeline_tag == 'text-classification':
        app_code = generate_text_classification_demo(model_name, demo_title, demo_description)
    elif pipeline_tag == 'question-answering':
        app_code = generate_qa_demo(model_name, demo_title, demo_description)
    elif pipeline_tag == 'text-generation':
        app_code = generate_text_generation_demo(model_name, demo_title, demo_description)
    elif pipeline_tag == 'summarization':
        app_code = generate_summarization_demo(model_name, demo_title, demo_description)
    elif pipeline_tag == 'translation':
        app_code = generate_translation_demo(model_name, demo_title, demo_description)
    elif pipeline_tag == 'fill-mask':
        app_code = generate_fill_mask_demo(model_name, demo_title, demo_description)
    elif pipeline_tag == 'token-classification':
        app_code = generate_token_classification_demo(model_name, demo_title, demo_description)
    else:
        return f"❌ The '{pipeline_tag}' task is not supported yet.", None

    return app_code, pipeline_tag
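# Note: the if/elif dispatch above must stay in sync with the `supported_tasks`
# dict in analyze_model(); adding a new task means updating both places and
# writing a matching generate_*_demo() template below.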
def generate_text_classification_demo(model_name, title, description):
    """Generate a text classification demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
classifier = pipeline("text-classification", model="{model_name}")

def classify_text(text):
    """Run text classification."""
    if not text.strip():
        return "Please enter some text."

    try:
        results = classifier(text)

        # Format the results
        output = "## Classification Results\\n\\n"
        for i, result in enumerate(results):
            label = result['label']
            score = result['score']
            confidence = f"{{score:.1%}}"
            output += f"**{{i+1}}. {{label}}**: {{confidence}}\\n"

        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column(scale=1):
            text_input = gr.Textbox(
                label="Text to classify",
                placeholder="Enter text here...",
                lines=5
            )
            classify_btn = gr.Button("Classify", variant="primary")

            # Example buttons
            gr.Markdown("### Example texts")
            examples = [
                ["This movie was fantastic! Highly recommended."],
                ["The service was really poor. Very disappointing."],
                ["It seems like a fairly ordinary product."]
            ]
            for example in examples:
                btn = gr.Button(example[0][:30] + "...", size="sm")
                btn.click(lambda x=example[0]: x, outputs=text_input)

        with gr.Column(scale=1):
            output = gr.Markdown("Enter some text and click Classify.")

    classify_btn.click(
        fn=classify_text,
        inputs=text_input,
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Text classification
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
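# About the templates in this and the following generators: inside these outer
# f-strings, doubled braces ({{ ... }}) are escapes that become single braces in
# the generated app.py, so expressions such as {{score:.1%}} are evaluated only
# when the generated demo runs; single braces ({model_name}, {title},
# {description}) are filled in here at generation time.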
def generate_qa_demo(model_name, title, description):
    """Generate a question answering demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
qa_pipeline = pipeline("question-answering", model="{model_name}")

def answer_question(context, question):
    """Run question answering."""
    if not context.strip() or not question.strip():
        return "Please provide both a context and a question."

    try:
        result = qa_pipeline(question=question, context=context)

        answer = result['answer']
        score = result['score']
        confidence = f"{{score:.1%}}"

        output = f"""
## Answer

**Answer**: {{answer}}

**Confidence**: {{confidence}}

**Start position**: {{result.get('start', 'N/A')}}
**End position**: {{result.get('end', 'N/A')}}
"""
        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column():
            context_input = gr.Textbox(
                label="Context (background text)",
                placeholder="Enter the text the answer should be drawn from...",
                lines=8
            )
            question_input = gr.Textbox(
                label="Question",
                placeholder="Enter your question...",
                lines=2
            )
            qa_btn = gr.Button("Find answer", variant="primary")

        with gr.Column():
            output = gr.Markdown("Enter a context and a question, then click the button.")

    qa_btn.click(
        fn=answer_question,
        inputs=[context_input, question_input],
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Question answering
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
def generate_text_generation_demo(model_name, title, description):
    """Generate a text generation demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
generator = pipeline("text-generation", model="{model_name}")

def generate_text(prompt, max_length, temperature, top_p):
    """Run text generation."""
    if not prompt.strip():
        return "Please enter a prompt."

    try:
        results = generator(
            prompt,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=1,
            do_sample=True,
            pad_token_id=generator.tokenizer.eos_token_id
        )

        generated_text = results[0]['generated_text']

        # Show only the newly generated part, without the original prompt
        if generated_text.startswith(prompt):
            generated_text = generated_text[len(prompt):]

        output = f"""
## Generated Text

**Prompt**: {{prompt}}

**Continuation**:

{{generated_text}}

---
*Settings: max length={{max_length}}, temperature={{temperature}}, top-p={{top_p}}*
"""
        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="Enter a sentence to continue...",
                lines=4
            )

            gr.Markdown("### Generation settings")
            max_length = gr.Slider(
                label="Max length",
                minimum=10,
                maximum=200,
                value=50,
                step=10
            )
            temperature = gr.Slider(
                label="Temperature (creativity)",
                minimum=0.1,
                maximum=2.0,
                value=0.7,
                step=0.1
            )
            top_p = gr.Slider(
                label="Top-p (diversity)",
                minimum=0.1,
                maximum=1.0,
                value=0.9,
                step=0.1
            )

            generate_btn = gr.Button("Generate text", variant="primary")

        with gr.Column(scale=2):
            output = gr.Markdown("Enter a prompt and click Generate.")

    generate_btn.click(
        fn=generate_text,
        inputs=[prompt_input, max_length, temperature, top_p],
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Text generation
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
def generate_summarization_demo(model_name, title, description):
    """Generate a summarization demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
summarizer = pipeline("summarization", model="{model_name}")

def summarize_text(text, max_length, min_length):
    """Run text summarization."""
    if not text.strip():
        return "Please enter text to summarize."

    if len(text.split()) < 10:
        return "The text is too short to summarize. Please enter a longer text."

    try:
        results = summarizer(
            text,
            max_length=max_length,
            min_length=min_length,
            do_sample=False
        )

        summary = results[0]['summary_text']

        # Compute simple statistics
        original_words = len(text.split())
        summary_words = len(summary.split())
        compression_ratio = (1 - summary_words / original_words) * 100

        output = f"""
## Summary

**Summary**:

{{summary}}

---
### Statistics
- **Original word count**: {{original_words:,}}
- **Summary word count**: {{summary_words:,}}
- **Compression**: {{compression_ratio:.1f}}%
"""
        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column(scale=1):
            text_input = gr.Textbox(
                label="Text to summarize",
                placeholder="Enter a long text...",
                lines=10
            )

            gr.Markdown("### Summarization settings")
            max_length = gr.Slider(
                label="Maximum summary length",
                minimum=20,
                maximum=150,
                value=50,
                step=10
            )
            min_length = gr.Slider(
                label="Minimum summary length",
                minimum=5,
                maximum=50,
                value=10,
                step=5
            )

            summarize_btn = gr.Button("Summarize", variant="primary")

        with gr.Column(scale=1):
            output = gr.Markdown("Enter some text and click Summarize.")

    summarize_btn.click(
        fn=summarize_text,
        inputs=[text_input, max_length, min_length],
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Summarization
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
def generate_translation_demo(model_name, title, description):
    """Generate a translation demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
translator = pipeline("translation", model="{model_name}")

def translate_text(text):
    """Run translation."""
    if not text.strip():
        return "Please enter text to translate."

    try:
        results = translator(text)
        translated = results[0]['translation_text']

        output = f"""
## Translation

**Source**: {{text}}

**Translation**: {{translated}}

---
*Character count: {{len(text)}} → {{len(translated)}}*
"""
        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Text to translate",
                placeholder="Enter the text to translate...",
                lines=6
            )
            translate_btn = gr.Button("Translate", variant="primary")

        with gr.Column():
            output = gr.Markdown("Enter some text and click Translate.")

    translate_btn.click(
        fn=translate_text,
        inputs=text_input,
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Translation
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
def generate_fill_mask_demo(model_name, title, description):
    """Generate a fill-mask demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
fill_mask = pipeline("fill-mask", model="{model_name}")

def predict_mask(text):
    """Predict the masked token."""
    if not text.strip():
        return "Please enter some text."

    if "[MASK]" not in text:
        return "Please include the [MASK] token in the text."

    try:
        results = fill_mask(text)

        output = f"## Predictions\\n\\n**Input**: {{text}}\\n\\n### Candidates:\\n"
        for i, result in enumerate(results[:5]):
            token = result['token_str']
            score = result['score']
            sequence = result['sequence']
            confidence = f"{{score:.1%}}"
            output += f"**{{i+1}}. {{token}}** ({{confidence}})\\n"
            output += f"   *{{sequence}}*\\n\\n"

        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Text with a [MASK] to predict",
                placeholder="Enter a sentence containing the [MASK] token...",
                lines=4,
                value="The weather today is really [MASK]."
            )
            predict_btn = gr.Button("Predict", variant="primary")

            gr.Markdown("### Examples")
            examples = [
                "The weather today is really [MASK].",
                "This movie is [MASK] interesting.",
                "The capital of Korea is [MASK]."
            ]
            for example in examples:
                btn = gr.Button(example, size="sm")
                btn.click(lambda x=example: x, outputs=text_input)

        with gr.Column():
            output = gr.Markdown("Enter text containing the [MASK] token and click Predict.")

    predict_btn.click(
        fn=predict_mask,
        inputs=text_input,
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Fill mask
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
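# Assumption in the template above: it checks for a literal "[MASK]" token
# (BERT-style). Models whose tokenizer uses a different mask token (for example
# "<mask>" in RoBERTa-style models) would need that check adjusted, e.g. by
# reading fill_mask.tokenizer.mask_token in the generated app.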
def generate_token_classification_demo(model_name, title, description):
    """Generate a named entity recognition demo (app.py source)."""
    return f'''import gradio as gr
from transformers import pipeline

# Load the model
ner_pipeline = pipeline("token-classification", model="{model_name}", aggregation_strategy="simple")

def recognize_entities(text):
    """Run named entity recognition."""
    if not text.strip():
        return "Please enter some text."

    try:
        results = ner_pipeline(text)

        if not results:
            return "No entities were recognized."

        output = f"## Named Entities\\n\\n**Input**: {{text}}\\n\\n### Recognized entities:\\n"
        for i, entity in enumerate(results):
            word = entity['word']
            label = entity['entity_group']
            score = entity['score']
            start = entity['start']
            end = entity['end']
            confidence = f"{{score:.1%}}"
            output += f"**{{i+1}}. {{word}}** → *{{label}}* ({{confidence}})\\n"
            output += f"   Position: {{start}}-{{end}}\\n\\n"

        return output
    except Exception as e:
        return f"❌ An error occurred: {{str(e)}}"

# Gradio interface
with gr.Blocks(title="{title}", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# {title}")
    gr.Markdown("{description}")

    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Text for entity recognition",
                placeholder="Enter text containing names of people, places, organizations...",
                lines=5,
                value="Samsung Electronics chairman Lee Jae-yong held a press conference in Seoul."
            )
            ner_btn = gr.Button("Recognize entities", variant="primary")

        with gr.Column():
            output = gr.Markdown("Enter some text and click the button.")

    ner_btn.click(
        fn=recognize_entities,
        inputs=text_input,
        outputs=output
    )

    gr.Markdown("""
### Model Information
- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: Named entity recognition
- **Description**: {description}
""")

if __name__ == "__main__":
    demo.launch()'''
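# Note: aggregation_strategy="simple" makes the pipeline merge word-piece tokens
# into whole-word entities, which is why the template reads 'entity_group'
# rather than the per-token 'entity' field.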
def create_demo_package(app_code, pipeline_tag, model_name, demo_title, demo_description):
    """Create a complete, ready-to-deploy demo package as a ZIP file."""
    if not app_code:
        return "Please generate the demo code first.", None

    try:
        if not demo_title:
            demo_title = f"{model_name.split('/')[-1]} Demo"

        # Create a temporary working directory
        temp_dir = tempfile.mkdtemp()
        demo_dir = os.path.join(temp_dir, demo_title.lower().replace(' ', '-'))
        os.makedirs(demo_dir, exist_ok=True)

        # 1. Save app.py
        with open(os.path.join(demo_dir, "app.py"), 'w', encoding='utf-8') as f:
            f.write(app_code)

        # 2. Write requirements.txt
        requirements = """gradio
transformers
torch
"""
        with open(os.path.join(demo_dir, "requirements.txt"), 'w', encoding='utf-8') as f:
            f.write(requirements)

        # 3. Write README.md (Space metadata header plus usage notes)
        space_name = demo_title.lower().replace(' ', '-')
        readme_content = f"""---
title: {demo_title}
emoji: 🤖
colorFrom: blue
colorTo: purple
sdk: gradio
sdk_version: 5.31.0
app_file: app.py
pinned: false
models:
- {model_name}
---

# {demo_title}

{demo_description}

## Usage

This demo uses the [{model_name}](https://huggingface.co/{model_name}) model.

### Task: {pipeline_tag}

{get_task_description(pipeline_tag)}

## Running locally

```bash
pip install -r requirements.txt
python app.py
```

## Model Information

- **Model**: [{model_name}](https://huggingface.co/{model_name})
- **Task**: {pipeline_tag}
- **Library**: transformers

---
*This demo was generated automatically with [Demo Generator](https://huggingface.co/spaces/your-username/demo-generator).*
"""
        with open(os.path.join(demo_dir, "README.md"), 'w', encoding='utf-8') as f:
            f.write(readme_content)

        # 4. Zip everything up
        zip_path = os.path.join(temp_dir, f"{space_name}_demo.zip")
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
            for root, dirs, files in os.walk(demo_dir):
                for file in files:
                    file_path = os.path.join(root, file)
                    arcname = os.path.relpath(file_path, demo_dir)
                    zipf.write(file_path, arcname)

        success_msg = f"""
## ✅ Demo package created!

The **{demo_title}** demo was generated successfully.

### Included files:
- `app.py`: the main demo application
- `requirements.txt`: required libraries
- `README.md`: Space description and configuration

### How to deploy:
1. Download the ZIP file below
2. Create a new Space on Hugging Face
3. Upload the files or push them with Git
4. The Space will build and deploy automatically

### Task: {pipeline_tag}
### Model: {model_name}
"""
        return success_msg, zip_path

    except Exception as e:
        return f"❌ An error occurred while creating the package: {str(e)}", None
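# Minimal programmatic sketch of the same flow the UI drives (the model name is
# only an example):
#   _, info, ok = analyze_model("klue/bert-base")
#   code, task = generate_demo_code(info, "klue/bert-base", "BERT Demo", "Example demo")
#   msg, zip_path = create_demo_package(code, task, "klue/bert-base", "BERT Demo", "Example demo")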
def get_task_description(pipeline_tag):
    """Return a short description for each supported task."""
    descriptions = {
        'text-classification': 'Classifies input text into categories, e.g. sentiment analysis or topic classification.',
        'question-answering': 'Finds the answer to a question within a given context.',
        'text-generation': 'Continues a given prompt with generated text.',
        'summarization': 'Summarizes long passages of text.',
        'translation': 'Translates text into another language.',
        'fill-mask': 'Fills in the blank in sentences containing a [MASK] token.',
        'token-classification': 'Recognizes entities such as people, places, and organizations in text.'
    }
    return descriptions.get(pipeline_tag, 'Performs various NLP tasks.')
# Gradio interface
with gr.Blocks(title="Demo Generator", theme=gr.themes.Soft()) as demo:
    # State holders shared between button clicks
    model_info_state = gr.State()
    demo_possible_state = gr.State()

    gr.Markdown("# Hugging Face Demo Generator")
    gr.Markdown("Enter a model name and get a working demo Space generated automatically!")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Model Analysis")
            model_name_input = gr.Textbox(
                label="Model name",
                placeholder="e.g. klue/bert-base",
                info="Enter the exact model name from the Hugging Face Hub"
            )
            analyze_btn = gr.Button("Analyze model", variant="primary")

            gr.Markdown("### Demo Settings")
            demo_title = gr.Textbox(
                label="Demo title",
                placeholder="e.g. Korean BERT Sentiment Demo",
                value=""
            )
            demo_description = gr.Textbox(
                label="Demo description",
                placeholder="e.g. A demo of a BERT model for Korean sentiment analysis",
                lines=3
            )

            generate_btn = gr.Button("Generate demo", variant="secondary")
            create_package_btn = gr.Button("Create full package", variant="secondary")

        with gr.Column(scale=2):
            gr.Markdown("### Analysis Results")
            analysis_output = gr.Markdown("Enter a model name and click Analyze.")

            gr.Markdown("### Generated Demo Code")
            code_output = gr.Code(language="python", label="app.py")

            gr.Markdown("### Download")
            download_file = gr.File(label="Complete demo package", visible=False)

    # Event handlers
    def analyze_and_store(model_name):
        analysis, info, possible = analyze_model(model_name)
        return analysis, info, possible

    def generate_and_show_code(info, model_name, title, description):
        if not info:
            return "Please analyze the model first."
        code, task = generate_demo_code(info, model_name, title, description)
        return code

    def create_package_and_download(info, model_name, title, description, code):
        if not info or not code:
            return "Please generate the demo code first.", None
        pipeline_tag = getattr(info, 'pipeline_tag', 'unknown')
        result, zip_path = create_demo_package(code, pipeline_tag, model_name, title, description)
        if zip_path:
            return result, gr.File(value=zip_path, visible=True)
        else:
            return result, None

    analyze_btn.click(
        fn=analyze_and_store,
        inputs=model_name_input,
        outputs=[analysis_output, model_info_state, demo_possible_state]
    )

    generate_btn.click(
        fn=generate_and_show_code,
        inputs=[model_info_state, model_name_input, demo_title, demo_description],
        outputs=code_output
    )

    create_package_btn.click(
        fn=create_package_and_download,
        inputs=[model_info_state, model_name_input, demo_title, demo_description, code_output],
        outputs=[analysis_output, download_file]
    )
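    # Note: the gr.State components above persist the ModelInfo object and the
    # demo-possible flag between clicks, so "Generate demo" and "Create full
    # package" reuse the last analysis instead of querying the Hub again.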
gr.Markdown(""" | |
### ๐ก ์ฌ์ฉ๋ฒ ๊ฐ์ด๋ | |
#### 1๏ธโฃ ๋ชจ๋ธ ๋ถ์ | |
- Hugging Face Hub์ ์ ํํ ๋ชจ๋ธ๋ช ์ ๋ ฅ (์: `klue/bert-base`) | |
- ๋ชจ๋ธ์ ํ์คํฌ์ ํธํ์ฑ ์๋ ํ์ธ | |
#### 2๏ธโฃ ๋ฐ๋ชจ ์ค์ | |
- **์ ๋ชฉ**: Space์์ ๋ณด์ฌ์ง ๋ฐ๋ชจ ์ ๋ชฉ | |
- **์ค๋ช **: ๋ฐ๋ชจ์ ์ฉ๋์ ๊ธฐ๋ฅ ์ค๋ช | |
#### 3๏ธโฃ ์ฝ๋ ์์ฑ | |
- ํ์คํฌ์ ๋ง๋ ์ต์ ํ๋ Gradio ์ธํฐํ์ด์ค ์๋ ์์ฑ | |
- ๋ฐ๋ก ๋ณต์ฌํด์ ์ฌ์ฉ ๊ฐ๋ฅํ ์์ ํ ์ฝ๋ | |
#### 4๏ธโฃ ํจํค์ง ๋ค์ด๋ก๋ | |
- `app.py`, `requirements.txt`, `README.md` ํฌํจ | |
- ZIP ๋ค์ด๋ก๋ ํ ๋ฐ๋ก Space์ ์ ๋ก๋ ๊ฐ๋ฅ | |
### ๐ฏ ์ง์ํ๋ ํ์คํฌ | |
- **ํ ์คํธ ๋ถ๋ฅ**: ๊ฐ์ ๋ถ์, ์ฃผ์ ๋ถ๋ฅ, ์ธ์ด๊ฐ์ง ๋ฑ | |
- **์ง์์๋ต**: ๋ฌธ์ ๊ธฐ๋ฐ QA ์์คํ | |
- **ํ ์คํธ ์์ฑ**: ์ธ์ด๋ชจ๋ธ ๊ธฐ๋ฐ ํ ์คํธ ์์ฑ | |
- **์์ฝ**: ๊ธด ํ ์คํธ์ ํต์ฌ ์์ฝ | |
- **๋ฒ์ญ**: ์ธ์ด ๊ฐ ๋ฒ์ญ | |
- **๋น์นธ ์ฑ์ฐ๊ธฐ**: BERT ์คํ์ผ ๋ง์คํฌ ์์ธก | |
- **๊ฐ์ฒด๋ช ์ธ์**: ์ฌ๋๋ช , ์ง๋ช , ๊ธฐ๊ด๋ช ๋ฑ ์ถ์ถ | |
### ๐ ์ํฌํ๋ก์ฐ ์ฐ๊ณ | |
์ด ๋๊ตฌ๋ ๋ค๋ฅธ ๋๊ตฌ๋ค๊ณผ ์๋ฒฝํ๊ฒ ์ฐ๊ณ๋ฉ๋๋ค: | |
1. **[Model Search Tool](https://huggingface.co/spaces/your-username/model-search-tool)**: ์ต์ ๋ชจ๋ธ ๊ฒ์ | |
2. **[Dataset Converter](https://huggingface.co/spaces/your-username/dataset-converter)**: ๋ฐ์ดํฐ ์ค๋น | |
3. **Demo Generator**: ๋ฐ๋ชจ ์์ฑ โ *ํ์ฌ ๋๊ตฌ* | |
4. **[Model Card Generator](https://huggingface.co/spaces/your-username/model-card-generator)**: ๋ฌธ์ํ | |
""") | |
if __name__ == "__main__":
    demo.launch()