ginipick commited on
Commit
faec072
·
verified ·
1 Parent(s): 5cda80c

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -59
app.py DELETED
@@ -1,59 +0,0 @@
1
- import os
2
- import gradio as gr
3
- import spaces
4
- from transformers import AutoModelForCausalLM, AutoProcessor, MarianMTModel, MarianTokenizer
5
- import torch
6
- from PIL import Image
7
- import subprocess
8
-
9
- # 허깅페이스 API 토큰 환경 변수에서 가져오기
10
- hf_token = os.getenv("HF_TOKEN")
11
-
12
- # 필요한 외부 패키지 설치
13
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
14
-
15
- # 번역 모델과 토크나이저 로드
16
- translator_model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-ko")
17
- translator_tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ko")
18
-
19
- def translate_to_korean(text):
20
- # μ˜μ–΄ ν…μŠ€νŠΈλ₯Ό ν•œκ΅­μ–΄λ‘œ λ²ˆμ—­
21
- inputs = translator_tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
22
- outputs = translator_model.generate(**inputs)
23
- translated_text = translator_tokenizer.decode(outputs[0], skip_special_tokens=True)
24
- return translated_text
25
-
26
- @spaces.GPU
27
- def run_example(image, text_input=None, model_id="microsoft/Phi-3.5-vision-instruct"):
28
- # λͺ¨λΈ λ‘œλ“œ μ‹œ ν—ˆκΉ…νŽ˜μ΄μŠ€ 토큰 μ‚¬μš©
29
- model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_token)
30
- processor = AutoProcessor.from_pretrained(model_id, use_auth_token=hf_token)
31
-
32
- prompt = f"{user_prompt}\n{text_input}{prompt_suffix}{assistant_prompt}"
33
- image = Image.fromarray(image).convert("RGB")
34
-
35
- inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
36
- generate_ids = model.generate(**inputs, max_new_tokens=1000, eos_token_id=processor.tokenizer.eos_token_id)
37
- generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
38
- response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
39
-
40
- # λ²ˆμ—­λœ ν…μŠ€νŠΈ μΆ”κ°€
41
- translated_response = translate_to_korean(response)
42
- return response, translated_response
43
-
44
- # Gradio 인터페이스 설정
45
- with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
46
- with gr.Tab(label="Phi-3.5 Input"):
47
- with gr.Row():
48
- with gr.Column():
49
- input_img = gr.Image(label="Input Picture")
50
- model_selector = gr.Dropdown(choices=[model_id], label="Model", value=model_id)
51
- text_input = gr.Textbox(label="Question")
52
- submit_btn = gr.Button(value="Submit")
53
- with gr.Column():
54
- output_text = gr.Textbox(label="Output Text")
55
- translated_text = gr.Textbox(label="Translated Text (Korean)")
56
-
57
- submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text, translated_text])
58
-
59
- demo.launch(debug=True)