Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,19 @@
|
|
1 |
import gradio as gr
|
2 |
-
from transformers import
|
|
|
|
|
|
|
|
|
3 |
|
4 |
def generate_diary(keywords):
|
5 |
-
#
|
6 |
-
generator = pipeline('text-generation', model='anthropic/InstructGPT')
|
7 |
prompt = f"์ค๋์ ์ผ๊ธฐ:\n\n{', '.join(keywords.split(','))}์ ๋ํ ์ผ๊ธฐ๋ฅผ ์จ๋ด
์๋ค."
|
8 |
-
|
9 |
-
|
|
|
|
|
|
|
|
|
10 |
|
11 |
def app():
|
12 |
with gr.Blocks() as demo:
|
|
|
1 |
import gradio as gr
|
2 |
+
from transformers import GPT2LMHeadModel, PreTrainedTokenizerFast
|
3 |
+
|
4 |
+
# KoGPT2 ๋ชจ๋ธ๊ณผ ํ ํฌ๋์ด์ ๋ฅผ ์ ์ญ ๋ณ์๋ก ๋ฏธ๋ฆฌ ๋ก๋
|
5 |
+
model = GPT2LMHeadModel.from_pretrained("skt/kogpt2-base-v2")
|
6 |
+
tokenizer = PreTrainedTokenizerFast.from_pretrained("skt/kogpt2-base-v2")
|
7 |
|
def generate_diary(keywords):
    """Generate a short Korean diary entry from comma-separated keywords.

    Args:
        keywords: Comma-separated keyword string, e.g. "바다, 여행".
            Surrounding whitespace and empty entries are ignored.

    Returns:
        str: The decoded diary text sampled from KoGPT2. Note the
        output begins with the prompt itself, since generate() returns
        prompt + continuation and nothing is sliced off here.
    """
    # Normalise the keyword list: strip whitespace and drop empty
    # entries so input like "a, ,b " cannot produce a ragged prompt.
    terms = [k.strip() for k in keywords.split(",") if k.strip()]
    prompt = f"오늘의 일기:\n\n{', '.join(terms)}에 대한 일기를 써봅시다."

    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # pad_token_id is set explicitly: GPT-2-family models define no pad
    # token, so generate() would otherwise warn and pick a fallback.
    # NOTE(review): do_sample=True combined with num_beams=5 performs
    # beam-sample search — unusual and slow; confirm it is intentional.
    output = model.generate(
        input_ids,
        max_length=500,
        num_return_sequences=1,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        num_beams=5,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Return the generated diary text.
    diary = tokenizer.decode(output[0], skip_special_tokens=True)
    return diary
17 |
|
18 |
def app():
|
19 |
with gr.Blocks() as demo:
|