from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

title = "🤖 AI ChatBot"
description = "A state-of-the-art large-scale pretrained response generation model (DialoGPT)"
examples = [["Hello?"]]

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")


def predict(user_input, history=[]):
    # Tokenize the new input sentence, appending the end-of-sequence token
    new_user_input_ids = tokenizer.encode(
        user_input + tokenizer.eos_token, return_tensors="pt"
    )

    # Append the new user input tokens to the chat history
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Generate a response; the returned token ids become the new chat history
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # Decode the tokens to text, then split the conversation on the EOS marker
    response = tokenizer.decode(history[0]).split("<|endoftext|>")

    # Pair up alternating turns as (user, bot) tuples for the chatbot display
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history


gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    theme="finlaymacklon/boxy_violet",
).launch()
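
# Optional sanity check: a minimal sketch of calling predict() directly,
# outside the Gradio UI, assuming the model weights above have downloaded.
# The example prompt here is illustrative, not from the original script.
#
#   response, history = predict("Hello?")
#   response, history = predict("How are you?", history)
#   print(response)  # list of (user, bot) tuples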