import streamlit as st
import torch
from transformers import BertConfig, BertForSequenceClassification, BertTokenizer
import numpy as np


# Load the model and tokenizer
def load_model():
    tokenizer = BertTokenizer.from_pretrained('beomi/kcbert-base')
    config = BertConfig.from_pretrained('beomi/kcbert-base', num_labels=7)
    model = BertForSequenceClassification.from_pretrained('beomi/kcbert-base', config=config)
    # Load the fine-tuned weights onto the CPU
    model_state_dict = torch.load('YEINJEONG/sentiment7', map_location=torch.device('cpu'))
    model.load_state_dict(model_state_dict)
    model.eval()
    return model, tokenizer


model, tokenizer = load_model()


# Define the inference function
def inference(input_doc):
    inputs = tokenizer(input_doc, return_tensors='pt')
    # Run the forward pass without tracking gradients (inference only)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=1).squeeze().tolist()
    # Emotion labels: fear, surprise, anger, sadness, neutral, happiness, disgust
    class_idx = {'공포': 0, '놀람': 1, '분노': 2, '슬픔': 3, '중립': 4, '행복': 5, '혐오': 6}
    results = {class_name: prob for class_name, prob in zip(class_idx, probs)}
    # Find the class with the highest probability
    max_prob_class = max(results, key=results.get)
    max_prob = results[max_prob_class]
    # For console debugging, each class probability could be printed instead:
    # for class_name, prob in results.items():
    #     print(f"{class_name}: {prob:.2%}")
    # Return the full distribution and the dominant emotion
    # ("가장 강하게 나타난 감정" = "most strongly expressed emotion")
    return [results, f"가장 강하게 나타난 감정: {max_prob_class}"]


# Set up the Streamlit interface
st.title('감정분석(Sentiment Analysis)')
# Description: reports the proportions of fear, surprise, anger, sadness,
# neutrality, happiness, and disgust expressed in the text
st.markdown('글에 나타난 공포, 놀람, 분노, 슬픔, 중립, 행복, 혐오의 정도를 비율로 알려드립니다.', unsafe_allow_html=True)
# Text area: "Enter text here (under 100 characters recommended)"
user_input = st.text_area("이 곳에 글 입력(100자 이하 권장):")
# Button: "Start"
if st.button('시작'):
    result = inference(user_input)
    st.write(result[0])
    st.write(result[1])
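
# Optional refinement (not part of the original script): Streamlit re-runs the whole
# file on every interaction, so load_model() above reloads BERT each time the button
# is pressed. A minimal sketch of caching the load, assuming a Streamlit version that
# provides st.cache_resource (>= 1.18):
#
#   @st.cache_resource
#   def load_model():
#       ...  # same body as above
#
# With the decorator in place, the (model, tokenizer) pair is created once and reused
# across reruns of the script.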