Upload 3 files
- app.py +48 -0
- requirements.txt +6 -0
- text_classification_model.h5 +3 -0
app.py
ADDED
#!/usr/bin/env python
# coding: utf-8

import os

import streamlit as st
import google.generativeai as genai
from tensorflow.keras.models import load_model
from transformers import logging

# Suppress error output coming from the Hugging Face Transformers library
logging.set_verbosity_error()

st.title('Chat with Me')

# Load the bundled Keras text classifier; note that it is not used by the chat
# flow below, which talks to the Gemini API instead
classifier = load_model('text_classification_model.h5')

# Configure the Gemini client; reading the key from the GOOGLE_API_KEY
# environment variable (e.g. a Space secret) and the 'gemini-pro' model name
# are assumptions, not part of the original upload
genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
model = genai.GenerativeModel('gemini-pro')

# Chat history: the genai chat object keeps its own list of user/model turns
if 'chat' not in st.session_state:
    st.session_state.chat = model.start_chat(history=[])

# Input for user question
soru = st.text_input('Sor:')

# Extract the text of the first part of a response (empty string if there is none)
def extract_text(response_parts):
    return response_parts[0].text.strip() if response_parts[0].text else ""

# Handle "Sor" (Ask) button click
if st.button('Sor'):
    if soru:
        # send_message appends both the user turn and the model turn to chat.history
        response = st.session_state.chat.send_message(soru)
        st.session_state.last_question = soru
        st.session_state.last_response = extract_text(response.parts)
        st.rerun()  # st.experimental_rerun() has been removed from recent Streamlit releases

# Display chat history (newest turn first)
for message in reversed(st.session_state.chat.history):
    if message.role == 'user':
        st.markdown(f'<div style="text-align: right; background-color: #2F2F2F; padding: 10px; border-radius: 10px; margin: 10px; width: fit-content;">👤 Sen: {extract_text(message.parts)}</div>', unsafe_allow_html=True)
    elif message.role == 'model':
        st.markdown(f'<div style="text-align: left; background-color: #2E2E2E; padding: 10px; border-radius: 10px; margin: 10px; width: fit-content;">🤖 Bot: {extract_text(message.parts)}</div>', unsafe_allow_html=True)

# Handle "Yeni Sohbet" (New Chat) button click
if st.button('Yeni Sohbet'):
    st.session_state.chat = model.start_chat(history=[])
    st.session_state.last_question = ''
    st.session_state.last_response = ''
    st.rerun()
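The chat flow above never actually consumes text_classification_model.h5, even though the commit ships it. Below is a minimal sketch for checking that the bundled classifier loads at all, assuming it is a standard Keras model saved with model.save(); the file name comes from the commit, while the script name and the idea of inspecting it separately are hypothetical.

# inspect_model.py - hypothetical helper, not part of this upload
from tensorflow.keras.models import load_model

# Load the bundled classifier from the repository root
classifier = load_model('text_classification_model.h5')

# Print the layer structure and the input/output shapes the model expects
classifier.summary()
print('input shape: ', classifier.input_shape)
print('output shape:', classifier.output_shape)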
requirements.txt
ADDED
streamlit
tensorflow
opencv-python
google-generativeai
transformers
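Note that the pip package names above do not all match their import names: opencv-python is imported as cv2, and the google-generativeai package used by app.py is imported as google.generativeai. A small hypothetical smoke test (not part of the upload) that each dependency resolves after installation:

# check_deps.py - hypothetical import check, not part of this upload
import importlib

# Import names corresponding to the packages listed in requirements.txt
modules = ['streamlit', 'tensorflow', 'cv2', 'google.generativeai', 'transformers']
for name in modules:
    module = importlib.import_module(name)
    print(f'{name}: {getattr(module, "__version__", "version unknown")}')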
text_classification_model.h5
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:eb3d12d5919838adf305b2dc557934279d5b2f29bcdc234b360a3fee4a2b94c7
size 297856
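This file is a Git LFS pointer rather than the model weights themselves: it records the SHA-256 digest and byte size of the real .h5 blob, which Git LFS fetches on checkout. A hedged sketch for verifying that a locally checked-out copy matches the pointer, using only the values shown above:

# verify_lfs.py - hypothetical integrity check against the pointer above
import hashlib

EXPECTED_OID = 'eb3d12d5919838adf305b2dc557934279d5b2f29bcdc234b360a3fee4a2b94c7'
EXPECTED_SIZE = 297856

with open('text_classification_model.h5', 'rb') as f:
    data = f.read()

print('size matches:', len(data) == EXPECTED_SIZE)
print('oid matches: ', hashlib.sha256(data).hexdigest() == EXPECTED_OID)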