import streamlit as st
from streamlit_chat import message as st_message
from transformers import BlenderbotTokenizer
from transformers import BlenderbotForConditionalGeneration


def app():
    @st.experimental_singleton(show_spinner=False, suppress_st_warning=True)
    def get_models():
        # Cache the tokenizer and model so they are only loaded once per session;
        # caching may also be necessary with other frameworks, since PyTorch seems
        # to keep an internal state of the conversation.
        model_name = "facebook/blenderbot-400M-distill"
        tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
        model = BlenderbotForConditionalGeneration.from_pretrained(model_name)
        return tokenizer, model


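    # The chat history lives in session state so it survives Streamlit reruns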
    if "history" not in st.session_state:
        st.session_state.history = []

    st.title("IoT Chat Robot")
    st.write("Just chat with a friendly and smart AI developed by Meta")
    st.markdown("## ")  # empty header used as vertical spacing


    def generate_answer():
        tokenizer, model = get_models()
        user_message = st.session_state.input_text
        # Tokenize the prompt and let BlenderBot generate a reply
        inputs = tokenizer(user_message, return_tensors="pt")
        result = model.generate(**inputs)
        message_bot = tokenizer.decode(
            result[0], skip_special_tokens=True
        )  # skip_special_tokens strips markers such as <s> and </s>

        # Store both sides of the exchange so the conversation can be re-rendered
        st.session_state.history.append({"message": user_message, "is_user": True})
        st.session_state.history.append({"message": message_bot, "is_user": False})


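    # Pressing Enter triggers generate_answer via the on_change callback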
    st.text_input("Enter your prompt...", key="input_text", on_change=generate_answer)

    # Re-render the full conversation from session state on every rerun
    for chat in st.session_state.history:
        st_message(**chat)  # unpack the dict into st_message's keyword arguments