File size: 3,813 Bytes
b4e5268
8f64959
55a8b20
dd507bb
4f4aca6
 
b4e5268
 
 
 
 
397c421
0a98570
9b1d956
6a7d03a
 
2cc0376
4f4aca6
 
 
37b2fc4
6a7d03a
 
 
 
 
 
 
 
 
 
9d68da3
b956157
b4e5268
 
 
b956157
 
b4e5268
 
 
b14ae52
b4e5268
 
 
 
 
 
 
 
 
 
 
 
4f4aca6
 
 
dc9b093
4f4aca6
b4e5268
 
 
 
 
 
 
 
4f4aca6
b4e5268
 
 
 
 
 
 
 
 
ab55f29
 
2651861
 
ab55f29
 
 
4f4aca6
ab55f29
 
 
2b8b939
 
bd19f91
2b8b939
 
 
 
 
 
 
 
2651861
2b8b939
ab55f29
2651861
ab55f29
 
 
 
 
2651861
 
 
 
ab55f29
2651861
 
0f28a78
 
83234d6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import os

import streamlit as st

# LangChain / Hugging Face imports, deduplicated and grouped.
# Defects fixed vs. the original import block:
#   * PromptTemplate was imported twice.
#   * HuggingFaceEmbeddings was imported from both langchain_community and the
#     deprecated langchain.embeddings.huggingface path; the deprecated import
#     came last and silently overrode the maintained one. Only the
#     langchain_community path is kept.
from huggingface_hub import login
from langchain.chains import LLMChain, RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_huggingface import HuggingFaceEndpoint
from langchain_text_splitters import CharacterTextSplitter

# Authenticate against the Hugging Face Hub with the token stored in
# Streamlit secrets (needed for the Mistral inference endpoint below).
login(token=st.secrets["HF_TOKEN"])

# Build the retrieval index at app startup (the original "Montez Google
# Drive" comment was a Colab leftover; the PDF is read from the app's
# working directory here).
loader = PyPDFLoader("test-1.pdf")
data = loader.load()
# split the documents into chunks
# chunk_size=512 characters, no overlap, splitting on blank lines.
text_splitter1 = CharacterTextSplitter(chunk_size=512, chunk_overlap=0,separator="\n\n")
texts = text_splitter1.split_documents(data)
# Embed every chunk with a MiniLM sentence-transformer and store the vectors
# in an in-memory FAISS index.
db = FAISS.from_documents(texts,
                          HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2'))



# Expose the index as a retriever using Maximal Marginal Relevance search,
# returning a single chunk per query (k=1).
retriever = db.as_retriever(
    search_type="mmr",
    search_kwargs={'k': 1}
)


# Mistral-Instruct style prompt ([INST] ... [/INST]) with two slots:
# {context} is filled with the retrieved chunk(s) and {question} with the
# user's message. The instructions repeatedly force French-only answers.
# NOTE(review): the template text is a runtime string and is kept verbatim.
prompt_template = """
### [INST]
Instruction: You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided without using prior knowledge.You answer in FRENCH
        Analyse carefully the context and provide a direct answer based on the context. If the user said Bonjour you answer with Hi! comment puis-je vous aider?
Answer in french only
{context}
Vous devez répondre aux questions en français.

### QUESTION:
{question}
[/INST]
Answer in french only
 Vous devez répondre aux questions en français.

 """

# Hosted Mistral-7B-Instruct endpoint used as the answering LLM.
repo_id = "mistralai/Mistral-7B-Instruct-v0.2"

# Low temperature (0.05) keeps answers close to the retrieved context.
# NOTE(review): `max_length` does not look like a HuggingFaceEndpoint
# parameter -- the generation-length knob is `max_new_tokens`; confirm this
# kwarg is actually honored and not silently ignored by the endpoint.
mistral_llm = HuggingFaceEndpoint(
    repo_id=repo_id, max_length=512, temperature=0.05, huggingfacehub_api_token=st.secrets["HF_TOKEN"]
)

# Create prompt from prompt template.
# BUG FIX: the template references both {context} and {question}, but the
# original declared only "question" as an input variable. The "stuff"
# RetrievalQA chain below fills {context} with the retrieved documents, so
# it must be declared as well.
prompt = PromptTemplate(
    input_variables=["context", "question"],
    template=prompt_template,
)

# Create llm chain
# NOTE(review): `llm_chain` is never used anywhere else in this file -- the
# app answers exclusively through the RetrievalQA chain `qa` below. This is
# dead code and a candidate for removal; kept as-is to avoid behavior change.
llm_chain = LLMChain(llm=mistral_llm, prompt=prompt)


# Retrieval-augmented QA chain. chain_type="stuff" concatenates the retrieved
# chunk(s) into the {context} slot of `prompt` and issues a single LLM call.
# (The redundant `retriever.search_kwargs = {'k': 1}` reassignment was
# removed: k=1 is already configured when the retriever is created above,
# so dropping it does not change behavior.)
qa = RetrievalQA.from_chain_type(
    llm=mistral_llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
)
import streamlit as st  # NOTE(review): redundant -- streamlit is already imported at the top of the file.

# Streamlit interface with improved aesthetics
# NOTE(review): Streamlit expects set_page_config to be the first st.* UI
# call in the script -- verify no earlier call breaks this at runtime.
st.set_page_config(page_title="Chatbot Interface", page_icon="🤖")

# Bridge between the Streamlit input widget and the QA chain.
def chatbot_response(user_input):
    """Run the RetrievalQA chain on *user_input* and return its answer string."""
    return qa.run(user_input)

# Streamlit components
# Page header / branding for the French-language assistant.
st.markdown("# 🤖 ALTER-IA BOT, ton assistant virtuel de tous les jours")
st.markdown("## Votre Réponse à Chaque Défi Méthodologique 📈")

# Create columns for logos
# Narrow side columns hold the two logos; the wide middle column is a spacer.
col1, col2, col3 = st.columns([1, 6, 1])

with col1:
    # NOTE(review): use_column_width is deprecated in recent Streamlit
    # releases in favour of use_container_width -- confirm the deployed
    # Streamlit version before changing.
    st.image("Design 3_2 (1).png", use_column_width=True)  # Adjust image path and size as needed

with col3:
    st.image("Altereo logo 2023 original - eau et territoires durables.png", use_column_width=True)  # Adjust image path and size as needed

# Input and button for user interaction
user_input = st.text_input("You:", "")
submit_button = st.button("Send 📨")

# Handle user input
# On click: non-empty input is routed through the QA chain and both sides of
# the exchange are echoed as markdown block quotes; empty input shows a warning.
if submit_button:
    if user_input.strip() != "":
        bot_response = chatbot_response(user_input)
        st.markdown("### You:")
        st.markdown(f"> {user_input}")
        st.markdown("### Bot:")
        st.markdown(f"> {bot_response}")
    else:
        st.warning("⚠️ Please enter a message.")

# Motivational quote at the bottom
st.markdown("---")
st.markdown("*La collaboration est la clé du succès. Chaque question trouve sa réponse, chaque défi devient une opportunité.*")