import time
from typing import List

import streamlit as st
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.base import Embeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import RecursiveCharacterTextSplitter
from together import Together
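
# Streamlit RAG chatbot: it indexes a local PDF with Together embeddings through
# a custom LangChain Embeddings adapter, then answers questions in Persian via a
# RetrievalQA chain backed by a Together-hosted Llama model.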
# Page setup; the Persian title reads "Army Chatbot".
st.set_page_config(page_title="چت بات ارتش", page_icon="🪖", layout="wide")

# Right-to-left (RTL) styling for the Persian chat UI.
st.markdown("""
    <style>
    .main {
        background-color: #f9f9f9;
    }
    .stChatMessage {
        background-color: #e0f7fa;
        border-radius: 12px;
        padding: 12px;
        margin-bottom: 12px;
        direction: rtl;
        text-align: right;
        font-family: 'Tahoma', sans-serif;
    }
    .stMarkdown, .stTextInput, .stTextArea, .stButton {
        direction: rtl !important;
        text-align: right !important;
        font-family: 'Tahoma', sans-serif;
    }
    .header-container {
        display: flex;
        flex-direction: column;
        align-items: center;
        justify-content: center;
        margin-top: 30px;
        margin-bottom: 40px;
    }
    .header-container img {
        width: 140px;
        border-radius: 15px;
        margin-bottom: 12px;
        box-shadow: 0 4px 15px rgba(0,0,0,0.2);
    }
    .header-container h1 {
        font-size: 36px;
        color: #2c3e50;
        font-family: 'Tahoma', sans-serif;
        margin: 0;
    }
    </style>
""", unsafe_allow_html=True)
st.markdown("""
<div class="header-container">
<img src="army.png" alt="لوگو">
<h1>هوش مصنوعی توانا</h1>
</div>
""", unsafe_allow_html=True)
class TogetherEmbeddings(Embeddings):
    def __init__(self, model_name: str, api_key: str):
        self.model_name = model_name
        self.client = Together(api_key=api_key)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        # One batched call for all chunks; the API returns one vector per input.
        response = self.client.embeddings.create(model=self.model_name, input=texts)
        return [item.embedding for item in response.data]

    def embed_query(self, text: str) -> List[float]:
        return self.embed_documents([text])[0]
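
# Build the vector index from the PDF once per session; st.cache_resource keeps
# it in memory across Streamlit reruns so the PDF is not re-embedded on every
# interaction.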
@st.cache_resource
def get_pdf_index():
    # Persian spinner text: "Processing the PDF file..."
    with st.spinner('📄 در حال پردازش فایل PDF...'):
        loaders = [PyPDFLoader('test1.pdf')]
        embeddings = TogetherEmbeddings(
            model_name="togethercomputer/m2-bert-80M-8k-retrieval",
            api_key="0291f33aee03412a47fa5d8e562e515182dcc5d9aac5a7fb5eefdd1759005979"
        )
        return VectorstoreIndexCreator(
            embedding=embeddings,
            text_splitter=RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=0)
        ).from_loaders(loaders)
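
# The Together API key is hardcoded above and in the LLM below. A safer pattern
# (a sketch, assuming a TOGETHER_API_KEY entry exists in .streamlit/secrets.toml)
# would be:
#   together_api_key = st.secrets["TOGETHER_API_KEY"]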
index = get_pdf_index()

# Chat model served through Together's OpenAI-compatible endpoint.
llm = ChatOpenAI(
    base_url="https://api.together.xyz/v1",
    api_key='0291f33aee03412a47fa5d8e562e515182dcc5d9aac5a7fb5eefdd1759005979',
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo-Free"
)

# "stuff" chain: retrieved chunks are stuffed into a single prompt for the LLM.
chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type='stuff',
    retriever=index.vectorstore.as_retriever(),
    input_key='question'
)
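
# Chat state: `messages` holds the rendered history; `pending_prompt` defers the
# model call to the rerun after the user's message has been appended, so the
# question appears in the transcript before the answer is generated.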
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'pending_prompt' not in st.session_state:
    st.session_state.pending_prompt = None

# Re-render the full conversation on every rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)
# Persian input placeholder: "How can I help?"
prompt = st.chat_input("چطور میتونم کمک کنم؟")
if prompt:
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    st.session_state.pending_prompt = prompt
    st.rerun()
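
# Answer the queued question: run the RetrievalQA chain, strip any
# "Helpful Answer:" prefix the default stuff prompt can emit, then stream the
# text word by word for a typing effect.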
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        thinking = st.empty()
        # Persian status text: "Thinking..."
        thinking.markdown("🤖 در حال فکر کردن...")

        # The instruction prepended to the question reads: "Answer only in Persian. Question: ..."
        response = chain.run(f'پاسخ را فقط به زبان فارسی جواب بده. سوال: {st.session_state.pending_prompt}')
        answer = response.split("Helpful Answer:")[-1].strip()
        if not answer:
            # Fallback reads: "Sorry, I don't have precise information about this."
            answer = "متأسفم، اطلاعات دقیقی در این مورد ندارم."

        thinking.empty()

        # Simulated typing: reveal the answer word by word with a cursor marker.
        full_response = ""
        placeholder = st.empty()
        for word in answer.split():
            full_response += word + " "
            placeholder.markdown(full_response + "▌")
            time.sleep(0.03)
        placeholder.markdown(full_response)

    st.session_state.messages.append({'role': 'ai', 'content': full_response})
    st.session_state.pending_prompt = None