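# Streamlit RAG chatbot: loads test1.pdf, splits it into chunks, embeds them locally with a
# MiniLM sentence-transformer into a FAISS index, and answers chat questions strictly from
# that PDF via a Groq-hosted model wired into a LangChain RetrievalQA chain.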
import os
import time
import streamlit as st
from groq import Groq
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document as LangchainDocument
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
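# These imports assume the classic `langchain` package layout; PyPDFLoader, the
# HuggingFace embeddings and the FAISS store additionally require pypdf,
# sentence-transformers and faiss-cpu (alongside streamlit and groq) to be installed.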
# ----------------- Page configuration -----------------
st.set_page_config(page_title="چت‌بات ارتش - فقط از PDF", page_icon="🪖", layout="wide")
# Persian styling and background (as before...)
# ----------------- API key -----------------
# Prefer the GROQ_API_KEY environment variable; the hard-coded key is only a fallback.
groq_api_key = os.getenv("GROQ_API_KEY", "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")
# ----------------- Load the PDF and build the index -----------------
@st.cache_resource
def build_pdf_index():
    with st.spinner('📄 در حال پردازش فایل PDF...'):
        # Load every page of the bundled PDF.
        loader = PyPDFLoader("test1.pdf")
        pages = loader.load()
        # Split the PDF text into overlapping chunks.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50
        )
        texts = []
        for page in pages:
            texts.extend(splitter.split_text(page.page_content))
        # Wrap each chunk in a LangChain Document.
        documents = [LangchainDocument(page_content=t) for t in texts]
        # Embed the chunks with a local HuggingFace model and index them in FAISS.
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        vectordb = FAISS.from_documents(documents, embedding=embeddings)
        return vectordb
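# Thanks to @st.cache_resource, the FAISS index above is built once per Streamlit server
# process and reused across reruns, so the PDF is not reloaded and re-embedded on every
# user interaction.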
# ----------------- Build the index from the PDF -----------------
index = build_pdf_index()
# ----------------- Groq LLM setup -----------------
client = Groq(api_key=groq_api_key)  # raw Groq client (not used by the RetrievalQA chain below)
# Groq serves an OpenAI-compatible chat API, so ChatOpenAI is pointed at its base URL.
llm = ChatOpenAI(
    openai_api_key=groq_api_key,
    model_name="deepseek-r1-distill-llama-70b",
    openai_api_base="https://api.groq.com/openai/v1",
)
# ----------------- Retrieval chain -----------------
chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=index.as_retriever(),
    chain_type="stuff",
    input_key="question"
)
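# The "stuff" chain type concatenates the retrieved chunks into a single prompt for the
# model. With input_key="question" the chain can be called either as
#   chain.run("...")                 -> returns the answer string, or
#   chain({"question": "..."})       -> returns a dict with the answer under "result".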
# ----------------- Session state for the chat -----------------
if 'messages' not in st.session_state:
    st.session_state.messages = []
if 'pending_prompt' not in st.session_state:
    st.session_state.pending_prompt = None
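# pending_prompt carries the question across the st.rerun() call below: the user's message
# is first rendered from history, and the model's reply is generated on the next script run.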
# ----------------- Display previous messages -----------------
for msg in st.session_state.messages:
    with st.chat_message(msg['role']):
        st.markdown(f"🗨️ {msg['content']}", unsafe_allow_html=True)
# ----------------- Chat input -----------------
prompt = st.chat_input("سوالی در مورد فایل بپرس...")
if prompt:
    st.session_state.messages.append({'role': 'user', 'content': prompt})
    st.session_state.pending_prompt = prompt
    st.rerun()
# ----------------- Answer the question using only the PDF -----------------
if st.session_state.pending_prompt:
    with st.chat_message('ai'):
        thinking = st.empty()
        thinking.markdown("🤖 در حال فکر کردن از روی PDF...")
        try:
            # Get the answer from the PDF-backed retrieval chain only.
            response = chain.run(f"سوال: {st.session_state.pending_prompt}")
            answer = response.strip()
        except Exception as e:
            answer = f"خطا در پاسخ‌دهی: {str(e)}"
        thinking.empty()
        # Typing animation for the answer.
        full_response = ""
        placeholder = st.empty()
        for word in answer.split():
            full_response += word + " "
            placeholder.markdown(full_response + "▌")
            time.sleep(0.03)
        placeholder.markdown(full_response)
        st.session_state.messages.append({'role': 'ai', 'content': full_response})
    # Clear the pending question so it is not answered again on the next rerun.
    st.session_state.pending_prompt = None