import random

import faiss
import streamlit as st
from sentence_transformers import SentenceTransformer
from transformers import pipeline

# st.set_page_config must be the first Streamlit command executed, so call it
# here, before the cached model builders below ever run.
st.set_page_config(page_title="Noor-e-Hidayat", layout="centered")

# ----------------- DATA SECTION -----------------

# Sample Qur'an and Hadith passages (swap in a fuller dataset later)
quran_data = [
    {
        "source": "Surah Al-Baqarah, Ayah 2",
        "text": "This is the Book about which there is no doubt, a guidance for those conscious of Allah."
    },
    {
        "source": "Surah Al-Ikhlas, Ayah 1",
        "text": "Say, 'He is Allah, [who is] One.'"
    }
]

hadith_data = [
    {
        "source": "Sahih Bukhari, Book 2, Hadith 13",
        "text": "None of you will have faith till he wishes for his brother what he likes for himself."
    },
    {
        "source": "Sahih Muslim, Book 1, Hadith 1",
        "text": "Actions are judged by intentions."
    }
]
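# The inline samples above are placeholders; a larger corpus could be loaded
# from a JSON file instead (hypothetical file name and layout):
#
#   import json
#   with open("passages.json", encoding="utf-8") as f:
#       data = json.load(f)
#   quran_data, hadith_data = data["quran"], data["hadith"]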

# ----------------- EMBEDDING + FAISS -----------------

@st.cache_resource
def build_index(passages):
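    # Embed every passage once with a compact Sentence-BERT model and store the
    # vectors in an exact (brute-force) L2 index. st.cache_resource keeps the
    # model and index in memory across Streamlit reruns.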
    model = SentenceTransformer('all-MiniLM-L6-v2')
    texts = [p['text'] for p in passages]
    embeddings = model.encode(texts)  # float32 array of shape (n_passages, 384)
    index = faiss.IndexFlatL2(embeddings.shape[1])
    index.add(embeddings)
    return model, index, passages

model, index, passages = build_index(quran_data + hadith_data)  # one shared index over both corpora

def retrieve_passages(query, k=3):
    # Embed the query and return its k nearest passages by L2 distance.
    query_vec = model.encode([query])
    k = min(k, index.ntotal)  # FAISS pads missing results with -1 otherwise
    _scores, idxs = index.search(query_vec, k)
    return [passages[i] for i in idxs[0] if i != -1]

# ----------------- TRANSLATION -----------------

@st.cache_resource
def load_translators():
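    # One cached loader for both MarianMT pipelines, so switching the UI
    # language never re-downloads or re-instantiates the models.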
    trans_ur = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ur")
    trans_ar = pipeline("translation", model="Helsinki-NLP/opus-mt-en-ar")
    return trans_ur, trans_ar

translator_ur, translator_ar = load_translators()

def translate(text, lang):
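    # Passages are stored in English, so English needs no translation pass.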
    if lang == "Urdu":
        return translator_ur(text)[0]['translation_text']
    elif lang == "Arabic":
        return translator_ar(text)[0]['translation_text']
    return text

# ----------------- DAILY VERSES -----------------

def get_random_ayah():
    return random.choice(quran_data)

def get_random_hadith():
    return random.choice(hadith_data)

# ----------------- STREAMLIT UI -----------------

st.title("πŸ•ŠοΈ Noor-e-Hidayat – Your Islamic AI Assistant")

lang = st.selectbox("🌐 Choose Language", ["English", "Urdu", "Arabic"])
st.markdown("---")

st.subheader("πŸ” Ask Noor-e-Hidayat")
query = st.text_input("Type your question related to Qur’an, Hadith, or Islamic guidance...")

if query:
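    # Show the top-k sources in ranked order, translated to the chosen language.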
    results = retrieve_passages(query)
    for r in results:
        st.markdown(f"πŸ“– **{r['source']}**")
        st.write(translate(r['text'], lang))
        st.markdown("---")

st.subheader("πŸ“œ Ayah of the Day")
ayah = get_random_ayah()
st.info(f"**{ayah['source']}**\n\n{translate(ayah['text'], lang)}")

st.subheader("πŸ“œ Hadith of the Day")
hadith = get_random_hadith()
st.success(f"**{hadith['source']}**\n\n{translate(hadith['text'], lang)}")

st.markdown("---")
st.caption("βš™οΈ Powered by Transformers, Sentence-BERT, and FAISS β€’ Built with ❀️ using Streamlit")