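"""jurischat app.py

Retrieval-augmented legal Q&A demo: the user's question is embedded with a
sentence-transformers model, the ten most similar court rulings are retrieved
from a precomputed embedding corpus, and Gemini is asked to answer the
question based on those rulings. The app is served through a Gradio interface.
"""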
import os
import pickle

import google.generativeai as genai
import gradio as gr
import torch
from sentence_transformers import SentenceTransformer, util

# Debug: show the working directory of the Space.
print(os.getcwd())

# Load the precomputed corpus embeddings and the corresponding ruling texts.
with open("/home/user/app/embmmn7.obj", "rb") as fileobj:
    corpus_embeddings, corpus = pickle.load(fileobj)

# Model used to embed user queries into the same space as the corpus.
embedder = SentenceTransformer("ramdane/jurimodel")

# The Gemini API key is read from the environment (e.g. a Space secret)
# rather than being hardcoded in the source.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
# Deterministic generation settings for Gemini.
generation_config = {
    "temperature": 0,
    "top_p": 1,
    "top_k": 1,
    "max_output_tokens": 2048,
}

# Disable safety blocking so answers drawn from legal texts are not filtered.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

model = genai.GenerativeModel(
    model_name="gemini-1.0-pro-001",
    generation_config=generation_config,
    safety_settings=safety_settings,
)
def show(queries):
    """Answer a question using the most relevant retrieved court rulings."""
    # Embed the query and retrieve the 10 most similar rulings from the corpus.
    query_embedding = embedder.encode(queries, convert_to_tensor=True)
    hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=10)
    hits = hits[0]  # Hits for the first (and only) query.

    # Seed the chat history with the retrieved rulings so the model can cite them.
    history = []
    for hit in hits:
        history.append({"role": "user", "parts": [corpus[hit["corpus_id"]]]})
        history.append({"role": "model", "parts": ["حسنا"]})  # "Okay"

    convo = model.start_chat(history=history)
    # Prompt (Arabic): "Based on the rulings above, answer the following
    # question, citing the ruling you relied on."
    convo.send_message(
        " اجب من خلال ما سبق من اجتهادات على السؤال التالي مع دكر الاجتهاد الدي اعتمدت عليه"
        + queries
    )
    return convo.last.text
# Gradio UI: each request calls show() and starts a fresh chat, so no
# conversation history is cached between queries.
app = gr.Interface(
    fn=show,
    inputs=gr.Textbox(label="إسئل وسيتم الاجابة عن طريق الاجتهادات القضائية"),  # "Ask; answers come from judicial case law."
    outputs=gr.TextArea(label="استنتاج النموذج"),  # "The model's conclusion"
)
app.launch()