Spaces:
Sleeping
Sleeping
File size: 3,100 Bytes
a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 ede7310 a43d5d1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 |
import requests
import streamlit as st
from langchain.llms import CTransformers
from langchain.prompts import PromptTemplate
import os
def download_model(url: str = 'https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin',
                   file_name: str = '') -> None:
    """
    Download a model file and save it to the current directory.

    :param url: Location to download from; defaults to the Llama-2-7B-Chat
        GGML q8_0 weights on Hugging Face.
    :param file_name: Destination file name; when empty, it is derived from
        the last path segment of ``url``.
    :raises requests.HTTPError: if the server responds with an error status.
    """
    if not file_name:
        file_name = url.split('/')[-1]
    # Stream in chunks so the multi-GB model never has to fit in memory;
    # the context manager guarantees the connection is released.
    with requests.get(url, stream=True, timeout=60) as response:
        # Fail loudly instead of silently saving an HTML error page as the model.
        response.raise_for_status()
        with open(file_name, 'wb') as file:
            # 1 MiB chunks: the original 1 KiB made millions of tiny writes.
            for chunk in response.iter_content(chunk_size=1024 * 1024):
                if chunk:
                    file.write(chunk)
    print("File downloaded successfully!")
def getLLMResponse(form_input: str, email_sender: str, email_recipient: str, email_style: str) -> str:
    """
    Generate an email with the local Llama-2 chat model.

    :param form_input: Email topic provided by the user.
    :param email_sender: Sender name provided by the user.
    :param email_recipient: Recipient name provided by the user.
    :param email_style: Writing style provided by the user.
    :return: The generated email text.
    """
    # NOTE(review): the 7B model is re-instantiated from disk on every call,
    # which is slow; caching the CTransformers instance (e.g. with
    # st.cache_resource) would help — left unchanged to keep this function
    # free of UI-framework coupling.
    llm = CTransformers(model='llama-2-7b-chat.ggmlv3.q8_0.bin',
                        model_type='llama',
                        # Near-zero temperature -> effectively deterministic output.
                        config={'max_new_tokens': 256,
                                'temperature': 0.01})
    template = """
    Write an email with {style} style and includes topic :{email_topic}.\n\nSender: {sender}\nRecipient: {recipient}
    \n\nEmail Text:
    """
    prompt = PromptTemplate(
        input_variables=["style", "email_topic", "sender", "recipient"],
        template=template,
    )
    response = llm(prompt.format(email_topic=form_input,
                                 sender=email_sender,
                                 recipient=email_recipient,
                                 style=email_style))
    # Removed leftover debug print(response); the caller renders the result.
    return response
# --- Page chrome ---------------------------------------------------------
st.set_page_config(page_title="Generate Emails",
                   page_icon='📧',
                   layout='centered',
                   initial_sidebar_state='collapsed')
st.header("Generate Emails 📧")

# Gate the form behind a one-time model load, tracked in session state so it
# survives Streamlit reruns.
if not st.session_state.get('model_loaded', False):
    if st.button('Load Model'):
        weights_file = 'llama-2-7b-chat.ggmlv3.q8_0.bin'
        if not os.path.isfile(weights_file):
            st.info('Loading the model, this could take ~5 minutes')
            download_model()
        st.session_state.model_loaded = True
        st.info('Model loaded successfully')

# Re-read session state here (not the pre-button value): a click above makes
# the form appear during the very same run.
if st.session_state.get('model_loaded'):
    topic_text = st.text_area('Enter the email topic', height=275)

    sender_col, recipient_col, style_col = st.columns([10, 10, 5])
    with sender_col:
        sender_name = st.text_input('Sender Name')
    with recipient_col:
        recipient_name = st.text_input('Recipient Name')
    with style_col:
        tone_choice = st.selectbox('Writing Style',
                                   ('Formal', 'Appreciating', 'Not Satisfied', 'Neutral'),
                                   index=0)

    if st.button("Generate"):
        st.write(getLLMResponse(topic_text, sender_name, recipient_name, tone_choice))
else:
    st.write("Please load the model to proceed.")
|