Hemasagar committed on
Commit
c51fa22
1 Parent(s): 4152ada

Create app.py

Files changed (1)
  1. app.py +73 -0
app.py ADDED
@@ -0,0 +1,73 @@
+ import streamlit as st
+ from langchain.prompts import PromptTemplate
+ # The import below has been superseded by the langchain_community version
+ # from langchain.llms import CTransformers
+ from langchain_community.llms import CTransformers
+
+ # Function to get the response back from the LLM
+ def getLLMResponse(form_input, email_sender, email_recipient, email_style):
+     # llm = OpenAI(temperature=.9, model="text-davinci-003")
+
+     # Wrapper for Llama-2-7B-Chat, running Llama 2 on CPU
+
+     # Quantization reduces model precision by converting weights from 16-bit floats to 8-bit integers,
+     # enabling efficient deployment on resource-limited devices by shrinking model size while largely maintaining performance.
+
+     # C Transformers supports a range of open-source models,
+     # among them popular ones such as Llama, GPT4All-J, MPT, and Falcon.
+
+
+     # C Transformers is the Python library that provides bindings for transformer models implemented in C/C++ on top of the GGML library
+
+     llm = CTransformers(model='models/llama-2-7b-chat.ggmlv3.q8_0.bin',  # https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/tree/main
+                         model_type='llama',
+                         config={'max_new_tokens': 256,
+                                 'temperature': 0.01})
+
+
+     # Template for building the PROMPT
+     template = """
+     Write an email in a {style} style on the topic: {email_topic}.\n\nSender: {sender}\nRecipient: {recipient}
+     \n\nEmail Text:
+
+     """
+
+     # Creating the final PROMPT
+     prompt = PromptTemplate(
+         input_variables=["style", "email_topic", "sender", "recipient"],
+         template=template)
+
+
+     # Generating the response using the LLM
+     # LangChain now recommends calling the model through its 'invoke' method
+     response = llm.invoke(prompt.format(email_topic=form_input, sender=email_sender, recipient=email_recipient, style=email_style))
+     print(response)
+
+     return response
+
+
+ st.set_page_config(page_title="Generate Emails",
+                    page_icon='📧',
+                    layout='centered',
+                    initial_sidebar_state='collapsed')
+ st.header("Generate Emails 📧")
+
+ form_input = st.text_area('Enter the email topic', height=275)
+
+ # Creating columns for the UI - to receive inputs from the user
+ col1, col2, col3 = st.columns([10, 10, 5])
+ with col1:
+     email_sender = st.text_input('Sender Name')
+ with col2:
+     email_recipient = st.text_input('Recipient Name')
+ with col3:
+     email_style = st.selectbox('Writing Style',
+                                ('Formal', 'Appreciating', 'Not Satisfied', 'Neutral'),
+                                index=0)
+
+
+ submit = st.button("Generate")
+
+ # When the 'Generate' button is clicked, execute the code below
+ if submit:
+     st.write(getLLMResponse(form_input, email_sender, email_recipient, email_style))
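
The app expects the quantized GGML weights to already be on disk at models/llama-2-7b-chat.ggmlv3.q8_0.bin, the file linked in the comment inside getLLMResponse. Below is a minimal sketch of how that file might be fetched before launching the UI, assuming a recent huggingface_hub is installed alongside streamlit, langchain, langchain-community, and ctransformers; the download_model.py name is purely illustrative and not part of this commit.

    # download_model.py -- illustrative helper, not part of this commit
    from huggingface_hub import hf_hub_download

    # Fetch the 8-bit quantized Llama-2-7B-Chat GGML file referenced in app.py
    # and place it under models/, where CTransformers looks for it.
    hf_hub_download(
        repo_id="TheBloke/Llama-2-7B-Chat-GGML",
        filename="llama-2-7b-chat.ggmlv3.q8_0.bin",
        local_dir="models",
    )

With the weights in place, the app can be started locally with streamlit run app.py.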