amir22010 committed
Commit 4f6e6aa
1 Parent(s): 9fed6cd

updated app.py

Files changed (2)
  1. app.py +46 -3
  2. requirements.txt +3 -0
app.py CHANGED
@@ -1,7 +1,50 @@
 import gradio as gr
+from llama_cpp import Llama
 
-def greet(name):
-    return "Hello " + name + "!!"
+llm = Llama.from_pretrained(
+    repo_id="amir22010/fine_tuned_product_marketing_email_gemma_2_9b_q4_k_m",
+    filename="unsloth.Q4_K_M.gguf",
+    verbose=False
+)
 
-demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+#marketing prompt
+marketing_email_prompt = """Below is a product and description, please write a marketing email for this product.
+
+### Product:
+{}
+
+### Description:
+{}
+
+### Marketing Email:
+{}"""
+
+def greet(product,description):
+    output = llm.create_chat_completion(
+        messages=[
+            {
+                "role": "system",
+                "content": "Your go-to Email Marketing Guru - I'm here to help you craft compelling campaigns, boost conversions, and take your business to the next level.",
+            },
+            {"role": "user", "content": marketing_email_prompt.format(
+                product, # product
+                description, # description
+                "", # output - leave this blank for generation!
+            )},
+        ],
+        # response_format={
+        #     "type": "json_object",
+        # },
+        max_tokens=8192,
+        temperature=0.7,
+        stream=True
+    )
+    for chunk in output:
+        delta = chunk['choices'][0]['delta']
+        if 'role' in delta:
+            yield delta['role']
+        elif 'content' in delta:
+            yield delta['content']
+
+demo = gr.Interface(fn=greet, inputs=["text","text"], outputs="text")
 demo.launch()
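
Note on the streaming loop in the new greet(): a Gradio Interface treats each value yielded by a generator as the complete output, so yielding only the per-chunk delta (and the initial role-only chunk) displays just the latest fragment rather than the growing email. A minimal sketch of an accumulating variant, not part of this commit, assuming the same llm and marketing_email_prompt objects defined in app.py above:

# Sketch only (not part of the commit): accumulate streamed deltas so the
# Gradio textbox shows the marketing email growing as it is generated.
# Assumes `llm` and `marketing_email_prompt` from app.py above.
def greet(product, description):
    output = llm.create_chat_completion(
        messages=[
            {"role": "user",
             "content": marketing_email_prompt.format(product, description, "")},
        ],
        max_tokens=8192,
        temperature=0.7,
        stream=True,
    )
    partial = ""
    for chunk in output:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:          # skip the role-only first chunk
            partial += delta["content"]
            yield partial               # each yield replaces the displayed text

The system message is dropped here only to keep the sketch short; keeping greet() a generator is what lets gr.Interface stream its output.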
requirements.txt ADDED
@@ -0,0 +1,3 @@
+torch
+huggingface-hub
+llama-cpp-python