mazenlhm committed on
Commit
d4fbfdb
1 Parent(s): 268b115

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -14
app.py CHANGED
# Legacy version of app.py (pre-commit), reconstructed with its visible
# defects fixed:
#   * `rom IPython.display ...` was a truncated `from` import (SyntaxError).
#   * The prompt f-string referenced `{product}`, which is never defined
#     (the unpacked variable is `product_name`) -> NameError at call time.
#   * `generate_text(inputs)` took a single tuple, but Gradio calls the
#     function with one positional argument per input component.
#   * `gr.inputs.Text` does not exist in the legacy Gradio API; the text
#     input/output component is `Textbox`.
#   * An IPython `Markdown` object was returned to a plain-text output;
#     the decoded string is returned instead.

peft_model_id = "mazenlhm/MarketingModel"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the 8-bit quantized base model; device_map="auto" places layers on
# whatever device(s) are available.
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter weights on top of the base model.
model = PeftModel.from_pretrained(model, peft_model_id)


def generate_text(product_name, description):
    """Generate a marketing email for a product.

    Gradio supplies one positional argument per input component, so the
    two fields arrive as separate parameters rather than a tuple.

    Args:
        product_name: name of the product, inserted in the prompt.
        description: free-text product description.

    Returns:
        The decoded model output (prompt plus generated email) as a string.
    """
    batch = tokenizer(
        "Below is a product and description, please write a marketing email "
        f"for this product.\n\n### Product:\n{product_name}"
        f"\n### Description:\n{description}\n\n### Marketing Email:\n",
        return_tensors="pt",
    )
    # Tokenizer returns CPU tensors; move them to wherever the model lives.
    batch = batch.to(model.device)

    # No gradients are needed for generation.
    with torch.inference_mode(), torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=200)

    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)


inputs = [
    gr.inputs.Textbox(label="Product Name"),
    gr.inputs.Textbox(label="Description"),
]
output = gr.outputs.Textbox(label="Generated Text")
iface = gr.Interface(fn=generate_text, inputs=inputs, outputs=output,
                     title="Product Text Generation")
iface.launch()
# Resolve the PEFT adapter repository and read its config to discover the
# base model it was trained on.
peft_model_id = "mazenlhm/MarketingModel"  # plain string: the f-prefix had no placeholders (F541)
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base causal LM quantized to 8-bit; device_map="auto" distributes
# layers across the available device(s).
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Wrap the base model with the LoRA adapter weights.
model = PeftModel.from_pretrained(model, peft_model_id)
18
 
 
19
 
20
def make_inference(product_name, product_description):
    """Generate a short ad for a product.

    Args:
        product_name: name shown in the prompt header.
        product_description: free-text description of the product.

    Returns:
        The decoded model output (prompt plus generated ad) as a string.
    """
    batch = tokenizer(
        f"### Product and Description:\n{product_name}: {product_description}\n\n### Ad:",
        return_tensors="pt",
    )
    # Fix: the tokenizer returns CPU tensors, but with device_map="auto"
    # the model may live on GPU -- generate() would crash on the device
    # mismatch. Move the batch to the model's device first.
    batch = batch.to(model.device)

    # Fix: run generation without building the autograd graph; inference
    # needs no gradients and this reduces memory use.
    with torch.inference_mode(), torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=50)

    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
# Build the demo UI: two text inputs feed make_inference, and a single
# text output displays the generated ad.
name_box = gr.inputs.Textbox(lines=2, label="Product Name")
description_box = gr.inputs.Textbox(lines=5, label="Product Description")
ad_box = gr.outputs.Textbox(label="Ad")

demo = gr.Interface(
    make_inference,
    [name_box, description_box],
    ad_box,
    title="GenerAd-AI",
    description="GenerAd-AI is a generative model that generates ads for products.",
)
demo.launch()