shivanikerai committed
Commit 9568320
1 Parent(s): 86d9873

Update app.py

Files changed (1):
  1. app.py +18 -18
app.py CHANGED
@@ -4,10 +4,10 @@ import gradio as gr
 # Load model directly
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
-pipe = pipeline("text-generation", model="shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
+# pipe = pipeline("text-generation", model="shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
 
-# tokenizer = AutoTokenizer.from_pretrained("shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
-# model = AutoModelForCausalLM.from_pretrained("shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
+tokenizer = AutoTokenizer.from_pretrained("shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
+model = AutoModelForCausalLM.from_pretrained("shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0")
 def generate_title_suggestions(keywords, product_info):
     # Define the roles and markers
     B_SYS, E_SYS = "<<SYS>>", "<</SYS>>"
@@ -21,29 +21,29 @@ def generate_title_suggestions(keywords, product_info):
 
     # print("Prompt:")
     # print(prompt)
-    predictions = pipeline(prompt)
-    output=((predictions[0]['generated_text']).split(B_out)[-1]).strip()
-    return (output)
-    # encoding = tokenizer(prompt, return_tensors="pt").to("cuda:0")
-    # output = model.generate(input_ids=encoding.input_ids,
-    #                         attention_mask=encoding.attention_mask,
-    #                         max_new_tokens=1024,
-    #                         do_sample=True,
-    #                         temperature=0.01,
-    #                         eos_token_id=tokenizer.eos_token_id,
-    #                         top_k=0)
+    # predictions = pipeline(prompt)
+    # output=((predictions[0]['generated_text']).split(B_out)[-1]).strip()
+    # return (output)
+    encoding = tokenizer(prompt, return_tensors="pt").to("cuda:0")
+    output = model.generate(input_ids=encoding.input_ids,
+                            attention_mask=encoding.attention_mask,
+                            max_new_tokens=1024,
+                            do_sample=True,
+                            temperature=0.01,
+                            eos_token_id=tokenizer.eos_token_id,
+                            top_k=0)
 
     # print()
 
     # Subtract the length of input_ids from output to get only the model's response
-    # output_text = tokenizer.decode(output[0, len(encoding.input_ids[0]):], skip_special_tokens=False)
-    # output_text = re.sub('\n+', '\n', output_text) # remove excessive newline characters
+    output_text = tokenizer.decode(output[0, len(encoding.input_ids[0]):], skip_special_tokens=False)
+    output_text = re.sub('\n+', '\n', output_text) # remove excessive newline characters
 
     # print("Generated Assistant Response:")
-    # print(output_text)
+    return (output_text)
 gr.Interface(
     generate_title_suggestions,
     inputs=['text','text'],
     outputs='text',
-    title="Title Suggestion",
+    title="SEO Optimised Title Suggestion",
 ).launch()
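
For context: this commit switches the Space from the high-level pipeline wrapper (which was also buggy, since generate_title_suggestions called the pipeline factory function rather than the pipe object) to direct tokenizer/model.generate usage. Below is a minimal standalone sketch of the new path, under two assumptions not visible in the hunks: that app.py imports re near the top of the file, and that a CUDA device is available (drop the .to("cuda:0") calls to run on CPU). The prompt string is a hypothetical placeholder; the real one is built from keywords and product_info with the <<SYS>> markers inside generate_title_suggestions.

import re
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "shivanikerai/TinyLlama-1.1B-Chat-v1.0-seo-optimised-title-suggestion-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda:0")

prompt = "..."  # hypothetical placeholder; app.py builds this from keywords/product_info
encoding = tokenizer(prompt, return_tensors="pt").to("cuda:0")
output = model.generate(input_ids=encoding.input_ids,
                        attention_mask=encoding.attention_mask,
                        max_new_tokens=1024,
                        do_sample=True,
                        temperature=0.01,  # near-greedy sampling
                        eos_token_id=tokenizer.eos_token_id,
                        top_k=0)
# Decode only the tokens generated after the prompt
output_text = tokenizer.decode(output[0, len(encoding.input_ids[0]):], skip_special_tokens=False)
output_text = re.sub('\n+', '\n', output_text)  # collapse repeated newlines
print(output_text)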