JavierGon12 committed on
Commit
6433e18
Parent: cd03817

Omit Text Generation because it takes ages

Files changed (2)
  1. app.py +1 -1
  2. pages/Text Generation.py +22 -22
app.py CHANGED
@@ -38,7 +38,7 @@ show_pages(
     Page("pages/Text to Image.py", "Text to Image",":lower_left_paintbrush:"),
     Page("pages/Text Classification.py",'Text Classification',":book:"),
     Page("pages/Image to text.py","Image to Text",":camera:"),
-    Page("pages/Text Generation.py", "Text Generation", ":printer:"),
+    #Page("pages/Text Generation.py", "Text Generation", ":printer:"),
     ]
 )

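One alternative to commenting the registration out entirely would be to gate the slow page behind an opt-in flag, so it stays hidden by default but can still be reached when needed. A minimal sketch under that assumption, using the same st_pages API as above; the ENABLE_TEXT_GENERATION environment variable is a hypothetical name, not part of this repository:

    # Sketch only: register the expensive page behind an opt-in flag.
    # ENABLE_TEXT_GENERATION is a hypothetical variable name.
    import os
    from st_pages import Page, show_pages

    pages = [
        Page("pages/Text to Image.py", "Text to Image", ":lower_left_paintbrush:"),
        Page("pages/Text Classification.py", "Text Classification", ":book:"),
        Page("pages/Image to text.py", "Image to Text", ":camera:"),
    ]
    # Add the slow page only when explicitly requested.
    if os.getenv("ENABLE_TEXT_GENERATION") == "1":
        pages.append(Page("pages/Text Generation.py", "Text Generation", ":printer:"))
    show_pages(pages)
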
pages/Text Generation.py CHANGED
@@ -1,32 +1,32 @@
-import streamlit as st
-from PIL import Image
-import base64
-import transformers
+# import streamlit as st
+# from PIL import Image
+# import base64
+# import transformers
 
 
 
-model_name = 'Intel/neural-chat-7b-v3-1'
-model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
-tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
+# model_name = 'Intel/neural-chat-7b-v3-1'
+# model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
+# tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
 
-def generate_response(system_input, user_input):
+# def generate_response(system_input, user_input):
 
-    # Format the input using the provided template
-    prompt = f"### System:\n{system_input}\n### User:\n{user_input}\n### Assistant:\n"
+#     # Format the input using the provided template
+#     prompt = f"### System:\n{system_input}\n### User:\n{user_input}\n### Assistant:\n"
 
-    # Tokenize and encode the prompt
-    inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
+#     # Tokenize and encode the prompt
+#     inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
 
-    # Generate a response
-    outputs = model.generate(inputs, max_length=1000, num_return_sequences=1)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+#     # Generate a response
+#     outputs = model.generate(inputs, max_length=1000, num_return_sequences=1)
+#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    # Extract only the assistant's response
-    return response.split("### Assistant:\n")[-1]
+#     # Extract only the assistant's response
+#     return response.split("### Assistant:\n")[-1]
 
 
-# Example usage
-system_input = "You are a employee in the customer succes department of a company called Retraced that works in sustainability and traceability"
-prompt = st.text_input(str("Insert here you prompt?"))
-response = generate_response(system_input, prompt)
-st.write(response)
+# # Example usage
+# system_input = "You are a employee in the customer succes department of a company called Retraced that works in sustainability and traceability"
+# prompt = st.text_input(str("Insert here you prompt?"))
+# response = generate_response(system_input, prompt)
+# st.write(response)
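
If the page is restored later, the stated slowness has two parts: Streamlit re-executes the script on every interaction, so the commented-out code reloads the Intel/neural-chat-7b-v3-1 checkpoint on each rerun, and generation with a 7B model is itself slow on CPU. A minimal sketch of one fix for the first part, assuming the same model and prompt template as the original page, caches the load with st.cache_resource so it happens once per server process:

    import streamlit as st
    import transformers

    MODEL_NAME = "Intel/neural-chat-7b-v3-1"

    @st.cache_resource
    def load_model():
        # Downloading and instantiating the 7B checkpoint is the slow step;
        # st.cache_resource runs this once per process and reuses the result
        # across reruns and sessions.
        model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
        tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
        return model, tokenizer

    model, tokenizer = load_model()

    def generate_response(system_input, user_input):
        # Same prompt template as the original page.
        prompt = f"### System:\n{system_input}\n### User:\n{user_input}\n### Assistant:\n"
        inputs = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
        outputs = model.generate(inputs, max_length=1000, num_return_sequences=1)
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the assistant's part of the decoded text.
        return response.split("### Assistant:\n")[-1]

    system_input = ("You are an employee in the customer success department "
                    "of Retraced, a company working in sustainability and traceability.")
    prompt = st.text_input("Insert your prompt here")
    if prompt:
        st.write(generate_response(system_input, prompt))

Caching only removes the repeated model load; token generation itself remains slow on CPU, so a responsive page would likely also need a GPU or a smaller model.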