research14 committed on
Commit 66d9704
Parent: 224700e

added lftk to requirements and app

Files changed (2):
  1. app.py +25 -6
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
+from lftk import LFTK
 import time
 import os
 import openai
@@ -14,6 +15,15 @@ llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf"
 
 template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
 
+lftk_instance = LFTK()
+
+def linguistic_features(linguistic_features, message):
+    # Use LFTK to extract linguistic features
+    linguistic_features = lftk_instance.extract_features(message)
+    print('Vicuna Linguistic Features:', linguistic_features)
+
+    return linguistic_features
+
 def update_api_key(new_key):
     global api_key
     os.environ['OPENAI_API_TOKEN'] = new_key
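Note: lftk's published interface, as documented on PyPI, does not export an `LFTK` class; extraction runs over spaCy `Doc` objects through `lftk.Extractor`, so `from lftk import LFTK` and `extract_features(message)` above will likely fail at import or call time. The committed helper also shadows its own name with an unused first parameter. A minimal sketch of the documented flow, assuming a downloaded spaCy model and illustrative feature keys:

import spacy
import lftk

nlp = spacy.load("en_core_web_sm")

def extract_linguistic_features(message):
    # Run the message through spaCy, then hand the Doc to lftk's Extractor
    doc = nlp(message)
    extractor = lftk.Extractor(docs=doc)
    # extract() takes a list of lftk feature keys; the keys here are illustrative
    return extractor.extract(features=["t_word", "t_sent", "a_word_ps"])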
 
@@ -55,14 +65,12 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     chat_history.append((message, bot_message))
     return "", chat_history
 
-def vicuna_respond(tab_name, message, chat_history):
+def vicuna_respond(tab_name, message, chat_history, linguistic_features):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
     print('Vicuna - Prompt + Context:')
     print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
-    print(input_ids)
     output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
-    print(output_ids)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
     print(bot_message)
     # Remove formatted prompt from bot_message
@@ -73,7 +81,7 @@ def vicuna_respond(tab_name, message, chat_history):
     time.sleep(2)
     return tab_name, "", chat_history
 
-def llama_respond(tab_name, message, chat_history):
+def llama_respond(tab_name, message, chat_history, linguistic_features):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
     print('Llama - Prompt + Context:')
     print(formatted_prompt)
@@ -101,6 +109,9 @@ def interface():
     # prompt = template_single.format(tab_name, textbox_prompt)
 
     gr.Markdown("Strategy 1 QA-Based Prompting")
+
+    linguistic_features_textbox = gr.Textbox(label="Linguistic Features", disabled=True)
+
     with gr.Row():
         vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
         llama_S1_chatbot = gr.Chatbot(label="llama-7b")
@@ -125,8 +136,16 @@ def interface():
 
     #textbox_prompt.submit(llama_respond, inputs=[textbox_prompt, llama_S1_chatbot], outputs=[textbox_prompt, llama_S1_chatbot])
 
-    btn.click(vicuna_respond, inputs=[tab_name, textbox_prompt, vicuna_S1_chatbot], outputs=[tab_name, textbox_prompt, vicuna_S1_chatbot])
-    btn.click(llama_respond, inputs=[tab_name, textbox_prompt, llama_S1_chatbot], outputs=[tab_name, textbox_prompt, llama_S1_chatbot])
+    btn.click(lambda _,
+              message=textbox_prompt: linguistic_features_textbox.update(linguistic_features(linguistic_features_textbox, message)),
+              inputs=[tab_name, textbox_prompt],
+              outputs=[linguistic_features_textbox])
+
+    btn.click(vicuna_respond, inputs=[tab_name, textbox_prompt, vicuna_S1_chatbot],
+              outputs=[tab_name, textbox_prompt, vicuna_S1_chatbot])
+
+    btn.click(llama_respond, inputs=[tab_name, textbox_prompt, llama_S1_chatbot],
+              outputs=[tab_name, textbox_prompt, llama_S1_chatbot])
 
     #api_key_btn.click(update_api_key, inputs=api_key_input)
     #btn.click(gpt_respond, inputs=[tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[tab_name, textbox_prompt, gpt_S1_chatbot])
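Note: the lambda indirection and the `linguistic_features_textbox.update(...)` call are unnecessary in Gradio's event model, where a handler simply returns the new value for each output component; `gr.Textbox` also takes `interactive=False` rather than `disabled=True`. Separately, `vicuna_respond` and `llama_respond` now declare a fourth `linguistic_features` parameter that none of the `btn.click` wirings supplies, so those callbacks will raise a TypeError unless the parameter gets a default. A simpler sketch of the first wiring (assuming Gradio 3.x and the hypothetical `extract_linguistic_features` helper from the note above):

def show_features(tab_name, message):
    # Gradio passes the current values of the listed input components, in order
    return str(extract_linguistic_features(message))

linguistic_features_textbox = gr.Textbox(label="Linguistic Features", interactive=False)
btn.click(show_features, inputs=[tab_name, textbox_prompt], outputs=[linguistic_features_textbox])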
requirements.txt CHANGED
@@ -5,4 +5,5 @@ sentencepiece
 fschat
 accelerate
 gpt2
-nltk
+nltk
+lftk
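Note: lftk performs its extraction over spaCy pipelines, so spaCy itself may be pulled in as a dependency of `lftk`, but the language model the code would load (e.g. `en_core_web_sm`) is not installed by pip and would need a separate download step in the Space; this is an assumption based on lftk's documented usage, worth verifying against its install instructions.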