sainathBelagavi committed on
Commit 07426b3
1 Parent(s): fc591d0

Update app.py

Files changed (1)
  1. app.py +48 -37
app.py CHANGED
@@ -33,16 +33,19 @@ model_info = {
 def format_promt(message, conversation_history, custom_instructions=None):
     prompt = ""
     if custom_instructions:
-        prompt += f"[INST] {custom_instructions} [/INST]"
+        prompt += f"[INST] {custom_instructions} [/INST]\n"
 
     # Add conversation history to the prompt
     prompt += "[CONV_HISTORY]\n"
     for role, content in conversation_history:
         prompt += f"{role.upper()}: {content}\n"
-    prompt += "[/CONV_HISTORY]"
+    prompt += "[/CONV_HISTORY]\n"
 
     # Add the current message
-    prompt += f"[INST] {message} [/INST]"
+    prompt += f"[INST] {message} [/INST]\n"
+
+    # Add the response format
+    prompt += "[RESPONSE]\n"
 
     return prompt
 
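For reference, here is what the updated `format_promt` produces. The function body below is copied from the new version above so the snippet runs on its own; the two history turns and the final question are invented for illustration:

```python
# New-format prompt builder, verbatim from this commit.
def format_promt(message, conversation_history, custom_instructions=None):
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]\n"

    # Add conversation history to the prompt
    prompt += "[CONV_HISTORY]\n"
    for role, content in conversation_history:
        prompt += f"{role.upper()}: {content}\n"
    prompt += "[/CONV_HISTORY]\n"

    # Add the current message
    prompt += f"[INST] {message} [/INST]\n"

    # Add the response format
    prompt += "[RESPONSE]\n"

    return prompt

# Invented sample data; app.py builds the same (role, content) tuples
# from st.session_state.messages before each generation.
history = [("user", "Hi there"), ("assistant", "Hello! How can I help?")]
print(format_promt("Tell me a joke", history, "Act like a Human in conversation"))
# [INST] Act like a Human in conversation [/INST]
# [CONV_HISTORY]
# USER: Hi there
# ASSISTANT: Hello! How can I help?
# [/CONV_HISTORY]
# [INST] Tell me a joke [/INST]
# [RESPONSE]
```

The `[CONV_HISTORY]` and `[RESPONSE]` tags are this app's own ad-hoc markup layered on top of `[INST]`-style instruction tags; the trailing `[RESPONSE]\n` is the cue after which the model is expected to continue.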
@@ -52,7 +55,7 @@ def reset_conversation():
     '''
     st.session_state.conversation = []
     st.session_state.messages = []
-    st.experimental_rerun() # Add this line to rerun the app after reset
+    st.session_state.chat_state = "reset"
 
 def load_conversation_history():
     history_file = "conversation_history.pickle"
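Moving `st.experimental_rerun()` out of `reset_conversation` sidesteps a Streamlit quirk: if the function is registered as a button callback, a rerun requested from inside the callback is ignored with a no-op warning. The function now only records the intent in `st.session_state.chat_state`; the actual rerun is issued from the top level of the script, in the `reset` branch added at the end of this diff.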
@@ -86,6 +89,9 @@ if st.session_state.prev_option != selected_model:
     st.session_state.messages = []
     st.session_state.prev_option = selected_model
 
+if "chat_state" not in st.session_state:
+    st.session_state.chat_state = "normal"
+
 # Load the conversation history from the file
 if "messages" not in st.session_state:
     st.session_state.messages = load_conversation_history()
@@ -93,37 +99,42 @@ if "messages" not in st.session_state:
 repo_id = model_links[selected_model]
 st.subheader(f'{selected_model}')
 
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])
-
-if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
-    custom_instruction = "Act like a Human in conversation"
-    with st.chat_message("user"):
-        st.markdown(prompt)
-
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
-
-    formated_text = format_promt(prompt, conversation_history, custom_instruction)
-
-    with st.chat_message("assistant"):
-        client = InferenceClient(
-            model=model_links[selected_model], )
-        max_new_tokens = 2048 # Adjust this value as needed
-        try:
-            output = client.text_generation(
-                formated_text,
-                temperature=temp_values,
-                max_new_tokens=max_new_tokens,
-                stream=True
-            )
-            response = st.write_stream(output)
-        except ValueError as e:
-            if "Input validation error" in str(e):
-                st.error("Error: The input prompt is too long. Please try a shorter prompt.")
+if st.session_state.chat_state == "normal":
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    if prompt := st.chat_input(f"Hi I'm {selected_model}, How can I help you today?"):
+        custom_instruction = "Act like a Human in conversation"
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        conversation_history = [(message["role"], message["content"]) for message in st.session_state.messages]
+
+        formated_text = format_promt(prompt, conversation_history, custom_instruction)
+
+        with st.chat_message("assistant"):
+            client = InferenceClient(
+                model=model_links[selected_model], )
+            max_new_tokens = 2048 # Adjust this value as needed
+            try:
+                output = client.text_generation(
+                    formated_text,
+                    temperature=temp_values,
+                    max_new_tokens=max_new_tokens,
+                    stream=True
+                )
+                response = st.write_stream(output)
+            except ValueError as e:
+                if "Input validation error" in str(e):
+                    st.error("Error: The input prompt is too long. Please try a shorter prompt.")
+                else:
+                    st.error(f"An error occurred: {e}")
             else:
-                st.error(f"An error occurred: {e}")
-        else:
-            st.session_state.messages.append({"role": "assistant", "content": response})
-            save_conversation_history(st.session_state.messages)
+                st.session_state.messages.append({"role": "assistant", "content": response})
+                save_conversation_history(st.session_state.messages)
+
+elif st.session_state.chat_state == "reset":
+    st.session_state.chat_state = "normal"
+    st.experimental_rerun()
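Taken together, the `chat_state` additions form a small two-state machine around the reset: the callback flags `reset`, the next script run skips drawing the chat UI, flips the flag back to `normal`, and triggers one final rerun to redraw the cleared chat. A minimal, self-contained sketch of the pattern follows; the widget label and placeholder UI are invented, and `st.experimental_rerun` matches the Streamlit version this app targets (it was later renamed `st.rerun`):

```python
import streamlit as st

if "chat_state" not in st.session_state:
    st.session_state.chat_state = "normal"
if "messages" not in st.session_state:
    st.session_state.messages = []

def reset_conversation():
    # Runs as a button callback: only record the intent here, because
    # Streamlit ignores rerun requests made from inside callbacks.
    st.session_state.messages = []
    st.session_state.chat_state = "reset"

st.sidebar.button("Reset Chat", on_click=reset_conversation)

if st.session_state.chat_state == "normal":
    st.write(st.session_state.messages)  # stand-in for the real chat UI
elif st.session_state.chat_state == "reset":
    st.session_state.chat_state = "normal"
    st.experimental_rerun()  # one extra run to redraw the cleared chat
```

One consequence of deferring the rerun this way is that a reset costs two script runs instead of one, but it works whether `reset_conversation` is invoked from a callback or from straight-line code.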