RAHMAN00700 committed
Commit f221c4b · unverified · 1 Parent(s): a551d6c

Update app.py

Files changed (1)
  1. app.py +59 -97
app.py CHANGED
@@ -6,139 +6,101 @@ from ibm_watson_machine_learning.foundation_models import Model
  from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
  from ibm_watson_machine_learning.foundation_models.utils.enums import DecodingMethods

- # Configuration: API Keys and URLs
  DISCOVERY_API_KEY = '5sSmoI6y0ZHP7D3a6Iu80neypsbK3tsUZR_VdRAb7ed2'
  WATSONX_API_KEY = 'zf-5qgRvW-_RMBGb0bQw5JPPGGj5wdYpLVypdjQxBGJz'
  WATSONX_PROJECT_ID = '32a4b026-a46a-48df-aae3-31e16caabc3b'
  DISCOVERY_SERVICE_URL = 'https://api.us-south.discovery.watson.cloud.ibm.com/instances/62dc0387-6c6f-4128-b479-00cf5dea09ef'
-
- # Watsonx Model Setup
  url = "https://us-south.ml.cloud.ibm.com"
- DEFAULT_MODEL = "meta-llama/llama-3-1-70b-instruct"
- max_tokens = 100
- min_tokens = 20
- decoding = DecodingMethods.GREEDY
- temperature = 0.7

- # IBM Watson Discovery Setup
- discovery_authenticator = IAMAuthenticator(DISCOVERY_API_KEY)
  discovery = DiscoveryV2(
      version='2020-08-30',
-     authenticator=discovery_authenticator
  )
  discovery.set_service_url(DISCOVERY_SERVICE_URL)

- # Define the model generator function
  def get_model():
      generate_params = {
          GenParams.MAX_NEW_TOKENS: max_tokens,
-         GenParams.MIN_NEW_TOKENS: min_tokens,
-         GenParams.DECODING_METHOD: decoding,
          GenParams.TEMPERATURE: temperature,
      }
      model = Model(
-         model_id=DEFAULT_MODEL,
          params=generate_params,
          credentials={"apikey": WATSONX_API_KEY, "url": url},
          project_id=WATSONX_PROJECT_ID
      )
      return model

- # Function to query Watson Discovery and Watsonx Model
  def get_answer(question):
      try:
          response = discovery.query(
              project_id='016da9fc-26f5-464a-a0b8-c9b0b9da83c7',
              collection_ids=['1d91d603-cd71-5cf5-0000-019325bcd328'],
-             passages={'enabled': True, 'max_per_document': 5, 'find_answers': True},
              natural_language_query=question
          ).get_result()
-
          passages = response['results'][0].get('document_passages', [])
          context = '\n'.join([p['passage_text'] for p in passages]) or "No relevant information found."
-
-         prompt = (
-             "<s>[INST] <<SYS>> "
-             "Answer the question briefly. If you can't, say 'unanswerable'. "
-             "Question: " + question + '<</SYS>>' + context + '[/INST]'
-         )
-
          model = get_model()
          generated_response = model.generate(prompt)
          return generated_response['results'][0]['generated_text']
      except Exception as e:
          return f"Error: {str(e)}"

- # Streamlit UI setup
- st.set_page_config(page_title="Advanced AI Chat", layout="wide")
- st.markdown("""
- <style>
- .chat-container {
-     max-width: 800px;
-     margin: 0 auto;
-     padding: 20px;
- }
- .chat-bubble {
-     padding: 10px 15px;
-     border-radius: 10px;
-     margin-bottom: 10px;
-     font-size: 16px;
-     line-height: 1.5;
- }
- .user-question {
-     background-color: #007bff;
-     color: white;
-     align-self: flex-end;
- }
- .ai-response {
-     background-color: #f1f1f1;
-     color: black;
- }
- .input-container {
-     display: flex;
-     justify-content: center;
-     position: fixed;
-     bottom: 10px;
-     width: 100%;
- }
- .input-box {
-     width: 70%;
-     padding: 10px;
-     border-radius: 20px;
-     border: 1px solid #ddd;
-     outline: none;
-     font-size: 16px;
- }
- .send-button {
-     padding: 10px 20px;
-     background-color: #007bff;
-     color: white;
-     border: none;
-     border-radius: 20px;
-     cursor: pointer;
-     font-size: 16px;
-     margin-left: 10px;
- }
- .send-button:hover {
-     background-color: #0056b3;
- }
- </style>
- """, unsafe_allow_html=True)
-
- st.markdown('<div class="chat-container">', unsafe_allow_html=True)
-
- if 'history' not in st.session_state:
-     st.session_state['history'] = []
-
- question = st.text_input("", placeholder="Type your question here...", key="input")
-
- if st.button("Send"):
-     if question.strip():
-         response = get_answer(question.strip())
-         st.session_state['history'].append({"question": question, "answer": response})
-
- for entry in st.session_state['history']:
-     st.markdown(f'<div class="chat-bubble user-question">{entry["question"]}</div>', unsafe_allow_html=True)
-     st.markdown(f'<div class="chat-bubble ai-response">{entry["answer"]}</div>', unsafe_allow_html=True)

- st.markdown('</div>', unsafe_allow_html=True)
 
  from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
  from ibm_watson_machine_learning.foundation_models.utils.enums import DecodingMethods

+ # Configuration
  DISCOVERY_API_KEY = '5sSmoI6y0ZHP7D3a6Iu80neypsbK3tsUZR_VdRAb7ed2'
  WATSONX_API_KEY = 'zf-5qgRvW-_RMBGb0bQw5JPPGGj5wdYpLVypdjQxBGJz'
  WATSONX_PROJECT_ID = '32a4b026-a46a-48df-aae3-31e16caabc3b'
  DISCOVERY_SERVICE_URL = 'https://api.us-south.discovery.watson.cloud.ibm.com/instances/62dc0387-6c6f-4128-b479-00cf5dea09ef'
  url = "https://us-south.ml.cloud.ibm.com"

+ # Streamlit Config
+ st.set_page_config(page_title="Watsonx Advanced UI", layout="wide")
+
+ # Sidebar - Model Selection
+ st.sidebar.title("Settings")
+ model_type = st.sidebar.selectbox(
+     "Select Model",
+     ["meta-llama/llama-3-1-70b-instruct", "gpt-j-6b-instruct", "gpt-neo-2-7b"]
+ )
+ max_tokens = st.sidebar.slider("Max Tokens", 50, 4000, 1000)
+ temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7)
+
+ # Watson Discovery Setup
+ authenticator = IAMAuthenticator(DISCOVERY_API_KEY)
  discovery = DiscoveryV2(
      version='2020-08-30',
+     authenticator=authenticator
  )
  discovery.set_service_url(DISCOVERY_SERVICE_URL)
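As a quick sanity check, the Discovery client configured above can be exercised with the same query call that get_answer() makes further down; the project and collection IDs are the ones hard-coded in this file, and the query text is illustrative only.

# Illustrative connectivity check; mirrors the discovery.query(...) call used in get_answer() below.
check = discovery.query(
    project_id='016da9fc-26f5-464a-a0b8-c9b0b9da83c7',
    collection_ids=['1d91d603-cd71-5cf5-0000-019325bcd328'],
    natural_language_query='test'
).get_result()
print(f"{len(check.get('results', []))} matching documents")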

+ # Watsonx Model Setup
  def get_model():
      generate_params = {
          GenParams.MAX_NEW_TOKENS: max_tokens,
+         GenParams.MIN_NEW_TOKENS: 20,
+         GenParams.DECODING_METHOD: DecodingMethods.GREEDY,
          GenParams.TEMPERATURE: temperature,
      }
      model = Model(
+         model_id=model_type,
          params=generate_params,
          credentials={"apikey": WATSONX_API_KEY, "url": url},
          project_id=WATSONX_PROJECT_ID
      )
      return model
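For orientation, a minimal sketch of how the Model returned by get_model() is consumed later in this file; the prompt string here is illustrative only, and get_answer() below builds the real prompt from Discovery passages.

# Illustrative usage of the generate() call and result structure seen in get_answer().
model = get_model()
result = model.generate("<s>[INST] Say hello in one short sentence. [/INST]")
print(result['results'][0]['generated_text'])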

+ # Chat UI Container
+ st.markdown("""
+ <style>
+ .main-chat-container { max-width: 800px; margin: 0 auto; }
+ .chat-bubble { padding: 12px; border-radius: 12px; margin-bottom: 10px; font-size: 16px; }
+ .user-message { background-color: #0078D7; color: white; text-align: right; }
+ .ai-message { background-color: #444; color: white; }
+ .prompt-buttons { display: flex; gap: 10px; }
+ .prompt-button { padding: 10px; border-radius: 10px; background-color: #0078D7; color: white; cursor: pointer; }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Suggested Prompts
+ st.markdown("<h1 style='text-align: center;'>Watsonx Advanced UI</h1>", unsafe_allow_html=True)
+ st.markdown("<div class='main-chat-container'>", unsafe_allow_html=True)
+ suggested_prompts = ["Help me study", "Tell me a fun fact", "Overcome procrastination", "Give me ideas"]
+ st.markdown("<div class='prompt-buttons'>", unsafe_allow_html=True)
+ clicked_prompt = ""
+ for prompt in suggested_prompts:
+     if st.button(prompt):
+         clicked_prompt = prompt
+ st.markdown("</div>", unsafe_allow_html=True)
+
+ # Text Input: a clicked suggested prompt takes precedence, otherwise use the typed text
+ typed_question = st.text_input("Ask your question here...")
+ question = clicked_prompt or typed_question
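Streamlit reruns the whole script on every interaction, so a clicked suggested prompt is only visible during the rerun in which its button returns True. If the prompt should persist across later reruns, one option (not part of this commit; the pending_prompt key and button keys are illustrative) is to stash it in st.session_state:

# Hypothetical alternative: remember the last clicked suggested prompt across reruns.
if 'pending_prompt' not in st.session_state:
    st.session_state['pending_prompt'] = ""
for prompt in suggested_prompts:
    if st.button(prompt, key=f"suggested-{prompt}"):
        st.session_state['pending_prompt'] = prompt
question = st.text_input("Ask your question here...") or st.session_state['pending_prompt']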
+
+ # Get Answer Function
  def get_answer(question):
      try:
+         # Watson Discovery Query
          response = discovery.query(
              project_id='016da9fc-26f5-464a-a0b8-c9b0b9da83c7',
              collection_ids=['1d91d603-cd71-5cf5-0000-019325bcd328'],
+             passages={'enabled': True},
              natural_language_query=question
          ).get_result()
+
+         # Extract Context
          passages = response['results'][0].get('document_passages', [])
          context = '\n'.join([p['passage_text'] for p in passages]) or "No relevant information found."
+
+         # Watsonx Prompt (Llama-style [INST] template; note the closing <</SYS>> tag)
+         prompt = f"<s>[INST] <<SYS>> Please answer the question in a concise manner: {question} <</SYS>> {context} [/INST]"
          model = get_model()
          generated_response = model.generate(prompt)
          return generated_response['results'][0]['generated_text']
      except Exception as e:
          return f"Error: {str(e)}"
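For reference, the indexing in get_answer() assumes a Discovery v2 query result shaped roughly like the illustrative dictionary below; only the keys the function actually reads are shown, and the passage text is made up.

# Illustrative result shape; real responses carry many additional fields.
sample_response = {
    'results': [
        {
            'document_passages': [
                {'passage_text': 'Watson Discovery indexes enterprise documents.'},
                {'passage_text': 'Passages are short, relevant excerpts from a document.'}
            ]
        }
    ]
}
passages = sample_response['results'][0].get('document_passages', [])
context = '\n'.join(p['passage_text'] for p in passages) or "No relevant information found."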

+ # Display Chat
+ if question:
+     answer = get_answer(question)
+     st.markdown(f"<div class='chat-bubble user-message'>{question}</div>", unsafe_allow_html=True)
+     st.markdown(f"<div class='chat-bubble ai-message'>{answer}</div>", unsafe_allow_html=True)

+ st.markdown("</div>", unsafe_allow_html=True)
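Assuming the dependencies this file imports (streamlit, ibm-watson, ibm-watson-machine-learning) are installed and the hard-coded credentials are still valid, the retrieval-plus-generation path can be smoke-tested without the UI; the question string is illustrative.

# Illustrative smoke test of the end-to-end path used by the chat UI.
print(get_answer("What topics does the indexed collection cover?"))

The full interface itself is launched with streamlit run app.py.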