Asaad Almutareb committed on
Commit
25cdf03
1 Parent(s): bec8a7b

added adaptive cards

Browse files
innovation_pathfinder_ai/backend/app/schemas/adaptive_cards_schema.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ from typing import Any
3
+
4
+
5
class ICreateMediaAC(BaseModel):
    """Result of extracting a media URL from an LLM answer.

    Carries the adaptive-card element to embed plus enough metadata for
    the caller to decide how to render it.
    """

    # The adaptive-cards element (e.g. Image, Media, ImageSet); typed Any
    # because the adaptive_cards classes are not pydantic models.
    media_object: Any
    # Kind of media found: "image", "audio", "video" or "youtube_video"
    # — NOTE(review): inferred from usage in utils/adaptive_cards/cards.py.
    media_type: str
    # Source URL the media element was built from (may be "" for image sets).
    url: str
innovation_pathfinder_ai/backend/app/utils/adaptive_cards/cards.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import adaptive_cards.card_types as types
2
+ from adaptive_cards.card import AdaptiveCard
3
+ from adaptive_cards.elements import TextBlock, Image, Media
4
+ from adaptive_cards.containers import Container, ImageSet
5
+ from adaptive_cards.actions import ActionSubmit
6
+ import re
7
+
8
+ from app.schemas.adaptive_cards_schema import ICreateMediaAC
9
+
10
+
11
def custom_media(anwser):
    """Scan an answer string for a media URL and wrap it in a card element.

    Checks, in order: a direct image link, an audio link, a video link,
    a YouTube link, and finally a set of Unsplash images.  Returns an
    ICreateMediaAC describing the first match, or None when the answer
    contains no recognizable media URL.

    NOTE(review): the parameter keeps its original (misspelled) name
    "anwser" so keyword callers are not broken.
    """
    regex_http = r'https?://[^\s"]+'
    url_search_all = re.findall(regex_http, anwser)

    # --- direct image link -------------------------------------------------
    regex_image = r"\b(https?|ftp):\/\/[^\s/$.?#].[^\s]*\.(jpg|jpeg|png|gif|webp)\b"
    url_image_search = re.search(regex_image, anwser)
    if url_image_search:
        # BUG FIX: use the image match itself.  The old code read
        # `url_search.group()` (the generic http match), which could grab a
        # different URL and raised AttributeError for ftp:// image links
        # (matched by regex_image but not by regex_http).
        url_image = url_image_search.group()
        # Strip markdown-style trailing punctuation such as "...png)." / "...png)".
        if url_image.endswith(")."):
            url_image = url_image[:-2]
        elif url_image.endswith(")"):
            url_image = url_image[:-1]
        media = Image(url=url_image)
        # BUG FIX: removed a stray `return None` that made this return
        # unreachable dead code.
        return ICreateMediaAC(media_object=media, media_type="image", url=url_image)

    # --- audio link --------------------------------------------------------
    regex_audio = r"\b(https?|ftp):\/\/[^\s/$.?#].[^\s]*\.(mp3|wav|ogg)\b"
    url_search_audio = re.search(regex_audio, anwser)
    if url_search_audio:
        url_audio = url_search_audio.group()
        media = Media(
            sources=[{"mimeType": "audio/mp3", "url": url_audio}],
            poster="https://adaptivecards.io/content/poster-audio.jpg",
        )
        return ICreateMediaAC(media_object=media, media_type="audio", url=url_audio)

    # --- video link --------------------------------------------------------
    regex_video = r"\b(https?|ftp):\/\/[^\s/$.?#].[^\s]*\.(mp4|webm|ogg)\b"
    url_search_video = re.search(regex_video, anwser)
    if url_search_video:
        url_video = url_search_video.group()
        media = Media(
            sources=[{"mimeType": "video/mp4", "url": url_video}],
            poster="https://douglasgreen.com/wp-content/uploads/2014/03/video-play-btn-featured.png",
        )
        return ICreateMediaAC(media_object=media, media_type="video", url=url_video)

    # --- YouTube link (11-char video id) -----------------------------------
    regex_youtube_video = (
        r"(https?://)?(www\.)?"
        "(youtube|youtu|youtube-nocookie)\\.(com|be)/"
        "(watch\\?v=|embed/|v/|.+\\?v=)?([^&=%\\?]{11})"
    )
    url_search_youtube_video = re.search(regex_youtube_video, anwser)
    if url_search_youtube_video:
        url_youtube_video = url_search_youtube_video.group()
        media = Media(
            sources=[{"mimeType": "video/mp4", "url": url_youtube_video}],
        )
        return ICreateMediaAC(
            media_object=media, media_type="youtube_video", url=url_youtube_video
        )

    # --- fallback: an image set built from any Unsplash URLs ----------------
    if url_search_all:
        list_media_element = [
            Image(url=photo)
            for photo in url_search_all
            if "https://images.unsplash.com" in photo
        ]
        body_container_images = ImageSet(images=list_media_element)
        # BUG FIX: removed a stray `return None` that made this return
        # unreachable dead code.
        return ICreateMediaAC(
            media_object=body_container_images, media_type="image", url=""
        )

    return None
79
+
80
+
81
def create_hidden_video_card(url):
    """Build an invisible Media element pointing at *url*.

    Used to pre-load a video (e.g. the real YouTube source) without
    showing it in the card body.
    """
    video_source = {
        "mimeType": "video/mp4",
        "url": url,
    }
    return Media(sources=[video_source], is_visible=False)
91
+
92
+
93
def create_adaptive_card(answer: str, actions: list[str] | None = None) -> AdaptiveCard:
    """Build an adaptive card for *answer*, embedding any media it links to.

    Args:
        answer: The (possibly streaming) answer text to display.
        actions: Optional titles rendered as submit actions.

    Returns:
        An AdaptiveCard (version 1.5) with the answer text, an optional
        media element extracted from the text, and an optional hidden
        YouTube video element.
    """
    # BUG FIX: the old signature used a mutable default (`actions=[]`),
    # which is shared across calls; use None as the sentinel instead.
    if actions is None:
        actions = []

    custom_media_element: ICreateMediaAC | None = custom_media(answer)
    custom_media_item = (
        custom_media_element.media_object if custom_media_element else None
    )

    hidden_video_youtube = None
    if custom_media_element and custom_media_element.media_type == "youtube_video":
        hidden_video_youtube = create_hidden_video_card(custom_media_element.url)

    description_text = TextBlock(text=answer, wrap=True)
    # Drop the None placeholders so the card body only holds real elements
    # (the old code passed None entries straight into Container).
    items = [
        element
        for element in (description_text, custom_media_item, hidden_video_youtube)
        if element is not None
    ]
    body_container = Container(items=items)

    # One submit action per provided title (renamed to avoid shadowing the
    # `actions` parameter).
    submit_actions = [ActionSubmit(title=action) for action in actions]

    return AdaptiveCard(body=[body_container], actions=submit_actions, version="1.5")
122
+
123
+
124
def create_image_card(image_url: str) -> AdaptiveCard:
    """Return an adaptive card whose body is a single image."""
    picture = Image(url=image_url)
    return AdaptiveCard(body=[Container(items=[picture])], version="1.5")
130
+
131
+
132
def create_loading_card(image_url: str) -> AdaptiveCard:
    """Return an adaptive card showing a small, left-aligned loading image."""
    spinner = Image(
        url=image_url,
        size="small",
        horizontal_alignment=types.HorizontalAlignment.LEFT,
    )
    loading_body = Container(items=[spinner])
    return AdaptiveCard(body=[loading_body], version="1.5")
innovation_pathfinder_ai/backend/app/utils/callback.py CHANGED
@@ -1,4 +1,6 @@
1
  from app.schemas.message_schema import IChatResponse
 
 
2
  from langchain.callbacks.base import AsyncCallbackHandler
3
  from app.utils.utils import generate_uuid
4
  from fastapi import WebSocket
@@ -51,6 +53,10 @@ class CustomAsyncCallbackHandler(AsyncCallbackHandler):
51
  self.message_id: str = message_id
52
  self.text: str = ""
53
  self.started: bool = False
 
 
 
 
54
 
55
  if answer_prefix_tokens is None:
56
  self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
@@ -88,6 +94,7 @@ class CustomAsyncCallbackHandler(AsyncCallbackHandler):
88
  self.append_to_last_tokens(token)
89
 
90
  self.text += f"{token}"
 
91
  resp = IChatResponse(
92
  # id=generate_uuid(),
93
  id="",
@@ -96,7 +103,7 @@ class CustomAsyncCallbackHandler(AsyncCallbackHandler):
96
  message=self.adaptive_card.to_dict(),
97
  type="stream",
98
  )
99
- await self.websocket.send_json(resp.dict())
100
 
101
  async def on_llm_end(
102
  self,
@@ -115,4 +122,157 @@ class CustomAsyncCallbackHandler(AsyncCallbackHandler):
115
  message=self.adaptive_card.to_dict(),
116
  type="end",
117
  )
118
- await self.websocket.send_json(resp.dict())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from app.schemas.message_schema import IChatResponse
2
+ from app.utils.adaptive_cards.cards import create_adaptive_card, create_image_card
3
+ from app.utils.chains import get_suggestions_questions
4
  from langchain.callbacks.base import AsyncCallbackHandler
5
  from app.utils.utils import generate_uuid
6
  from fastapi import WebSocket
 
53
  self.message_id: str = message_id
54
  self.text: str = ""
55
  self.started: bool = False
56
+ self.loading_card = create_image_card(
57
+ "https://res.cloudinary.com/dnv0qwkrk/image/upload/v1691005682/Alita/Ellipsis-2.4s-81px_1_nja8hq.gif"
58
+ )
59
+ self.adaptive_card = self.loading_card
60
 
61
  if answer_prefix_tokens is None:
62
  self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
 
94
  self.append_to_last_tokens(token)
95
 
96
  self.text += f"{token}"
97
+ self.adaptive_card = create_adaptive_card(self.text)
98
  resp = IChatResponse(
99
  # id=generate_uuid(),
100
  id="",
 
103
  message=self.adaptive_card.to_dict(),
104
  type="stream",
105
  )
106
+ await self.websocket.send_json(resp.model_dump())
107
 
108
  async def on_llm_end(
109
  self,
 
122
  message=self.adaptive_card.to_dict(),
123
  type="end",
124
  )
125
+ await self.websocket.send_json(resp.model_dump())
126
+
127
+
128
class CustomFinalStreamingStdOutCallbackHandler(AsyncCallbackHandler):
    """Callback handler for streaming in agents.

    Only works with agents using LLMs that support streaming; only the
    final output of the agent is streamed, rendered as adaptive cards
    over the websocket.
    """

    def append_to_last_tokens(self, token: str) -> None:
        """Push *token* onto the rolling window of the most recent tokens."""
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        # Keep the window exactly as long as the answer prefix.
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    def check_if_answer_reached(self) -> bool:
        """Return True once the recent tokens match the answer prefix."""
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        return self.last_tokens == self.answer_prefix_tokens

    def update_message_id(self, message_id: str | None = None):
        """Set the message id, generating a fresh one when omitted.

        BUG FIX: the old default `generate_uuid()` was evaluated once at
        function-definition time, so every call without an argument reused
        the same id.
        """
        self.message_id = message_id if message_id is not None else generate_uuid()

    def __init__(
        self,
        websocket: WebSocket,
        *,
        message_id: str | None = None,
        answer_prefix_tokens: list[str] | None = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False,
    ) -> None:
        """Instantiate the handler.

        Args:
            websocket: Connection the adaptive-card responses are sent on.
            message_id: Id for outgoing messages; a fresh uuid is generated
                when omitted (BUG FIX: the old `= generate_uuid()` default
                was evaluated once at import, so all instances shared one id).
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is DEFAULT_ANSWER_PREFIX_TOKENS.
            strip_tokens: Ignore whitespace/newlines when comparing
                answer_prefix_tokens to the last tokens.
            stream_prefix: Should the answer prefix itself also be streamed?
        """
        self.websocket: WebSocket = websocket
        self.message_id: str = (
            message_id if message_id is not None else generate_uuid()
        )
        self.text: str = ""
        self.started: bool = False
        # Placeholder card shown while the agent is still working.
        self.loading_card = create_image_card(
            "https://res.cloudinary.com/dnv0qwkrk/image/upload/v1691005682/Alita/Ellipsis-2.4s-81px_1_nja8hq.gif"
        )
        self.adaptive_card = self.loading_card

        if answer_prefix_tokens is None:
            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
        else:
            self.answer_prefix_tokens = answer_prefix_tokens
        if strip_tokens:
            self.answer_prefix_tokens_stripped = [
                token.strip() for token in self.answer_prefix_tokens
            ]
        else:
            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
        self.last_tokens = [""] * len(self.answer_prefix_tokens)
        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
        self.strip_tokens = strip_tokens
        self.stream_prefix = stream_prefix
        self.answer_reached = False

    async def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running: send the loading card once."""
        if not self.started:
            self.started = True
            resp = IChatResponse(
                id="",
                message_id=self.message_id,
                sender="bot",
                message=self.loading_card.to_dict(),
                type="start",
            )
            await self.websocket.send_json(resp.model_dump())

    async def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on agent end: send the final card plus suggested questions."""
        # Fall back to the agent's return value when nothing was streamed.
        message: str = (
            self.text if self.text != "" else finish.return_values["output"]
        )
        self.adaptive_card = create_adaptive_card(message)

        resp = IChatResponse(
            id="",
            message_id=self.message_id,
            sender="bot",
            message=self.adaptive_card.to_dict(),
            type="stream",
        )
        # CONSISTENCY FIX: use pydantic v2 `model_dump()` like every other
        # send in this file (`.dict()` is deprecated in pydantic v2).
        await self.websocket.send_json(resp.model_dump())

        suggested_responses = await get_suggestions_questions(message)
        # NOTE(review): no "end" message is sent when there are no
        # suggestions — confirm this is intentional.
        if len(suggested_responses) > 0:
            self.adaptive_card = create_adaptive_card(
                answer=message,
            )
            medium_resp = IChatResponse(
                id="",
                message_id=self.message_id,
                sender="bot",
                message=self.adaptive_card.to_dict(),
                type="end",
                suggested_responses=suggested_responses,
            )
            await self.websocket.send_json(medium_resp.model_dump())

        # Reset streaming state for the next run.
        self.text = ""
        self.answer_reached = False
        self.started = False

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        # Remember the last n tokens, where n = len(answer_prefix_tokens)
        self.append_to_last_tokens(token)

        # Check if the last n tokens match the answer_prefix_tokens list ...
        if self.check_if_answer_reached():
            self.answer_reached = True
            return

        # ... if yes, then stream the tokens from now on.
        if self.answer_reached:
            self.text += f"{token}"
            self.adaptive_card = create_adaptive_card(self.text)

            resp = IChatResponse(
                id="",
                message_id=self.message_id,
                sender="bot",
                message=self.adaptive_card.to_dict(),
                type="stream",
            )
            await self.websocket.send_json(resp.model_dump())
innovation_pathfinder_ai/backend/app/utils/chains.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain import LLMChain, PromptTemplate
2
+ from langchain_community.llms import HuggingFaceEndpoint
3
+
4
+ import re
5
+
6
+
7
async def get_suggestions_questions(input: str) -> list[str]:
    """Get suggestions questions.

    Classifies *input* first; when it is not a farewell, asks the LLM
    for three follow-up questions and returns them (empty list otherwise).
    """
    llm = HuggingFaceEndpoint(
        repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
        temperature=0.1,
        max_new_tokens=1024,
        repetition_penalty=1.2,
        return_full_text=False,
    )

    # Guard: skip suggestion generation for farewell messages.
    farewell_prompt = PromptTemplate(
        input_variables=["input"],
        template="Determinate if the '{input}' is related to the topic of farewell and return True or False",
    )
    farewell_verdict = await LLMChain(llm=llm, prompt=farewell_prompt).arun(input)
    if "False" not in farewell_verdict:
        return []

    suggestions_prompt = PromptTemplate(
        input_variables=["input"],
        template="Create three good suggestions questions about this topic of: {input}. Return the suggestions like a list.",
    )
    raw_suggestions = await LLMChain(llm=llm, prompt=suggestions_prompt).arun(input)

    # Pull the numbered questions out of the model's free-form list.
    return re.findall(r"\d+\.\s(.*?\?)", raw_suggestions)[:3]
innovation_pathfinder_ai/frontend/app.py CHANGED
@@ -57,6 +57,7 @@ if __name__ == "__main__":
57
  # response_w_sources = response['output']+"\n\n\n Sources: \n\n\n Internal knowledge base"
58
  # else:
59
  # response_w_sources = response['output']+"\n\n\n Sources: \n\n\n"+src_list
 
60
  history[-1][1] = response['output']
61
  # all_sources.clear()
62
  return history
@@ -69,7 +70,8 @@ if __name__ == "__main__":
69
  "message": question,
70
  "history": history
71
  }
72
- await websocket.send(json.dumps(message_data))
 
73
 
74
  # Wait for the response
75
  response_data = await websocket.recv()
@@ -135,8 +137,7 @@ if __name__ == "__main__":
135
  with gr.Accordion("Open for More!", open=False):
136
  gr.Markdown("Nothing yet...")
137
 
138
- demo.queue()
139
- demo.launch(debug=True, favicon_path="assets/favicon.ico", share=True)
140
 
141
  x = 0 # for debugging purposes
142
  app = gr.mount_gradio_app(app, demo, path="/")
 
57
  # response_w_sources = response['output']+"\n\n\n Sources: \n\n\n Internal knowledge base"
58
  # else:
59
  # response_w_sources = response['output']+"\n\n\n Sources: \n\n\n"+src_list
60
+ print(response)
61
  history[-1][1] = response['output']
62
  # all_sources.clear()
63
  return history
 
70
  "message": question,
71
  "history": history
72
  }
73
+ json_data = json.dumps(message_data)
74
+ await websocket.send(json_data)
75
 
76
  # Wait for the response
77
  response_data = await websocket.recv()
 
137
  with gr.Accordion("Open for More!", open=False):
138
  gr.Markdown("Nothing yet...")
139
 
140
+ demo.queue().launch(debug=True, favicon_path="assets/favicon.ico", share=True)
 
141
 
142
  x = 0 # for debugging purposes
143
  app = gr.mount_gradio_app(app, demo, path="/")
requirements.txt CHANGED
@@ -13,4 +13,6 @@ sqlmodel
13
  rich
14
  fastapi
15
  uvicorn
16
- sentence-transformers
 
 
 
13
  rich
14
  fastapi
15
  uvicorn
16
+ sentence-transformers
17
+ fastapi-pagination
18
+ adaptive-cards-py