Merge pull request #1 from headwayio/feature/summarize-all
lib/medical_transcription/transcriber.ex
CHANGED
@@ -6,6 +6,13 @@ defmodule MedicalTranscription.Transcriber do
 
   alias MedicalTranscription.CodeSearcher
 
+  defp get_tags_and_send_result(search_configuration, chunk, index, live_view_pid, is_summary) do
+    tags = CodeSearcher.process_chunk(search_configuration, chunk.text)
+    result = build_result(index, chunk, tags, is_summary)
+
+    send(live_view_pid, {:transcription_row, result})
+  end
+
   # Ideas for future exploration:
   # - Instead of storing the long description vectors in a binary file on disk, we could store them within a vector DB
   #   (such as pgvector or Pinecone.io)
@@ -15,13 +22,23 @@
   def stream_transcription_and_search(live_view_pid, audio_file_path) do
     search_configuration = CodeSearcher.prepare_search_configuration()
 
-    #
-
-
-
+    # audio transcription + semantic search
+    all_chunk_text =
+      audio_file_path
+      |> stream_transcription()
+      |> Enum.reduce("", fn {chunk, index}, acc ->
+        get_tags_and_send_result(search_configuration, chunk, index, live_view_pid, false)
+
+        acc <> chunk.text
+      end)
 
-
-
+    get_tags_and_send_result(
+      search_configuration,
+      %{text: all_chunk_text, start_timestamp_seconds: 0, end_timestamp_seconds: 0},
+      0,
+      live_view_pid,
+      true
+    )
   end
 
   defp stream_transcription(audio_file_path) do
@@ -30,13 +47,14 @@
     |> Stream.with_index()
   end
 
-  defp build_result(index, chunk, tags) do
+  defp build_result(index, chunk, tags, is_summary) do
     %{
       id: index,
       start_mark: format_timestamp(chunk.start_timestamp_seconds),
       end_mark: format_timestamp(chunk.end_timestamp_seconds),
       text: chunk.text,
-      tags: tags
+      tags: tags,
+      is_summary: is_summary
     }
   end
 
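Each row, including the final summary row, reaches the caller via send(live_view_pid, {:transcription_row, result}). As a rough sketch of the receiving side (the module name, assigns, and callback shapes below are assumptions for illustration, not code from this PR), a LiveView could pattern match on is_summary to keep the running transcript and the whole-recording summary separate:

defmodule MedicalTranscriptionWeb.TranscriptionLive do
  # Hypothetical consumer sketch; the app's real LiveView may be organized differently.
  use Phoenix.LiveView

  def mount(_params, _session, socket) do
    {:ok, assign(socket, rows: [], summary: nil)}
  end

  # The summary row is built from the concatenated chunk text and is
  # flagged with is_summary: true by build_result/4.
  def handle_info({:transcription_row, %{is_summary: true} = row}, socket) do
    {:noreply, assign(socket, summary: row)}
  end

  # Every other row is a per-chunk transcription result with its tags.
  def handle_info({:transcription_row, row}, socket) do
    {:noreply, update(socket, :rows, &(&1 ++ [row]))}
  end
end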
lib/medical_transcription_web/components/transcription_text_component.ex
CHANGED
@@ -26,7 +26,9 @@ defmodule MedicalTranscriptionWeb.Components.TranscriptionTextComponent do
       <div class="flex gap-12 pb-10 border-b border-[#444444]/20">
         <div class="flex-1 flex flex-col gap-4">
           <p class="text-[32px] leading-normal font-semibold">
-            <%=
+            <%= if !@row.is_summary do %>
+              <%= @row.start_mark %> - <%= @row.end_mark %>
+            <% end %>
           </p>
           <p class="text-[28px] leading-normal text-type-black-tertiary">
             <.async_result :let={keywords} assign={@keywords}>
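For reference, the summary row is built with zeroed timestamps (start_timestamp_seconds: 0, end_timestamp_seconds: 0), so the new if !@row.is_summary guard is what keeps a meaningless zero-length time range out of the heading. A rough sketch of the two row shapes the component can now receive; the concrete values, and the exact output format of format_timestamp/1, are illustrative assumptions:

# Per-chunk row: the heading renders "<start_mark> - <end_mark>".
%{
  id: 4,
  start_mark: "0:45",          # assumed format_timestamp/1 output style
  end_mark: "1:10",
  text: "text of this chunk",
  tags: ["example tag"],       # whatever CodeSearcher.process_chunk/2 returned
  is_summary: false
}

# Summary row: id 0, timestamps from format_timestamp(0), text is every
# chunk's text concatenated in stream_transcription_and_search/2.
%{
  id: 0,
  start_mark: "0:00",
  end_mark: "0:00",
  text: "full transcript text",
  tags: ["example tag"],
  is_summary: true
}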