Gabriel committed
Commit: b76cf08
Parent: 0b149d1

move parts of the code to tabs

README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: HTR Pipeline
+ title: HTRFLOW
  emoji: 🏢
  colorFrom: purple
  colorTo: green
app.py CHANGED
@@ -1,89 +1,44 @@
  import os
 
  import gradio as gr
- from apscheduler.schedulers.background import BackgroundScheduler
 
  from helper.gradio_config import css, theme
- from helper.text.text_about import TextAbout
  from helper.text.text_app import TextApp
- from helper.text.text_howto import TextHowTo
- from helper.text.text_roadmap import TextRoadmap
- from helper.utils import add_ip_data, backup_db
+ from helper.utils import TrafficDataHandler
+ from tabs.about_tab import about_tab
+ from tabs.help_tab import help_tab
  from tabs.htr_tool import htr_tool_tab
  from tabs.stepwise_htr_tool import stepwise_htr_tool_tab
 
- SECRET_KEY = os.environ.get("AM_I_IN_A_DOCKER_CONTAINER", False)
+ handler = TrafficDataHandler()
 
- if SECRET_KEY:
-     scheduler = BackgroundScheduler()
-     scheduler.add_job(func=backup_db, trigger="interval", seconds=60)
-     scheduler.start()
+ VERSION = "Demo version 0.0.2"
 
-
- with gr.Blocks(title="HTR Riksarkivet", theme=theme, css=css) as demo:
+ with gr.Blocks(title="Riksarkivet", theme=theme, css=css) as demo:
      with gr.Row():
          with gr.Column(scale=1):
-             text_ip_output = gr.Markdown()
+             text_ip_output = gr.Markdown(VERSION)
          with gr.Column(scale=1):
              gr.Markdown(TextApp.title_markdown)
          with gr.Column(scale=1):
              gr.Markdown(TextApp.title_markdown_img)
 
      with gr.Tabs():
-         with gr.Tab("HTR Tool"):
+         with gr.Tab("Fast track"):
              htr_tool_tab.render()
 
-         with gr.Tab("Stepwise HTR Tool"):
+         with gr.Tab("Stepwise"):
              stepwise_htr_tool_tab.render()
 
-         with gr.Tab("About"):
-             with gr.Tabs():
-                 with gr.Tab("Project"):
-                     with gr.Row():
-                         with gr.Column():
-                             gr.Markdown(TextAbout.intro_text)
-                         with gr.Column():
-                             gr.Markdown(TextAbout.text_src_code_data_models)
-                     with gr.Row():
-                         gr.Markdown(TextAbout.pipeline_overview_text)
-                     with gr.Row():
-                         with gr.Tabs():
-                             with gr.Tab("I. Binarization"):
-                                 gr.Markdown(TextAbout.binarization)
-                             with gr.Tab("II. Region Segmentation"):
-                                 gr.Markdown(TextAbout.text_region_segment)
-                             with gr.Tab("III. Line Segmentation"):
-                                 gr.Markdown(TextAbout.text_line_segmentation)
-                             with gr.Tab("IV. Transcriber"):
-                                 gr.Markdown(TextAbout.text_htr)
-
-                 with gr.Tab("Contribution"):
-                     with gr.Row():
-                         gr.Markdown(TextRoadmap.text_contribution)
-
-                 with gr.Tab("API & Duplicate for Privat use"):
-                     with gr.Row():
-                         with gr.Column():
-                             gr.Markdown(TextHowTo.htr_tool_api_text)
-                             gr.Code(
-                                 value=TextHowTo.code_for_api,
-                                 language="python",
-                                 interactive=False,
-                                 show_label=False,
-                             )
-                         with gr.Column():
-                             gr.Markdown(TextHowTo.duplicatin_space_htr_text)
-                             gr.Markdown(TextHowTo.figure_htr_hardware)
-                             gr.Markdown(TextHowTo.duplicatin_for_privat)
-
-                 with gr.Tab("Roadmap"):
-                     with gr.Row():
-                         with gr.Column():
-                             gr.Markdown(TextRoadmap.roadmap)
-                         with gr.Column():
-                             gr.Markdown(TextRoadmap.discussion)
+         with gr.Tab("Help"):
+             help_tab.render()
+
+         with gr.Tab("About"):
+             about_tab.render()
 
- demo.load(add_ip_data)
+ SECRET_KEY = os.environ.get("AM_I_IN_A_DOCKER_CONTAINER", False)
+ if SECRET_KEY:
+     demo.load(handler.onload_store_metric_data)
 
 
  demo.queue(concurrency_count=2, max_size=2)
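
Editor's note: the conditional `demo.load(handler.onload_store_metric_data)` above works because Gradio injects a `gr.Request` into any event handler that declares one in its signature, which is what lets the handler read `request.client.host` on page load. A minimal sketch of that mechanism (an illustration, not part of this commit; component names are made up):

import gradio as gr

def on_load(request: gr.Request):
    # Gradio fills in `request` automatically when the page loads.
    return f"Visit from host: {request.client.host}"

with gr.Blocks() as sketch_demo:
    status = gr.Markdown()
    sketch_demo.load(on_load, inputs=None, outputs=status)

sketch_demo.queue(concurrency_count=2, max_size=2)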
helper/text/text_app.py CHANGED
@@ -2,7 +2,7 @@ class TextApp:
      title_markdown = """
 
 
- <h1><center> HTRflow - Demo </center></h1>
+ <h1><center> HTRFLOW </center></h1>
 
  <h3><center> Swedish National Archives - Riksarkivet </center></h3>"""
 
helper/utils.py CHANGED
@@ -2,68 +2,89 @@ import hashlib
  import os
  import shutil
  import sqlite3
+ import uuid
  from datetime import datetime
 
  import gradio as gr
  import huggingface_hub
  import pandas as pd
  import pytz
-
-
- def hash_ip(ip_address):
-     return hashlib.sha256(ip_address.encode()).hexdigest()
-
-
- def current_time_sw():
-     swedish_tz = pytz.timezone("Europe/Stockholm")
-     return datetime.now(swedish_tz).strftime("%Y-%m-%d %H:%M:%S")
-
-
- def add_ip_data(request: gr.Request):
-     host = request.client.host
-     hashed_ip = hash_ip(host)
-
-     db = sqlite3.connect(DB_FILE)
-     cursor = db.cursor()
-     cursor.execute("INSERT INTO ip_data(current_time, hashed_ip) VALUES(?,?)", [current_time_sw(), hashed_ip])
-     db.commit()
-     db.close()
-
-
- def backup_db():
-     shutil.copyfile(DB_FILE, "./data/traffic_data.db")
-     db = sqlite3.connect(DB_FILE)
-     ip_data = db.execute("SELECT * FROM ip_data").fetchall()
-     pd.DataFrame(ip_data, columns=["id", "current_time", "hashed_ip"]).to_csv("./data/ip_data.csv", index=False)
-
-     print("updating traffic_data")
-     repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.now()}")
-
-
- DB_FILE = "./traffic_data.db"
-
- TOKEN = os.environ.get("HUB_TOKEN")
- repo = huggingface_hub.Repository(
-     local_dir="data", repo_type="dataset", clone_from="Riksarkivet/traffic_demo_data", use_auth_token=TOKEN
- )
- repo.git_pull()
-
- # Set db to latest
- shutil.copyfile("./data/traffic_data.db", DB_FILE)
-
-
- # Create table if it doesn't already exist
- db = sqlite3.connect(DB_FILE)
- try:
-     db.execute("SELECT * FROM ip_data").fetchall()
-     db.close()
- except sqlite3.OperationalError:
-     db.execute(
-         """
-         CREATE TABLE ip_data (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
-                               current_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
-                               hashed_ip TEXT)
-         """
-     )
-     db.commit()
-     db.close()
+ from apscheduler.schedulers.background import BackgroundScheduler
+
+
+ class TrafficDataHandler:
+     _DB_FILE_PATH = "./traffic_data.db"
+     _DB_TEMP_PATH = "./data/traffic_data.db"
+     _TOKEN = os.environ.get("HUB_TOKEN")
+     _TZ = "Europe/Stockholm"
+     _INTERVAL_MIN_UPDATE = 30
+
+     def __init__(self, dataset_repo="Riksarkivet/traffic_demo_data"):
+         self._repo = huggingface_hub.Repository(
+             local_dir="data", repo_type="dataset", clone_from=dataset_repo, use_auth_token=self._TOKEN
+         )
+         self._pull_repo_data()
+         self._setup_database()
+
+     def _pull_repo_data(self):
+         self._repo.git_pull()
+         shutil.copyfile(self._DB_TEMP_PATH, self._DB_FILE_PATH)
+
+     def _hash_ip(self, ip_address):
+         return hashlib.sha256(ip_address.encode()).hexdigest()
+
+     def _current_time_in_sweden(self):
+         swedish_tz = pytz.timezone(self._TZ)
+         return datetime.now(swedish_tz).strftime("%Y-%m-%d %H:%M:%S")
+
+     def onload_store_metric_data(self, request: gr.Request):
+         self._session_uuid = str(uuid.uuid1())
+         hashed_host = self._hash_ip(request.client.host)
+         self._backup_and_update_database(hashed_host, "load")
+
+     def store_metric_data(self, action, request: gr.Request):
+         self._session_uuid = str(uuid.uuid1())
+         hashed_host = self._hash_ip(request.client.host)
+         self._backup_and_update_database(hashed_host, action)
+
+     def _commit_host_to_database(self, hashed_host, action):
+         with sqlite3.connect(self._DB_FILE_PATH) as db:
+             db.execute(
+                 "INSERT INTO ip_data(current_time, hashed_ip, session_uuid, action) VALUES(?,?,?,?)",
+                 [self._current_time_in_sweden(), hashed_host, self._session_uuid, action],
+             )
+
+     def _setup_database(self):
+         with sqlite3.connect(self._DB_FILE_PATH) as db:
+             try:
+                 db.execute("SELECT * FROM ip_data").fetchall()
+             except sqlite3.OperationalError:
+                 db.execute(
+                     """
+                     CREATE TABLE ip_data (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+                                           current_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
+                                           hashed_ip TEXT,
+                                           session_uuid TEXT,
+                                           action TEXT)
+                     """
+                 )
+
+     def _backup_and_update_database(self, hashed_host, action):
+         self._commit_host_to_database(hashed_host, action)
+         shutil.copyfile(self._DB_FILE_PATH, self._DB_TEMP_PATH)
+
+         with sqlite3.connect(self._DB_FILE_PATH) as db:
+             ip_data = db.execute("SELECT * FROM ip_data").fetchall()
+             pd.DataFrame(ip_data, columns=["id", "current_time", "hashed_ip", "session_uuid", "action"]).to_csv(
+                 "./data/ip_data.csv", index=False
+             )
+
+         self._repo.push_to_hub(blocking=False, commit_message=f"Updating data at {datetime.now()}")
+
+     def _initialize_and_schedule_backup(self, hashed_host, action):
+         self._backup_and_update_database(hashed_host, action)
+         scheduler = BackgroundScheduler()
+         scheduler.add_job(
+             self._backup_and_update_database, "interval", minutes=self._INTERVAL_MIN_UPDATE, args=(hashed_host, action)
+         )
+         scheduler.start()
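
Editor's note: the refactor above replaces explicit `db.commit()` / `db.close()` calls with `with sqlite3.connect(...)` blocks. In the standard library, a connection used as a context manager commits on success and rolls back on error, but it does not close the connection. A minimal sketch of the idiom (database path, table, and value are illustrative):

import sqlite3

with sqlite3.connect("example.db") as db:
    db.execute("CREATE TABLE IF NOT EXISTS ip_data (hashed_ip TEXT)")
    db.execute("INSERT INTO ip_data(hashed_ip) VALUES (?)", ["deadbeef"])
# The transaction is committed at this point, yet the connection is still open.
db.close()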
requirements.txt CHANGED
@@ -7,7 +7,7 @@ numpy==1.25.0
  opencv-python-headless==4.7.0.72
  Jinja2==3.1.2
  transformers==4.30.2
- huggingface-hub
+ huggingface-hub==0.15.1
  datasets==2.14.5
  requests==2.31.0
  apscheduler
tabs/about_tab.py ADDED
@@ -0,0 +1,54 @@
+ import gradio as gr
+
+ from helper.text.text_about import TextAbout
+ from helper.text.text_roadmap import TextRoadmap
+
+ with gr.Blocks() as about_tab:
+     with gr.Tabs():
+         with gr.Tab("HTRFLOW"):
+             gr.Markdown(
+                 "update... todo.. here we should talk about the pipline and the app as seperate things... pipline overview perhaps be moved?"
+             )
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown(TextAbout.intro_text)
+                 with gr.Column():
+                     gr.Markdown(TextAbout.text_src_code_data_models)
+             with gr.Row():
+                 gr.Markdown(TextAbout.pipeline_overview_text)
+             with gr.Row():
+                 with gr.Tabs():
+                     with gr.Tab("1. Binarization"):
+                         gr.Markdown(TextAbout.binarization)
+                     with gr.Tab("2. Region Segmentation"):
+                         gr.Markdown(TextAbout.text_region_segment)
+                     with gr.Tab("3. Line Segmentation"):
+                         gr.Markdown(TextAbout.text_line_segmentation)
+                     with gr.Tab("4. Transcriber"):
+                         gr.Markdown(TextAbout.text_htr)
+
+         with gr.Tab("Contributions"):
+             with gr.Row():
+                 gr.Markdown(TextRoadmap.text_contribution)
+
+         # with gr.Tab("API & Duplicate for own use"):
+         #     with gr.Row():
+         #         with gr.Column():
+         #             gr.Markdown(TextHowTo.htr_tool_api_text)
+         #             gr.Code(
+         #                 value=TextHowTo.code_for_api,
+         #                 language="python",
+         #                 interactive=False,
+         #                 show_label=False,
+         #             )
+         #         with gr.Column():
+         #             gr.Markdown(TextHowTo.duplicatin_space_htr_text)
+         #             gr.Markdown(TextHowTo.figure_htr_hardware)
+         #             gr.Markdown(TextHowTo.duplicatin_for_privat)
+
+         with gr.Tab("Changelog & Roadmap"):
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown(TextRoadmap.roadmap)
+                 with gr.Column():
+                     gr.Markdown(TextRoadmap.discussion)
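
Editor's note: both new tab modules follow the pattern named in the commit message: each tab is a standalone `gr.Blocks` built at import time, then mounted into the main app with `.render()`, as app.py does with `about_tab.render()`. A minimal sketch of the pattern (module layout and names are illustrative):

import gradio as gr

# tabs/example_tab.py -- the tab is built once, at import time
with gr.Blocks() as example_tab:
    gr.Markdown("Content for this tab")

# app.py -- the prebuilt Blocks is mounted where it should appear
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.Tab("Example"):
            example_tab.render()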
tabs/help_tab.py ADDED
@@ -0,0 +1,35 @@
+ import gradio as gr
+
+ from helper.text.text_howto import TextHowTo
+
+ with gr.Blocks() as help_tab:
+     gr.Markdown("lorem ipsum...")
+     with gr.Tabs():
+         with gr.Tab("Discussion & FAQ"):
+             pass
+
+         with gr.Tab("Fast track"):
+             pass
+         with gr.Tab("Stepwise"):
+             with gr.Row():
+                 with gr.Accordion("Info", open=False) as example_accord:
+                     with gr.Row(equal_height=False):
+                         gr.Markdown(TextHowTo.stepwise_htr_tool)
+                     with gr.Row():
+                         gr.Markdown(TextHowTo.stepwise_htr_tool_tab_intro)
+                     with gr.Row():
+                         with gr.Tabs():
+                             with gr.Tab("1. Region Segmentation"):
+                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab1)
+                             with gr.Tab("2. Line Segmentation"):
+                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab2)
+                             with gr.Tab("3. Transcribe Text"):
+                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab3)
+                             with gr.Tab("4. Explore Results"):
+                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab4)
+                     gr.Markdown(TextHowTo.stepwise_htr_tool_end)
+
+         with gr.Tab("API"):
+             pass
+         with gr.Tab("Duplicating for own use"):
+             pass
tabs/htr_tool.py CHANGED
@@ -4,9 +4,7 @@ from helper.examples.examples import DemoImages
  from src.htr_pipeline.gradio_backend import FastTrack, SingletonModelLoader
 
  model_loader = SingletonModelLoader()
-
  fast_track = FastTrack(model_loader)
-
  images_for_demo = DemoImages()
 
  terminate = False
@@ -21,7 +19,7 @@ with gr.Blocks() as htr_tool_tab:
              )
 
              with gr.Row():
-                 with gr.Tab("Output and Settings") as tab_output_and_setting_selector:
+                 with gr.Tab("Run & Settings") as tab_output_and_setting_selector:
                      with gr.Row():
                          stop_htr_button = gr.Button(
                              value="Stop HTR",
@@ -41,10 +39,10 @@
                              label="Download output file", visible=True, scale=1, height=100, elem_id="download_file"
                          )
 
-                 with gr.Tab("Image Viewer") as tab_image_viewer_selector:
+                 with gr.Tab("Visualize results") as tab_image_viewer_selector:
                      with gr.Row():
                          gr.Button(
-                             value="External Image Viewer",
+                             value="Image Viewer (demo)",
                              variant="secondary",
                              link="https://huggingface.co/spaces/Riksarkivet/Viewer_demo",
                              interactive=True,
@@ -58,17 +56,34 @@
                              interactive=False, label="Text Selector", info="Select a mask on Image Viewer to return text"
                          )
 
+                 with gr.Tab("(WIP) Compare runs") as tab_model_compare_selector:
+                     with gr.Box():
+                         gr.Markdown(
+                             "Compare different runs with uploaded Ground Truth and calculate CER. You will also be able to upload output format files"
+                         )
+
+                         calc_cer_button_fast = gr.Button("Calculate CER", variant="primary", visible=True)
+
          with gr.Column(scale=4):
              with gr.Box():
                  with gr.Row(visible=True) as output_and_setting_tab:
+                     with gr.Column(scale=2):
+                         fast_name_files_placeholder = gr.Markdown(visible=False)
+                         gr.Examples(
+                             examples=images_for_demo.examples_list,
+                             inputs=[fast_name_files_placeholder, fast_track_input_region_image],
+                             label="Example images",
+                             examples_per_page=5,
+                         )
+
                      with gr.Column(scale=3):
                          with gr.Row():
                              with gr.Group():
                                  gr.Markdown(" &nbsp; ⚙️ Settings ")
                                  with gr.Row():
                                      radio_file_input = gr.CheckboxGroup(
-                                         choices=["Txt", "XML"],
-                                         value=["Txt", "XML"],
+                                         choices=["Txt", "Page XML"],
+                                         value=["Txt", "Page XML"],
                                          label="Output file extension",
                                          # info="Only txt and page xml is supported for now!",
                                          scale=1,
@@ -84,54 +99,59 @@
                                          label="Output prediction threshold",
                                          info="Output XML with prediction score",
                                      )
-                                 with gr.Row():
-                                     gr.Slider(
-                                         value=0.6,
-                                         minimum=0.5,
-                                         maximum=1,
-                                         label="HTR threshold",
-                                         info="Prediction score threshold for transcribed lines",
-                                         scale=1,
-                                     )
-                                     gr.Slider(
-                                         value=0.7,
-                                         minimum=0.6,
-                                         maximum=1,
-                                         label="Avg threshold",
-                                         info="Average prediction score for a region",
-                                         scale=1,
-                                     )
-
-                                 htr_tool_region_segment_model_dropdown = gr.Dropdown(
-                                     choices=["Riksarkivet/rtmdet_region"],
-                                     value="Riksarkivet/rtmdet_region",
-                                     label="Region Segment models",
-                                     info="Will add more models later!",
-                                 )
-
-                                 # with gr.Accordion("Transcribe settings:", open=False):
-                                 htr_tool_line_segment_model_dropdown = gr.Dropdown(
-                                     choices=["Riksarkivet/rtmdet_lines"],
-                                     value="Riksarkivet/rtmdet_lines",
-                                     label="Line Segment models",
-                                     info="Will add more models later!",
-                                 )
-
-                                 htr_tool_transcriber_model_dropdown = gr.Dropdown(
-                                     choices=["Riksarkivet/satrn_htr", "microsoft/trocr-base-handwritten"],
-                                     value="Riksarkivet/satrn_htr",
-                                     label="Transcriber models",
-                                     info="Models will be continuously updated with future additions for specific cases.",
-                                 )
 
-                     with gr.Column(scale=2):
-                         fast_name_files_placeholder = gr.Markdown(visible=False)
-                         gr.Examples(
-                             examples=images_for_demo.examples_list,
-                             inputs=[fast_name_files_placeholder, fast_track_input_region_image],
-                             label="Example images",
-                             examples_per_page=5,
-                         )
+                         with gr.Accordion("Models", open=False):
+                             with gr.Group():
+                                 with gr.Row():
+                                     htr_tool_region_segment_model_dropdown = gr.Dropdown(
+                                         choices=["Riksarkivet/rtmdet_region"],
+                                         value="Riksarkivet/rtmdet_region",
+                                         label="Region segmentation models",
+                                         info="More models will be added",
+                                     )
+
+                                     gr.Slider(
+                                         minimum=0.4,
+                                         maximum=1,
+                                         value=0.5,
+                                         step=0.05,
+                                         label="P-threshold",
+                                         info="""Filter confidence score for a prediction score to be considered""",
+                                     )
+
+                                 with gr.Row():
+                                     htr_tool_line_segment_model_dropdown = gr.Dropdown(
+                                         choices=["Riksarkivet/rtmdet_lines"],
+                                         value="Riksarkivet/rtmdet_lines",
+                                         label="Line segmentation models",
+                                         info="More models will be added",
+                                     )
+
+                                     gr.Slider(
+                                         minimum=0.4,
+                                         maximum=1,
+                                         value=0.5,
+                                         step=0.05,
+                                         label="P-threshold",
+                                         info="""Filter confidence score for a prediction score to be considered""",
+                                     )
+
+                                 with gr.Row():
+                                     htr_tool_transcriber_model_dropdown = gr.Dropdown(
+                                         choices=["Riksarkivet/satrn_htr", "microsoft/trocr-base-handwritten"],
+                                         value="Riksarkivet/satrn_htr",
+                                         label="Text recognition models",
+                                         info="More models will be added",
+                                     )
+
+                                     gr.Slider(
+                                         value=0.6,
+                                         minimum=0.5,
+                                         maximum=1,
+                                         label="HTR threshold",
+                                         info="Prediction score threshold for transcribed lines",
+                                         scale=1,
+                                     )
 
                  with gr.Row(visible=False) as image_viewer_tab:
                      text_polygon_dict = gr.Variable()
@@ -140,6 +160,43 @@
                          label="Image Viewer", type="numpy", height=600, interactive=False
                      )
 
+                 with gr.Column(visible=False) as model_compare_selector:
+                     with gr.Row():
+                         gr.Radio(
+                             choices=["Compare Page XML", "Compare different runs"],
+                             value="Compare Page XML",
+                             info="Compare different runs from HTRFLOW or with external runs, e.g with Transkibus ",
+                         )
+                     with gr.Row():
+                         gr.UploadButton(label="Run A")
+
+                         gr.UploadButton(label="Run B")
+
+                         gr.UploadButton(label="Ground Truth")
+
+                     with gr.Row():
+                         gr.HighlightedText(
+                             label="Text diff runs",
+                             combine_adjacent=True,
+                             show_legend=True,
+                             color_map={"+": "red", "-": "green"},
+                         )
+
+                     with gr.Row():
+                         gr.HighlightedText(
+                             label="Text diff ground truth",
+                             combine_adjacent=True,
+                             show_legend=True,
+                             color_map={"+": "red", "-": "green"},
+                         )
+
+                     with gr.Row():
+                         with gr.Column(scale=1):
+                             with gr.Row(equal_height=False):
+                                 cer_output_fast = gr.Textbox(label="CER:")
+                         with gr.Column(scale=2):
+                             pass
+
      xml_rendered_placeholder_for_api = gr.Textbox(visible=False)
 
      htr_event_click_event = htr_pipeline_button.click(
@@ -165,17 +222,25 @@
      )
 
      def update_selected_tab_output_and_setting():
-         return gr.update(visible=True), gr.update(visible=False)
+         return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
 
      def update_selected_tab_image_viewer():
-         return gr.update(visible=False), gr.update(visible=True)
+         return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
+
+     def update_selected_tab_model_compare():
+         return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
 
      tab_output_and_setting_selector.select(
-         fn=update_selected_tab_output_and_setting, outputs=[output_and_setting_tab, image_viewer_tab]
+         fn=update_selected_tab_output_and_setting,
+         outputs=[output_and_setting_tab, image_viewer_tab, model_compare_selector],
      )
 
      tab_image_viewer_selector.select(
-         fn=update_selected_tab_image_viewer, outputs=[output_and_setting_tab, image_viewer_tab]
+         fn=update_selected_tab_image_viewer, outputs=[output_and_setting_tab, image_viewer_tab, model_compare_selector]
+     )
+
+     tab_model_compare_selector.select(
+         fn=update_selected_tab_model_compare, outputs=[output_and_setting_tab, image_viewer_tab, model_compare_selector]
      )
 
      def stop_function():
@@ -195,3 +260,5 @@
      fast_track_output_image.select(
          fast_track.get_text_from_coords, inputs=text_polygon_dict, outputs=selection_text_from_image_viewer
      )
+
+     htr_pipeline_button.click(fn=handler.store_metric_data, inputs="htr_pipeline_button")
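
Editor's note: the three `update_selected_tab_*` callbacks above implement a manual pane switcher: each `Tab.select` event returns one `gr.update(visible=...)` per content container, so exactly one pane is shown at a time. A stripped-down sketch of the same mechanism with two panes instead of three (names are illustrative, not from this file):

import gradio as gr

with gr.Blocks() as sketch:
    with gr.Tabs():
        with gr.Tab("Run") as run_tab:
            gr.Markdown("Run controls")
        with gr.Tab("View") as view_tab:
            gr.Markdown("Viewer controls")

    with gr.Row(visible=True) as run_pane:
        gr.Markdown("Run output")
    with gr.Row(visible=False) as view_pane:
        gr.Markdown("Viewer output")

    # Selecting a tab flips the visibility flags of both panes.
    run_tab.select(fn=lambda: (gr.update(visible=True), gr.update(visible=False)), outputs=[run_pane, view_pane])
    view_tab.select(fn=lambda: (gr.update(visible=False), gr.update(visible=True)), outputs=[run_pane, view_pane])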
tabs/stepwise_htr_tool.py CHANGED
@@ -1,11 +1,11 @@
  import os
  import shutil
+ from difflib import Differ
 
  import evaluate
  import gradio as gr
 
  from helper.examples.examples import DemoImages
- from helper.text.text_howto import TextHowTo
  from src.htr_pipeline.gradio_backend import CustomTrack, SingletonModelLoader
 
  model_loader = SingletonModelLoader()
@@ -19,57 +19,28 @@ cer_metric = evaluate.load("cer")
 
  with gr.Blocks() as stepwise_htr_tool_tab:
      with gr.Tabs():
-         with gr.Tab("1. Region Segmentation"):
+         with gr.Tab("1. Region segmentation"):
              with gr.Row():
-                 with gr.Accordion("Info", open=False) as example_accord:
-                     with gr.Row(equal_height=False):
-                         gr.Markdown(TextHowTo.stepwise_htr_tool)
-                     with gr.Row():
-                         gr.Markdown(TextHowTo.stepwise_htr_tool_tab_intro)
-                     with gr.Row():
-                         with gr.Tabs():
-                             with gr.Tab("1. Region Segmentation"):
-                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab1)
-                             with gr.Tab("2. Line Segmentation"):
-                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab2)
-                             with gr.Tab("3. Transcribe Text"):
-                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab3)
-                             with gr.Tab("4. Explore Results"):
-                                 gr.Markdown(TextHowTo.stepwise_htr_tool_tab4)
-                     gr.Markdown(TextHowTo.stepwise_htr_tool_end)
-             with gr.Row():
-                 with gr.Column(scale=2):
+                 with gr.Column(scale=1):
                      vis_data_folder_placeholder = gr.Markdown(visible=False)
                      name_files_placeholder = gr.Markdown(visible=False)
 
-                     with gr.Row():
+                     with gr.Group():
                          input_region_image = gr.Image(
-                             label="Image to Region segment",
+                             label="Image to region segment",
                              # type="numpy",
                              tool="editor",
-                             height=400,
+                             height=500,
                          )
-
-                     with gr.Row():
-                         clear_button = gr.Button("Clear", variant="secondary", elem_id="clear_button")
-
-                         region_segment_button = gr.Button(
-                             "Segment Region",
-                             variant="primary",
-                             elem_id="region_segment_button",
-                         )
-
-                     with gr.Group():
-                         with gr.Accordion("Region segment settings:", open=False):
-                             with gr.Row():
+                         with gr.Accordion("Settings", open=False):
+                             with gr.Group():
                                  reg_pred_score_threshold_slider = gr.Slider(
                                      minimum=0.4,
                                      maximum=1,
                                      value=0.5,
                                      step=0.05,
                                      label="P-threshold",
-                                     info="""Filter and determine the confidence score
-                                     required for a prediction score to be considered""",
+                                     info="""Filter the confidence score for a prediction score to be considered""",
                                  )
                                  reg_containments_threshold_slider = gr.Slider(
                                      minimum=0,
@@ -81,27 +52,37 @@
                                      for a detected region or object to be considered valid""",
                                  )
 
-                             with gr.Row():
                                  region_segment_model_dropdown = gr.Dropdown(
-                                     choices=["Riksarkivet/RmtDet_region"],
-                                     value="Riksarkivet/RmtDet_region",
-                                     label="Region segment model",
-                                     info="Will add more models later!",
+                                     choices=["Riksarkivet/rtm_region"],
+                                     value="Riksarkivet/rtm_region",
+                                     label="Region segmentation model",
+                                     info="More models will be added",
                                  )
 
-                     with gr.Accordion("Example images to use:", open=False) as example_accord:
-                         gr.Examples(
-                             examples=images_for_demo.examples_list,
-                             inputs=[name_files_placeholder, input_region_image],
-                             label="Example images",
-                             examples_per_page=5,
-                         )
+                     with gr.Row():
+                         clear_button = gr.Button("Clear", variant="secondary", elem_id="clear_button")
 
-                 with gr.Column(scale=3):
-                     output_region_image = gr.Image(label="Segmented regions", type="numpy", height=550)
+                         region_segment_button = gr.Button(
+                             "Run",
+                             variant="primary",
+                             elem_id="region_segment_button",
+                         )
+
+                 with gr.Column(scale=2):
+                     with gr.Box():
+                         with gr.Row():
+                             with gr.Column(scale=2):
+                                 gr.Examples(
+                                     examples=images_for_demo.examples_list,
+                                     inputs=[name_files_placeholder, input_region_image],
+                                     label="Example images",
+                                     examples_per_page=5,
+                                 )
+                             with gr.Column(scale=3):
+                                 output_region_image = gr.Image(label="Segmented regions", type="numpy", height=600)
 
          ##############################################
-         with gr.Tab("2. Line Segmentation"):
+         with gr.Tab("2. Line segmentation"):
              image_placeholder_lines = gr.Image(
                  label="Segmented lines",
                  # type="numpy",
@@ -112,50 +93,50 @@
 
              with gr.Row(visible=False) as control_line_segment:
                  with gr.Column(scale=2):
-                     with gr.Box():
-                         regions_cropped_gallery = gr.Gallery(
-                             label="Segmented regions",
-                             elem_id="gallery",
-                             columns=[2],
-                             rows=[2],
-                             # object_fit="contain",
-                             height=450,
-                             preview=True,
-                             container=False,
-                         )
+                     with gr.Group():
+                         with gr.Box():
+                             regions_cropped_gallery = gr.Gallery(
+                                 label="Segmented regions",
+                                 elem_id="gallery",
+                                 columns=[2],
+                                 rows=[2],
+                                 # object_fit="contain",
+                                 height=450,
+                                 preview=True,
+                                 container=False,
+                             )
 
-                     input_region_from_gallery = gr.Image(
-                         label="Region segmentation to line segment", interactive="False", visible=False, height=400
-                     )
+                         input_region_from_gallery = gr.Image(
+                             label="Region segmentation to line segment", interactive="False", visible=False, height=400
+                         )
 
-                     with gr.Row():
-                         with gr.Accordion("Line segment settings:", open=False):
-                             with gr.Row():
-                                 line_pred_score_threshold_slider = gr.Slider(
-                                     minimum=0.3,
-                                     maximum=1,
-                                     value=0.4,
-                                     step=0.05,
-                                     label="Pred_score threshold",
-                                     info="""Filter and determine the confidence score
-                                     required for a prediction score to be considered""",
-                                 )
-                                 line_containments_threshold_slider = gr.Slider(
-                                     minimum=0,
-                                     maximum=1,
-                                     value=0.5,
-                                     step=0.05,
-                                     label="Containments threshold",
-                                     info="""The minimum required overlap or similarity
-                                     for a detected region or object to be considered valid""",
-                                 )
-                             with gr.Row(equal_height=False):
-                                 line_segment_model_dropdown = gr.Dropdown(
-                                     choices=["Riksarkivet/RmtDet_lines"],
-                                     value="Riksarkivet/RmtDet_lines",
-                                     label="Line segment model",
-                                     info="Will add more models later!",
-                                 )
+                         with gr.Row():
+                             with gr.Accordion("Settings", open=False):
+                                 with gr.Row():
+                                     line_pred_score_threshold_slider = gr.Slider(
+                                         minimum=0.3,
+                                         maximum=1,
+                                         value=0.4,
+                                         step=0.05,
+                                         label="Pred_score threshold",
+                                         info="""Filter the confidence score for a prediction score to be considered""",
+                                     )
+                                     line_containments_threshold_slider = gr.Slider(
+                                         minimum=0,
+                                         maximum=1,
+                                         value=0.5,
+                                         step=0.05,
+                                         label="Containments threshold",
+                                         info="""The minimum required overlap or similarity
+                                         for a detected region or object to be considered valid""",
+                                     )
+                                 with gr.Row(equal_height=False):
+                                     line_segment_model_dropdown = gr.Dropdown(
+                                         choices=["Riksarkivet/rtmdet_lines"],
+                                         value="Riksarkivet/rtmdet_lines",
+                                         label="Line segment model",
+                                         info="More models will be added",
+                                     )
                  with gr.Row():
                      clear_line_segment_button = gr.Button(
                          " ",
@@ -165,7 +146,7 @@
                      )
 
                      line_segment_button = gr.Button(
-                         "Segment Lines",
+                         "Run",
                          variant="primary",
                          # elem_id="center_button",
                          scale=1,
@@ -179,7 +160,7 @@
              )
 
          ###############################################
-         with gr.Tab("3. Transcribe Text"):
+         with gr.Tab("3. Text recognition"):
              image_placeholder_htr = gr.Image(
                  label="Transcribed lines",
                  # type="numpy",
@@ -192,33 +173,45 @@
 
                  inputs_lines_to_transcribe = gr.Variable()
 
                  with gr.Column(scale=2):
-                     image_inputs_lines_to_transcribe = gr.Image(
-                         label="Transcribed lines", type="numpy", interactive="False", visible=False, height=470
-                     )
-                     with gr.Row():
-                         with gr.Accordion("Transcribe settings:", open=False):
-                             transcriber_model = gr.Dropdown(
-                                 choices=["Riksarkivet/SATRN_transcriber", "microsoft/trocr-base-handwritten"],
-                                 value="Riksarkivet/SATRN_transcriber",
-                                 label="Transcriber model",
-                                 info="Will add more models later!",
-                             )
+                     with gr.Group():
+                         image_inputs_lines_to_transcribe = gr.Image(
+                             label="Transcribed lines", type="numpy", interactive="False", visible=False, height=470
+                         )
+                         with gr.Row():
+                             with gr.Accordion("Settings", open=False):
+                                 transcriber_model = gr.Dropdown(
+                                     choices=["Riksarkivet/satrn_htr", "microsoft/trocr-base-handwritten"],
+                                     value="Riksarkivet/satrn_htr",
+                                     label="Text recognition model",
+                                     info="More models will be added",
+                                 )
+
+                                 gr.Slider(
+                                     value=0.6,
+                                     minimum=0.5,
+                                     maximum=1,
+                                     label="HTR threshold",
+                                     info="Prediction score threshold for transcribed lines",
+                                     scale=1,
+                                 )
+
                      with gr.Row():
                          clear_transcribe_button = gr.Button(" ", variant="Secondary", visible=True, scale=1)
 
-                         transcribe_button = gr.Button("Transcribe Lines", variant="primary", visible=True, scale=1)
+                         transcribe_button = gr.Button("Run", variant="primary", visible=True, scale=1)
 
                  with gr.Column(scale=3):
                      with gr.Row():
                          transcribed_text = gr.Textbox(
                              label="Transcribed text",
-                             info="Transcribed text is being streamed back from the HTR-model",
-                             lines=25,
+                             info="Transcribed text is being streamed back from the Text recognition model",
+                             lines=26,
                              value="",
+                             show_copy_button=True,
                          )
 
          #####################################
-         with gr.Tab("4. Explore Results"):
+         with gr.Tab("4. Explore results"):
              image_placeholder_explore_results = gr.Image(
                  label="Cropped transcribed lines",
                  # type="numpy",
@@ -229,40 +222,48 @@
 
              with gr.Row(visible=False, equal_height=False) as control_results_transcribe:
                  with gr.Column(scale=1, visible=True):
-                     with gr.Box():
-                         temp_gallery_input = gr.Variable()
-
-                         gallery_inputs_lines_to_transcribe = gr.Gallery(
-                             label="Cropped transcribed lines",
-                             elem_id="gallery_lines",
-                             columns=[3],
-                             rows=[3],
-                             # object_fit="contain",
-                             height=250,
-                             preview=True,
-                             container=False,
-                         )
+                     with gr.Group():
+                         with gr.Box():
+                             temp_gallery_input = gr.Variable()
+
+                             gallery_inputs_lines_to_transcribe = gr.Gallery(
+                                 label="Cropped transcribed lines",
+                                 elem_id="gallery_lines",
+                                 columns=[3],
+                                 rows=[3],
+                                 # object_fit="contain",
+                                 height=150,
+                                 preview=True,
+                                 container=False,
+                             )
+                         with gr.Row():
+                             dataframe_text_index = gr.Textbox(
+                                 label="Text from DataFrame selection",
+                                 placeholder="Select row from the DataFrame.",
+                                 interactive=False,
+                             )
+                         with gr.Row():
+                             gt_text_index = gr.Textbox(
+                                 label="Ground Truth",
+                                 placeholder="Provide the ground truth, if available.",
+                                 interactive=True,
+                             )
+                         with gr.Row():
+                             diff_token_output = gr.HighlightedText(
+                                 label="Text diff",
+                                 combine_adjacent=True,
+                                 show_legend=True,
+                                 color_map={"+": "red", "-": "green"},
+                             )
 
-                     dataframe_text_index = gr.Textbox(
-                         label="Text from DataFrame selection",
-                         placeholder="Select row from the DataFrame.",
-                         interactive=False,
-                     )
-
-                     gt_text_index = gr.Textbox(
-                         label="Ground Truth",
-                         placeholder="Provide the ground truth, if available.",
-                         interactive=True,
-                     )
                      with gr.Row(equal_height=False):
-                         calc_cer_button = gr.Button("Calculate CER", variant="primary", visible=True)
-
                          cer_output = gr.Textbox(label="CER:")
+                         calc_cer_button = gr.Button("Calculate CER", variant="primary", visible=True)
 
                  with gr.Column(scale=1, visible=True):
                      mapping_dict = gr.Variable()
                      transcribed_text_df_finish = gr.Dataframe(
-                         headers=["Transcribed text", "pred score"],
+                         headers=["Transcribed text", "Pred score"],
                          max_rows=14,
                          col_count=(2, "fixed"),
                          wrap=True,
@@ -272,6 +273,21 @@
                      )
 
      # custom track
+
+     def diff_texts(text1, text2):
+         d = Differ()
+         return [(token[2:], token[0] if token[0] != " " else None) for token in d.compare(text1, text2)]
+
+     def compute_cer(dataframe_text_index, gt_text_index):
+         if gt_text_index is not None and gt_text_index.strip() != "":
+             return cer_metric.compute(predictions=[dataframe_text_index], references=[gt_text_index])
+         else:
+             return "Ground truth not provided"
+
+     calc_cer_button.click(compute_cer, inputs=[dataframe_text_index, gt_text_index], outputs=cer_output)
+
+     calc_cer_button.click(diff_texts, inputs=[dataframe_text_index, gt_text_index], outputs=[diff_token_output])
+
      region_segment_button.click(
          custom_track.region_segment,
          inputs=[input_region_image, reg_pred_score_threshold_slider, reg_containments_threshold_slider],
@@ -318,14 +334,6 @@
          ],
      )
 
-     def compute_cer(dataframe_text_index, gt_text_index):
-         if gt_text_index is not None and gt_text_index.strip() != "":
-             return cer_metric.compute(predictions=[dataframe_text_index], references=[gt_text_index])
-         else:
-             return "Ground truth not provided"
-
-     calc_cer_button.click(compute_cer, inputs=[dataframe_text_index, gt_text_index], outputs=cer_output)
-
      clear_button.click(
          lambda: (
              (shutil.rmtree("./vis_data") if os.path.exists("./vis_data") else None, None)[1],
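
Editor's note: the two new callbacks above can be exercised outside Gradio as well. `difflib.Differ.compare` yields tokens prefixed with "  ", "- " or "+ ", which `diff_texts` turns into the `(text, category)` pairs that `gr.HighlightedText` renders, and the character error rate comes from the `evaluate` package. A small usage sketch (example strings are illustrative):

from difflib import Differ

import evaluate

def diff_texts(text1, text2):
    d = Differ()
    return [(token[2:], token[0] if token[0] != " " else None) for token in d.compare(text1, text2)]

print(diff_texts("kat", "cat"))
# [('k', '-'), ('c', '+'), ('a', None), ('t', None)]

cer_metric = evaluate.load("cer")
print(cer_metric.compute(predictions=["hallo werld"], references=["hello world"]))
# ~0.18: two substitutions over eleven reference characters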