patrickvonplaten commited on
Commit
28e6c6f
1 Parent(s): cf3d765
ami_split_segments.pl ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env perl
2
+
3
+ # Copyright 2014 University of Edinburgh (Author: Pawel Swietojanski)
4
+
5
+ # The script - based on punctuation times - splits segments longer than #words (input parameter)
6
 + # and produces a bit more normalised form of transcripts, as follows
7
+ # MeetID Channel Spkr stime etime transcripts
8
+
9
+ #use List::MoreUtils 'indexes';
10
+ use strict;
11
+ use warnings;
12
+
13
+ sub split_transcripts;
14
+ sub normalise_transcripts;
15
+
16
sub merge_hashes {
    # Merge two hashes (passed by reference) into a new hash.
    # On a key collision the FIRST hash's value wins and a warning
    # is emitted; the second hash's value is discarded.
    my ($first_ref, $second_ref) = @_;
    my %merged = %$first_ref;
    my %extra  = %$second_ref;
    for my $key (keys %extra) {
        if (exists $merged{$key}) {
            warn "Key [$key] is in both hashes!";
            next;
        }
        $merged{$key} = $extra{$key};
    }
    return %merged;
}
29
+
30
sub print_hash {
    # Debug helper: dump each key/value pair of the referenced hash,
    # one "key : value" line per entry, in sorted key order.
    my ($href) = @_;
    for my $key (sort keys %$href) {
        print "$key : $href->{$key}\n";
    }
}
37
+
38
sub get_name {
    # Build a segment name "bbbbbbb_eeeeeee" from begin/end times given
    # in seconds: each time is converted to integer centiseconds and
    # zero-padded to seven digits.
    my ($begin, $end) = @_;
    my $sname = sprintf("%07d_%07d", $begin * 100, $end * 100) || die 'Input undefined!';
    return $sname;
}
44
+
45
sub split_on_comma {
    # Recursively split $text at the comma label (",0", ",1", ...) whose
    # timestamp lies closest to the temporal midpoint of ($btime, $etime),
    # until every piece has fewer than $max_words_per_seg words.
    # %$comma_times maps comma labels to their timestamps.
    # Returns a hash: segment name (see get_name) -> segment text.
    my ($text, $comma_times, $btime, $etime, $max_words_per_seg) = @_;
    my %comma_hash = %$comma_times;

    print "Btime, Etime : $btime, $etime\n";

    my $stime = ($etime + $btime) / 2;    # ideal split time (midpoint)
    my $skey  = "";
    my $otime = $btime;
    # BUGFIX: timestamps are numeric, so order them with <=>.  The original
    # 'cmp' sorted them lexically (e.g. "10.5" before "9.2"), which could
    # make the early exit on $etime below skip valid split candidates.
    foreach my $k (sort { $comma_hash{$a} <=> $comma_hash{$b} } keys %comma_hash) {
        print "Key : $k : $comma_hash{$k}\n";
        my $ktime = $comma_hash{$k};
        if ($ktime == $btime) { next; }
        if ($ktime == $etime) { last; }
        # Keep the candidate closest to the midpoint seen so far.
        if (abs($stime - $ktime) / 2 < abs($stime - $otime) / 2) {
            $otime = $ktime;
            $skey  = $k;
        }
    }

    my %transcripts = ();

    # No usable comma label inside this span -> keep the segment whole.
    if (!($skey =~ /[\,][0-9]+/)) {
        print "Cannot split into less than $max_words_per_seg words! Leaving : $text\n";
        $transcripts{get_name($btime, $etime)} = $text;
        return %transcripts;
    }

    print "Splitting $text on $skey at time $otime (stime is $stime)\n";
    # \Q...\E quotes the label literally inside the split pattern.
    my @utts1 = split(/\Q$skey\E\s+/, $text);
    for (my $i = 0; $i <= $#utts1; $i++) {
        # First piece runs from $btime to the split time, second piece
        # from the split time to $etime.
        my $st = $btime;
        my $et = $comma_hash{$skey};
        if ($i > 0) {
            $st = $comma_hash{$skey};
            $et = $etime;
        }
        my (@utts) = split(' ', $utts1[$i]);
        if ($#utts < $max_words_per_seg) {
            my $nm = get_name($st, $et);
            print "SplittedOnComma[$i]: $nm : $utts1[$i]\n";
            $transcripts{$nm} = $utts1[$i];
        } else {
            print 'Continue splitting!';
            my %transcripts2 = split_on_comma($utts1[$i], \%comma_hash, $st, $et, $max_words_per_seg);
            %transcripts = merge_hashes(\%transcripts, \%transcripts2);
        }
    }
    return %transcripts;
}
96
+
97
sub split_transcripts {
    # Split one transcript (array ref of words interleaved with
    # punctuation tokens and their timestamps) into segments of fewer
    # than $max_words_per_seg words: first on full stops / question
    # marks, then recursively on commas via split_on_comma.
    # Returns a hash: segment name (get_name) -> segment text,
    # or the empty list when punctuation/timestamp counts disagree.
    @_ == 4 || die 'split_transcripts: transcript btime etime max_word_per_seg';

    my ($text, $btime, $etime, $max_words_per_seg) = @_;
    my (@transcript) = @$text;

    # Positions of punctuation tokens and of the numeric timestamps
    # that accompany them.
    my (@punct_indices) = grep { $transcript[$_] =~ /^[\.,\?\!\:]$/ } 0..$#transcript;
    my (@time_indices) = grep { $transcript[$_] =~ /^[0-9]+\.[0-9]*/ } 0..$#transcript;
    # delete on the slice returns the removed timestamps and leaves undef
    # holes in @transcript, preserving the word positions used below.
    my (@puncts_times) = delete @transcript[@time_indices];
    my (@puncts) = @transcript[@punct_indices];

    if ($#puncts_times != $#puncts) {
        print 'Ooops, different number of punctuation signs and timestamps! Skipping.';
        return ();
    }

    # First split on full stops / question marks.
    my (@full_stop_indices) = grep { $puncts[$_] =~ /[\.\?]/ } 0..$#puncts;
    my (@full_stop_times) = @puncts_times[@full_stop_indices];

    # Bracket the sentence boundaries with the overall begin/end times.
    unshift (@full_stop_times, $btime);
    push (@full_stop_times, $etime);

    # Relabel each non-sentence-final punctuation token as ",0", ",1", ...
    # and remember its timestamp so split_on_comma can locate it later.
    my %comma_puncts = ();
    for (my $i=0, my $j=0; $i<=$#punct_indices; $i++) {
        my $lbl = "$transcript[$punct_indices[$i]]$j";
        if ($lbl =~ /[\.\?].+/) { next; }
        $transcript[$punct_indices[$i]] = $lbl;
        $comma_puncts{$lbl} = $puncts_times[$i];
        $j++;
    }

    print "InpTrans : @transcript\n";
    print "Full stops: @full_stop_times\n";

    # Re-attach sentence-final punctuation to the preceding word, then
    # split the text into sentences after each '.' or '?'.
    my $hey = join(' ', @transcript);
    $hey =~ s/ \././g;
    $hey =~ s/ \?/?/g;
    my @utts1 = split (/(?<=[\.\?])\s*/, $hey);

    my %transcripts = ();
    for (my $i=0; $i<=$#utts1; $i++) {
        my (@utts) = split (' ', $utts1[$i]);
        # NOTE: removed an unused local ('my $hey = $max_words_per_seg;')
        # that shadowed the outer $hey and was never read.
        if ($#utts < $max_words_per_seg) {
            print "ReadyTrans: $utts1[$i]\n";
            $transcripts{get_name($full_stop_times[$i], $full_stop_times[$i+1])} = $utts1[$i];
        } else {
            print "TransToSplit: $utts1[$i]\n";
            my %transcripts2 = split_on_comma($utts1[$i], \%comma_puncts, $full_stop_times[$i], $full_stop_times[$i+1], $max_words_per_seg);
            print "Hash TR2:\n"; print_hash(\%transcripts2);
            print "Hash TR:\n"; print_hash(\%transcripts);
            %transcripts = merge_hashes(\%transcripts, \%transcripts2);
            print "Hash TR_NEW : \n"; print_hash(\%transcripts);
        }
    }
    return %transcripts;
}
160
+
161
sub normalise_transcripts {
    # Rough preliminary normalisation of a single transcript string.
    # Returns the cleaned text; segments consisting only of '-', '.' or
    # '?' come back as the empty string (the caller skips empty output).
    my ($text) = @_;

    # Spurious punctuation glued between words (e.g. "UM,I") -> space.
    $text =~ s/[A-Z']+,[A-Z']+/ /g;
    # Spell out underscored abbreviations: X_M_L -> X. M. L etc.
    $text =~ s/\_/. /g;
    # Normalise and trim whitespace.
    $text =~ s/^\s*//g;
    $text =~ s/\s*$//g;
    $text =~ s/\s+/ /g;
    # Segments that are only '-', '.' or '?': nullify them.
    $text =~ s/^\-$//g;
    $text =~ s/^\.$//g;
    $text =~ s/^\?$//g;
    # Drop a dangling trailing dash.
    $text =~ s/\s+\-$//;

    return $text;
}
190
+
191
# Main driver: read meeting lines of the form
#   MeetID Channel Spkr Channel2 TransBtime TransEtime AutBtime AutEtime words...
# split each transcript into <=30-word segments, normalise them and write
#   MeetID H0<chan> Spkr btime etime text
# to the output file (times in seconds).
if (@ARGV != 2) {
    print STDERR "Usage: ami_split_segments.pl <meet-file> <out-file>\n";
    exit(1);
}

my $meet_file = shift @ARGV;
my $out_file  = shift @ARGV;

# Use lexical filehandles with 3-arg open: the original 2-arg, bareword
# form (open W, ">$out_file") is open to mode injection via the filename.
open(my $out_fh, '>', $out_file) or die "opening output file $out_file";
open(my $in_fh,  '<', $meet_file) or die "opening meeting file $meet_file";

while (<$in_fh>) {

    my @A = split(" ", $_);
    if (@A < 9) { print "Skipping line @A"; next; }

    my ($meet_id, $channel, $spk, $channel2, $trans_btime, $trans_etime, $aut_btime, $aut_etime) = @A[0..7];
    my @transcript = @A[8..$#A];
    # 30 = maximum words per output segment.
    my %transcript = split_transcripts(\@transcript, $aut_btime, $aut_etime, 30);

    for my $key (keys %transcript) {
        my $value = $transcript{$key};
        my $segment = normalise_transcripts($value);
        # Segment names encode "btime_etime" in centiseconds.
        my @times = split(/\_/, $key);
        if ($times[0] >= $times[1]) {
            print "Warning, $meet_id, $spk, $times[0] > $times[1]. Skipping. \n"; next;
        }
        if (length($segment) > 0) {
            print {$out_fh} join " ", $meet_id, "H0${channel2}", $spk, $times[0]/100.0, $times[1]/100.0, $segment, "\n";
        }
    }
}
close($in_fh);
# Buffered write errors surface at close, so check it on the output side.
close($out_fh) or die "closing output file $out_file";

print STDERR "Finished.";
dev.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:066872ae3d39964d0c227628f0247276c04b5ee8264ca7ec651c11e049cba02b
3
+ size 941005
dev_final.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4556a7db799e50ff089dbdc38e6f5664dfab9f1cbfa1b600e827fe0277a32c6b
3
+ size 1009169
eval.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd89c18759438910303ad8ddb9780518485229c804b229074af9e63f108ee45b
3
+ size 906632
eval_final.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53dce3a4eb3c74ca3073cbf19f4543efffd621b575b3d7c7e841d7f4caf5083a
3
+ size 972246
finalize.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# Rewrite a space-separated transcript file so each line's leading fields
# are collapsed into a single "AMI_..." utterance id (see loop below).
import sys

# Positional arguments: input transcript path, output path.
transcript = sys.argv[1]
new_transcript = sys.argv[2]
6
+
7
+
8
def right_fill(string):
    """Convert a seconds string to integer centiseconds, zero-padded to 7 digits."""
    centiseconds = int(100 * float(string))
    return str(centiseconds).rjust(7, "0")
10
+
11
+
12
ref_look_up_dict_end = {}
# Rewrite each line "<meet> <chan> <spk> <start> <end> <text...>" as
# "AMI_<meet>_<chan>_<spk>_<start7>_<end7> <text...>", where the two times
# become zero-padded centisecond strings via right_fill.
with open(transcript, "r") as f, open(new_transcript, "w") as g:
    ref_lines = f.readlines()
    for line in ref_lines:
        items = line.split(" ")
        # items[:3] -> meeting id / channel / speaker; items[3:5] -> times.
        line_1 = "AMI_" + "_".join(items[:3]) + "_" + "_".join([right_fill(i) for i in items[3:5]])
        # Remainder of the line (text) is kept verbatim, newline included.
        line_2 = " ".join(items[5:])
        g.write(line_1 + " " + line_2)
parse_text.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
# Post-process a segment-level transcript: when a segment carries a comma
# label " ,<n>" with n > 0 (i.e. it is a non-first piece of a comma split),
# ensure the *preceding* segment of the same meeting/channel/speaker (the
# one whose end time equals this segment's start time) ends with a comma.
# Finally, strip the residual " ,<n>" labels and write the result out.
import sys
import re

# Positional arguments: input transcript path, output path.
transcript = sys.argv[1]
new_transcript = sys.argv[2]

ref_look_up_dict_end = {}
with open(transcript, "r") as f:
    ref_lines = f.readlines()
    # Pass 1: index every line's text by meeting/channel/speaker + END time.
    for line in ref_lines:
        items = line.split(" ")
        # key = "<meet>_<chan>_<spk>_<end_time>" (items[4] is the end time).
        key = "_".join(items[:3] + items[4:5])
        if key in ref_look_up_dict_end:
            print("WARNING: Already there", "_".join(items))

        ref_look_up_dict_end[key] = " ".join(items[5:])

    # Pass 2: for split continuations, append a comma to the predecessor.
    for line in ref_lines:
        items = line.split(" ")
        key = "_".join(items[:3] + items[4:5])
        start_time = items[3]
        if " ," in line:
            # Comma-label index; assumes it is a single digit -- TODO confirm.
            index = int(line.split(" ,")[1][0])
            if index > 0:
                # Predecessor key: same meet/chan/spk, end == this start.
                new_key = "_".join(key.split("_")[:3] + [start_time])
                # Only append when the predecessor does not already end in
                # sentence punctuation.
                if new_key in ref_look_up_dict_end and ref_look_up_dict_end[new_key].strip()[-1] not in [".", ",", "?"]:
                    ref_look_up_dict_end[new_key] = ref_look_up_dict_end[new_key].strip() + ",\n"  # noqa: E501

# Pass 3: emit each line with the (possibly updated) text; pop() so a
# duplicated key is written only once.
with open(new_transcript, "w") as f:
    for line in ref_lines:
        items = line.split(" ")
        key = "_".join(items[:3] + items[4:5])
        if key in ref_look_up_dict_end:
            line = ref_look_up_dict_end.pop(key)
            # Replace each residual " ,<n>" label with a plain comma.
            line = re.sub(r" \,[0-9]+", ",", line)
        f.write(" ".join(items[:5]) + " " + line)
train.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f47867d37e775d90fb4abbdc192027ab4cbfc4c70d15a59dac150c181242e2c3
3
+ size 7925209
train_final.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44d37459379a6b53f0bfe4678859ecff515148b05c14fd4268f00c13b520d2a4
3
+ size 8481002
transcripts1.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44741c36f888664f2aa0f0bf223c230cc8888d42d144301fd276c79cd71b44e8
3
+ size 11203038
transcripts2.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7808881a0f66de5e3b4208346f5668b4a7fea2345cf85738943fc06637c50c8c
3
+ size 9609297
transcripts2_new.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:880cda9cba59b6d80c674b6c00d5e454608bd03634d64ad81005413e0f28fad4
3
+ size 9877941
transcripts2_new_processed.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:759deee0fd9aa058e2424e78d7a3c2f8432640839333f8136c198a1d4e3de366
3
+ size 9772846