de-francophones committed (verified)
Commit 467199b · Parent(s): 4a051a9

Upload 14 files

.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
  data filter=lfs diff=lfs merge=lfs -text
+ wikicaps_v1.0/images_retrieval.lst filter=lfs diff=lfs merge=lfs -text
+ wikicaps_v1.0/img_en filter=lfs diff=lfs merge=lfs -text
wikicaps_v1.0/README ADDED
@@ -0,0 +1,63 @@
+
+ Captions
+ ========
+
+
+ The dev and test caption files follow the naming scheme:
+
+ "img_<LANGUAGE>-en.tok.lc.filtered.(dev|test)"
+
+ <LANGUAGE> is a two-letter code (de = German, fr = French,
+ ru = Russian).
+
+
+ The format of the parallel image-caption pairs is:
+
+ "imagename<tab>foreign_caption ||| english_caption"
+
+
+
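As a minimal sketch (not part of the release), a parallel caption file in this
format could be split into two per-language, tab-separated files with plain
awk; the input and output file names below are only examples:

    awk -F'\t' '{
        pos = index($2, " ||| ")                       # separator between the two captions
        if (pos > 0) {
            print $1 "\t" substr($2, 1, pos - 1) > "captions.foreign"
            print $1 "\t" substr($2, pos + 5)    > "captions.english"
        }
    }' img_de-en.tok.lc.filtered.dev
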
+ Dev and Test Images
+ ===================
+
+ The images have to be downloaded manually. We provide a simple
+ bash script for downloading them, which makes use of the following
+ Perl script:
+
+ https://commons.wikimedia.org/wiki/User:AzaToth/wikimgrab.pl
+
+ Simply enter the "images" directory and call the script
+ "download_dev_test.sh" from there, e.g.:
+
+ $ cd images
+ $ source ./download_dev_test.sh
+
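For reference, wikimgrab.pl (included in this upload) derives a direct
upload.wikimedia.org URL from the MD5 hash of the underscore-normalized file
name; a rough bash equivalent for a single, purely hypothetical file name:

    name="Example_image.jpg"                              # spaces already replaced by "_"
    hash=$(printf '%s' "$name" | md5sum | cut -c1-2)      # first two hex digits of the MD5 digest
    echo "https://upload.wikimedia.org/wikipedia/commons/${hash:0:1}/${hash}/${name}"
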
+ Downloading the Full Corpus
+ ===========================
+
+ We also provide a script for downloading the full corpus, i.e.
+ the (monolingual and multilingual) image annotations and their
+ associated images, directly from Wikimedia Commons. Note that
+ this may take a very long time (depending on your local bandwidth),
+ so we recommend distributing the task across several machines.
+ The script can be found in the "full_corpus" directory.
+
+ We have compiled an index of 3,816,940 images on Wikimedia Commons.
+ To download, for example, the first 20,000 images of the full
+ corpus (and their annotations), do the following:
+
+ $ cd full_corpus
+ $ ./download_full_corpus.sh 0 20000
+
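Since START_INDEX and END_INDEX bound the range of corpus indices handled by
one run (END_INDEX is inclusive), one purely illustrative way to spread the
3,816,940-image index (indices 0 to 3,816,939) over four machines would be:

    # machine 1:
    $ ./download_full_corpus.sh 0 954234
    # machine 2:
    $ ./download_full_corpus.sh 954235 1908469
    # machine 3:
    $ ./download_full_corpus.sh 1908470 2862704
    # machine 4:
    $ ./download_full_corpus.sh 2862705 3816939
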
+ The images will be placed in sequentially numbered directories,
+ each containing a maximum of 10,000 images.
+
+ The annotations will be written to files named XX.captions, where XX
+ is a language identifier (en.captions contains English captions, etc.).
+ The files are tab-separated, in the following format:
+
+ Corpus index <tab> Filename <tab> Annotated caption (including the HTML "div"
+ with metadata)
+
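As an illustration only (not a supplied tool), the plain caption text can be
recovered from the third column by stripping the HTML tags, assuming the
caption itself contains no tab characters:

    awk -F'\t' 'BEGIN{OFS="\t"} {gsub(/<[^>]*>/, "", $3); print $1, $2, $3}' en.captions
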
+ Please note that some images might have been deleted since the
+ index was created. They will be skipped.
wikicaps_v1.0/full_corpus/download_full_corpus.sh ADDED
@@ -0,0 +1,83 @@
+ #!/bin/bash
+
+ # Usage:   ./download_full_corpus.sh START_INDEX END_INDEX
+ # Example: ./download_full_corpus.sh 0 10000
+
+ shopt -s extglob
+
+ numre='^[0-9]+$'
+
+ if ! [[ $1 =~ $numre ]] ; then
+     echo "Start index must be a number (integer)!"
+     echo "Usage: ./download_full_corpus.sh START_INDEX END_INDEX"
+     exit 1
+ fi
+
+ if ! [[ $2 =~ $numre ]] ; then
+     echo "End/stopping index must be a number (integer)!"
+     echo "Usage: ./download_full_corpus.sh START_INDEX END_INDEX"
+     exit 2
+ fi
+
+ START=$(($1+0))
+ END=$(($2+0))
+
+ if (( START > END )) ; then
+     echo "End/stopping index must not be smaller than the starting index!"
+     echo "Usage: ./download_full_corpus.sh START_INDEX END_INDEX"
+     exit 3
+ fi
+
+ i=0
+ dirnumber=0
+
+ while read p; do
+
+     # Start a new target directory every 10,000 images and print it as progress.
+     if (( i % 10000 == 0 )) ; then
+         dirnumber=$((dirnumber+1))
+         echo $dirnumber
+     fi
+
+     if (( i < START )) ; then
+         i=$((i+1))
+         continue
+     fi
+
+     if (( i > END )) ; then
+         i=$((i+1))
+         break
+     fi
+
+     if ! [[ -d $dirnumber ]] ; then
+         mkdir $dirnumber
+     fi
+
+     # Create download cache:
+     mkdir tmp
+     cd tmp
+
+     # Download captions from the image's description page:
+     echo "" > tmp.txt
+     wget -O tmp.txt --timeout=10 --tries=2 https://commons.wikimedia.org/wiki/"$p"
+     for lang in en fr de ru jp es it nl;
+     do
+         s=`grep "description mw-content-ltr $lang" tmp.txt`
+         out_line=$i$'\t'"$p"$'\t'"$s"
+         if ! [[ "$s" == "" ]]; then
+             echo "$out_line" >> ../$lang.captions
+         fi
+     done
+
+     # Download image (tmp.txt is excluded so the cached page is not moved along):
+     cp ../wikimgrab.pl .
+     perl ./wikimgrab.pl "$p"
+     mv !(wikimgrab.pl|tmp.txt|+([0-9])) ../$dirnumber
+
+     # Cleanup:
+     cd ..
+     rm -rf tmp
+     i=$((i+1))
+
+ done < ../images_retrieval.lst
+
wikicaps_v1.0/full_corpus/wikimgrab.pl ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/perl
+
+ use strict;
+ use warnings;
+
+ use URI::Escape;
+ use Digest::MD5 qw(md5_hex);
+ use LWP::UserAgent;
+
+ my $ua = LWP::UserAgent->new;
+ $ua->timeout(15);
+ $ua->env_proxy;
+ $ua->show_progress(1);
+
+ foreach my $image( @ARGV ) {
+     $image = uri_unescape($image);
+
+     # Normalize the name the way Commons stores it: spaces become underscores,
+     # a leading "File:"/"Image:" prefix is dropped, first character upper-cased.
+     $image =~ s/ /_/g;
+     $image =~ s/^(File|Image)://ig;
+     $image =~ s/^(\w)/uc($1)/e;
+
+     # upload.wikimedia.org shards files into /x/xy/ directories taken from
+     # the MD5 hex digest of the normalized file name.
+     my $digest = lc(md5_hex( $image ));
+     my $a = substr $digest, 0, 1;
+     my $b = substr $digest, 0, 2;
+     my $path = "http://upload.wikimedia.org/wikipedia/commons/$a/$b/$image";
+     if ($ua->mirror( $path, $image )->is_error) { # if that failed, look for redirects
+         warn("Could not get image directly - looking for alternative name on main image page");
+         my $basepage = "http://commons.wikimedia.org/wiki/File:$image";
+         my $response = $ua->get($basepage);
+         if ($response->content =~ m!<link rel="canonical" href="/wiki/(.+?)"!) {
+             $image = uri_unescape($1); # found an alternative "canonical" link
+         } else {
+             $image = uri_unescape($response->filename); # this is a redirect
+         }
+         $image =~ s/ /_/g;
+         $image =~ s/^(File|Image)://ig;
+         $image =~ s/^(\w)/uc($1)/e;
+
+         $digest = lc(md5_hex( $image ));
+         $a = substr $digest, 0, 1;
+         $b = substr $digest, 0, 2;
+         $path = "http://upload.wikimedia.org/wikipedia/commons/$a/$b/$image";
+         $ua->mirror( $path, $image );
+     }
+ }
+
wikicaps_v1.0/images/download_dev_test.sh ADDED
@@ -0,0 +1,14 @@
+ #!/bin/bash
+ shopt -s extglob
+ mkdir images_dev_test
+
+ # Download every image listed in ../images_dev_test.lst into images_dev_test/.
+ while read p; do
+     mkdir tmp
+     cd tmp
+     cp ../wikimgrab.pl .
+     perl ./wikimgrab.pl "$p"
+     mv !(wikimgrab.pl|download_dev_test.sh|+([0-9])) ../images_dev_test
+     cd ..
+     rm -rf tmp
+ done < ../images_dev_test.lst
wikicaps_v1.0/images/wikimgrab.pl ADDED
@@ -0,0 +1,50 @@
+ #!/usr/bin/perl
+
+ use strict;
+ use warnings;
+
+ use URI::Escape;
+ use Digest::MD5 qw(md5_hex);
+ use LWP::UserAgent;
+
+ my $ua = LWP::UserAgent->new;
+ $ua->timeout(15);
+ $ua->env_proxy;
+ $ua->show_progress(1);
+
+ foreach my $image( @ARGV ) {
+     $image = uri_unescape($image);
+
+     # Normalize the name the way Commons stores it: spaces become underscores,
+     # a leading "File:"/"Image:" prefix is dropped, first character upper-cased.
+     $image =~ s/ /_/g;
+     $image =~ s/^(File|Image)://ig;
+     $image =~ s/^(\w)/uc($1)/e;
+
+     # upload.wikimedia.org shards files into /x/xy/ directories taken from
+     # the MD5 hex digest of the normalized file name.
+     my $digest = lc(md5_hex( $image ));
+     my $a = substr $digest, 0, 1;
+     my $b = substr $digest, 0, 2;
+     my $path = "http://upload.wikimedia.org/wikipedia/commons/$a/$b/$image";
+     if ($ua->mirror( $path, $image )->is_error) { # if that failed, look for redirects
+         warn("Could not get image directly - looking for alternative name on main image page");
+         my $basepage = "http://commons.wikimedia.org/wiki/File:$image";
+         my $response = $ua->get($basepage);
+         if ($response->content =~ m!<link rel="canonical" href="/wiki/(.+?)"!) {
+             $image = uri_unescape($1); # found an alternative "canonical" link
+         } else {
+             $image = uri_unescape($response->filename); # this is a redirect
+         }
+         $image =~ s/ /_/g;
+         $image =~ s/^(File|Image)://ig;
+         $image =~ s/^(\w)/uc($1)/e;
+
+         $digest = lc(md5_hex( $image ));
+         $a = substr $digest, 0, 1;
+         $b = substr $digest, 0, 2;
+         $path = "http://upload.wikimedia.org/wikipedia/commons/$a/$b/$image";
+         $ua->mirror( $path, $image );
+     }
+ }
+
wikicaps_v1.0/images_dev_test.lst ADDED
The diff for this file is too large to render. See raw diff
 
wikicaps_v1.0/images_retrieval.lst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bc10825d7f0b4201c26694bd0d9c8d500e63dd355dd7d2ff025cfb44f1e427a
+ size 195447269
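
Files tracked via Git LFS in this upload (such as images_retrieval.lst above)
are stored as pointer files like the one shown; after cloning the repository,
their actual contents can be fetched with, for example:

    $ git lfs pull --include="wikicaps_v1.0/*"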
wikicaps_v1.0/img_de-en.tok.lc.filtered.dev ADDED
The diff for this file is too large to render. See raw diff
 
wikicaps_v1.0/img_de-en.tok.lc.filtered.test ADDED
The diff for this file is too large to render. See raw diff
 
wikicaps_v1.0/img_en ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e78f9d0f8dbbefef3b0579e6c8cc40ba747530b1338b9d96447f2de96eaec17
+ size 883401425
wikicaps_v1.0/img_fr-en.tok.lc.filtered.dev ADDED
The diff for this file is too large to render. See raw diff
 
wikicaps_v1.0/img_fr-en.tok.lc.filtered.test ADDED
The diff for this file is too large to render. See raw diff
 
wikicaps_v1.0/img_ru-en.tok.lc.filtered.dev ADDED
The diff for this file is too large to render. See raw diff
 
wikicaps_v1.0/img_ru-en.tok.lc.filtered.test ADDED
The diff for this file is too large to render. See raw diff