---
license: apache-2.0
dataset_info:
- config_name: mmc_en
  features:
  - name: doc_name
    dtype: string
  - name: sentences
    sequence:
      sequence:
        sequence: string
  - name: coref_chains
    sequence:
      sequence:
        sequence: int64
  splits:
  - name: train
    num_bytes: 28164357
    num_examples: 955
  - name: dev
    num_bytes: 4043571
    num_examples: 134
  - name: test
    num_bytes: 3103262
    num_examples: 133
  download_size: 3609139
  dataset_size: 35311190
- config_name: mmc_fa
  features:
  - name: doc_name
    dtype: string
  - name: sentences
    sequence:
      sequence:
        sequence: string
  - name: coref_chains
    sequence:
      sequence:
        sequence: int64
  splits:
  - name: train
    num_bytes: 22553374
    num_examples: 950
  - name: dev
    num_bytes: 3579538
    num_examples: 134
  - name: test
    num_bytes: 2480699
    num_examples: 133
  download_size: 2969009
  dataset_size: 28613611
- config_name: mmc_fa_corrected
  features:
  - name: doc_name
    dtype: string
  - name: sentences
    sequence:
      sequence:
        sequence: string
  - name: coref_chains
    sequence:
      sequence:
        sequence: int64
  splits:
  - name: train
    num_bytes: 22553374
    num_examples: 950
  - name: dev
    num_bytes: 3579538
    num_examples: 134
  - name: test
    num_bytes: 2512884
    num_examples: 133
  download_size: 2975807
  dataset_size: 28645796
- config_name: mmc_zh_corrected
  features:
  - name: doc_name
    dtype: string
  - name: sentences
    sequence:
      sequence:
        sequence: string
  - name: coref_chains
    sequence:
      sequence:
        sequence: int64
  splits:
  - name: train
    num_bytes: 29749762
    num_examples: 948
  - name: dev
    num_bytes: 4442503
    num_examples: 134
  - name: test
    num_bytes: 2240351
    num_examples: 133
  download_size: 3416567
  dataset_size: 36432616
- config_name: mmc_zh_uncorrected
  features:
  - name: doc_name
    dtype: string
  - name: sentences
    sequence:
      sequence:
        sequence: string
  - name: coref_chains
    sequence:
      sequence:
        sequence: int64
  splits:
  - name: train
    num_bytes: 29749762
    num_examples: 948
  - name: dev
    num_bytes: 4442503
    num_examples: 134
  - name: test
    num_bytes: 3373346
    num_examples: 133
  download_size: 3457199
  dataset_size: 37565611
configs:
- config_name: mmc_en
  data_files:
  - split: train
    path: mmc_en/train-*
  - split: dev
    path: mmc_en/dev-*
  - split: test
    path: mmc_en/test-*
- config_name: mmc_fa
  data_files:
  - split: train
    path: mmc_fa/train-*
  - split: dev
    path: mmc_fa/dev-*
  - split: test
    path: mmc_fa/test-*
- config_name: mmc_fa_corrected
  data_files:
  - split: train
    path: mmc_fa_corrected/train-*
  - split: dev
    path: mmc_fa_corrected/dev-*
  - split: test
    path: mmc_fa_corrected/test-*
- config_name: mmc_zh_corrected
  data_files:
  - split: train
    path: mmc_zh_corrected/train-*
  - split: dev
    path: mmc_zh_corrected/dev-*
  - split: test
    path: mmc_zh_corrected/test-*
- config_name: mmc_zh_uncorrected
  data_files:
  - split: train
    path: mmc_zh_uncorrected/train-*
  - split: dev
    path: mmc_zh_uncorrected/dev-*
  - split: test
    path: mmc_zh_uncorrected/test-*
---

# MMC (Multilingual Multiparty Coreference)

- Project: https://github.com/boyuanzheng010/mmc
- Data source: https://github.com/boyuanzheng010/mmc/commit/a7007d1d4556a3f4347a3d7b686f71d66bd1e2d9

## Details

Data for the paper "Multilingual Coreference Resolution in Multiparty Dialogue" (TACL 2023).

## Citation

```
@article{zheng-etal-2023-multilingual,
    title = "Multilingual Coreference Resolution in Multiparty Dialogue",
    author = "Zheng, Boyuan and Xia, Patrick and Yarmohammadi, Mahsa and Van Durme, Benjamin",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "11",
    year = "2023",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2023.tacl-1.52",
    doi = "10.1162/tacl_a_00581",
    pages = "922--940",
    abstract = "Existing multiparty dialogue datasets for entity coreference resolution are nascent, and many challenges are still unaddressed. We create a large-scale dataset, Multilingual Multiparty Coref (MMC), for this task based on TV transcripts. Due to the availability of gold-quality subtitles in multiple languages, we propose reusing the annotations to create silver coreference resolution data in other languages (Chinese and Farsi) via annotation projection. On the gold (English) data, off-the-shelf models perform relatively poorly on MMC, suggesting that MMC has broader coverage of multiparty coreference than prior datasets. On the silver data, we find success both using it for data augmentation and training from scratch, which effectively simulates the zero-shot cross-lingual setting.",
}
```