---
dataset_info:
- config_name: default
  features:
  - name: version
    dtype: string
  - name: hypothesis
    dtype: string
  - name: hypothesis_formula
    dtype: string
  - name: context
    dtype: string
  - name: context_formula
    dtype: string
  - name: proofs
    sequence: string
  - name: proofs_formula
    sequence: string
  - name: negative_hypothesis
    dtype: string
  - name: negative_hypothesis_formula
    dtype: string
  - name: negative_proofs
    sequence: string
  - name: negative_original_tree_depth
    dtype: int64
  - name: original_tree_depth
    dtype: int64
  - name: depth
    dtype: int64
  - name: num_formula_distractors
    dtype: int64
  - name: num_translation_distractors
    dtype: int64
  - name: num_all_distractors
    dtype: int64
  - name: proof_label
    dtype: string
  - name: negative_proof_label
    dtype: string
  - name: world_assump_label
    dtype: string
  - name: negative_world_assump_label
    dtype: string
  - name: prompt_serial
    dtype: string
  - name: proof_serial
    dtype: string
  splits:
  - name: train
    num_bytes: 103394163
    num_examples: 30000
  - name: validation
    num_bytes: 17205990
    num_examples: 5000
  - name: test
    num_bytes: 17215356
    num_examples: 5000
  download_size: 51122839
  dataset_size: 137815509
- config_name: star
  features:
  - name: version
    dtype: string
  - name: hypothesis
    dtype: string
  - name: hypothesis_formula
    dtype: string
  - name: context
    dtype: string
  - name: context_formula
    dtype: string
  - name: proofs
    sequence: string
  - name: proofs_formula
    sequence: string
  - name: negative_hypothesis
    dtype: string
  - name: negative_hypothesis_formula
    dtype: string
  - name: negative_proofs
    sequence: string
  - name: negative_original_tree_depth
    dtype: int64
  - name: original_tree_depth
    dtype: int64
  - name: depth
    dtype: int64
  - name: num_formula_distractors
    dtype: int64
  - name: num_translation_distractors
    dtype: int64
  - name: num_all_distractors
    dtype: int64
  - name: proof_label
    dtype: string
  - name: negative_proof_label
    dtype: string
  - name: world_assump_label
    dtype: string
  - name: negative_world_assump_label
    dtype: string
  - name: prompt_serial
    dtype: string
  - name: proof_serial
    dtype: string
  splits:
  - name: train
    num_bytes: 129618848
    num_examples: 30000
  - name: validation
    num_bytes: 21529187
    num_examples: 5000
  - name: test
    num_bytes: 21731836
    num_examples: 5000
  download_size: 63147762
  dataset_size: 172879871
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
- config_name: star
  data_files:
  - split: train
    path: star/train-*
  - split: validation
    path: star/validation-*
  - split: test
    path: star/test-*
---

# Dataset Card for "FLD.v2"

For the schema of the dataset, see the [FLD-corpus repository](https://github.com/hitachi-nlp/FLD-corpus.git).
For the project as a whole, see [our project page](https://github.com/hitachi-nlp/FLD/).

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
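
As a quick reference for the schema listed in the YAML header, the snippet below sketches how the two configurations (`default` and `star`) might be loaded with the Hugging Face `datasets` library. The repository identifier `hitachi-nlp/FLD.v2` and the field names printed are taken from this card; adjust the identifier if the dataset is hosted elsewhere.

```python
from datasets import load_dataset

# Minimal sketch: load one configuration of the corpus.
# "hitachi-nlp/FLD.v2" is assumed here as the dataset repository id.
dataset = load_dataset("hitachi-nlp/FLD.v2", name="star")  # or name="default"

# Each split (train / validation / test) follows the features described above.
example = dataset["train"][0]
print(example["hypothesis"])    # natural-language hypothesis
print(example["context"])       # natural-language context (facts + distractors)
print(example["proofs"])        # sequence of gold proof strings
print(example["proof_label"])   # proof label for the hypothesis
```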