---
dataset_info:
- config_name: default
  features:
  - name: version
    dtype: string
  - name: hypothesis
    dtype: string
  - name: hypothesis_formula
    dtype: string
  - name: context
    dtype: string
  - name: context_formula
    dtype: string
  - name: proofs
    sequence: string
  - name: proofs_formula
    sequence: string
  - name: negative_hypothesis
    dtype: string
  - name: negative_hypothesis_formula
    dtype: string
  - name: negative_proofs
    sequence: string
  - name: negative_original_tree_depth
    dtype: int64
  - name: original_tree_depth
    dtype: int64
  - name: depth
    dtype: int64
  - name: num_formula_distractors
    dtype: int64
  - name: num_translation_distractors
    dtype: int64
  - name: num_all_distractors
    dtype: int64
  - name: proof_label
    dtype: string
  - name: negative_proof_label
    dtype: string
  - name: world_assump_label
    dtype: string
  - name: negative_world_assump_label
    dtype: string
  - name: prompt_serial
    dtype: string
  - name: proof_serial
    dtype: string
  splits:
  - name: train
    num_bytes: 101861795
    num_examples: 30000
  - name: validation
    num_bytes: 16956757
    num_examples: 5000
  - name: test
    num_bytes: 16952009
    num_examples: 5000
  download_size: 50451962
  dataset_size: 135770561
- config_name: star
  features:
  - name: version
    dtype: string
  - name: hypothesis
    dtype: string
  - name: hypothesis_formula
    dtype: string
  - name: negative_hypothesis
    dtype: string
  - name: negative_hypothesis_formula
    dtype: string
  - name: negative_original_tree_depth
    dtype: int64
  - name: original_tree_depth
    dtype: int64
  - name: depth
    dtype: int64
  - name: num_formula_distractors
    dtype: int64
  - name: num_translation_distractors
    dtype: int64
  - name: num_all_distractors
    dtype: int64
  - name: proof_label
    dtype: string
  - name: negative_proof_label
    dtype: string
  - name: world_assump_label
    dtype: string
  - name: negative_world_assump_label
    dtype: string
  - name: facts
    dtype: string
  - name: facts_formula
    dtype: string
  - name: proofs
    sequence: string
  - name: proofs_formula
    sequence: string
  - name: negative_proofs
    sequence: string
  - name: prompt_serial
    dtype: string
  - name: proof_serial
    dtype: string
  splits:
  - name: train
    num_bytes: 126945152
    num_examples: 30000
  - name: validation
    num_bytes: 21067447
    num_examples: 5000
  - name: test
    num_bytes: 21287828
    num_examples: 5000
  download_size: 61766317
  dataset_size: 169300427
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
- config_name: star
  data_files:
  - split: train
    path: star/train-*
  - split: validation
    path: star/validation-*
  - split: test
    path: star/test-*
---

# Dataset Card for "FLD.v2"

To train a causal language model, simply use the "prompt_serial" column for the model input and "proof_serial" for the output.

For more info, see [our project page](https://github.com/hitachi-nlp/FLD/).

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)