Datasets:

License:
VALID / README.md
huu-ontocord's picture
Upload dataset
46d4d65 verified
|
raw
history blame
15.6 kB
metadata
# Hugging Face dataset card metadata (YAML front matter read by the Hub's dataset viewer/loader).
# License identifier declared for this dataset on the Hub.
# NOTE(review): the README prose below states the videos are CC BY, but this declares
# cc-by-sa-4.0 — confirm which license actually applies.
license: cc-by-sa-4.0
# Auto-generated summary of the dataset's schema, splits, and sizes.
dataset_info:
  # Per-record columns and their dtypes; every column is stored as a string
  # except chunk_idx (int64).
  features:
    - name: video_id
      dtype: string
    - name: chunk_idx
      dtype: int64
    - name: chunk_text
      dtype: string
    - name: video_metadata
      dtype: string
    - name: video_language
      dtype: string
    - name: chunk_media
      dtype: string
  # One named split per uploaded shard; num_bytes is the serialized size in bytes.
  # NOTE(review): split names shard_1045 and shard_1035 break the five-digit
  # shard_10XXX pattern used by every other entry — possibly truncated names; verify.
  splits:
    - name: shard_10339
      num_bytes: 1997009
      num_examples: 631
    - name: shard_10400
      num_bytes: 2638827
      num_examples: 722
    - name: shard_10324
      num_bytes: 1700655
      num_examples: 515
    - name: shard_10418
      num_bytes: 3034319
      num_examples: 947
    - name: shard_1045
      num_bytes: 2042334
      num_examples: 648
    - name: shard_10428
      num_bytes: 2314345
      num_examples: 706
    - name: shard_10435
      num_bytes: 2300183
      num_examples: 677
    - name: shard_10424
      num_bytes: 1839226
      num_examples: 552
    - name: shard_10442
      num_bytes: 1543285
      num_examples: 419
    - name: shard_10411
      num_bytes: 2005599
      num_examples: 604
    - name: shard_10344
      num_bytes: 1796239
      num_examples: 589
    - name: shard_10439
      num_bytes: 1780546
      num_examples: 567
    - name: shard_10351
      num_bytes: 2156111
      num_examples: 677
    - name: shard_10446
      num_bytes: 2117151
      num_examples: 525
    - name: shard_10457
      num_bytes: 1851306
      num_examples: 555
    - name: shard_10464
      num_bytes: 1316832
      num_examples: 440
    - name: shard_10405
      num_bytes: 1820556
      num_examples: 613
    - name: shard_10396
      num_bytes: 3458836
      num_examples: 956
    - name: shard_10471
      num_bytes: 2397197
      num_examples: 682
    - name: shard_10289
      num_bytes: 3470407
      num_examples: 963
    - name: shard_10298
      num_bytes: 2823620
      num_examples: 791
    - name: shard_10311
      num_bytes: 4072154
      num_examples: 1148
    - name: shard_10456
      num_bytes: 1279577
      num_examples: 430
    - name: shard_1035
      num_bytes: 2102014
      num_examples: 687
    - name: shard_10430
      num_bytes: 2293697
      num_examples: 686
    - name: shard_10469
      num_bytes: 2521584
      num_examples: 743
    - name: shard_10360
      num_bytes: 2329044
      num_examples: 680
    - name: shard_10443
      num_bytes: 2222280
      num_examples: 641
    - name: shard_10453
      num_bytes: 3277011
      num_examples: 931
    - name: shard_10462
      num_bytes: 3202984
      num_examples: 912
    - name: shard_10258
      num_bytes: 2899614
      num_examples: 881
    - name: shard_10206
      num_bytes: 3714862
      num_examples: 891
    - name: shard_10481
      num_bytes: 2163505
      num_examples: 709
    - name: shard_10482
      num_bytes: 1885620
      num_examples: 503
    - name: shard_10365
      num_bytes: 1789825
      num_examples: 453
    - name: shard_10475
      num_bytes: 2290432
      num_examples: 635
    - name: shard_10315
      num_bytes: 2911312
      num_examples: 743
    - name: shard_10486
      num_bytes: 1946726
      num_examples: 564
    - name: shard_10444
      num_bytes: 1915386
      num_examples: 550
    - name: shard_10493
      num_bytes: 2240928
      num_examples: 752
    - name: shard_10433
      num_bytes: 1728758
      num_examples: 554
  # Aggregate sizes in bytes as recorded at upload time.
  download_size: 48893568
  dataset_size: 95191896
# Loader configuration: maps each split name to the parquet file glob(s) backing it.
# NOTE(review): the 'train' split's glob data/*.parquet also matches every per-shard
# file listed below — confirm this overlap is intended.
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/*.parquet
      - split: shard_10339
        path: data/shard_10339-*
      - split: shard_10400
        path: data/shard_10400-*
      - split: shard_10424
        path: data/shard_10424-*
      - split: shard_10324
        path: data/shard_10324-*
      - split: shard_10428
        path: data/shard_10428-*
      - split: shard_10258
        path: data/shard_10258-*
      - split: shard_10396
        path: data/shard_10396-*
      - split: shard_10411
        path: data/shard_10411-*
      - split: shard_10418
        path: data/shard_10418-*
      - split: shard_10206
        path: data/shard_10206-*
      - split: shard_10442
        path: data/shard_10442-*
      - split: shard_1045
        path: data/shard_1045-*
      - split: shard_10289
        path: data/shard_10289-*
      - split: shard_10298
        path: data/shard_10298-*
      - split: shard_10344
        path: data/shard_10344-*
      - split: shard_10435
        path: data/shard_10435-*
      - split: shard_10311
        path: data/shard_10311-*
      - split: shard_10405
        path: data/shard_10405-*
      - split: shard_10464
        path: data/shard_10464-*
      - split: shard_10457
        path: data/shard_10457-*
      - split: shard_10439
        path: data/shard_10439-*
      - split: shard_10351
        path: data/shard_10351-*
      - split: shard_10446
        path: data/shard_10446-*
      - split: shard_10315
        path: data/shard_10315-*
      - split: shard_10471
        path: data/shard_10471-*
      - split: shard_1035
        path: data/shard_1035-*
      - split: shard_10456
        path: data/shard_10456-*
      - split: shard_10486
        path: data/shard_10486-*
      - split: shard_10430
        path: data/shard_10430-*
      - split: shard_10469
        path: data/shard_10469-*
      - split: shard_10360
        path: data/shard_10360-*
      - split: shard_10443
        path: data/shard_10443-*
      - split: shard_10453
        path: data/shard_10453-*
      - split: shard_10462
        path: data/shard_10462-*
      - split: shard_10481
        path: data/shard_10481-*
      - split: shard_10482
        path: data/shard_10482-*
      - split: shard_10365
        path: data/shard_10365-*
      - split: shard_10475
        path: data/shard_10475-*
      - split: shard_10444
        path: data/shard_10444-*
      - split: shard_10493
        path: data/shard_10493-*
      - split: shard_10433
        path: data/shard_10433-*

VALID Dataset

VALID (Video-Audio Large Interleaved Dataset)

Overview

The VALID (Video-Audio Large Interleaved Dataset) is a multimodal dataset comprising approximately 720,000 Creative Commons licensed videos crawled from YouTube, and processed into audio-video-text data records for machine learning research. We are in the process of uploading so please be patient. The dataset provides a unique opportunity for training models to understand relationships between modalities such as video frames, audio clips, and multilingual textual data, making it suitable for applications like multimodal representation learning.

Features

  • Audio-Video-Text Format: A combination of:
<video>
    <caption><image> the caption </caption>
    <caption><image> the caption </caption>
    <caption><image> the caption </caption>
</video>
<transcript> <audio> multi-lingual transcript </transcript>
English text
  • The non-text multimodal portion begins the data item and can include multiple media. Some snippets may have more than one audio, and more than one video. Others may have only images/videos or only audio paired with English text. Each video contains multiple frames stored as images, and text captions for each image. There can also be standalone images interleaved as well. Even though each audio-video snippet is no more than 10 seconds, a data record may span more than 10 seconds (e.g., if a data item has two 10 second videos, then the corresponding English text corresponds roughly to 20 seconds of video). The intention for this format is to teach a model to associate multiple modalities with each other, and understand multiple audio-video elements in an interleaved fashion.

  • Data Components:

    • Images: PNG format, phashed to ensure variability, with 0–10 images per audio snippet. Each image includes a caption created with Florence-2.
    • Audio: OGG format, multilingual, ~10 seconds per snippet, with shorter sound or music snippets (1–3 seconds) to minimize copyright issues. Each audio snippet is transcribed either with Whisper for non-English, or with the original YouTube ASR for English.
    • Text: Not including the captions and transcripts, the “text” portion is a concatenation of YouTube’s original English transcripts associated with the above media of around 1–40 words per data record.
  • Dataset Size:

    • About 7,000,000 records.
    • About 15,000,000 images, each captioned with Florence-2.
    • About 30,000,000 audio snippets, about half of which are transcribed with Whisper-large, and half with YouTube ASR.
    • Divided into about 12K shards of about 600 records, each in a parquet file and a corresponding .tar.gz file for the media.
    • About 14TB in total.

File Organization

  • Each data entry follows the <video><image(s)><audio><text> structure as described above.
  • Metadata includes timestamps and alignment between modalities.

Multimodal Details

  • Audio-Video Alignment: Snippets allow learning temporal relationships between audio and visual elements.
  • Text Annotations: Text descriptions, including captions and contextual keywords, provide linguistic alignment.

Preprocessing

  • Phashing for Images: Ensures that images within the dataset are dynamic and non-static.
  • Audio Snippet Lengths: Music and sound effects are clipped to 1–3 seconds to minimize copyright concerns.

Licenses

All videos in VALID are CC BY, as declared by their original uploaders on YouTube. We publish the snippets of these videos here under these rights and under the principles of fair use. However, we cannot guarantee that original uploaders had the rights to share the content. This dataset has only been lightly filtered for safety by removing data records with high proportions of children related words AND high proportions of sexual or violence related words. Moreover, we disclaim all warranties, whether express or implied, and all liabilities with respect to infringement, fitness for a particular purpose, or otherwise.

Intended Uses

  • Primary Use Case: Training models for multimodal understanding, such as contrastive multimodal learning (e.g., CLIP, CLAP).
  • Not Recommended For: Generation tasks, as the dataset's quality may not meet generative model requirements.

Dataset Limitations

  • Quality: Images and audio are sourced from YouTube and may vary in resolution and clarity.
  • Rights Uncertainty: While videos are marked as CC-BY by the third party authors of the videos, original rights may not be verifiable.
  • Biases: The dataset's multilingual audio paired with English-only text may introduce linguistic biases. The large variety of videos may introduce bias.

Ethical Considerations

The dataset was built under the principles of fair use and CC-BY licensing. Its creation strives to align with the spirit of the EU AI Act, emphasizing transparency and safety in AI model development. Users must exercise caution and adhere to copyright and licensing rules when using VALID.


Policy for Managing Video Deletion Requests

Our goal is to establish a clear process for removing videos from our dataset when requested by users or required by external factors, while balancing the rights of content owners, compliance with CC-BY licenses, and the community's ability to utilize the dataset for training and research purposes.

  • 1. Respecting Content Owners' Rights: All videos in the dataset are under the CC-BY license. As such, proper attribution will always be maintained as required by the license. If a content owner requests the removal of a video from the dataset, we will balance this request with the community's ability to train on the data, considering the original intent of the CC-BY license.

  • 2. Deletion Request Process:

    • Content owners or users can request the removal of a video by FIRST requesting it be removed from YouTube: Here and Here.
    • Then verifying that it has been removed from YouTube and providing this feedback to us Here.
    • Requests must demonstrate that the video is no longer publicly available on YouTube.
    • We will remove the confirmed videos in the next release of this dataset.
  • 3. Verification and Balancing Interests: All deletion requests will be verified by checking YouTube to ensure the video is no longer available. We may also remove a video in our sole discretion. Decisions on video removal will take into account: The rights and wishes of content owners, including their ability to remove their videos from public availability. The community's need for robust datasets for training and research. The spirit of the CC-BY license, which permits redistribution and use with proper attribution.

  • 4. Responsibilities for Derivative Datasets: Users creating derivative datasets must ensure compliance by deleting videos listed in delete_these_videos.json.

  • 5. Proactive Deletion: Videos may be removed proactively under the following circumstances:

  • Requests from the hosting provider (e.g., Hugging Face).

  • Legal requirements or enforcement actions.

  • Internal decisions.

  • 6. Community Considerations:

  • The community is encouraged to respect the balance between individual content owners’ wishes and the public benefit derived from open access datasets.

  • Efforts will be made to keep the dataset robust while honoring legitimate requests for content removal.

  • 7. Updates: Users are encouraged to check the delete_these_videos.json file from time to time to ensure their copy of the dataset is up to date.


Related Materials:

Acknowledgement and Thanks

This dataset was built by Ontocord.AI in cooperation with Grass and LAION.AI. It was created as part of the EUHPC grant EUHPC_E03_068 for the Leonardo supercomputer resources in order to build safe multimodal models that comply with the EU AI Act. This dataset was built on a subset of the Grass Video Repository, a massive video dataset of creative commons videos. We deeply thank EuroHPC and Cineca, as well as Hugging Face and the open source community for their support.

About the Contributors:

  • Grass is committed to making the public web accessible again. Through its network of millions of globally distributed nodes, it is capable of collecting petabyte-scale datasets for a variety of use cases, including training AI models. The network is run exclusively by users who have downloaded an application to their devices, allowing them to contribute their unused internet bandwidth to the network. On X: @getgrass_io
  • LAION is a non-profit organization that provides datasets, tools, and models to liberate machine learning research. By doing so, it encourages open public education and a more environment-friendly use of resources by reusing existing datasets and models.
  • Ontocord is a technology company focused on legally compliant AI. Our mission is to make our AGI future lawful and accessible to everyone.
  • Alignment Lab AI: Our mission is to build a future leveraging AI as a force for good and as a tool that enhances human lives. We believe everyone deserves to harness the power of personal intelligence.
  • And many others ...

Citation

@misc{Huu2024VALID,
  title = {VALID (Video-Audio Large Interleaved Dataset)},
  author = {Huu Nguyen and Ken Tsui and Andrej Radonjic and Christoph Schuhmann},
  year = {2024},
  url = {https://huggingface.co/datasets/ontocord/VALID},
}