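"""Hugging Face `datasets` loading script for the TempoFunk dataset.

Each data chunk is a tar archive bundling, per sample id, a prompt embedding
("txt_<id>"), per-frame video latents ("vid_<id>") and a JSON metadata file
("jso_<id>").
"""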
import io
import json
import tarfile

import datasets
import numpy

_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        # Per-video text embedding (77 tokens x 768 dims, CLIP-style).
        "prompt": datasets.Array3D(shape=(1, 77, 768), dtype="float32"),
        # Sequence of per-frame latents (4 x 64 x 64).
        "video": datasets.Sequence(feature=datasets.Array3D(shape=(4, 64, 64), dtype="float32")),
        "description": datasets.Value("string"),
        "videourl": datasets.Value("string"),
        "categories": datasets.Value("string"),
        # "float" is a pyarrow alias for float32; spell it out for clarity.
        "duration": datasets.Value("float32"),
        # Raw metadata dict, serialized to a JSON string in _generate_examples.
        "full_metadata": datasets.Value("string"),
    }
)

class FunkLoaderStream(datasets.GeneratorBasedBuilder):
    """TempoFunk Dataset"""

    def _info(self):
        return datasets.DatasetInfo(
            description="TempoFunk Dataset",
            features=_FEATURES,
            homepage="tempofunk.github.io",
            citation="""
@misc{TempoFunk2023,
author = {Lopho, Carlos Chavez},
title = {TempoFunk: Extending latent diffusion image models to Video},
url = {tempofunk.github.io},
month = {5},
year = {2023}
}
            """,
            license="AGPL v3"
        )

    def _split_generators(self, dl_manager):
        # Download and parse the chunk list: a flat JSON array of chunk names.
        chunk_list_path = dl_manager.download("lists/chunk_list.json")
        with open(chunk_list_path, "r", encoding="utf-8") as f:
            chunk_list = json.load(f)

        # Download every chunk archive; the download manager caches them locally.
        chunk_paths = dl_manager.download([f"data/{chunk}.tar" for chunk in chunk_list])

        # All chunks belong to a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "chunks": chunk_paths,
                },
            ),
        ]

    def _generate_examples(self, chunks):
        """Yield one example per sample id found in the tar chunks."""
        for chunk in chunks:
            # Collect the files belonging to each sample id before yielding.
            samples = {}

            with tarfile.open(chunk, mode='r') as tar:
                for file_info in tar:
                    if not file_info.isfile():
                        continue
                    # Filenames follow the pattern "typ_id.ext", where typ is
                    # 'txt' (prompt embedding), 'vid' (video latents) or
                    # 'jso' (metadata).
                    file_type, _, rest = file_info.name.partition('_')
                    file_id = rest.split('.')[0]
                    file_contents = tar.extractfile(file_info).read()

                    sample = samples.setdefault(file_id, {})
                    if file_type in ('txt', 'vid'):
                        sample[file_type] = numpy.load(io.BytesIO(file_contents))
                    elif file_type == 'jso':
                        sample[file_type] = json.loads(file_contents)

            for key, value in samples.items():
                yield key, {
                    "id": key,
                    "description": value['jso']['description'],
                    "prompt": value['txt'],
                    "video": value['vid'],
                    "videourl": value['jso']['videourl'],
                    "categories": value['jso']['categories'],
                    "duration": value['jso']['duration'],
                    # The feature type is a string, so serialize the raw
                    # metadata dict rather than yielding it directly.
                    "full_metadata": json.dumps(value['jso']),
                }