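"""Hugging Face `datasets` loading script for hoyo_pianos: miHoYo game piano
songs (splits: "genshin", "starail"), exposed as MIDI file paths with
ABC-notation transcriptions and a song-title tag."""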
import os
import random
import hashlib
import datasets
from midi2abc import midi2abc

# Derive the dataset name from this script's filename (Hugging Face convention:
# the loading script is named after the dataset repo).
_HOMEPAGE = f"https://huggingface.co/datasets/MuGeminorum/{os.path.basename(__file__).split('.')[0]}"
_CITATION = """\
@dataset{mihoyo_pianos,
  author    = {MuGeminorum Studio},
  title     = {miHoYo game piano songs},
  month     = {nov},
  year      = {2023},
  publisher = {Hugging Face},
  version   = {1.1},
  url       = {https://huggingface.co/datasets/MuGeminorum/hoyo_pianos}
}
"""
_DESCRIPTION = """\
This dataset contains piano arrangements of miHoYo game songs downloaded from MuseScore.
"""
_URLS = {
    "genshin": f"{_HOMEPAGE}/resolve/main/data/genshin.zip",
    "starail": f"{_HOMEPAGE}/resolve/main/data/starail.zip",
}
class hoyo_pianos(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "midi": datasets.Value("string"),
                    "abc": datasets.Value("string"),
                    "tag": datasets.Value("string"),
                }
            ),
            # Must match the feature name declared above ("tag", not "tags").
            supervised_keys=("abc", "tag"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
        )
    def _calculate_hash(self, file_path):
        # Compute the MD5 digest of the file's contents.
        with open(file_path, "rb") as midi_file:
            content = midi_file.read()
        return hashlib.md5(content).hexdigest()
    def _rm_duplicates_in_folder(self, input_folder):
        # Map each content hash to the first file seen with that hash.
        hash_dict = {}
        duplist = []
        # Walk the input folder and hash every file.
        for root, _, files in os.walk(input_folder):
            for file in files:
                file_path = os.path.join(root, file)
                file_hash = self._calculate_hash(file_path)
                # A repeated hash means the file is a duplicate.
                if file_hash in hash_dict:
                    print(f"Duplicates found: {file}")
                    # Delete the duplicate with os.remove; shutil.rmtree only
                    # works on directories and would fail on a file path.
                    duplist.append(file_path)
                    os.remove(file_path)
                else:
                    # Remember this file's hash.
                    hash_dict[file_hash] = file_path
        return duplist
    def _split_generators(self, dl_manager):
        dataset = []
        for key in _URLS.keys():
            data_files = dl_manager.download_and_extract(_URLS[key])
            files = dl_manager.iter_files([data_files])
            subset = []
            # Join paths portably instead of hard-coding a Windows separator.
            extract_dir = os.path.join(data_files, key)
            duplist = self._rm_duplicates_in_folder(extract_dir)
            for path in files:
                # Keep only MIDI files that were not removed as duplicates.
                if path not in duplist and os.path.basename(path).endswith(".mid"):
                    subset.append(path)
            random.shuffle(subset)
            dataset.append(
                datasets.SplitGenerator(
                    name=key,
                    gen_kwargs={"files": subset},
                )
            )
        return dataset
    def _generate_examples(self, files):
        for i, path in enumerate(files):
            yield i, {
                "midi": path,
                "abc": midi2abc(path),
                # Zip extraction decodes non-ASCII filenames as cp437; re-encode
                # and decode as GBK to recover the original Chinese song title.
                "tag": os.path.basename(path)[:-4].encode("cp437").decode("gbk"),
            }