George committed
Commit 7d1d580
1 parent: 30d4168

combine data.py into main py

Files changed (2):
  1. data.py +0 -63
  2. genshin_piano.py +33 -2
data.py DELETED
@@ -1,63 +0,0 @@
- import os
- import shutil
- import hashlib
- from tqdm import tqdm
-
-
- def calculate_hash(file_path):
-     # Compute the hash of the file
-     with open(file_path, 'rb') as midi_file:
-         content = midi_file.read()
-     return hashlib.md5(content).hexdigest()
-
-
- def mv_duplicates_to_folder(input_folder, output_folder):
-     # Create the folder for duplicates
-     if not os.path.exists(output_folder):
-         os.makedirs(output_folder)
-
-     # Dictionary for storing file hashes
-     hash_dict = {}
-
-     # Walk the input folder
-     for root, _, files in os.walk(input_folder):
-         for file in tqdm(files, desc='Removing duplicates'):
-             file_path = os.path.join(root, file)
-             file_hash = calculate_hash(file_path)
-
-             # Check whether the file hash already exists
-             if file_hash in hash_dict:
-                 print(f"Duplicates found: {file}")
-                 # Move the duplicate file to the duplicates folder
-                 destination_path = os.path.join(output_folder, file)
-                 shutil.move(file_path, destination_path)
-             else:
-                 # Store the file hash
-                 hash_dict[file_hash] = file_path
-
-
- def rm_duplicates_in_folder(input_folder):
-     # Dictionary for storing file hashes
-     hash_dict = {}
-     duplist = []
-     # Walk the input folder
-     for root, _, files in os.walk(input_folder):
-         for file in tqdm(files, desc='Removing duplicates'):
-             file_path = os.path.join(root, file)
-             file_hash = calculate_hash(file_path)
-
-             # Check whether the file hash already exists
-             if file_hash in hash_dict:
-                 print(f"Duplicates found: {file}")
-                 # Delete the duplicate file directly
-                 duplist.append(file_path)
-                 shutil.rmtree(file_path)
-             else:
-                 # Store the file hash
-                 hash_dict[file_hash] = file_path
-
-     return duplist
-
-
- if __name__ == "__main__":
-     mv_duplicates_to_folder(input_folder="data", output_folder="duplicates")
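
Note (not part of the commit): the removed calculate_hash reads each MIDI file fully into memory before hashing, which is fine for small files. If larger files were ever involved, an incremental variant along the following lines could be used instead; calculate_hash_chunked and chunk_size are illustrative names, not from the repository.

import hashlib

def calculate_hash_chunked(file_path, chunk_size=8192):
    # Stream the file through MD5 in fixed-size chunks instead of
    # reading it into memory all at once (illustrative sketch only).
    md5 = hashlib.md5()
    with open(file_path, 'rb') as midi_file:
        for chunk in iter(lambda: midi_file.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()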
genshin_piano.py CHANGED
@@ -1,8 +1,9 @@
  import os
+ import shutil
  import random
+ import hashlib
  import datasets
  from midi2abc import midi2abc
- from data import rm_duplicates_in_folder


  _HOMEPAGE = f"https://huggingface.co/datasets/MuGeminorum/{os.path.basename(__file__).split('.')[0]}"
@@ -26,6 +27,36 @@ This database contains genshin piano songs downloaded from musescore
  _URL = f"{_HOMEPAGE}/resolve/main/data/dataset.zip"


+ def calculate_hash(file_path):
+     # Compute the hash of the file
+     with open(file_path, 'rb') as midi_file:
+         content = midi_file.read()
+     return hashlib.md5(content).hexdigest()
+
+
+ def rm_duplicates_in_folder(input_folder):
+     # Dictionary for storing file hashes
+     hash_dict = {}
+     duplist = []
+     # Walk the input folder
+     for root, _, files in os.walk(input_folder):
+         for file in files:
+             file_path = os.path.join(root, file)
+             file_hash = calculate_hash(file_path)
+
+             # Check whether the file hash already exists
+             if file_hash in hash_dict:
+                 print(f"Duplicates found: {file}")
+                 # Delete the duplicate file directly
+                 duplist.append(file_path)
+                 shutil.rmtree(file_path)
+             else:
+                 # Store the file hash
+                 hash_dict[file_hash] = file_path
+
+     return duplist
+
+
  class genshin_piano(datasets.GeneratorBasedBuilder):
      def _info(self):
          return datasets.DatasetInfo(
@@ -48,7 +79,7 @@ class genshin_piano(datasets.GeneratorBasedBuilder):
          files = dl_manager.iter_files([data_files])
          dataset = []

-         extract_dir = os.path.dirname(data_files[0])
+         extract_dir = data_files + '\\dataset'
          duplist = rm_duplicates_in_folder(extract_dir)

          for path in files:
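
Note (an observation, not part of the commit): shutil.rmtree expects a directory and fails when handed a regular file, and extract_dir = data_files + '\\dataset' hard-codes a Windows path separator. Below is a minimal sketch of a file-level variant, assuming os.remove for single files and os.path.join for portable paths; rm_duplicate_files is an illustrative name, not a function in the repository.

import os
import hashlib

def rm_duplicate_files(input_folder):
    # Map each MD5 digest to the first file path seen with that content.
    hash_dict = {}
    duplist = []
    for root, _, files in os.walk(input_folder):
        for file in files:
            file_path = os.path.join(root, file)
            with open(file_path, 'rb') as f:
                file_hash = hashlib.md5(f.read()).hexdigest()
            if file_hash in hash_dict:
                # Duplicate content: record it and delete this single file.
                duplist.append(file_path)
                os.remove(file_path)
            else:
                hash_dict[file_hash] = file_path
    return duplist

# A portable alternative to the hard-coded separator (illustrative):
# extract_dir = os.path.join(data_files, 'dataset')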