DolphinNie committed on
Commit
d1b244e
1 Parent(s): dc0a88a

fixed new dataset

Files changed (4)
  1. README.md +14 -5
  2. env_num_test.zip +2 -2
  3. env_num_train.zip +2 -2
  4. env_num_valid.zip +2 -2
README.md CHANGED
@@ -16,7 +16,7 @@ git clone https://huggingface.co/datasets/DolphinNie/dungeon-dataset
 
 ## 1. Data Explanation
 
-This is the Map dataset from the open-sourced game [Brogue](https://github.com/tmewett/BrogueCE). It contains 40,000 train dataset, 10,000 test dataset and 10,000 validation dataset.
+This is the map dataset from the open-source game [Brogue](https://github.com/tmewett/BrogueCE). It contains 49,000 training maps, 14,000 test maps, and 7,000 validation maps.
 
 Each map is stored in a `.csv` file. The map is a `(32x32)` array, which is the map size.
 
@@ -87,8 +87,17 @@ import matplotlib.pyplot as plt
 # Load dataset from hugging face
 dataset = load_dataset("DolphinNie/dungeon-dataset")
 
-# Dataset stored by huggingface is not in a correct format, we need to do further process
-def dataset_convert(dataset):
+
+def get_processed_dataset(load_dataset_from_pickle=False,
+                          save_dataset_to_pickle=False,
+                          pickle_save_path='dungeon-dataset.pkl'):
+    dataset = pull_hugging_face_dataset(load_dataset_from_pickle,
+                                        save_dataset_to_pickle,
+                                        pickle_save_path)
+    dataset_train, dataset_test, dataset_valid = convert_dataset(dataset)
+    return dataset_train, dataset_test, dataset_valid
+
+def convert_dataset(dataset):
     dataset_train = list()
     dataset_test = list()
     dataset_valid = list()
@@ -98,11 +107,11 @@ def dataset_convert(dataset):
         datapoint_num = int(dataset[name[i]].num_rows / 32)
         dataset_tf = dataset[name[i]].to_pandas()
         for n in range(datapoint_num):
-            env_num = tf_train[n*32:(n+1)*32]
+            env_num = dataset_tf[n * 32:(n + 1) * 32]
             datasets[i].append(env_num)
     return dataset_train, dataset_test, dataset_valid
 
-dataset_train, dataset_test, dataset_valid = dataset_convert(dataset)
+dataset_train, dataset_test, dataset_valid = get_processed_dataset()
 
 # Visualize the datapoints if you want
 def visualize_map(dungeon_map):
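The new hunks call a `pull_hugging_face_dataset` helper whose definition sits outside the diffed lines, so its body does not appear above. A minimal sketch of what such a helper might look like, assuming from its parameter names that it only wraps `load_dataset` with an optional local pickle cache (the body below is an assumption, not the repository's actual code):

```python
import os
import pickle

from datasets import load_dataset

def pull_hugging_face_dataset(load_dataset_from_pickle=False,
                              save_dataset_to_pickle=False,
                              pickle_save_path='dungeon-dataset.pkl'):
    # Assumed behavior: reuse a local pickle cache when requested and present...
    if load_dataset_from_pickle and os.path.exists(pickle_save_path):
        with open(pickle_save_path, 'rb') as f:
            return pickle.load(f)
    # ...otherwise fetch the splits from the Hub
    dataset = load_dataset("DolphinNie/dungeon-dataset")
    if save_dataset_to_pickle:
        with open(pickle_save_path, 'wb') as f:
            pickle.dump(dataset, f)
    return dataset
```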
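For reference, `convert_dataset` simply cuts each split's flat table into consecutive 32-row blocks, one `(32, 32)` map per block. An equivalent sketch with numpy, where `split` stands for one split of the loaded dataset (e.g. `dataset["train"]`) and the table is assumed to hold exactly the 32 map columns:

```python
import numpy as np

def split_to_maps(split):
    # Flat (num_rows, 32) table; every 32 consecutive rows form one map
    table = split.to_pandas().to_numpy()
    num_maps = table.shape[0] // 32
    return table[:num_maps * 32].reshape(num_maps, 32, 32)
```

Each entry of the returned array is one `(32, 32)` dungeon map, ready to be passed to `visualize_map`.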
env_num_test.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcb44c2b4168f12e5d514b0df8b5352f320c82da0372ea0f9eea7e210bbe92e7
-size 4256874
+oid sha256:96a40d89fee5643190fb4c0279fcb2ddf8163c2f7f48e91c78740790f979f8e8
+size 5966510
env_num_train.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1bf4e80a23630fc026ff6a20f1b1e0fef6b4e107da8848d33093d311d2f28da
-size 17194333
+oid sha256:b70df308db028325a2c98de635bc88e3acd1d9956f9a73c9dbcdc7f1f4e2cb96
+size 21061633
env_num_valid.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f3293a3269fc6ce99c509654b48037a7d529b749a1c45022bbfa2f50ae04629
-size 4303217
+oid sha256:7301cd6db76f25ea986244771d57d8c63f23ac94149c438c5954593905495ff2
+size 3011923
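The three `.zip` entries are Git LFS pointer files, so the diff records only each archive's new SHA-256 object id and byte size rather than its contents. After fetching the real archives (e.g. with `git lfs pull`), they can be checked against the pointers with a short sketch like the following; the expected values are copied from the updated pointers above, and the script itself is an illustration, not part of the repository:

```python
import hashlib

# Expected (oid, size) pairs from the updated LFS pointers
EXPECTED = {
    "env_num_test.zip":  ("96a40d89fee5643190fb4c0279fcb2ddf8163c2f7f48e91c78740790f979f8e8", 5966510),
    "env_num_train.zip": ("b70df308db028325a2c98de635bc88e3acd1d9956f9a73c9dbcdc7f1f4e2cb96", 21061633),
    "env_num_valid.zip": ("7301cd6db76f25ea986244771d57d8c63f23ac94149c438c5954593905495ff2", 3011923),
}

for path, (oid, size) in EXPECTED.items():
    with open(path, "rb") as f:
        data = f.read()
    assert len(data) == size, f"{path}: size mismatch"
    assert hashlib.sha256(data).hexdigest() == oid, f"{path}: sha256 mismatch"
    print(f"{path}: OK")
```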