from datasets import get_dataset
from distance import dataset_transform
import numpy as np

# import re,httpx
# page = httpx.get('https://github.com/erikbern/ann-benchmarks/blob/main/README.md', )
# re.findall('http://ann-benchmarks\.com/(.*?)\.hdf5', page.text)
# Scrapes the full list of available dataset names from the ann-benchmarks README.

# ann-benchmarks dataset identifiers to download and convert.  Entries are
# commented out to restrict a run to a subset (currently only MNIST); see the
# scraping snippet above for how the full list was obtained.
benchmark_dataset = {
#  'deep-image-96-angular',
#  'fashion-mnist-784-euclidean',
#  'gist-960-euclidean',
#  'glove-25-angular',
#  'glove-50-angular',
#  'glove-100-angular',
#  'glove-200-angular',
#  'kosarak-jaccard',
 'mnist-784-euclidean',
#  'movielens10m-jaccard',
#  'nytimes-256-angular',
#  'sift-128-euclidean',
#  'lastfm-64-dot'
}

def array2fvecs(train_set: np.ndarray, test_set: np.ndarray, prefix: str=''):
  """Dump train/test vectors to ``learn.fvecs`` / ``query.fvecs``.

  File layout (single header for the whole file):
      dimension [int32, little-endian]
      row 1: dimension * float32 + one float32 zero (padding column)
      row 2: ...

  NOTE(review): this differs from the standard .fvecs layout, which repeats
  the int32 dimension before *every* vector — presumably the downstream
  reader expects this custom single-header layout; confirm before changing.

  Args:
    train_set: 2-D array, one training vector per row.
    test_set:  2-D array, one query vector per row.
    prefix:    path prefix (e.g. ``'data/<name>/'``) for both output files.
  """
  assert len(train_set.shape) == 2 and len(test_set.shape) == 2
  # Append a zero column so every row occupies (dim + 1) float32 values.
  train_padded = np.column_stack(
      (train_set, np.zeros((train_set.shape[0], 1)))).astype('float32')
  test_padded = np.column_stack(
      (test_set, np.zeros((test_set.shape[0], 1)))).astype('float32')

  def _write_file(data: np.ndarray, filename: str):
    # Plain buffered writes: sequential local-disk dumps gain nothing from
    # asyncio, and this drops both the third-party aiofiles dependency and
    # the deprecated asyncio.get_event_loop() pattern (which raises on
    # Python 3.12+ when no event loop is running).
    with open(filename, 'wb') as f:
      # Header holds the original (un-padded) dimensionality.
      f.write((data.shape[1] - 1).to_bytes(4, 'little'))
      f.write(data.tobytes())

  _write_file(train_padded, prefix + 'learn.fvecs')
  _write_file(test_padded, prefix + 'query.fvecs')


def get_single_dataset(name):
  """Fetch one dataset and write its train/test splits under ``data/<name>/``.

  Args:
    name: dataset identifier understood by ``get_dataset``
          (e.g. ``'mnist-784-euclidean'``).
  """
  import os
  # dimension is reported by get_dataset but not needed here.
  file, _dimension = get_dataset(name)
  x, y = dataset_transform(file)
  print('writing data for ' + name)
  # makedirs(exist_ok=True) also creates the parent 'data/' directory and
  # cannot race with sibling worker processes, unlike the previous
  # exists()-then-mkdir sequence (which also failed outright when 'data/'
  # itself was missing).
  os.makedirs('data/' + name, exist_ok=True)
  array2fvecs(x, y, prefix=f'data/{name}/')


def get_all_dataset(nprogress: int = 2):
  """Convert every dataset in ``benchmark_dataset`` using worker processes.

  Args:
    nprogress: number of processes in the pool.
  """
  from multiprocessing import Pool
  workers = Pool(nprogress)
  # map() is synchronous, so all conversions have finished by the time we
  # shut the pool down.
  workers.map(get_single_dataset, benchmark_dataset)
  workers.close()
  workers.join()


if __name__ == '__main__':
  # Script entry point: download and convert all enabled benchmark datasets.
  get_all_dataset()