from typing import Dict, Any

import numpy as np
import ray
from tqdm import tqdm

from storage import Boto3Storage
from modules import MFCCInfer
import boto3
import torch

# Cap intra-op threads so parallel Ray workers don't oversubscribe CPUs.
torch.set_num_threads(3)

# S3-compatible object store (MinIO) that holds the metadata JSON files
# and the embedding .npy files referenced by them.
storage = Boto3Storage('192.168.50.114:9080',
                       'admin',
                       '12345678')

s3_client = boto3.client("s3")

# Enumerate available buckets; this job reads from the first one.
response = s3_client.list_buckets()
buckets = [bucket['Name'] for bucket in response['Buckets']]
s3 = boto3.resource('s3')
my_bucket = s3.Bucket(buckets[0])

# Collect every object under the 'meta' prefix that looks like a file
# (i.e. the key contains a '.'), assigning each a sequential id.
# (Bug fix: the original initialized `noid = 0` but never incremented it,
# so every item got id 0; use the running list length instead.)
files_path_list = []
for object_summary in my_bucket.objects.filter(Prefix='meta'):
    key = str(object_summary.key)
    if '.' in key:
        files_path_list.append({
            'id': len(files_path_list),
            'path': f's3://{buckets[0]}/{key}',
        })


# for file in tqdm(files_path_list):
#     # print("processing file: ", file)
#     metadata = storage.get_json(file)
#     emb_path = metadata['embedding_saved_path']
#     emb = storage.get_npy(emb_path)
#     # if emb.shape[1] < 256:
#     #     continue
#     model = MFCCInfer()
#     # try:
#     prediction, silent_points = model(metadata['path'])
#     storage.put_npy(prediction, emb_path)
#
#     assert np.array_equal(emb, prediction)
    # except:
    #     continue

def transform_batch(batch: Dict[str, Any]) -> Dict[str, Any]:
    """Run MFCC inference for one record and persist the prediction.

    ``batch`` is a single Ray Data row of the form ``{'id': int, 'path': str}``
    where ``path`` points at a metadata JSON object in S3.  The metadata is
    expected to contain ``'path'`` (the audio location fed to the model) and
    ``'embedding_saved_path'`` (where the prediction is written) — schema
    assumed from usage; TODO confirm against the producer of these files.

    The input row is always returned unchanged so the Ray pipeline keeps
    flowing; per-row failures are logged and skipped (best-effort).
    """
    file = batch['path']
    print(file)
    metadata = storage.get_json(file)
    emb_path = metadata['embedding_saved_path']
    # NOTE: the model is constructed per row; hoist it (e.g. into a Ray
    # actor) if construction cost ever dominates.
    model = MFCCInfer()
    try:
        prediction, silent_points = model(metadata['path'])
        storage.put_npy(prediction, emb_path)
    except Exception as exc:
        # Was a bare `except:` that silently swallowed everything, including
        # KeyboardInterrupt/SystemExit.  Keep the best-effort skip, but only
        # for ordinary errors, and record what failed.
        print(f"failed to process {file}: {exc}")

    return batch

# Fan the metadata records out over 5 concurrent workers (2 CPUs each)
# and drive the whole pipeline to completion.
results = (
    ray.data.from_items(files_path_list)
    .map(transform_batch, concurrency=5, num_cpus=2)
    .take_all()
)


