import json
import time
from itertools import islice

import librosa
import laion_clap
import numpy as np
import torch
from safetensors.numpy import save_file
def read_default_prompt():
    """Load the deduplicated audio-caption texts (assumed: a JSON array of strings)."""
    with open('/root/autodl-tmp/dedup_audio_text_80.json', 'r') as f:
        return json.load(f)
def init_audio_pipe():
    # int16 round-trip quantization helpers: audio is quantized to 16-bit
    # precision and back before being sent to the model (see below).
    def int16_to_float32(x):
        return (x / 32767.0).astype(np.float32)

    def float32_to_int16(x):
        x = np.clip(x, a_min=-1., a_max=1.)
        return (x * 32767.).astype(np.int16)
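    # Illustrative round-trip (value approximate; assumes float32 rounding):
    #   int16_to_float32(float32_to_int16(np.array([0.5], dtype=np.float32)))
    #   # -> array([0.49998474], dtype=float32)
    # Precision is capped at 1/32767, matching 16-bit audio storage.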
    model = laion_clap.CLAP_Module(enable_fusion=False)
    model.load_ckpt()  # download the default pretrained checkpoint

    # Get an audio embedding from raw audio data.
    # (The filename 下载.wav means "download.wav"; the path is kept as-is.)
    audio_data, _ = librosa.load('/root/autodl-tmp/下载.wav', sr=48000)  # CLAP expects a 48 kHz sample rate
    audio_data = audio_data.reshape(1, -1)  # make it (1, T) or (N, T)
    # Quantize to int16 and back before sending it to the model.
    audio_data = torch.from_numpy(
        int16_to_float32(float32_to_int16(audio_data))).float()
    audio_embed = model.get_audio_embedding_from_data(x=audio_data, use_tensor=True)
    print(audio_embed)
    print(audio_embed.shape)
    # Get text embeddings, batched so that large prompt lists fit in memory;
    # the result is saved to audio_text_embeddings.safetensors below.
    start_time = time.time()
    text_data = read_default_prompt()
    batch_size = 256
    num_batches = int(np.ceil(len(text_data) / batch_size))
    text_embed = []
    for i in range(num_batches):
        # Take the next batch of text data.
        batch_data = list(islice(text_data, i * batch_size, (i + 1) * batch_size))
        # Embed the batch (use_tensor=False returns a numpy array).
        batch_embed = model.get_text_embedding(batch_data, use_tensor=False)
        text_embed.append(batch_embed)
    # Concatenate the per-batch embeddings into a single (num_texts, dim) array.
    text_embed = np.concatenate(text_embed)
    print(text_embed)
    print(text_embed.shape)
    # Save the text embeddings to a safetensors file.
    tensors = {
        "text_embed": text_embed,
    }
    save_file(tensors, "/root/autodl-tmp/audio_text_embeddings.safetensors")
    # end_time = time.time()
    # print(end_time - start_time)

    # Audio-to-text similarity (left disabled; note text_embed is a numpy
    # array here, so it must be converted back to a tensor first):
    # result_tensor = torch.matmul(audio_embed, torch.from_numpy(text_embed).T)
    # similarity_scores = torch.softmax(result_tensor, dim=1)
    # print(similarity_scores)
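

# A minimal retrieval sketch based on the disabled block above (an addition,
# not part of the original script): load the saved text embeddings back and
# rank them against an audio embedding from the same CLAP model. If both
# embeddings are L2-normalized, the plain dot product is cosine similarity.
def rank_texts_by_audio(audio_embed,
                        path="/root/autodl-tmp/audio_text_embeddings.safetensors"):
    from safetensors.numpy import load_file
    text_embed = torch.from_numpy(load_file(path)["text_embed"])  # (num_texts, dim)
    scores = torch.matmul(audio_embed, text_embed.T)  # (N, num_texts)
    return torch.softmax(scores, dim=1)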
if __name__ == "__main__":
    init_audio_pipe()