import torch
import torchaudio
from torch import nn

import data_processing

# audio_path = "datasets/train/chorus/296.mp3"
# waveform, sample_rate = torchaudio.load(audio_path, format="mp3")
# print(f"原始音频文件形状为：{waveform.shape}")
# i = waveform.shape[1]
# # Compute the sample offset corresponding to the 15-second mark
# start_sample = 15 * 44100  # 15 s * 44100 samples/s = 661,500 samples
#
# resampler = torchaudio.transforms.Resample(
#                 orig_freq=sample_rate,
#                 new_freq=44100
#             )
#
# waveform = resampler(waveform)
#
# print(f"重采样后形状：{waveform.shape}")
#
# # Trim the waveform: keep only the audio after the 15-second mark
# waveform = waveform[:, start_sample:]  # from sample index start_sample to the end
#
# print(f"剪切后形状：{waveform.shape}")
#
# if waveform.shape[0] > 1:
#     waveform = torch.mean(waveform, dim=0, keepdim=True)
#
# waveform = waveform / waveform.abs().max()
#
# frames = data_processing.frame_audio(waveform)
# print(f"分帧后形状：{frames.shape}")
#
# frames_windowed = data_processing.windows(frames, 22050)
# print(f"加窗后形状：{frames_windowed.shape}")
# print(i/44100)

# m = nn.Linear(32, 2)
# input = torch.randn(1, 44, 32)
# output = m(input)
# print(output.size())

# --- Scratch: squeezing a trailing singleton dimension -----------------
# Random 5-D tensor; the last axis has size 1.
input_tensor = torch.randn(1, 42, 30, 40, 1)

# Drop the size-1 trailing axis -> shape [1, 42, 30, 40].
squeezed_tensor = input_tensor.squeeze(-1)

# (Kept for reference) swapping dims 0 and 1 would give [42, 1, 30, 40]:
# output_tensor = squeezed_tensor.permute(1, 0, 2, 3)
# print(squeezed_tensor.shape)

# --- Scratch: load V/A annotations and drop the final row --------------
label_path = "datasets/test/dmer_annotations(std).csv"
# NOTE(review): read_label presumably returns (per-track V/A matrices, ids)
# — confirm against data_processing.read_label.
va_matrix, music_id = data_processing.read_label(label_path)

# First track's annotations as a float32 tensor (assumed 2-D: rows are
# time steps — TODO confirm).
va_vals = torch.tensor(va_matrix[0], dtype=torch.float32)
print(va_vals)

# Discard the last row and show the result.
va_vals = va_vals[:-1, :]
print(va_vals)
