import os
import sys
import glob

import librosa
import librosa.display

import simplejpeg
import numpy as np

import torch
import torch.nn.functional as F
import torchvision as tv

import matplotlib.pyplot as plt

from PIL import Image
from IPython.display import Audio, display

sys.path.append(os.path.abspath(f'{os.getcwd()}/..'))

from model import MusicVideoCLIP
from utils.transforms import ToTensor1D


#############################################
# Dimension printouts exist in the forward pass
# (debug instrumentation; see icheck below)
#############################################


## Debug flag for dimension printouts
## (unused in this chunk — presumably read elsewhere; TODO confirm)

icheck=False

##

# Inference-only script: gradients are never needed.
torch.set_grad_enabled(False)

MODEL_FILENAME = 'AudioCLIP-Full-Training.pt'
# derived from ESResNeXt
SAMPLE_RATE = 44100
# derived from CLIP
IMAGE_SIZE = 224
IMAGE_MEAN = 0.48145466, 0.4578275, 0.40821073
IMAGE_STD = 0.26862954, 0.26130258, 0.27577711

# Label names; each corresponds to one subdirectory of ./music and ./video.
LABELS = ['exciting_145', 'exciting_146', 'exciting_147', 'exciting_158', 'exciting_189']

# Load the pretrained multimodal model from the assets directory.
mvclip = MusicVideoCLIP(pretrained=f'../assets/{MODEL_FILENAME}')

# Converts a raw 1-D numpy waveform into a torch tensor.
audio_transforms = ToTensor1D()

# Standard CLIP-style image preprocessing: to-tensor, resize, center crop, normalize.
image_transforms = tv.transforms.Compose([
    tv.transforms.ToTensor(),
#     tv.transforms.Resize(IMAGE_SIZE, interpolation=Image.BICUBIC),
    tv.transforms.Resize(IMAGE_SIZE, interpolation=Image.BILINEAR),
    tv.transforms.CenterCrop(IMAGE_SIZE),
    tv.transforms.Normalize(IMAGE_MEAN, IMAGE_STD)
])

def MusicLoading():
    """Load every WAV track under ./music/<label>/ for each label directory.

    Returns:
        list[list[tuple[np.ndarray, np.ndarray]]]: one inner list per label
        directory; each tuple holds (track_cut, pow_spec) where track_cut is
        the first second of audio zero-padded to SAMPLE_RATE samples and
        pow_spec is the log-power spectrogram produced by the model's
        trained audio head.
    """
    music = list()

    path2music = './music'

    for name in os.listdir(path2music):
        # Skip Jupyter artifacts; every other entry is treated as a label dir.
        if name == '.ipynb_checkpoints':
            continue
        print(name)
        paths_to_audio = glob.glob(path2music + '/' + name + '/*.wav')
        music_single = list()
        for path_to_audio in paths_to_audio:
            track, _ = librosa.load(path_to_audio, sr=SAMPLE_RATE, dtype=np.float32)

            # Compute the spectrogram using the trained audio head
            # (fbsp-layer of ESResNeXt) so the actual time-frequency
            # representation seen by the model is what gets stored.
            spec = mvclip.audio.spectrogram(torch.from_numpy(track.reshape(1, 1, -1)))
            # The layer emits interleaved real/imag floats; reinterpret the
            # buffer as complex64 before taking the power.
            spec = np.ascontiguousarray(spec.numpy()).view(np.complex64)
            pow_spec = 10 * np.log10(np.abs(spec) ** 2 + 1e-18).squeeze()

            # Keep exactly one second of audio, zero-padding shorter tracks.
            track_cut = np.zeros((SAMPLE_RATE,))
            length = min(len(track), SAMPLE_RATE)
            track_cut[:length] = track[:length]

            music_single.append((track_cut, pow_spec))
            print(len(music_single))

        music.append(music_single)

    print('len(music):', len(music))
    return music

def _decode_frames(paths):
    """Decode each JPEG file in *paths* into an ndarray (H x W x C)."""
    frames = list()
    for path in paths:
        with open(path, 'rb') as jpg:
            frames.append(simplejpeg.decode_jpeg(jpg.read()))
    return frames


def VideoLoading():
    """Load RGB and optical-flow JPEG frames under ./video/<label>/.

    Returns:
        list: [video_rgb, video_flow_x, video_flow_y], each a list with one
        entry per label directory, where each entry is a list of decoded
        frames (img*.jpg, flow_x*.jpg, flow_y*.jpg respectively).
    """
    video_rgb = list()
    video_flow_x = list()
    video_flow_y = list()

    path2video = './video'

    print("---------------MusicVideoCLIP.py VideoLoading----------------------")
    print(f"path2video:{os.listdir(path2video)}")
    for name in os.listdir(path2video):
        # Skip Jupyter artifacts; every other entry is treated as a label dir.
        if name == '.ipynb_checkpoints':
            continue

        print(f"---------name:{name}-------------")

        video_single = _decode_frames(glob.glob(path2video + '/' + name + '/img*.jpg'))
        print(f"video_rgb len:{len(video_single)}")
        video_rgb.append(video_single)

        print("----------------------MusicVideoCLIP.py VideoLoading------------------------- ")
        print(f"video_rgb len {len(video_rgb)}; len(video_rgb[0]) {len(video_rgb[0])}\n")

        video_flow_x_single = _decode_frames(glob.glob(path2video + '/' + name + '/flow_x*.jpg'))
        print(len(video_flow_x_single))
        video_flow_x.append(video_flow_x_single)

        # BUGFIX: the original iterated the flow_x path list here, so the
        # flow_y stream silently contained flow_x frames.
        video_flow_y_single = _decode_frames(glob.glob(path2video + '/' + name + '/flow_y*.jpg'))
        print(len(video_flow_y_single))
        video_flow_y.append(video_flow_y_single)

    print('len(video_rgb):', len(video_rgb))
    print('len(video_flow_x):', len(video_flow_x))
    print('len(video_flow_y):', len(video_flow_y))

    video = [video_rgb, video_flow_x, video_flow_y]
    #--------------------print-----------------------
    print("----------------------MusicVideoCLIP.py VideoLoading------------------------- ")
    print(f"video len {len(video)} :\n")


    return video

def Input_Preparation(music, video, text):
    """Convert the loaded music/video containers to batched tensors, in place.

    Args:
        music: list (one entry per label) of lists of (track, spec) pairs;
            mutated in place — each entry is replaced by a stacked tensor of
            shape [segments x 1 x duration].
        video: [rgb, flow_x, flow_y] streams, each a per-label list of frame
            lists; mutated in place the same way, producing
            [segments x channels x height x width] per label.
        text: optional list of label strings. Falls back to LABELS when None.

    Returns:
        (music, video, text): the mutated containers plus the textual input
        wrapped as [[label], ...] (tokenization happens inside the model).
    """
    # AudioCLIP handles raw audio on input: [batch x segments x channels x duration]
    for i in range(len(music)):
        music[i] = torch.stack(
            [audio_transforms(track.reshape(1, -1)) for track, _ in music[i]])

    # Standard channel-first frames: [batch x segments x channels x height x width]
    for stream in video:
        for i in range(len(stream)):
            stream[i] = torch.stack([image_transforms(image) for image in stream[i]])

    # BUGFIX: the original ignored the *text* argument and always rebuilt it
    # from LABELS; honor a caller-supplied list while keeping the old default.
    labels = LABELS if text is None else text
    text = [[label] for label in labels]

    return music, video, text

def main_pro():
    """End-to-end driver: load data, preprocess, run the model, print losses."""
    music = MusicLoading()
    video = VideoLoading()
    text = None
    music, video, text = Input_Preparation(music, video, text)

    # The model returns ((features...), (logit matrices...)), (losses...).
    # Only the four loss terms are consumed here, so the feature and logit
    # tuples are discarded unexpanded. NOTE(review): the original unpacking
    # bound text_features and music_features twice each (the later value
    # silently won) and never used any of those names.
    (_features, _logits), (loss, align_loss, total_loss, segment_loss) = mvclip(
        music=music, video=video, text=text)

    print('loss:', loss)
    print('align_loss:', align_loss)
    print('total_loss:', total_loss)
    print('segment_loss:', segment_loss)
    
# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main_pro()
