from collections import namedtuple

import cv2
import torch
# from utils.icbhi_preprocess import get_annotations, get_sound_samples, split_and_pad
import utils.icbhi as icbhi_utils
import utils.image as image_utils

import librosa
import soundfile
import os
import numpy as np
import random

import tqdm
import typing

from const import ProjectDir, AudioDatasetDir_Original, PatientFoldFile
import pandas as pd
import cmapy

# region -------------------------
# parameters for spectrograms
pr_sample_rate = 4000  # target sample rate (Hz) passed to librosa.load
pr_desired_length = 8  # NOTE(review): not referenced in this chunk — confirm use elsewhere
pr_n_mels = 64         # number of mel bands
pr_nfft = 256          # FFT window size (samples)
pr_hop = pr_nfft//2    # hop length: 50% window overlap
pr_f_min = 40          # mel filterbank lower edge (Hz)
pr_f_max = 2000        # mel filterbank upper edge (Hz)
# other parameters
save_detect_img = False  # when True, Audio.GeneratePic also writes a debug .jpg
# pr_min_audio_length = 2*pr_nfft
pr_min_audio_length = 5*pr_sample_rate   # shortest random crop: 5 s worth of samples
pr_max_audio_length = 10*pr_sample_rate  # longest random crop; original (Chinese) note said
                                         # "assume the longest breathing cycle is five
                                         # seconds" although the value is 10 s — verify

pr_use_fixed_audio_length = True          # fixed-length crop instead of random-length crop
pr_fixed_audio_length = 10*pr_sample_rate # fixed crop length: 10 s worth of samples

pr_use_background = False  # paste the spectrogram onto a background via image_utils
# endregion ----------------------

# One annotated breathing cycle taken from a recording: its sample range,
# the raw samples, the class label, the source file stem, and its index.
CycleAudio = namedtuple(
    "CycleAudio",
    ['start_index', 'end_index', 'audio_data', 'label', 'file_name', 'cycle_id'],
)
class Audio():
    """One ICBHI recording: raw samples, its annotated breathing cycles,
    and spectrogram/bounding-box generation for detection training."""

    def __init__(self, filename: str):
        """Load `<filename>.wav` and its tab-separated annotation file, then
        split the recording into labelled breathing cycles.

        filename: recording stem such as '226_1b1_Pl_sc_LittC2SE'
            (5 underscore-separated fields: patient, recording index,
            chest location, acquisition mode, device).
        """
        assert isinstance(filename, str)
        assert len(filename.strip().split('_')) == 5

        # region parameters
        self.FileName = ""      # e.g. 226_1b1_Pl_sc_LittC2SE
        self.AudioData = []     # 1-D float samples resampled to pr_sample_rate
        self.TotalLength = 0    # number of samples
        self.SampleRate = 0
        self.CycleList: typing.List[CycleAudio] = []
        # endregion

        self.FileName = filename
        # BUGFIX: the annotation/audio paths previously used a fixed literal
        # instead of `filename`, so every instance loaded the same files.
        recording_annotations = pd.read_csv(
            os.path.join(AudioDatasetDir_Original, f'{filename}.txt'),
            names = ['Start', 'End', 'Crackles', 'Wheezes'],
            delimiter= '\t'
        )

        self.AudioData, self.SampleRate = librosa.load(
            os.path.join(AudioDatasetDir_Original, f'{filename}.wav'),
            sr=pr_sample_rate
        )
        self.TotalLength = len(self.AudioData)
        for i in range(len(recording_annotations.index)):
            row = recording_annotations.loc[i]
            # clamp annotation times (seconds -> samples) to the actual length
            start = int(min(row['Start'] * self.SampleRate, self.TotalLength))
            end = int(min(row['End'] * self.SampleRate, self.TotalLength))
            crackles = int(row['Crackles'])
            wheezes = int(row['Wheezes'])

            audio_chunk = self.AudioData[start: end]
            self.CycleList.append(
                CycleAudio(
                    start_index = start,
                    end_index = end,
                    audio_data = audio_chunk,
                    label = icbhi_utils.GetLabel(crackles, wheezes),
                    file_name = filename,
                    cycle_id = i
                )
            )

    def SaveCycleAudio(self, save_dir: str):
        """Write every breathing cycle to `<save_dir>/<stem>[cycle_<i>].wav`."""
        for cycle in self.CycleList:
            soundfile.write(
                f'{save_dir}/{cycle.file_name}[cycle_{cycle.cycle_id}].wav',
                cycle.audio_data,
                self.SampleRate
            )

    def GeneratePic(self):
        """Crop a random audio window, render it as a colour mel spectrogram,
        and return (image, bboxes) where each bbox marks a breathing cycle
        fully contained inside the window.

        Returns:
            img: BGR uint8 image (H, W, 3), vertically flipped so low
                frequencies sit at the bottom.
            bboxes: list of dicts with pixel coords (start_x/y, end_x/y)
                plus the cycle's id and sample indices.
        """
        if pr_use_fixed_audio_length:
            # BUGFIX: this previously tested `pr_fixed_audio_length` (a
            # non-zero constant, always truthy), which made the random-length
            # branch below unreachable.
            audio_len = pr_fixed_audio_length
            if audio_len >= self.TotalLength:
                audio_len = self.TotalLength - 1
            start = int(random.random() * (self.TotalLength - audio_len - 1))
            end = start + audio_len
        else:
            # redraw random windows until one is no longer than the maximum
            audio_len = pr_max_audio_length + 1
            while audio_len > pr_max_audio_length:
                start = int(random.random() * (self.TotalLength - pr_min_audio_length))
                end = int(random.random() * (self.TotalLength - start - pr_min_audio_length)
                          + start + pr_min_audio_length)
                audio_len = end - start

        # renamed from `Audio`, which shadowed the class name
        audio_segment = self.AudioData[start: end]

        # convert audio signal to spectrogram
        S = librosa.feature.melspectrogram(
            y=audio_segment,
            sr=self.SampleRate,
            n_mels=pr_n_mels,
            fmin=pr_f_min,
            fmax=pr_f_max,
            n_fft=pr_nfft,
            hop_length=pr_hop
        )
        S = librosa.power_to_db(S, ref=np.max)
        # min-max normalise to [0, 255]; guard the flat-spectrogram case
        # (previously divided by zero when S.max() == S.min())
        s_range = S.max() - S.min()
        S = (S - S.min()) / s_range if s_range > 0 else np.zeros_like(S)
        S *= 255
        img = cv2.applyColorMap(S.astype(np.uint8), cmapy.cmap('magma'))
        height, width, _ = img.shape
        img = cv2.flip(img, 0)  # 0 = vertical flip (1 = horizontal, -1 = both)

        # region blank region clipping
        # endregion

        # detection boxes: one per cycle fully inside the cropped window,
        # sample indices mapped proportionally to pixel columns
        bboxes = []
        for cycle in self.CycleList:
            if cycle.start_index >= start and cycle.end_index <= end:
                bbox_start = int(1.0*width*(cycle.start_index - start)/audio_len)
                bbox_end = int(1.0*width*(cycle.end_index-start)/audio_len)
                bboxes.append({
                    'cycle_id': cycle.cycle_id,
                    'start_index': cycle.start_index,
                    'end_index': cycle.end_index,
                    'start_x': bbox_start,
                    'start_y': 0,
                    'end_x': bbox_end,
                    'end_y': height,
                })

        if pr_use_background:
            img, (delta_h, delta_w), (target_h, target_w) = image_utils.PutImageOnBackground(img, height, width)

            h_scale = (1.0*target_h)/height
            w_scale = (1.0*target_w)/width

            # boxes follow the image transform: rescale, then shift by the offset
            for box in bboxes:
                # resize
                box['start_x'], box['end_x'] = int(w_scale*box['start_x']), int(w_scale*box['end_x'])
                box['start_y'], box['end_y'] = int(h_scale*box['start_y']), int(h_scale*box['end_y'])

                # relocation
                box['start_x'], box['start_y'] = box['start_x'] + delta_w, box['start_y'] + delta_h
                box['end_x'], box['end_y'] = box['end_x'] + delta_w, box['end_y'] + delta_h

        if save_detect_img:
            cv2.imwrite(
                f'./{self.CycleList[0].file_name}-[{start}]-[{end}].jpg',
                cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            )

        return img, bboxes
         
class ICBHI_Dataset():
    """Index of every ICBHI recording with its device and patient fold,
    providing a patient-wise train/test split."""

    def __init__(self):
        """Scan `AudioDatasetDir_Original`, load each recording as an
        `Audio`, and read the patient->fold table from `PatientFoldFile`."""
        # self.exclude_patient_list = [
        #     '110', '112', '222']  # exclude samples with very long breathing cycles

        self.Audio_List: typing.Dict[str, Audio] = dict()
        # key is str like '102_1b1_Ar_sc_Meditron'
        # value is a 'Audio' class
        # region parameters
        self.DeviceDict = {
            "AKGC417L": 0,
            "Meditron": 1,
            "LittC2SE": 2,
            "Litt3200": 3,
        }
        self.FiletoDevice = dict()  # key '226_1b1_Pl_sc_LittC2SE' -> device_id from DeviceDict
        self.PatientNums = set()    # patient ids seen, e.g. '226'
        # PatientList_in_EveryDevice = [[],[],[],[]] # patient list per device (unused)
        self.PatientFoldDict = dict()  # {'226': fold}, fold in 0..4; drives the train/test split
        # endregion

        files = os.listdir(AudioDatasetDir_Original)
        for f in files:
            f_info = f.strip().split('.')[0].split('_')
            # f_info = ['226', '1b1', 'Pl', 'sc', 'LittC2SE'], f='226_1b1_Pl_sc_LittC2SE.wav'
            if len(f_info) != 5:
                print(f'find an unrelated file {f} in the [AudioDatasetDir]')
                continue

            # FIX: validate the device name BEFORE the expensive Audio load,
            # so a bad file no longer wastes a full decode before raising.
            if f_info[-1] not in self.DeviceDict.keys():
                # f_info[-1] is the device name
                raise Exception(f"unrecognized device name [{f_info[-1]}]")

            stem = '_'.join(f_info)
            self.Audio_List[stem] = Audio(stem)
            self.FiletoDevice[stem] = self.DeviceDict[f_info[-1]]
            self.PatientNums.add(f_info[0])

        # region patient_fold
        # get patients dict in current fold based on train flag
        # FIX: use a context manager so the fold file is always closed.
        with open(PatientFoldFile) as fold_file:  # data/patient_list_foldwise.txt
            patient_foldwise_lines = fold_file.read().splitlines()
        for line in patient_foldwise_lines:
            # split() instead of split(' '): tolerant of tabs / repeated spaces
            idx, fold_num = line.strip().split()
            self.PatientFoldDict[idx] = fold_num
        # endregion

    def DatasetDivision(self, test_fold: int = 4):
        """Split recordings into (train_dict, test_dict).

        Patients whose fold equals `test_fold` (0..4) go to the test dict;
        everyone else goes to the train dict. Raises AssertionError on an
        out-of-range fold.
        """
        assert isinstance(test_fold, int)
        assert test_fold in (0, 1, 2, 3, 4)

        train_dict, test_dict = dict(), dict()

        for filename in self.Audio_List:
            audio = self.Audio_List[filename]
            patient_num = filename.strip().split('_')[0]  # leading field is the patient id
            if int(self.PatientFoldDict[patient_num]) == int(test_fold):
                test_dict[filename] = audio
            else:
                train_dict[filename] = audio

        return train_dict, test_dict
            
class Dataset():
    """Detection-style dataset over a dict of Audio recordings.

    Each item is a (image, target) pair where the image is a random
    spectrogram crop and the target holds breath-cycle bounding boxes.
    """

    def __init__(self, audio_dict: typing.Dict[str, Audio], transforms=None) -> None:
        assert isinstance(audio_dict, dict)
        assert len(audio_dict) > 0
        assert all(isinstance(v, Audio) for v in audio_dict.values())

        self.AudioDict = audio_dict
        self.DictKeys = list(self.AudioDict.keys())
        self.transforms = transforms

    def __getitem__(self, idx):
        recording = self.AudioDict[self.DictKeys[idx]]

        # re-sample crops until at least one cycle box lands inside the window
        img, bboxes = recording.GeneratePic()
        while not bboxes:
            img, bboxes = recording.GeneratePic()

        img = (1.0 * img) / 255.0

        final_boxes = [
            [b['start_x'], b['start_y'], b['end_x'], b['end_y']] for b in bboxes
        ]
        labels = [1] * len(bboxes)  # single foreground class: breath cycle

        final_boxes = torch.as_tensor(final_boxes, dtype=torch.float32)
        area = (final_boxes[:, 3] - final_boxes[:, 1]) * (final_boxes[:, 2] - final_boxes[:, 0])
        # no crowd instances
        iscrowd = torch.zeros((final_boxes.shape[0],), dtype=torch.int64)
        labels = torch.as_tensor(labels, dtype=torch.int64)

        target = {
            "boxes": final_boxes,
            "labels": labels,
            "area": area,
            "iscrowd": iscrowd
        }

        if self.transforms:
            sample = self.transforms(
                image = img,
                bboxes = target['boxes'],
                labels = labels
            )
            img = torch.Tensor(sample['image']).float()
            target['boxes'] = torch.Tensor(sample['bboxes']).float()

        img = torch.Tensor(img).float()
        target['boxes'] = torch.Tensor(target['boxes']).float()
        # swap axes 0 and 2: (H, W, 3) -> (3, W, H), e.g. [600, 800, 3] -> [3, 800, 600]
        img = img.transpose(0, 2)
        return img, target

    def __len__(self):
        return len(self.DictKeys)


if __name__ == '__main__':
    # smoke test: render a spectrogram + boxes for a few sample recordings
    for stem in (
        '226_1b1_Pl_sc_LittC2SE',
        '101_1b1_Al_sc_Meditron',
        '101_1b1_Pr_sc_Meditron',
        '102_1b1_Ar_sc_Meditron',
    ):
        Audio(stem).GeneratePic()
    
