"""
 @Author: Envision
 @Github: ElapsingDreams
 @Gitee: ElapsingDreams
 @Email: None
 @FileName: main.py
 @DateTime: 2024/7/11 20:37
 @SoftWare: PyCharm
"""

import os
import pathlib
from typing import Any, Union

import soundfile as sf
import librosa
import numpy as np
from Musicreater import MusicSequence, MidiConvert, MM_INSTRUMENT_DEVIATION_TABLE

from numpy import ndarray

PATH = pathlib.Path(__file__)  # location of this source file
# Built-in ("vanilla") assets: .wav note samples shipped next to this module.
vanilla_ASSETS = PATH.parent / "assets" / "wav"
vanilla_pack_ID = "vanilla"
# Resource-pack search order; only the vanilla pack exists for now.
resource_pack_list = [vanilla_pack_ID]


# 存单音节PCM数据的类
class NoteResources:
    """Shared store for per-note PCM sample data.

    Storage layout:
        raw store       -- ``{package_id: {sound_id: ndarray}}``
        converted store -- ``{package_id: {sound_id: {pitch: ndarray}}}``

    During the testing phase the ``vanilla`` pack is injected directly as
    the default; no real loading is implemented yet.

    NOTE: these are *class* attributes, so every instance (and every
    subclass instance) shares the same caches.
    """

    res_raw_data: dict = {}  # raw, unconverted samples
    # res_raw_index: list = []  # conversion bookkeeping, key: [88] -- kept for later optimisation
    res_data: dict = {}  # samples after pitch/length conversion
    res_data_sr: int  # sample rate of converted data (matches the output rate)

    def load_res(self):
        """Load a resource pack into the raw store (not implemented yet)."""
        pass

    # TODO: full create/read/update/delete API for packs


class MusicPreview(NoteResources):
    """Render a Musicreater note sequence into a preview PCM waveform.

    Inherits the shared (class-level) sample caches from NoteResources.
    Settings fixed in __init__:
        mode   -- strategy used by res_shift to pitch/length-convert samples
        oc     -- output channel count: 1 = mono, 2 = stereo
        out_sr -- output sample rate in Hz
        gvm    -- global volume mode: 0 = peak-normalize, 1 = hard clip
    """

    # Thin wrappers over librosa effects.
    # NOTE(review): **kwargs is accepted but never forwarded to librosa.

    @staticmethod
    def res_pitch_shift(y: ndarray, *, sr: float, n_steps: float, **kwargs: Any):
        """Pitch-shift waveform *y* (sampled at *sr*) by *n_steps* semitones."""
        return librosa.effects.pitch_shift(y, sr=sr, n_steps=n_steps)

    @staticmethod
    def res_time_stretch(y: ndarray, *, rate: float, **kwargs: Any):
        """Time-stretch *y* by *rate* (>1 shortens, <1 lengthens)."""
        return librosa.effects.time_stretch(y, rate=rate)

    @staticmethod
    def res_resample(
        y: ndarray, *, orig_sr: float, target_sr: float, fix: bool = True, **kwargs: Any
    ):
        """Resample *y* from *orig_sr* to *target_sr* Hz."""
        return librosa.resample(y, orig_sr=orig_sr, target_sr=target_sr, fix=fix)

    def __init__(self, load_all: bool = False):
        # NOTE(review): load_all is accepted but currently unused.
        self.mode = 1
        self.oc = 1
        self.out_sr = 44100
        self.gvm = 1
        # NOTE(review): hard-coded developer output directory -- should be
        # made configurable before release.
        self.out_path = r"E:\Work2024\test-midi\newMP"
        # For now, unconditionally load the default (vanilla) resource pack.
        self.__init_raw_res__()
        print(111111)  # leftover debug marker

    def res_shift(
        self,
        packageID: str,
        soundID: str,
        percussive: bool,
        pitch: int,
        duration_tick: int,
        # rate: int
    ):
        """Convert one raw sample to the requested pitch/length and cache it.

        Writes the result into ``self.res_data[packageID][soundID][pitch]``;
        the nested dicts must already exist (get_res guarantees that).

        packageID     -- resource pack the raw sample comes from
        soundID       -- sound name within the pack
        percussive    -- True: resample only, never pitch-shift
        pitch         -- semitone offset applied to the raw sample (also the
                         cache key)
        duration_tick -- note length in game ticks; the /20 below suggests
                         20 ticks per second -- TODO confirm
        """
        y_orig, sr_orig = self.res_raw_data[packageID][soundID]
        if not percussive:
            if self.mode == 1:
                # Pitch shift, time stretch, then resample ("MC" method).
                self.res_data[packageID][soundID][pitch] = librosa.resample(
                    librosa.effects.time_stretch(
                        librosa.effects.pitch_shift(y_orig, sr=sr_orig, n_steps=pitch),
                        rate=2 ** (pitch / 12),
                    ),
                    orig_sr=sr_orig,
                    target_sr=self.out_sr,
                    fix=False,
                )
            elif self.mode == 0:
                # Pitch shift then resample (no time stretch).
                self.res_data[packageID][soundID][pitch] = librosa.resample(
                    librosa.effects.pitch_shift(y_orig, sr=sr_orig, n_steps=pitch),
                    orig_sr=sr_orig,
                    target_sr=self.out_sr,
                    fix=False,
                )
            elif self.mode == 4:

                # Pitch shift, stretch to the note's duration, then resample
                # ("MIDI-FFT" method).
                # NOTE(review): len(y_orig[0]) assumes the raw sample is
                # shaped (channels, n) when oc == 2 -- confirm against loader.
                if self.oc == 2:
                    rate = duration_tick / 20 / (len(y_orig[0]) / sr_orig)
                else:
                    rate = duration_tick / 20 / (len(y_orig) / sr_orig)
                rate = rate if rate != 0 else 1  # zero-length notes: no stretch
                self.res_data[packageID][soundID][pitch] = librosa.resample(
                    librosa.effects.time_stretch(
                        librosa.effects.pitch_shift(y_orig, sr=sr_orig, n_steps=pitch),
                        rate=rate,
                    ),
                    orig_sr=sr_orig,
                    target_sr=self.out_sr,
                    fix=False,
                )
            elif self.mode == 2:
                # Pitch shift, cut to the note's duration, then resample
                # ("MIDI-cut" method).
                # NOTE(review): the conditional below keeps the *larger* of the
                # target sample count and the sample length, so the slice never
                # actually truncates anything; `<` was probably intended -- confirm.
                if self.oc == 2:
                    deal = librosa.effects.pitch_shift(
                        y_orig, sr=sr_orig, n_steps=pitch
                    )[
                        ...,
                        : (
                            int(duration_tick / 20 * sr_orig)
                            if duration_tick / 20 * sr_orig > len(y_orig[0])
                            else len(y_orig[0])
                        ),
                    ]
                else:
                    deal = librosa.effects.pitch_shift(
                        y_orig, sr=sr_orig, n_steps=pitch
                    )[
                        : (
                            int(duration_tick / 20 * sr_orig)
                            if duration_tick / 20 * sr_orig > len(y_orig)
                            else len(y_orig)
                        )
                    ]
                self.res_data[packageID][soundID][pitch] = librosa.resample(
                    deal, orig_sr=sr_orig, target_sr=self.out_sr, fix=False
                )
        else:
            # if self.mode == 1:
            # Percussive: resample only, never pitch-shift.
            print(">>", packageID, soundID, pitch)
            self.res_data[packageID][soundID][pitch] = librosa.resample(
                y_orig, orig_sr=sr_orig, target_sr=self.out_sr, fix=False
            )
            """elif self.mode == 0:
                # 重采样, 不变调, 衰弱
                self.cache_dict[raw_name] = librosa_resample(y_orig,
                                                             orig_sr=sr_orig,
                                                             target_sr=self.out_sr,
                                                             fix=False)"""

    def __init_raw_res__(self, *resources_pack):  # testing phase: force-load the vanilla pack
        """Load every .wav under the vanilla assets dir into the raw store.

        *resources_pack* is currently ignored.  Samples are loaded at their
        native sample rate (sr=None), keyed by file stem.
        """
        # Principle: unpack (should become a zip-unpack step -- guard against
        # smuggled payloads/malware), compute into a temp dict, write into the
        # main dict, then drop the temp dict.
        print(111)  # leftover debug marker
        vanilla_res_raw_data = {}
        for file in vanilla_ASSETS.iterdir():
            if file.is_file():
                vanilla_res_raw_data[file.stem] = librosa.load(file, sr=None)
        self.res_raw_data[vanilla_pack_ID] = vanilla_res_raw_data
        del vanilla_res_raw_data

    # Resource accessor; tied to the output sample rate -- changing the rate
    # implies a full reload.  Returns cached data when present, otherwise
    # converts, caches, and returns.
    def get_res(
        self,
        soundID: str,
        pitch: int,
        percussive: bool,
        duration_tick: int,
    ):
        """Return the converted PCM for (soundID, pitch), converting lazily.

        Missing cache levels (pack / sound / pitch) are created on demand via
        res_shift.  NOTE(review): every branch returns on the first pack in
        resource_pack_list, so later packs are never consulted as fallback.
        """
        print(soundID, pitch, percussive, duration_tick)  # debug trace

        for packageID in resource_pack_list:
            if packageID in self.res_data:
                if soundID in self.res_data[packageID]:
                    if pitch in self.res_data[packageID][soundID]:
                        return self.res_data[packageID][soundID][pitch]
                    else:
                        self.res_shift(
                            packageID=packageID,
                            soundID=soundID,
                            percussive=percussive,
                            pitch=pitch,
                            duration_tick=duration_tick,
                        )
                        # print(2, self.res_data[packageID][soundID][pitch])
                        return self.res_data[packageID][soundID][pitch]
                else:
                    self.res_data[packageID][soundID] = {}
                    self.res_shift(
                        packageID=packageID,
                        soundID=soundID,
                        percussive=percussive,
                        pitch=pitch,
                        duration_tick=duration_tick,
                    )
                    # print(2, self.res_data[packageID][soundID][pitch])
                    return self.res_data[packageID][soundID][pitch]
            else:
                self.res_data[packageID] = {}
                self.res_data[packageID][soundID] = {}
                self.res_shift(
                    packageID=packageID,
                    soundID=soundID,
                    percussive=percussive,
                    pitch=pitch,
                    duration_tick=duration_tick,
                )
                # print(2, self.res_data[packageID][soundID][pitch])
                return self.res_data[packageID][soundID][pitch]

    # Flatten every channel into a single channel.
    @staticmethod
    def channels_merge(MusicSeq: MusicSequence) -> MusicSequence:
        """Merge all channels of *MusicSeq* into channel 0, sorted by start tick.

        Mutates *MusicSeq* in place and returns it.
        """
        all_channels = [
            note for sublist in MusicSeq.channels.values() for note in sublist
        ]
        sorted_channels = sorted(all_channels, key=lambda note: note.start_tick)
        MusicSeq.channels = {0: sorted_channels}
        return MusicSeq

    def convert(self, MusicSeq: Union[MusicSequence, MidiConvert]):
        """Mix every note of *MusicSeq* into one waveform at self.out_sr.

        Returns a float32 ndarray shaped (n, 1) for mono (oc == 1) or (n, 2)
        for stereo (oc == 2).  The buffer is sized from the latest note end
        position; sizing also warms the sample cache via get_res.
        """
        MusicSeq_one_channel = self.channels_merge(MusicSeq)

        if self.oc == 1:

            def overlay(seg_overlay: np.ndarray, pos_tick: int):
                # Additively mix a mono segment at pos_tick (0.05 s per tick).
                # out_sr is bound below, before overlay is first called.
                pos_ = int(out_sr * pos_tick * 0.05)
                wav_model[pos_ : seg_overlay.size + pos_] += seg_overlay

            # print(0)
            wav_model = np.zeros(
                int(
                    max(
                        [
                            (
                                i.start_tick * 0.05 * self.out_sr
                                + len(
                                    self.get_res(
                                        soundID=i.sound_name,
                                        pitch=i.note_pitch
                                        - 60
                                        - MM_INSTRUMENT_DEVIATION_TABLE.get(
                                            i.sound_name, 6
                                        ),
                                        percussive=i.percussive,
                                        duration_tick=i.duration,
                                    )
                                )
                            )
                            for i in MusicSeq_one_channel.channels[0]
                        ]
                    )
                ),
                dtype=np.float32,
            )

        elif self.oc == 2:

            def overlay(seg_overlay: np.ndarray, pos_tick: int):
                # Stereo variant: segments are assumed shaped (2, n) -- TODO confirm.
                pos_ = int(out_sr * pos_tick * 0.05)
                wav_model[..., pos_ : len(seg_overlay[0]) + pos_] += seg_overlay

            wav_model = np.zeros(
                (
                    2,
                    int(
                        max(
                            [
                                (
                                    i.start_tick * 0.05 * self.out_sr
                                    + len(
                                        self.get_res(
                                            soundID=i.sound_name,
                                            pitch=i.note_pitch
                                            - 60
                                            - MM_INSTRUMENT_DEVIATION_TABLE.get(
                                                i.sound_name, 6
                                            ),
                                            percussive=i.percussive,
                                            duration_tick=i.duration,
                                        )
                                    )
                                )
                                for i in MusicSeq_one_channel.channels[0]
                            ]
                        )
                    ),
                ),
                dtype=np.float32,
            )

        else:
            raise ValueError("Illegal overlay_mode")

        out_sr = self.out_sr

        for Note in MusicSeq_one_channel.channels[0]:
            print(
                ":",
                Note.sound_name,
                Note.note_pitch
                - 60
                - MM_INSTRUMENT_DEVIATION_TABLE.get(Note.sound_name, 6),
                Note.percussive,
                Note.duration,
            )
            for pack in resource_pack_list:
                if not Note.percussive:
                    overlay(
                        self.res_data[pack][Note.sound_name][
                            Note.note_pitch
                            - 60
                            - MM_INSTRUMENT_DEVIATION_TABLE.get(Note.sound_name, 6)
                        ]
                        * Note.velocity
                        / 127,
                        Note.start_tick,
                    )
                else:
                    # NOTE(review): this key (note_pitch - 60) differs from the
                    # cache key used during buffer sizing above, which also
                    # subtracts the deviation-table entry -- a KeyError is
                    # possible unless percussive deviations are 0; confirm.
                    overlay(
                        self.res_data[pack][Note.sound_name][Note.note_pitch - 60]
                        * Note.velocity
                        / 127,
                        Note.start_tick,
                    )
                break  # only the first resource pack is ever used

        if self.gvm == 0:
            # Normalize to peak amplitude 1 (easy on the ears).
            max_val = np.max(np.abs(wav_model))
            if not max_val == 0:
                wav_model = wav_model / max_val
        elif self.gvm == 1:
            # Hard clip to [-1, 1].
            wav_model[wav_model > 1] = 1
            wav_model[wav_model < -1] = -1
        if self.oc == 2:
            return wav_model.T
        else:
            return wav_model[:, np.newaxis]

    def stream(self, MusicSeq: Union[MusicSequence, MidiConvert]):
        """Streaming playback -- placeholder, not implemented yet."""
        pass

    # debug helper
    def to_wav_file(
        self, MsSeq: Union[MusicSequence, MidiConvert], out_file_name="out"
    ):
        """Render *MsSeq* and write it to <out_path>/<out_file_name>.wav.

        Returns the path of the file written.
        """
        path = os.path.join(
            self.out_path,
            out_file_name + ".wav",
        )
        sf.write(path, self.convert(MsSeq), samplerate=self.out_sr, format="wav")
        return path
