"""
    jupylet/audio/sound.py
    
    Copyright (c) 2020, Nir Aides - nir@winpdb.org

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    1. Redistributions of source code must retain the above copyright notice, this
       list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright notice,
       this list of conditions and the following disclaimer in the documentation
       and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
    ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""


import functools
import inspect
import logging
import weakref
import random
import copy
import math
import time
import sys
import os

import scipy.signal

import numpy as np

from ..utils import settable, Dict, trimmed_traceback

from ..audio import FPS, MIDDLE_C, DEFAULT_AMP, t2frames, frames2t   
from ..audio import get_time, get_bpm, get_note_value

from .note import note2key, key2note
from .device import add_sound, get_schedule
from .device import set_device_latency, get_device_latency_ms


logger = logging.getLogger(__name__)


# When True, Sound.__call__() keeps a short history of output buffers in
# self._al for debugging (see the Sound._a1 property).
DEBUG = False

# Small value used to guard floating point comparisons and ranges.
EPSILON = 1e-6


def get_plot(*args, grid=True, figsize=(10, 5), xlim=None, ylim=None, **kwargs):
    """Render a matplotlib line plot of the given data into a PIL image.

    Args:
        *args: Positional arguments forwarded to ``plt.plot()``.
        grid (bool): Whether to draw a background grid.
        figsize (tuple): Figure size in inches.
        xlim (tuple, optional): X-axis limits.
        ylim (tuple, optional): Y-axis limits.
        **kwargs: Keyword arguments forwarded to ``plt.plot()``.

    Returns:
        PIL.Image.Image: The rendered plot image.
    """
    import matplotlib.pyplot as plt
    import PIL.Image
    import io

    plt.figure(figsize=figsize)
    plt.grid(grid)

    if xlim:
        plt.xlim(*xlim)

    if ylim:
        plt.ylim(*ylim)

    plt.plot(*args, **kwargs)

    # Render the figure into an in-memory PNG buffer and hand it to PIL.
    png = io.BytesIO()
    plt.savefig(png, format='PNG', bbox_inches='tight')
    plt.close()

    return PIL.Image.open(png)


def compute_running_mean(x, n=1024):
    """Compute the running mean of *x* over a window of *n* samples.

    The output has the same length as the input; near the edges the mean
    is taken over only the part of the window that overlaps the signal.
    """
    after = n // 2
    before = n - after

    # Cumulative sum of the padded signal gives windowed sums by
    # differencing two offsets of the cumsum array.
    padded = np.pad(x, (before, after))
    vsums = np.cumsum(padded)

    # Same trick on an indicator array counts how many valid samples
    # fall inside each window (fewer near the edges).
    counts = np.pad(np.ones(len(x)), (before, after))
    csums = np.cumsum(counts)

    return (vsums[n:] - vsums[:-n]) / (csums[n:] - csums[:-n])


def get_power_spectrum_plot(a0, sampling_frequency=FPS, window=None, **kwargs):
    """Plot the power spectrum of signal *a0* and return it as a PIL image.

    Args:
        a0 (ndarray): Input signal.
        sampling_frequency (float): Sampling frequency of the signal.
        window (int, optional): Size of the running-mean smoothing window;
            1 disables smoothing. Defaults to ``len(a0) // 4096``.
        **kwargs: Forwarded to get_plot().

    Returns:
        PIL.Image.Image: The rendered power spectrum plot.
    """
    ft = np.fft.fft(a0.squeeze())
    sa = np.square(np.abs(ft))
    ps = 10 * np.log10(sa)
    
    ff = np.fft.fftfreq(len(a0), 1/sampling_frequency)

    if window is None:
        # Heuristic smoothing window. Clamp to at least 1 - for signals
        # shorter than 4096 samples the integer division yields 0, which
        # would crash compute_running_mean() below with an empty slice.
        window = max(1, len(a0) // 4096)

    if window == 1:
        # No smoothing - plot the raw power spectrum.
        return get_plot(ff, ps, **kwargs)
    
    rm = compute_running_mean(ps, window)
    
    return get_plot(ff, rm, **kwargs)


#
# Played sounds are scheduled a little into the future so as to start at a
# particular planned moment in time rather than at the arbitrary time of the
# start of the next sound buffer.
# 播放的声音被安排在未来的某个时间点，以便在特定的计划时刻开始，
# 而不是在下一个声音缓冲区开始的任意时间。
#
_latency = get_device_latency_ms() / 1000


def set_latency(latency='high'):
    """Set the audio device latency profile and update the scheduling latency.

    Args:
        latency (str): One of 'high', 'low', 'lowest', or 'minimal'.

    Raises:
        ValueError: If *latency* is not a recognized profile name.
    """
    # Validate with an explicit exception rather than assert, which is
    # silently stripped when Python runs with optimizations (-O).
    if latency not in ('high', 'low', 'lowest', 'minimal'):
        raise ValueError(
            "latency should be one of 'high', 'low', 'lowest', 'minimal', "
            "got %r" % (latency,)
        )

    global _latency

    set_device_latency(latency)
    _latency = get_device_latency_ms(latency) / 1000


def get_latency_ms():
    """Return the current sound scheduling latency in milliseconds."""
    return _latency * 1000

    
def _expand_channels(a0, channels):

    if len(a0.shape) == 1:
        a0 = np.expand_dims(a0, -1)

    if a0.shape[1] < channels:
        a0 = a0.repeat(channels, 1)

    if a0.shape[1] > channels:
        a0 = a0[:,:channels]

    return a0


#
# A helper function to amplify and pan (balance) audio between the left and
# right channels.
# 辅助功能，用于放大和平移（平衡）左右声道之间的音频。
#

#@functools.lru_cache(maxsize=1024)
def _ampan(amp, pan):
    return np.array([1 - pan, 1 + pan]) * (amp / 2)


# Precomputed logarithmic constants for fast key <-> frequency conversion:
# freq = exp(key * _LOG_CC + _LOG_CX), i.e. an equal-tempered scale with
# 12 semitones per octave anchored so that key 60 maps to middle C.
_LOG_C4 = math.log(MIDDLE_C)
_LOG_CC = math.log(2) / 12
_LOG_CX = _LOG_C4 - 60 * _LOG_CC


def key2freq(key):
    """Convert a key in semitone units (60 -> middle C) to frequency in Hz.

    Accepts a scalar or a numpy array.
    """
    exp = np.exp if isinstance(key, np.ndarray) else math.exp
    return exp(key * _LOG_CC + _LOG_CX)
    
 
def freq2key(freq):
    """Convert a frequency in Hz to a key in semitone units (60 -> middle C).

    Accepts a scalar or a numpy array.
    """
    log = np.log if isinstance(freq, np.ndarray) else math.log
    return (log(freq) - _LOG_CX) / _LOG_CC
        

class Sound(object):
    """Base class for all other sound classes, including audio samples,
    oscillators and effects.

    The jupylet Sound class is the basic element with which a sound
    processing computation graph is defined as a hierarchy of sound
    classes; for example a synthesizer with a reverb effect and an
    allpass filter.

    Audio building blocks and components such as oscillators and effects
    typically inherit from the Sound class, while instruments such as
    synthesizers should typically inherit from the :class:`GatedSound`
    class instead.

    Args:
        freq (float): Default frequency.
        amp (float): Output amplitude - a value between 0 and 1.
        pan (float): Balance value between the left (-1) and right (1)
            output channels.
        shared (bool): Designate the sound object as shared by multiple
            other sound instances.
    """
    def __init__(self, freq=MIDDLE_C, amp=DEFAULT_AMP, pan=0., shared=False):
        
        self.freq = freq
        
        # MIDI attribute corresponding to velocity of pressed key,
        # between 0 and 128.
        self.velocity = 64

        # Amplitude (or volume) between 0 and 1.
        self.amp = amp
        
        # Left-right audio balance - a value between -1 and 1.
        self.pan = pan
        
        # The number of frames the forward() method is expected to return.
        self.frames = 1024

        # The frame counter.
        self.index = 0
        
        self._buffer = None

        # Indicate if sound is shared by multiple sounds. For example
        # an effect may be shared by multiple sounds. This affects how it 
        # should react to reset() calls.
        self._shared = shared
        
        # A somewhat brittle mechanism to force a note to keep "playing"
        # for a few seconds after it's done, so a shared effect may still
        # be applied to it (for example in the case of a long reverb).
        self._done = 0
        self._done_decay = 5 * FPS

        # The latest output arrays of the forward() function.
        self._a0 = None
        self._ac = None
        self._al = []

        # Weak references to live polyphonic copies created by play_poly().
        self._polys = []

        # Tuple of effects applied to this sound's output (see set_effects).
        self._effects = ()

        # Lazily computed set of forward() argument names.
        self._fargs = None  

        # Trimmed traceback of the last exception raised by forward(), if any.
        self._error = None

    def _rset(self, key, value, force=False):
        """Recursively, but lazily, set an attribute to the given value on
        all child sounds.

        For example, this function is used to set the required number of
        frames on an entire tree of sound objects before calling the
        forward() method.
        """
        if force or self.__dict__.get(key, '__NONE__') != value:
            for s in self.__dict__.values():
                if isinstance(s, Sound):
                    s._rset(key, value, force=True)
            
        self.__dict__[key] = value
    
    def _ccall(self, name, *args, **kwargs):
        """Recursively call the given method on every sound object in the
        sound tree.
        """
        for s in self.__dict__.values():
            if isinstance(s, Sound):
                getattr(s, name)(*args, **kwargs)
                
    def play_release(self, stop=True, **kwargs):
        """Stop playing this sound and all of its polyphonic copies."""

        polys = []

        while self._polys:
            wr = self._polys.pop(-1)
            ps = wr()
            if ps is not None:
                ps.play_release(stop=stop, **kwargs)
                polys.append(wr)

        # Keep at most 512 weak references to copies that are still alive.
        for wr in polys[:512]:
            self._polys.append(wr)

        if stop:
            self._done = self.index or 1
        
    def play_poly(self, note=None, **kwargs):
        """Play the given note polyphonically.

        This function plays the note on a new copy of self. If the sound
        is already playing, the new note will join it.
        
        Args:
            note (float): Note to play in semitone units, where 60
                corresponds to middle C.
            **kwargs: Instrument attributes to modify.
        
        Returns:
            Sound: Sound object representing the newly played note.
        """
        o = self.copy(track=True)
        o.play(note, **kwargs)

        return o

    def play(self, note=None, **kwargs):
        """Play the given note monophonically.

        If the sound is already playing, it will be reset.
        
        Args:
            note (float): Note to play in semitone units, where 60
                corresponds to middle C.
            **kwargs: Instrument attributes to modify.
        """
        #logger.info('Enter Sample.play(note=%r, **kwargs=%r).', note, kwargs)
        
        self.reset(self._shared)
        
        if note is not None:
            self.note = note

        # This mechanism allows the play() function to modify any of the 
        # sound properties before playing.
        self.set(**kwargs)

        # Send sound to audio device for playing. 
        add_sound(self)

    def set(self, **kwargs):
        """Set the given settable attributes on self and return self."""

        for k, v in kwargs.items():
            if settable(self, k):
                setattr(self, k, v)  
    
        return self

    def copy(self, track=False):
        """Create a copy of the sound object.

        This function is a mix of shallow and deep copy. It deep copies
        the entire tree of child sound objects, while shallow copying the
        other attributes of each sound object in the tree. The motivation
        is to avoid creating unnecessary copies of numpy buffers.
        
        However, this means that a reset() call should follow on the newly
        copied sound to prevent unintentionally shared buffers.

        Returns:
            Sound object: The newly copied sound object.
        """
        o = copy.copy(self)

        for k, v in o.__dict__.items():
            if isinstance(v, Sound) and not v._shared:
                setattr(o, k, v.copy())

        if track:
            self._polys.append(weakref.ref(o))
          
        o._polys = []

        return o
       
    def reset(self, shared=False):
        """Reset playback state, recursively, in preparation for replay."""
        
        # TODO: think how to handle reset of shared index.
        self.index = 0

        # When a sound (effect) is shared by multiple other sounds, its state
        # should not be reset in the usual way. However this is probably not 
        # correctly implemented. For example, the self.index should probably 
        # not reset either - need to think about this more.
        if not shared:
            self._buffer = None 

        self._done = 0
        self._a0 = None
        self._ac = None
        self._al = []

        self._error = None

        self._ccall('reset', shared=shared or self._shared)
        
    @property
    def done(self):
        """bool: True once the sound may be considered done and discarded."""
        
        # The done() function is used by the sound device to determine when
        # a playing sound may be considered done and discarded.
        # There are various criteria and the logic is probably brittle and 
        # needs to be considered again and simplified.
        #
        # The general idea is to consider a sound done if after it has played
        # for a while, it becomes nearly zero for an entire output buffer
        # length. 
        #
        # However, in the case effects are applied to the sound, it may be
        # needed around for a while longer even if its output has become 
        # zero. For example in the case of a reverb effect.

        if self._error:
            return True

        # Give the sound at least 1/8 of a second before considering it done.
        if self.index < FPS / 8:
            return False
        
        if self._a0 is None or self._ac is None:
            return False
        
        if not self._done:
            # Mark the sound done once an entire buffer is nearly silent,
            # and zero out its output to avoid an audible residue.
            if np.abs(self._a0).max() < 1e-4:
                self._done = self.index or 1
                self._a0 = self._a0 * 0
                self._ac = self._ac * 0

            return False

        if not self.get_effects():
            return True
            
        # Keep the sound around a while longer so effects applied to it
        # (e.g. a long reverb tail) may still play out.
        if self.index - self._done < self._done_decay:
            return False

        return True
        
    #
    # This is the function called by the sound device to compute the next
    # *frames* to be sent to the sound device for playing.
    #

    def consume(self, frames, channels=2, raw=False, *args, **kwargs):
        """Compute the next *frames* frames of output, amplified and panned.

        Args:
            frames (int): Number of frames to compute.
            channels (int): Number of output channels.
            raw (bool): If True, return the raw forward() output without
                channel expansion, amplification or panning.

        Returns:
            ndarray: Output buffer of shape (frames, channels).
        """
        
        self._rset('frames', frames)
        
        a0 = self(*args, **kwargs)

        if raw:
            return a0

        # The following mechanism is a brittle way to minimize the
        # computation time in case the sound is done but is kept around for 
        # an effect applied to it.

        if not self._done or self._ac is None or len(self._ac) != self.frames:

            a0 = _expand_channels(a0, channels)
            
            if channels == 2:
                self._ac = a0 * _ampan(self.velocity / 128 * self.amp, self.pan)
            else:
                self._ac = a0 * (self.velocity / 128 * self.amp)

        return self._ac

    def __call__(self, *args, **kwargs):
        """Compute and return the next buffer of frames via forward()."""
        
        assert getattr(self, 'frames', None) is not None, 'You must call super() from the sound class constructor'
        
        # Keyword arguments that name sound attributes rather than forward()
        # parameters are consumed here and set on the sound object instead.
        for k in list(kwargs.keys()):
            if hasattr(self, k) and k not in self._get_forward_args():
                if k == 'frames':
                    self._rset('frames', kwargs.pop('frames'))
                else:        
                    setattr(self, k, kwargs.pop(k))

        if not self._done or self._a0 is None or len(self._a0) != self.frames:
            try:
                self._a0 = self.forward(*args, **kwargs)
            except:
                # Record the error and substitute silence so a failing sound
                # does not take down the audio loop.
                self._error = trimmed_traceback()
                logger.error(self._error)
                self._a0 = np.zeros((self.frames, 1))

        if isinstance(self._a0, np.ndarray):
            self.index += len(self._a0)
        
        if DEBUG:
            # Keep a bounded history of recent output buffers (see _a1).
            self._al = self._al[-255:] + [self._a0]

        return self._a0

    def _get_forward_args(self):
        """Return the cached set of forward() positional argument names."""
        if self._fargs is None:
            self._fargs = set(inspect.getfullargspec(self.forward).args)
        return self._fargs

    # This is for debugging.
    @property
    def _a1(self):
        """ndarray: Concatenation of recently recorded output buffers (DEBUG)."""
        return np.concatenate(self._al)

    #
    # The pytorch style forward function to compute the next sound buffer.
    #
    def forward(self, *args, **kwargs):
        return np.zeros((self.frames,))
    
    @property
    def key(self):
        """float: Get or set the current sound frequency in semitone units,
        where 60 corresponds to middle C."""
        return freq2key(self.freq)
    
    @key.setter
    def key(self, value):
        self.freq = key2freq(value)
        
    @property
    def note(self):
        """str: Get or set the note closest to the current sound frequency,
        as a string."""
        return key2note(self.key)
    
    @note.setter
    def note(self, value):
        self.key = note2key(value) if type(value) is str else value

    def get_effects(self):
        """Get the list of effects of this sound object.
        
        Returns:
            list: A (possibly empty) list of sound effects.
        """
        return self._effects

    def set_effects(self, *effects):
        """Set the effects to apply to the output of this sound instance.

        Args:
            *effects: Sound effect instances.
        """
        self._effects = effects


class LatencyGate(Sound):
    """An on/off gate for synthesizers.
    
    A synthesizer gate outputs an on/off signal that is used to trigger
    signal processing such as envelope generators.

    This particular latency gate is designed to schedule its `on` and
    `off` transitions in terms of system time, so notes may be triggered
    at precise times despite fluctuations in operating system latency.
    """
    def __init__(self):
        
        super().__init__()
        
        # Pending (timestamp, event) tuples, ordered by time.
        self.states = []

        # Becomes True the first time the gate opens.
        self.opened = False

        # Current gate value - 0 (closed) or 1 (open).
        self.value = 0

    def reset(self, shared=False):
        
        super().reset(shared)
        
        self.states = []
        self.opened = False
        self.value = 0

    def forward(self):
        
        #
        # open/close events are scheduled in terms of absolute time. Here these 
        # timestamps are converted into a frame index.
        #

        #states = []

        a0 = np.zeros((self.frames, 1))
        v0 = self.value
        i0 = 0

        t0 = time.time()
        schedule = get_schedule()
        
        while self.states:
            
            t, event = self.states[0]
            
            # Convert the event's absolute timestamp into a frame offset
            # relative to the start of this output buffer.
            if schedule:
                dt = max(0, t + _latency - schedule)
            else:
                dt = max(0, t - t0)
                
            df = t2frames(dt)
            i1 = min(df, self.frames)

            # The event falls beyond this buffer - leave it for later.
            if df > i1:
                break

            if self.value == 0 and event == 'open':
                self.value = 1
                self.opened = True
                i0 = i1
                #if df <= i1:
                #    states.append((self.index + i1, 'open'))          

            elif self.value == 1 and event == 'close':
                self.value = 0
                # Fill the open interval [i0, i1) with ones.
                a0[i0:i1] += 1
                #if df <= i1:
                #    states.append((self.index + i1, 'close'))          

            self.states.pop(0)

        # If the gate is still open, fill the remainder of the buffer.
        if self.value == 1 and i0 < self.frames:
            a0[i0:self.frames] += 1

        #states.append((self.index + self.frames, 'continue'))          

        return a0

        
    def open(self, t=None, dt=None, **kwargs):
        """Schedule the gate to open at the given time.

        The schedule may be an absolute time given by argument `t`, or a
        delta `dt` after the schedule of the latest event already scheduled.

        Args:
            t (float, optional): Time in seconds, as returned by Python's
                standard library ``time.time()``.
            dt (float, optional): Delta in seconds after the currently
                last scheduled event.
        """
        self.schedule('open', t, dt)
        
    def close(self, t=None, dt=None, **kwargs):
        """Schedule the gate to close at the given time.

        The schedule may be an absolute time given by argument `t`, or a
        delta `dt` after the schedule of the latest event already scheduled.

        Args:
            t (float, optional): Time in seconds, as returned by Python's
                standard library ``time.time()``.
            dt (float, optional): Delta in seconds after the currently
                last scheduled event.
        """
        self.schedule('close', t, dt)
        
    def schedule(self, event, t=None, dt=None):
        """Queue the given 'open'/'close' event at time `t` or delta `dt`."""
        logger.debug('Enter LatencyGate.schedule(event=%r, t=%r, dt=%r).', event, t, dt)

        tt = get_time()

        if not self.states:
            last_t = tt
        else:
            last_t = self.states[-1][0]

        if dt is not None:
            t = dt + last_t
        else:
            t = t or tt

        # Never schedule an event into the past.
        t = max(t, tt)

        # Discard events scheduled to run after this new event.
        while self.states and self.states[-1][0] > t:
            self.states.pop(-1)

        self.states.append((t, event))


def gate2events(gate, v0=0, index=0):
    """Convert a gate sample buffer into a list of (frame, event) tuples.

    Scans *gate* for transitions relative to the previous gate value *v0*,
    emitting an 'open' event for each rising edge and a 'close' event for
    each falling edge, with frame indices offset by *index*. A final
    (end, 'continue') event marks the end of the buffer.

    Returns:
        tuple: (events, last_gate_value, end_frame_index).
    """
    events = []

    end = index + len(gate)
    gate = gate > 0

    while len(gate):

        # Locate the next transition: the first True sample while closed,
        # or the first False sample while open.
        pos = int(gate.argmax() if v0 == 0 else gate.argmin())
        value = int(bool(gate[pos]))

        # No transition remains in the buffer.
        if value == v0:
            break

        v0 = value
        index += pos
        events.append((index, 'open' if v0 else 'close'))
        gate = gate[pos:]

    events.append((end, 'continue'))

    return events, v0, end

    
class GatedSound(Sound):
    """A sound class that can time notes and their duration precisely.

     Args:
        freq (float): Fundamental frequency.
        amp (float): Output amplitude - a value between 0 and 1.
        pan (float): Balance value between the left (-1) and right (1)
            output channels.
        duration (float, optional): Duration to play note, in whole notes.   
    """
    def __init__(self, freq=MIDDLE_C, amp=DEFAULT_AMP, pan=0., duration=None):
        
        super().__init__(freq=freq, amp=amp, pan=pan)

        # The on/off gate used to time note start and release precisely.
        self.gate = LatencyGate()

        # Default note duration in whole notes (None plays until released).
        self.duration = duration
        
    @property
    def done(self):
        """bool: True once the sound may be considered done and discarded."""

        if self._error:
            return True

        # A gated sound is never done before its gate has opened once.
        return Sound.done.fget(self) if self.gate.opened else False

    def play_poly(self, note=None, duration=None, **kwargs):
        """Play the given note polyphonically.

        This function plays the note on a new copy of self. If the sound
        is already playing, the new note will join it.
        
        Args:
            note (float): Note to play in semitone units, where 60
                corresponds to middle C.
            duration (float, optional): Duration to play note, in whole
                notes. 
            **kwargs: Instrument attributes to modify.
        
        Returns:
            GatedSound: Sound object representing the newly played note.
        """
        o = self.copy(track=True)
        o.play(note, duration, **kwargs)

        return o

    def play(self, note=None, duration=None, **kwargs):
        """Play the given note monophonically.

        If the sound is already playing, it will be reset.
        
        Args:
            note (float): Note to play in semitone units, where 60
                corresponds to middle C.
            duration (float, optional): Duration to play note, in whole
                notes.    
            **kwargs: Instrument attributes to modify.
        """
        if duration is None:
            duration = self.duration

        t = kwargs.pop('t', None)
        dt = kwargs.pop('dt', None)

        super().play(note, **kwargs)
        self.gate.open(t, dt)

        if duration is not None:
            # Convert duration from whole notes to seconds at the current BPM.
            self.gate.close(dt=duration * get_note_value() * 60 / get_bpm())
        
    def play_release(self, stop=False, **kwargs):
        """Release the note by closing the gate (without stopping by default)."""

        super().play_release(stop=stop, **kwargs)

        kwargs = dict(kwargs)

        t = kwargs.pop('t', None)
        dt = kwargs.pop('dt', None)

        self.set(**kwargs)
        self.gate.close(t, dt)


#
# An envelope curve may span multiple buffers and is therefore generated
# piece by piece. The code that does this is very delicate - be extra
# careful when modifying it. Computations appear to require float64
# precision (!) since in float32 they occasionally emit buffers of the
# wrong length.
# 包络曲线可能跨越多个缓冲区，因此逐段生成。这样做的代码非常微妙。
# 要特别小心修改它。计算似乎需要float64精度（！）因为在float32中，
# 它们偶尔会触发超出缓冲区长度的错误。
#

def get_exponential_adsr_curve(dt, start=0, end=None, th=0.01):
    """Compute a section of an exponential envelope curve.
    
    Args:
        dt (float): Time in seconds the curve should take to go from 0.0
            to 1.0, minus the given threshold (th).
        start (int): First frame of the curve section.
        end (int): End frame of the curve section.
        th (float): Threshold at which the exponential is considered to
            have converged to 1.

    Returns:
        ndarray: Array with curve values.    
    """
    # Total number of frames in the complete curve - at least one.
    df = max(math.ceil(dt * FPS), 1)
    end = min(df, end if end is not None else 60 * FPS)
    start = start + 1
        
    # float64 is required here - float32 occasionally yields buffers of
    # the wrong length (see module comment above these functions).
    a0 = np.arange(start/df, end/df + EPSILON, 1/df, dtype='float64')
    a1 = np.exp(a0 * math.log(th))
    # Normalize so the curve spans 0..1 despite the threshold cutoff.
    a2 = (1. - a1) / (1. - th)
    
    return a2


def get_linear_adsr_curve(dt, start=0, end=None):
    """Compute a section of a linear envelope curve.
    
    Args:
        dt (float): Time in seconds the curve should take to go from 0.0
            to 1.0.
        start (int): First frame of the curve section.
        end (int): End frame of the curve section.

    Returns:
        ndarray: Array with curve values.    
    """
    # Total number of frames in the complete curve - at least one.
    df = max(math.ceil(dt * FPS), 1)
    end = min(df, end if end is not None else 60 * FPS)
    start = start + 1
    
    # float64 is required here - float32 occasionally yields buffers of
    # the wrong length (see module comment above these functions).
    a0 = np.arange(start/df, end/df + EPSILON, 1/df, dtype='float64')
    
    return a0


#
# Envelopes are currently the only consumers of gate open/close signals.
# 目前，包络绘制(Envelopes)是门开/关信号(门控信号——gate open/close signals)的唯一消费者。
#

class Envelope(Sound):
    """An ADSR (attack, decay, sustain, release) envelope generator.

    Args:
        attack (float): Attack time in seconds.
        decay (float): Decay time in seconds.
        sustain (float): Sustain level - a value between 0 and 1.
        release (float): Release time in seconds.
        linear (bool): Use linear (True) or exponential (False) curves.
    """
    
    def __init__(
        self, 
        attack=0.,
        decay=0., 
        sustain=1., 
        release=0.,
        linear=True,
    ):
        
        super().__init__()
        
        self.attack = attack
        self.decay = decay
        self.sustain = sustain
        self.release = release

        # Linear or exponential envelope curve.
        self.linear = linear

        # The current state of the envelope, one of attack, decay, ...
        self._state = None

        # The first frame index of the current envelope state.
        self._start = 0

        #
        # Pure envelope curves go from 0 to 1, but in practice a curve may go
        # from arbitrary level A to level B. e.g. release may start at sustain
        # level and go down to 0. The following two properties are used to 
        # implement this.
        #
        self._valu0 = 0
        self._valu1 = 0
        
        # Last gate value.
        self._lgate = 0

    def reset(self, shared=False):
        
        super().reset(shared)
        
        self._state = None
        self._start = 0
        self._valu0 = 0
        self._valu1 = 0
        self._lgate = 0        
        
    def forward(self, gate):
        """Compute the next envelope buffer driven by the given gate.

        Args:
            gate: Either a gate sample buffer (ndarray) or a precomputed
                list of (frame, event) tuples as produced by gate2events().

        Returns:
            ndarray: Envelope values of shape (frames, 1).
        """
        
        if isinstance(gate, np.ndarray):
            states, self._lgate, end = gate2events(gate, self._lgate, self.index)
        else:
            states = gate

        #print(states)

        index = self.index
        
        # TODO: This code assumes the envelope frame index and the gate frame
        # index are synchronized (the same). In practice this is correct, but
        # it should not be assumed. Instead the gate itself should include 
        # its buffer start and end index. 

        curves = []
        
        for event_index, event in states:
            #print(event_index, event)
            
            # Generate curve pieces up to the frame of the next event.
            while index < event_index:
                curves.append(self.get_curve(index, event_index))
                index += len(curves[-1])
                    
            # A rising gate edge (re)starts the attack phase from the
            # envelope's current output level.
            if event == 'open' and self._state != 'attack':
                self._state = 'attack'
                self._start = index
                self._valu0 = self._valu1
            
            # A falling gate edge starts the release phase from the
            # envelope's current output level.
            if event == 'close' and self._state not in ('release', None):
                self._state = 'release'
                self._start = index
                self._valu0 = self._valu1
            
        return np.concatenate(curves)[:,None]
    
    def get_curve(self, start, end):
        """Compute one piece of the envelope curve for frames [start, end),
        advancing through the attack/decay/sustain/release state machine
        as curves complete.
        """

        end = max(start, end)

        # In the idle or sustain state the envelope holds a constant level.
        if self._state in (None, 'sustain'):
            return np.ones((end - start,), dtype='float64') * self._valu0
        
        # Shift to frame offsets relative to the start of the current state.
        start = start - self._start
        end = end - self._start
        dt = getattr(self, self._state)
                    
        if self.linear:
            curve = get_linear_adsr_curve(dt, start, end)
        else:
            curve = get_exponential_adsr_curve(dt, start, end)
    
        if len(curve) == 0:
            return curve

        # The current state is complete once its curve reaches (nearly) 1.
        done = curve[-1] >= 1 - EPSILON
        
        if self._state == 'attack':
            target = 1.
            next_state = 'decay'
            
        elif self._state == 'decay':
            target = self.sustain * self._valu0
            next_state = 'sustain' if self.sustain else None
            
        elif self._state == 'release':
            target = 0.
            next_state = None
        
        else:
            target = 0.
            next_state = None

        # Rescale the pure 0-to-1 curve to go from _valu0 to target.
        curve = (target - self._valu0) * curve  + self._valu0
        
        if done:
            self._state = next_state
            self._start += start + len(curve)
            self._valu0 = curve[-1]
            
        self._valu1 = curve[-1]
        
        return curve


#
# Do not change this "constant"! get_radians() depends on it being a
# length-1 float64 zero array used to seed the phase accumulator.
# 不要改变这个常数("constant")!
#
_NP_ZERO = np.zeros((1,), dtype='float64')


def get_radians(freq, start=0, frames=8192):
    """Integrate a (possibly time varying) frequency into phase radians.

    Args:
        freq (float or ndarray): Frequency in Hz; an array enables
            per-frame frequency modulation.
        start (float): Initial phase in radians.
        frames (int): Number of frames to generate when freq is a scalar.

    Returns:
        tuple: (radians array, phase to use as *start* of the next call).
    """
    step = 2 * math.pi / FPS * freq

    if isinstance(step, np.ndarray):
        step = step.reshape(-1)
    else:
        step = step * np.ones((frames,), dtype='float64')

    # Accumulate per-frame phase increments, seeded with the start phase.
    seed = start + _NP_ZERO
    accum = np.cumsum(np.concatenate((seed, step)))

    # The final accumulated value carries the phase into the next buffer.
    return accum[:-1], accum[-1]


def get_sine_wave(freq, phase=0, frames=8192, **kwargs):
    """Generate a sine waveform.

    Returns the sample array and the phase to pass into the next call.
    """
    radians, next_phase = get_radians(freq, phase, frames)

    return np.sin(radians), next_phase


def get_triangle_wave(freq, phase=0, frames=8192, **kwargs):
    """Generate a triangle waveform.

    Returns the sample array and the phase to pass into the next call.
    """
    radians, next_phase = get_radians(freq, phase, frames)

    # Fold the phase into a ramp over [-1, 1) and reflect it about zero
    # to produce the triangle shape in [-1, 1].
    ramp = (radians % (2 * math.pi)) / math.pi - 1
    tri = -np.abs(ramp) * 2 + 1

    return tri, next_phase


@functools.lru_cache(maxsize=256)
def get_sawtooth_cycle(nharmonics, size=1024):
    """Compute one cycle of a band limited sawtooth wave table.

    Recursively sums the first *nharmonics* Fourier harmonics into an
    array of length *size*. Results are memoized per (nharmonics, size).
    """
    k = nharmonics
    radians = 2 * math.pi * np.arange(0, 1, 1 / size)
    harmonic = - 2 / math.pi * ((-1) ** k) / k * np.sin(k * radians)

    if k == 1:
        return harmonic

    return harmonic + get_sawtooth_cycle(nharmonics - 1, size)
    
# Warm up the memoization cache so first playback is not delayed.
len(get_sawtooth_cycle(128))


def get_sawtooth_wave(freq, phase=0, frames=8192, sign=1., **kwargs):
    """Generate a band limited (anti-aliased) sawtooth waveform.

    Returns the sample array and the phase to pass into the next call.
    """
    radians, next_phase = get_radians(freq, phase, frames)

    table_size = 1024

    # Use the mean frequency for the purpose of determining the number
    # of harmonics to use - this may introduce some aliasing.
    if type(freq) not in (int, float):
        freq = float(np.mean(freq))

    nharmonics = kwargs.get('nharmonics', max(1, min(128, FPS / 2 // freq)))

    table = get_sawtooth_cycle(nharmonics, table_size)

    # Map phase radians onto wavetable indices.
    indices = (table_size / 2 / math.pi * radians).astype('int32') % table_size
    samples = table[indices]

    if sign != 1.:
        # A sign of -1 inverts the sawtooth.
        samples = samples * sign

    return samples, next_phase


# Wave table dimensions for the band limited square wave generator -
# the number of quantized duty-cycle values and of harmonics.
_nduties = 64
_nharmonics = 128

# _kdm[k, d, 0] is the product of harmonic number k and quantized duty
# fraction d; used by get_square_cycle() below.
_km = np.arange(0, _nharmonics+1)[:, None, None] 
_dm = np.linspace(0, 1, _nduties+1)[None, :, None]
_kdm = _km * _dm


@functools.lru_cache(maxsize=256)
def get_square_cycle(nharmonics, size=1024):
    """Compute one cycle of a band limited square wave table.

    Recursively sums the first *nharmonics* Fourier harmonics for all
    quantized duty-cycle values, producing an array indexed as
    [duty, sample]. Results are memoized per (nharmonics, size).
    """
    k = nharmonics
    radians = 2 * math.pi * np.arange(0, 1, 1 / size)
    harmonic = 4 / math.pi / k * np.sin(math.pi * _kdm[k]) * np.cos(k * radians)[None, :]
    
    if k == 1:
        # The base case includes the DC offset implied by the duty cycle.
        return harmonic + 2 * _kdm[1] - 1

    return harmonic + get_square_cycle(nharmonics - 1, size)

# Warm up the memoization cache so first playback is not delayed.
len(get_square_cycle(128))


def get_square_wave(freq, phase=0, frames=8192, duty=0.5, **kwargs):
    """Generate a band limited, variable duty square (pulse) waveform.

    Returns the sample array and the phase to pass into the next call.
    """
    if isinstance(duty, np.ndarray):
        duty = duty.reshape(-1).clip(0.01, 0.99)

    radians, next_phase = get_radians(freq, phase, frames)

    # Use the mean frequency for the purpose of determining the number
    # of harmonics to use - this may introduce some aliasing.
    if type(freq) not in (int, float):
        freq = float(np.mean(freq))

    nharmonics = kwargs.get('nharmonics', max(1, min(128, FPS / 2 // freq)))

    table_size = 1024

    table = get_square_cycle(nharmonics, table_size)

    # Map phase radians onto wavetable indices.
    indices = (table_size / 2 / math.pi * radians).astype('int32') % table_size

    #
    # When duty is a modulating array, the following simple quantization
    # may result in aliasing. It would be preferable to find a scheme
    # that can efficiently sync changes in duty with the beginning of
    # wave cycles.
    #
    if type(duty) in (int, float):
        duty = int(duty * _nduties)
    else:
        duty = (duty * _nduties).astype('int32')

    return table[duty, indices], next_phase


class Oscillator(Sound):

    """Waveform generator for `sine`, `triangle`, anti-aliased `sawtooth`,
    and variable duty anti-aliased `square` waveforms.

    Args:
        shape (str): Waveform type - one of `sine`, `triangle`,
            `sawtooth`, or `square`.
        freq (float): Fundamental frequency of the generator.
        key (float, optional): Fundamental frequency of the generator in
            semitone units, where middle C corresponds to the value 60.
        sign (float): Set to -1 to invert the sawtooth waveform.
        duty (float): Fraction of the square wave cycle spent at value 1.

    Note:
        The generator inherits all methods and properties of the Sound
        class.
    """
    
    def __init__(self, shape='sine', freq=MIDDLE_C, key=None, phase=0., sign=1, duty=0.5, **kwargs):
        """"""

        super().__init__(freq=freq)
        
        self.shape = shape
        self.phase = phase
        
        # A given key overrides the freq argument.
        if key is not None:
            self.key = key
        
        self.sign = sign
        self.duty = duty
        self.kwargs = kwargs
        
    def forward(self, key_modulation=None, sign=None, duty=None, **kwargs):
        """Compute the next buffer of waveform samples.

        Args:
            key_modulation (float or ndarray, optional): Offset in
                semitone units to apply to the oscillator's base key.
            sign (float, optional): Overrides self.sign for this buffer.
            duty (float or ndarray, optional): Overrides self.duty for
                this buffer.

        Returns:
            ndarray: Samples of shape (frames, 1).
        """
        
        if key_modulation is not None:
            freq = key2freq(self.key + key_modulation)
        else:
            freq = self.freq
            
        if sign is None:
            sign = self.sign
            
        if duty is None:
            duty = self.duty
            
        # Constructor kwargs take precedence over per-call kwargs.
        if self.kwargs:
            kwargs = dict(kwargs)
            kwargs.update(self.kwargs)
        
        get_wave = dict(
            sine = get_sine_wave,
            triangle = get_triangle_wave,
            sawtooth = get_sawtooth_wave,
            square = get_square_wave,
            pulse = get_square_wave,
            saw = get_sawtooth_wave,
            tri = get_triangle_wave,
        ).get(self.shape, self.shape)
        
        a0, self.phase = get_wave(
            freq, 
            self.phase, 
            self.frames, 
            # Bug fix: pass the resolved local `sign` (which honors the
            # per-call argument, like `duty` below) instead of always
            # using self.sign, which silently ignored the argument.
            sign=sign,
            duty=duty, 
            **kwargs
        )
        
        return a0[:,None]


# Map of common noise color names to their spectral slope value used by
# get_noise(); aliases (brownian/brown/red, violet/purple) share a value.
noise_color = Dict(
    brownian = -6,
    brown = -6,
    red = -6,
    pink = -3,
    white = 0,
    blue = 3,
    violet = 6,
    purple = 6,
)


class Noise(Sound):
    """Colored noise generator.

    Args:
        color (str or float): Either a name from `noise_color` (e.g. 'white',
            'pink') or a numeric spectral slope value.
    """

    def __init__(self, color=noise_color.white):

        super().__init__()

        if type(color) is str:
            assert color in noise_color, 'Noise color name should be one of %s.' % ', '.join(noise_color.keys())

        self.color = color

        # Carry-over state for get_noise() so consecutive chunks join smoothly.
        self.state = None
        # Buffer of generated-but-not-yet-consumed samples.
        self.noise = None

        # Last effective (numeric) color; a change invalidates the buffer.
        self._color = color

    def forward(self, color_modulation=0):
        """Return the next `self.frames` noise samples with shape (frames, 1)."""

        # Resolve a named color to its numeric slope.
        base = noise_color[self.color] if type(self.color) is str else self.color

        # Scalar modulation is applied directly; an array modulator is
        # collapsed to the mean of its last frame.
        if isinstance(color_modulation, np.ndarray):
            effective = base + np.mean(color_modulation[-1]).item()
        else:
            effective = base + color_modulation

        # Any color change discards buffered samples of the old color.
        if self._color != effective:
            self._color = effective
            self.noise = None

        # Top up the buffer; each refill produces at least 2048 samples,
        # so a single refill always suffices.
        if self.noise is None or len(self.noise) < self.frames:

            chunk, self.state = get_noise(
                self._color,
                max(2048, self.frames),
                self.state,
            )

            self.noise = chunk if self.noise is None else np.concatenate((self.noise, chunk))

        # Consume exactly one frame's worth of samples from the buffer.
        out, self.noise = self.noise[:self.frames], self.noise[self.frames:]

        return out[:,None]


def get_noise(color, frames=4096, state=None, kernel_size=2048, fs=FPS):
    """Generate `frames` samples of colored noise.

    Args:
        color (float): Numeric spectral slope - one of the `noise_color`
            values (-6 red/brown, 0 white, 6 violet, etc.).
        frames (int): Number of samples to generate.
        state (ndarray, optional): Trailing white-noise samples returned by
            a previous call; prepending them keeps consecutive chunks
            continuous.
        kernel_size (int): Length of the white-noise history and of the
            filter kernel used for generic colors. Must be even.
        fs (int): Sample rate.

    Returns:
        tuple: (samples, new_state) where `new_state` is the white-noise
        tail to pass back in on the next call.
    """
    
    assert kernel_size % 2 == 0
    
    # (Re)seed the history if absent or of the wrong length.
    if state is None or len(state) != kernel_size:
        state = np.random.randn(kernel_size) / math.pi
        
    # Fresh white noise, prefixed with the previous state for continuity.
    wn = np.random.randn(frames) / math.pi
    wn = np.concatenate((state, wn))

    if color == noise_color.red:
        
        # Red/brown noise via integration of white noise, with a
        # moving-average correction to keep the output from drifting.
        pad = kernel_size // 2
        
        c0 = np.cumsum(wn)
        c1 = np.cumsum(c0)

        # c2 is a length-`pad` moving average of c0; subtracting it from
        # c0 removes the slow drift. /30 looks like an empirical gain
        # normalization - TODO confirm.
        c2 = (c1[pad:] - c1[:-pad]) / pad
        c3 = (c0[2*pad:] - c2[:-pad]) / 30
    
        return c3, wn[-kernel_size:]
    
    if color == noise_color.white:
        # White noise is the raw Gaussian stream.
        return wn[-frames:], wn[-kernel_size:]
    
    if color == noise_color.violet:
        # Violet noise is differentiated white noise (np.diff needs one
        # extra sample to produce `frames` outputs).
        return np.diff(wn[-frames-1:]), wn[-kernel_size:]
    
    # Generic colors: filter the white noise with a cached FIR kernel
    # shaped to the requested spectral slope.
    kernel = get_noise_kernel(color, kernel_size, fs)
    
    # 'valid' mode consumes the kernel_size-sample history as warm-up, so
    # the output has no edge transients. /17 looks like an empirical gain
    # normalization - TODO confirm.
    cn = scipy.signal.convolve(
        wn[1:].astype('float32'), 
        kernel.astype('float32'), 
        'valid'
    ).astype('float64') / 17
    
    return cn[:frames], wn[-kernel_size:]


@functools.lru_cache(maxsize=128)
def get_noise_kernel(color, kernel_size=8192, fs=FPS):
    """Build an FIR kernel whose amplitude spectrum follows the requested
    color slope. Results are cached per (color, kernel_size, fs)."""
    
    # ~= 20*log10(2): converts a dB-per-octave slope into a frequency
    # exponent - presumably; confirm against the color definitions.
    cc = 6.020599915832349
    
    # Frequency bins (DC clipped up to 1 Hz), raised to the slope exponent,
    # then randomized in phase to produce a time-domain kernel.
    bins = get_fftfreq(kernel_size, 1/fs, 1)
    spectrum = bins ** (color / cc)
    kernel = fftnoise(spectrum)
    
    # Normalize the kernel energy before returning it.
    return (kernel_size / 8 / (kernel ** 2).sum()) ** 0.5 * kernel


@functools.lru_cache(maxsize=16)
def get_fftfreq(n, d=1., clip=0):
    """Return the absolute FFT sample frequencies for `n` samples spaced
    `d` apart, clipped to the range [clip, 1e6]. Cached since the result
    depends only on the arguments."""
    freqs = np.fft.fftfreq(n, d)
    return np.abs(freqs).clip(clip, 1e6)


def fftnoise(freqs):
    """Synthesize a real-valued noise signal whose amplitude spectrum is
    `freqs`.

    Random phases are applied to the positive-frequency bins and mirrored
    as conjugates onto the negative-frequency bins, so the inverse FFT is
    (up to rounding) purely real.
    """
    
    spectrum = np.array(freqs, dtype='complex')
    half = (len(spectrum) - 1) // 2
    
    # Unit-magnitude random phase rotors for the positive frequencies.
    angles = 2 * math.pi * np.random.rand(half)
    rotors = np.cos(angles) + 1j * np.sin(angles)
    
    spectrum[1:half+1] *= rotors
    # Hermitian symmetry: negative bins are conjugates of positive bins.
    spectrum[-1:-1-half:-1] = np.conj(spectrum[1:half+1])
    
    return np.fft.ifft(spectrum).real


class PhaseModulator(Sound):
    """Phase-modulate a carrier by delaying it a signal-controlled,
    sub-sample amount using linear interpolation.

    Args:
        beta (float): Maximum delay in samples applied by a full-scale
            modulator signal.
    """

    def __init__(self, beta=1., shared=False):

        super().__init__(shared=shared)

        self.beta = beta

    def forward(self, carrier, signal):
        """Return the carrier delayed per-sample by `beta * signal`."""

        # Mono modulator, bounded so delays stay inside the history buffer.
        mono = signal.mean(-1).clip(-1, 1)

        # History margin; strictly greater than self.beta so the
        # interpolation indices below can never run off either end.
        margin = int(self.beta) + 1

        if self._buffer is None:
            self._buffer = np.zeros((2 * margin, carrier.shape[1]), dtype=carrier.dtype)

        # Fractional read positions into the history + carrier buffer.
        pos = np.arange(margin, margin + len(carrier), dtype='float64') + self.beta * mono
        idx = pos.astype('int64')
        frac = (pos - idx.astype('float64'))[:, None]

        extended = np.concatenate((self._buffer, carrier))

        # Linear interpolation between adjacent samples.
        lo = extended[idx]
        hi = extended[idx + 1]
        out = hi * frac + lo * (1 - frac)

        # Keep the tail as history for the next call.
        self._buffer = extended[-2 * margin:]

        return out

