/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_AUDIOTRACK_H
#define ANDROID_AUDIOTRACK_H

#include <cutils/sched_policy.h>
#include <media/AudioSystem.h>
#include <media/AudioTimestamp.h>
#include <media/IAudioTrack.h>
#include <media/AudioResamplerPublic.h>
#include <media/MediaMetricsItem.h>
#include <media/Modulo.h>
#include <utils/threads.h>

#include <string>

#include "android/media/BnAudioTrackCallback.h"
#include "android/media/IAudioTrackCallback.h"

namespace android {

// ----------------------------------------------------------------------------

struct audio_track_cblk_t;
class AudioTrackClientProxy;
class StaticAudioTrackClientProxy;

// ----------------------------------------------------------------------------

class AudioTrack : public AudioSystem::AudioDeviceCallback
{
public:

    /* Events used by AudioTrack callback function (callback_t).
     * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
     */

//请求向缓冲区写入更多数据。
//此事件仅发生在TRANSFER_CALLBACK中。
//如果传递了此事件，但回调处理程序
//不想写入更多数据，处理程序必须
//通过将frameCount设置为零来忽略该事件。
//例如，如果应用程序
//正在等待源数据或处于流的末尾。
//
//对于数据填充，首选回调
//不阻塞，而是返回一个短计数
//实际交付的数据量
//（如果当前没有可用数据，则为0）。
//EVENT_UNDERRUN：发生缓冲区不足。这种情况不会发生在静态轨道。
//EVENT_LOOP_END：样本循环结束；如果静态轨道的循环计数不为0，
//则播放从循环开始处重新启动。
//EVENT_MARKER：播放头位于指定的标记位置
//（请参见setMarkerPosition（））。
//EVENT_NEW_POS：播放头位于新位置
//（请参见setPositionUpdatePeriod（））。
//EVENT_BUFFER_END：静态音轨的播放已完成。
//EVENT_NEW_IAUDIOTRACK：IAudioTrack被重新创建，原因可能是重新布线和
//媒体服务器的自愿失效，或媒体服务器崩溃。
//EVENT_STREAM_END：在播放完AF和HW中排队的所有缓冲区后发送
//（在调用停止后），用于卸载的轨道。
    // NOTE: the numeric values below are part of the callback contract and are
    // mirrored by NATIVE_EVENT_* in the Java layer (see comment above); do not
    // renumber or reuse them.
    enum event_type {
        EVENT_MORE_DATA = 0,        // Request to write more data to buffer.
                                    // This event only occurs for TRANSFER_CALLBACK.
                                    // If this event is delivered but the callback handler
                                    // does not want to write more data, the handler must
                                    // ignore the event by setting frameCount to zero.
                                    // This might occur, for example, if the application is
                                    // waiting for source data or is at the end of stream.
                                    //
                                    // For data filling, it is preferred that the callback
                                    // does not block and instead returns a short count on
                                    // the amount of data actually delivered
                                    // (or 0, if no data is currently available).
        EVENT_UNDERRUN = 1,         // Buffer underrun occurred. This will not occur for
                                    // static tracks.
        EVENT_LOOP_END = 2,         // Sample loop end was reached; playback restarted from
                                    // loop start if loop count was not 0 for a static track.
        EVENT_MARKER = 3,           // Playback head is at the specified marker position
                                    // (See setMarkerPosition()).
        EVENT_NEW_POS = 4,          // Playback head is at a new position
                                    // (See setPositionUpdatePeriod()).
        EVENT_BUFFER_END = 5,       // Playback has completed for a static track.
        EVENT_NEW_IAUDIOTRACK = 6,  // IAudioTrack was re-created, either due to re-routing and
                                    // voluntary invalidation by mediaserver, or mediaserver crash.
        EVENT_STREAM_END = 7,       // Sent after all the buffers queued in AF and HW are played
                                    // back (after stop is called) for an offloaded track.
#if 0   // FIXME not yet implemented
        EVENT_NEW_TIMESTAMP = 8,    // Delivered periodically and when there's a significant change
                                    // in the mapping from frame position to presentation time.
                                    // See AudioTimestamp for the information included with event.
#endif
        EVENT_CAN_WRITE_MORE_DATA = 9,// Notification that more data can be given by write()
                                    // This event only occurs for TRANSFER_SYNC_NOTIF_CALLBACK.
    };

    /* Client should declare a Buffer and pass the address to obtainBuffer()
     * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
     */

    // Describes one chunk of the track's shared-memory buffer, as handed back
    // and forth through obtainBuffer()/releaseBuffer() (and the EVENT_MORE_DATA
    // callback).
    class Buffer
    {
    public:
        // FIXME use m prefix
        size_t      frameCount;   // number of sample frames corresponding to size;
                                  // on input to obtainBuffer() it is the number of frames desired,
                                  // on output from obtainBuffer() it is the number of available
                                  //    [empty slots for] frames to be filled
                                  // on input to releaseBuffer() it is currently ignored

        size_t      size;         // input/output in bytes == frameCount * frameSize
                                  // on input to obtainBuffer() it is ignored
                                  // on output from obtainBuffer() it is the number of available
                                  //    [empty slots for] bytes to be filled,
                                  //    which is frameCount * frameSize
                                  // on input to releaseBuffer() it is the number of bytes to
                                  //    release
                                  // FIXME This is redundant with respect to frameCount.  Consider
                                  //    removing size and making frameCount the primary field.

        union {
            void*       raw;
            int16_t*    i16;      // signed 16-bit
            int8_t*     i8;       // unsigned 8-bit, offset by 0x80
        };                        // input to obtainBuffer(): unused, output: pointer to buffer

        uint32_t    sequence;       // IAudioTrack instance sequence number, as of obtainBuffer().
                                    // It is set by obtainBuffer() and confirmed by releaseBuffer().
                                    // Not "user-serviceable".
                                    // TODO Consider sp<IMemory> instead, or in addition to this.
    };

    /* As a convenience, if a callback is supplied, a handler thread
     * is automatically created with the appropriate priority. This thread
     * invokes the callback when a new buffer becomes available or various conditions occur.
     * Parameters:
     *
     * event:   type of event notified (see enum AudioTrack::event_type).
     * user:    Pointer to context for use by the callback receiver.
     * info:    Pointer to optional parameter according to event type:
     *          - EVENT_MORE_DATA: pointer to AudioTrack::Buffer struct. The callback must not write
     *            more bytes than indicated by 'size' field and update 'size' if fewer bytes are
     *            written.
     *          - EVENT_UNDERRUN: unused.
     *          - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
     *          - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
     *          - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
     *          - EVENT_BUFFER_END: unused.
     *          - EVENT_NEW_IAUDIOTRACK: unused.
     *          - EVENT_STREAM_END: unused.
     *          - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
     */


/*为了方便起见，如果提供了回调，则处理程序线程
*以适当的优先级自动创建。此线程
*当新的缓冲区可用或出现各种情况时调用回调。
*参数：
*
*event：通知的事件类型（请参见enum AudioTrack:：event_type）。
*user：指向回调接收器使用的上下文的指针。
*info：根据事件类型指向可选参数的指针：
*-EVENT_MORE_DATA：指向AudioTrack:：缓冲区结构的指针。回调不能写入
*比“size”字段指示的字节数多，如果字节数少则更新“size”
*书面的。
*-EVENT_UNDERRUN：未使用。
*-EVENT_LOOP_END：指向int的指针，指示剩余循环数。
*-EVENT_MARKER：指向const uint32_t的指针，包含帧中的标记位置。
*-EVENT_NEW_POS：指向const uint32_t的指针，该指针包含帧中的新位置。
*-EVENT_BUFFER_END：未使用。
*-EVENT_NEW_IAUDIOTRACK：未使用。
*-EVENT_STREAM_END:未使用。
*-EVENT_NEW_TIMESTAMP：指向常量AudioTimestamp的指针。
*/

   // callback_t: function-pointer type for the AudioTrack event callback.
   // 'event' is one of enum event_type, 'user' is the opaque context pointer
   // supplied at construction, and 'info' points to event-specific data as
   // documented in the parameter description above.
    typedef void (*callback_t)(int event, void* user, void *info);

    /* Returns the minimum frame count required for the successful creation of
     * an AudioTrack object.
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - NO_INIT: audio server or audio hardware not initialized
     *  - BAD_VALUE: unsupported configuration
     * frameCount is guaranteed to be non-zero if status is NO_ERROR,
     * and is undefined otherwise.
     * FIXME This API assumes a route, and so should be deprecated.
     */


/*返回成功创建所需的最小帧数
*AudioTrack对象。
*返回的状态（来自utils/Errors.h）可以是：
*-NO_ERROR：操作成功
*-NO_INIT：音频服务器或音频硬件未初始化
*-BAD_VALUE:不支持的配置
*如果状态为NO_ ERROR，则保证frameCount为非零，
*并且在其他方面是未定义的。
*FIXME这个API假设一个路由，因此应该弃用。
*/

    static status_t getMinFrameCount(size_t* frameCount,
                                     audio_stream_type_t streamType,
                                     uint32_t sampleRate);

    /* Check if direct playback is possible for the given audio configuration and attributes.
     * Return true if output is possible for the given parameters. Otherwise returns false.
     */
    /*检查给定的音频配置和属性是否可以直接播放。

*如果给定参数可以输出，则返回true。否则返回false。

*/
    static bool isDirectOutputSupported(const audio_config_base_t& config,
                                        const audio_attributes_t& attributes);

    /* How data is transferred to AudioTrack
     */
    /*如何将数据传输到AudioTrack

*/
    // Selected via the 'transferType' parameter of the constructors and set().
    enum transfer_type {
        TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
        TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
        TRANSFER_OBTAIN,    // call obtainBuffer() and releaseBuffer()
        TRANSFER_SYNC,      // synchronous write()
        TRANSFER_SHARED,    // shared memory
        TRANSFER_SYNC_NOTIF_CALLBACK, // synchronous write(), notif EVENT_CAN_WRITE_MORE_DATA
    };

    /* Constructs an uninitialized AudioTrack. No connection with
     * AudioFlinger takes place.  Use set() after this.
     */
                        AudioTrack();

                        AudioTrack(const std::string& opPackageName);

    /* Creates an AudioTrack object and registers it with AudioFlinger.
     * Once created, the track needs to be started before it can be used.
     * Unspecified values are set to appropriate default values.
     *
     * Parameters:
     *
     * streamType:         Select the type of audio stream this track is attached to
     *                     (e.g. AUDIO_STREAM_MUSIC).
     * sampleRate:         Data source sampling rate in Hz.  Zero means to use the sink sample rate.
     *                     A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
     *                     0 will not work with current policy implementation for direct output
     *                     selection where an exact match is needed for sampling rate.
     * format:             Audio format. For mixed tracks, any PCM format supported by server is OK.
     *                     For direct and offloaded tracks, the possible format(s) depends on the
     *                     output sink.
     * channelMask:        Channel mask, such that audio_is_output_channel(channelMask) is true.
     * frameCount:         Minimum size of track PCM buffer in frames. This defines the
     *                     application's contribution to the
     *                     latency of the track. The actual size selected by the AudioTrack could be
     *                     larger if the requested size is not compatible with current audio HAL
     *                     configuration.  Zero means to use a default value.
     * flags:              See comments on audio_output_flags_t in <system/audio.h>.
     * cbf:                Callback function. If not null, this function is called periodically
     *                     to provide new data in TRANSFER_CALLBACK mode
     *                     and inform of marker, position updates, etc.
     * user:               Context for use by the callback receiver.
     * notificationFrames: The callback function is called each time notificationFrames PCM
     *                     frames have been consumed from track input buffer by server.
     *                     Zero means to use a default value, which is typically:
     *                      - fast tracks: HAL buffer size, even if track frameCount is larger
     *                      - normal tracks: 1/2 of track frameCount
     *                     A positive value means that many frames at initial source sample rate.
     *                     A negative value for this parameter specifies the negative of the
     *                     requested number of notifications (sub-buffers) in the entire buffer.
     *                     For fast tracks, the FastMixer will process one sub-buffer at a time.
     *                     The size of each sub-buffer is determined by the HAL.
     *                     To get "double buffering", for example, one should pass -2.
     *                     The minimum number of sub-buffers is 1 (expressed as -1),
     *                     and the maximum number of sub-buffers is 8 (expressed as -8).
     *                     Negative is only permitted for fast tracks, and if frameCount is zero.
     *                     TODO It is ugly to overload a parameter in this way depending on
     *                     whether it is positive, negative, or zero.  Consider splitting apart.
     * sessionId:          Specific session ID, or zero to use default.
     * transferType:       How data is transferred to AudioTrack.
     * offloadInfo:        If not NULL, provides offload parameters for
     *                     AudioSystem::getOutputForAttr().
     * uid:                User ID of the app which initially requested this AudioTrack
     *                     for power management tracking, or -1 for current user ID.
     * pid:                Process ID of the app which initially requested this AudioTrack
     *                     for power management tracking, or -1 for current process ID.
     * pAttributes:        If not NULL, supersedes streamType for use case selection.
     * doNotReconnect:     If set to true, AudioTrack won't automatically recreate the IAudioTrack
                           binder to AudioFlinger.
                           It will return an error instead.  The application will recreate
                           the track based on offloading or different channel configuration, etc.
     * maxRequiredSpeed:   For PCM tracks, this creates an appropriate buffer size that will allow
     *                     maxRequiredSpeed playback. Values less than 1.0f and greater than
     *                     AUDIO_TIMESTRETCH_SPEED_MAX will be clamped.  For non-PCM tracks
     *                     and direct or offloaded tracks, this parameter is ignored.
     * selectedDeviceId:   Selected device id of the app which initially requested the AudioTrack
     *                     to open with a specific device.
     * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
     */

    /*创建AudioTrack对象并将其注册到AudioFlinger。

*创建后，需要先启动轨迹，然后才能使用它。

*未指定的值将设置为适当的默认值。

*

*参数：

*

*streamType：选择此曲目所连接的音频流的类型

*（例如AUDIO_STREAM_MUSIC）。

*sampleRate：数据源采样率，单位为Hz。零表示使用汇点采样率。

*如果设置了AUDIO_OUTPUT_FLAG_DIRECT，则必须指定一个非零值。

*0将不适用于直接输出的当前策略实现

*采样率需要精确匹配的选择。

*format：音频格式。对于混合音轨，服务器支持的任何PCM格式都可以。

*对于直接和卸载轨道，可能的格式取决于

*输出接收器。

*channelMask：通道掩码，使audio_is_output_Channel（channelMask）为true。

*frameCount：磁道PCM缓冲区的最小大小（以帧为单位）。这定义了

*应用程序对的贡献

*轨道的延迟。AudioTrack选择的实际大小可以是

*如果请求的大小与当前音频HAL不兼容，则较大

*配置。零表示使用默认值。

*flags:请参阅<system/audio.h>中对audio_output_flags_t的评论。

*cbf：回调函数。如果不为null，则会定期调用此函数

*以TRANSFER_CALLBACK模式提供新数据

*并通知标记、位置更新等。

*user：回调接收器使用的上下文。

*notificationFrames:callback函数在每次notificationFrames PCM时调用

*服务器已经从轨道输入缓冲区消耗了帧。

*零表示使用默认值，通常为：

*-快速轨道：HAL缓冲区大小，即使轨道frameCount更大

*-正常轨迹：轨迹帧计数的1/2

*正值表示许多帧处于初始源采样率。

*此参数的负值指定

*请求的整个缓冲区中通知（子缓冲区）的数量。

*对于快速通道，FastMixer将一次处理一个子缓冲区。

*每个子缓冲器的大小由HAL决定。

*例如，要获得“双重缓冲”，应该通过-2。

*子缓冲器的最小数量是1（表示为-1），

*并且子缓冲器的最大数量为8（表示为-8）。

*只有在frameCount为零的情况下，快速通道才允许为负数。

*TODO根据

*无论是正、负还是零。考虑分开。

*sessionId：特定的会话ID，或使用默认值为零。

*transferType：如何将数据传输到AudioTrack。

*offloadInfo：如果不是NULL，则为提供卸载参数

*音频系统：：getOutputForAttr（）。

*uid：最初请求此AudioTrack的应用程序的用户ID

*用于电源管理跟踪，或-1用于当前用户ID。

*pid：最初请求此AudioTrack的应用程序的进程ID

*用于电源管理跟踪，或-1用于当前进程ID。

*pAttributes：如果不是NULL，则取代streamType进行用例选择。

*doNotReconnect:如果设置为true，AudioTrack将不会自动重新创建IAudioTrack

AudioFlinger的活页夹。

它将返回一个错误。应用程序将重新创建

基于卸载或不同信道配置的轨道等。

*maxRequiredSpeed：对于PCM磁道，这将创建一个适当的缓冲区大小，允许

*maxRequiredSpeed播放。值小于1.0f且大于

*AUDIO_TIMESTRETCH_SPEED_MAX将被箝位。对于非PCM音轨

*以及直接或卸载轨道，则忽略此参数。

*selectedDeviceId：最初请求AudioTrack的应用程序的选定设备id

*使用特定设备打开。

*threadCanCallJava:参数列表中不存在，因此固定为false。
*/

                        AudioTrack( audio_stream_type_t streamType,
                                    uint32_t sampleRate,
                                    audio_format_t format,
                                    audio_channel_mask_t channelMask,
                                    size_t frameCount    = 0,
                                    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                    callback_t cbf       = NULL,
                                    void* user           = NULL,
                                    int32_t notificationFrames = 0,
                                    audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                                    transfer_type transferType = TRANSFER_DEFAULT,
                                    const audio_offload_info_t *offloadInfo = NULL,
                                    uid_t uid = AUDIO_UID_INVALID,
                                    pid_t pid = -1,
                                    const audio_attributes_t* pAttributes = NULL,
                                    bool doNotReconnect = false,
                                    float maxRequiredSpeed = 1.0f,
                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                    const std::string& opPackageName = "");

    /* Creates an audio track and registers it with AudioFlinger.
     * With this constructor, the track is configured for static buffer mode.
     * Data to be rendered is passed in a shared memory buffer
     * identified by the argument sharedBuffer, which should be non-0.
     * If sharedBuffer is zero, this constructor is equivalent to the previous constructor
     * but without the ability to specify a non-zero value for the frameCount parameter.
     * The memory should be initialized to the desired data before calling start().
     * The write() method is not supported in this case.
     * It is recommended to pass a callback function to be notified of playback end by an
     * EVENT_UNDERRUN event.
     */

    /*创建一个音轨并将其注册到AudioFlinger。

*使用此构造函数，可以为静态缓冲区模式配置轨道。

*要渲染的数据在共享内存缓冲区中传递

*由参数sharedBuffer标识，该参数应为非0。

*如果sharedBuffer为零，则此构造函数等效于上一个构造函数

*但是没有为frameCount参数指定非零值的能力。

*在调用start（）之前，应该将内存初始化为所需的数据。

*在这种情况下，不支持write（）方法。

*建议传递一个回调函数，由

*EVENT_UNDERRUN事件。

*/


                        AudioTrack( audio_stream_type_t streamType,
                                    uint32_t sampleRate,
                                    audio_format_t format,
                                    audio_channel_mask_t channelMask,
                                    const sp<IMemory>& sharedBuffer,
                                    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                    callback_t cbf      = NULL,
                                    void* user          = NULL,
                                    int32_t notificationFrames = 0,
                                    audio_session_t sessionId   = AUDIO_SESSION_ALLOCATE,
                                    transfer_type transferType = TRANSFER_DEFAULT,
                                    const audio_offload_info_t *offloadInfo = NULL,
                                    uid_t uid = AUDIO_UID_INVALID,
                                    pid_t pid = -1,
                                    const audio_attributes_t* pAttributes = NULL,
                                    bool doNotReconnect = false,
                                    float maxRequiredSpeed = 1.0f,
                                    const std::string& opPackageName = "");

    /* Terminates the AudioTrack and unregisters it from AudioFlinger.
     * Also destroys all resources associated with the AudioTrack.
     */
protected:
                        virtual ~AudioTrack();
public:

    /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
     * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
     * set() is not multi-thread safe.
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful initialization
     *  - INVALID_OPERATION: AudioTrack is already initialized
     *  - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
     *  - NO_INIT: audio server or audio hardware not initialized
     * If status is not equal to NO_ERROR, don't call any other APIs on this AudioTrack.
     * If sharedBuffer is non-0, the frameCount parameter is ignored and
     * replaced by the shared buffer's total allocated size in frame units.
     *
     * Parameters not listed in the AudioTrack constructors above:
     *
     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
     *      Only set to true when AudioTrack object is used for a java android.media.AudioTrack
     *      in its JNI code.
     *
     * Internal state post condition:
     *      (mStreamType == AUDIO_STREAM_DEFAULT) implies this AudioTrack has valid attributes
     */



/*初始化使用AudioTrack（）构造函数创建的AudioTrack。

*不要多次调用set（），或者在使用参数的AudioTrack（）构造函数之后调用。

*set（）不是多线程安全的。

*返回的状态（来自utils/Errors.h）可以是：

*-NO_ERROR：初始化成功

*-INVALID_OPERATION：AudioTrack已初始化

*-BAD_VALUE:无效参数（channelMask、format、sampleRate…）

*-NO_INIT：音频服务器或音频硬件未初始化

*如果status不等于NO_ERROR，则不要调用此AudioTrack上的任何其他API。

*如果sharedBuffer为非0，则frameCount参数将被忽略，并且

*替换为以帧为单位的共享缓冲区的总分配大小。

*

*以上AudioTrack构造函数中未列出的参数：

*

*threadCanCallJava：是否从附加的线程进行回调，从而可以调用JNI。

*仅当AudioTrack对象用于java android.media.AudioTrack时设置为true

*在其JNI代码中。

*

*内部状态后置条件：

*（mStreamType==AUDIO_STREAM_DEFAULT）表示此AudioTrack具有有效属性

*/

            status_t    set(
                audio_stream_type_t streamType,
                            uint32_t sampleRate,
                            audio_format_t format,
                            audio_channel_mask_t channelMask,
                            size_t frameCount   = 0,
                            audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                            callback_t cbf      = NULL,
                            void* user          = NULL,
                            int32_t notificationFrames = 0,
                            const sp<IMemory>& sharedBuffer = 0,
                            bool threadCanCallJava = false,
                            audio_session_t sessionId  = AUDIO_SESSION_ALLOCATE,
                            transfer_type transferType = TRANSFER_DEFAULT,
                            const audio_offload_info_t *offloadInfo = NULL,
                            uid_t uid = AUDIO_UID_INVALID,
                            pid_t pid = -1,
                            const audio_attributes_t* pAttributes = NULL,
                            bool doNotReconnect = false,
                            float maxRequiredSpeed = 1.0f,
                            audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE
                            
                            );

    /* Result of constructing the AudioTrack. This must be checked for successful initialization
     * before using any AudioTrack API (except for set()), because using
     * an uninitialized AudioTrack produces undefined results.
     * See set() method above for possible return codes.
     */

    /*构建AudioTrack的结果。必须检查是否成功初始化

*在使用任何AudioTrack API之前（set（）除外），因为使用

*未初始化的AudioTrack会产生未定义的结果。

*有关可能的返回代码，请参阅上面的set（）方法。

*/

            status_t    initCheck() const   { return mStatus; }

    /* Returns this track's estimated latency in milliseconds.
     * This includes the latency due to AudioTrack buffer size, AudioMixer (if any)
     * and audio hardware driver.
     */

    /*返回此曲目的估计延迟（以毫秒为单位）。

*这包括由于AudioTrack缓冲区大小、AudioMixer（如果有）造成的延迟

*和音频硬件驱动程序。

*/
            uint32_t    latency();

    /* Returns the number of application-level buffer underruns
     * since the AudioTrack was created.
     */

    /*返回应用程序级缓冲区不足的次数

*自从AudioTrack创建以来。

*/
            uint32_t    getUnderrunCount() const;

    /* getters, see constructors and set() */

            audio_stream_type_t streamType() const;
            audio_format_t format() const   { return mFormat; }

    /* Return frame size in bytes, which for linear PCM is
     * channelCount * (bit depth per channel / 8).
     * channelCount is determined from channelMask, and bit depth comes from format.
     * For non-linear formats, the frame size is typically 1 byte.
     */
            size_t      frameSize() const   { return mFrameSize; }

            uint32_t    channelCount() const { return mChannelCount; }
            size_t      frameCount() const  { return mFrameCount; }

    /*
     * Return the period of the notification callback in frames.
     * This value is set when the AudioTrack is constructed.
     * It can be modified if the AudioTrack is rerouted.
     */
            uint32_t    getNotificationPeriodInFrames() const { return mNotificationFramesAct; }

    /* Return effective size of audio buffer that an application writes to
     * or a negative error if the track is uninitialized.
     */

    /*返回应用程序写入的音频缓冲区的有效大小

*或者如果轨道未初始化则为负错误。

*/
            ssize_t     getBufferSizeInFrames();

    /* Returns the buffer duration in microseconds at current playback rate.
     */

    /*返回当前播放速率下的缓冲区持续时间（以微秒为单位）。

*/
            status_t    getBufferDurationInUs(int64_t *duration);

    /* Set the effective size of audio buffer that an application writes to.
     * This is used to determine the amount of available room in the buffer,
     * which determines when a write will block.
     * This allows an application to raise and lower the audio latency.
     * The requested size may be adjusted so that it is
     * greater or equal to the absolute minimum and
     * less than or equal to the getBufferCapacityInFrames().
     * It may also be adjusted slightly for internal reasons.
     *
     * Return the final size or a negative error if the track is uninitialized
     * or does not support variable sizes.
     */

    /*设置应用程序写入的音频缓冲区的有效大小。

*这用于确定缓冲器中的可用空间的量，

*其确定写入何时将被阻止。

*这允许应用程序提高和降低音频延迟。

*可以调整请求的大小，使其

*大于或等于绝对最小值，以及

*小于或等于getBufferCapacityInFrame（）。

*由于内部原因，它也可能略有调整。

*

*如果轨道未初始化，则返回最终大小或负错误

*或者不支持可变大小。

*/
            ssize_t     setBufferSizeInFrames(size_t size);

    /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
            sp<IMemory> sharedBuffer() const { return mSharedBuffer; }

    /*
     * return metrics information for the current track.
     */
    /*

*返回当前曲目的度量信息。

*/
            status_t getMetrics(mediametrics::Item * &item);

    /*
     * Set name of API that is using this object.
     * For example "aaudio" or "opensles".
     * This may be logged or reported as part of MediaMetrics.
     */
            void setCallerName(const std::string &name) {
                mCallerName = name;
            }

            // Return the caller name recorded by setCallerName().
            std::string getCallerName() const {
                return mCallerName;
            }

    /* After it's created the track is not active. Call start() to
     * make it active. If set, the callback will start being called.
     * If the track was previously paused, volume is ramped up over the first mix buffer.
     */
    /*创建轨迹后，该轨迹将不处于活动状态。调用start（）到

*使其处于活动状态。如果设置了，将开始调用回调。

*如果曲目先前已暂停，则音量将在第一个混合缓冲区上递增。

*/
            status_t        start();

    /* Stop a track.
     * In static buffer mode, the track is stopped immediately.
     * In streaming mode, the callback will cease being called.  Note that obtainBuffer() still
     * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
     * In streaming mode the stop does not occur immediately: any data remaining in the buffer
     * is first drained, mixed, and output, and only then is the track marked as stopped.
     */
    /*停下一条轨道。

*在静态缓冲模式下，轨迹会立即停止。

*在流模式下，将停止调用回调。注意获取缓冲区（）仍然

*工作，并将填充缓冲区，直到池耗尽，然后返回WOULD_BLOCK。

*在流模式下，停止不会立即发生：缓冲区中剩余的任何数据

*首先排出、混合并输出，然后才将轨道标记为停止。

*/
            void        stop();
            bool        stopped() const;

    /* Flush a stopped or paused track. All previously buffered data is discarded immediately.
     * This has the effect of draining the buffers without mixing or output.
     * Flush is intended for streaming mode, for example before switching to non-contiguous content.
     * This function is a no-op if the track is not stopped or paused, or uses a static buffer.
     */

    /*冲洗停止或暂停的轨迹。所有先前缓冲的数据将立即丢弃。

*这具有在没有混合或输出的情况下排空缓冲区的效果。

*Flush用于流模式，例如在切换到非连续内容之前。

*如果轨道没有停止或暂停，或者使用静态缓冲区，则此功能为非操作。

*/
            void        flush();

    /* Pause a track. After pause, the callback will cease being called and
     * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
     * and will fill up buffers until the pool is exhausted.
     * Volume is ramped down over the next mix buffer following the pause request,
     * and then the track is marked as paused.  It can be resumed with ramp up by start().
     */
    /*暂停曲目。暂停后，回调将停止调用，并且

*获取缓冲区返回WOULD_BLOCK。请注意，获取缓冲区（）仍然有效

*并且将填充缓冲器直到池耗尽。

*在暂停请求之后，音量在下一个混合缓冲区上倾斜下降，

*然后该轨道被标记为暂停。它可以通过start（）的斜坡上升来恢复。

*/
            void        pause();

    /* Set volume for this track, mostly used for games' sound effects
     * left and right volumes. Levels must be >= 0.0 and <= 1.0.
     * This is the older API.  New applications should use setVolume(float) when possible.
     */

    /*设置此曲目的音量，主要用于游戏的音效

*左侧和右侧体积。级别必须大于等于0.0且小于等于1.0。

*这是较旧的API。新应用程序应尽可能使用setVolume（float）。

*/
            status_t    setVolume(float left, float right);

    /* Set volume for all channels.  This is the preferred API for new applications,
     * especially for multi-channel content.
     */
    /*设置所有频道的音量。这是用于新应用的优选API，

*尤其是对于多频道内容。

*/
            status_t    setVolume(float volume);

    /* Set the send level for this track. An auxiliary effect should be attached
     * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
     */

    /*设置此曲目的发送级别。应附加辅助效果

*使用attachEffect（）将其添加到曲目。级别必须大于等于0.0且小于等于1.0。

//TSR-Start
setAuxEffectSendLevel是Android平台上用于控制音频信号发送到混响效果器的方法之一。
这个方法通常用于设置特定音频通道发送到混响效果器的级别。通过调整这个级别，您可以控制音频信号与混响效果器的混合程度。这在音频处理和音效应用程序中非常有用
//TSR-End

*/
            status_t    setAuxEffectSendLevel(float level);
            void        getAuxEffectSendLevel(float* level) const;

    /* Set source sample rate for this track in Hz, mostly used for games' sound effects.
     * Zero is not permitted.
     */

    /*将此曲目的源采样率设置为Hz，主要用于游戏的音效。

*不允许为零。

*/
            status_t    setSampleRate(uint32_t sampleRate);

    /* Return current source sample rate in Hz.
     * If specified as zero in constructor or set(), this will be the sink sample rate.
     */
            uint32_t    getSampleRate() const;

    /* Return the original source sample rate in Hz. This corresponds to the sample rate
     * if playback rate had normal speed and pitch.
     */

    /*返回以Hz为单位的原始源采样率。这对应于采样率

*如果播放速率具有正常的速度和音高。

*/
            uint32_t    getOriginalSampleRate() const;

    /* Set source playback rate for timestretch
     * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
     * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
     *
     * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
     * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
     *
     * Speed increases the playback rate of media, but does not alter pitch.
     * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
     */
    /*设置时间间隔的源播放速率

*1.0为正常速度：<1.0较慢，>1.0较快

*1.0为正常节距：<1.0为较低节距，>1.0为较高节距

*

*AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX

*AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX

*

*速度可以提高媒体的播放速率，但不会改变音高。

*音调会增加媒体的“音调频率”，但不会影响播放速率。

*/

            status_t    setPlaybackRate(const AudioPlaybackRate &playbackRate);

    /* Return current playback rate */
            const AudioPlaybackRate& getPlaybackRate() const;

    /* Enables looping and sets the start and end points of looping.
     * Only supported for static buffer mode.
     *
     * Parameters:
     *
     * loopStart:   loop start in frames relative to start of buffer.
     * loopEnd:     loop end in frames relative to start of buffer.
     * loopCount:   number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
     *              pending or active loop. loopCount == -1 means infinite looping.
     *
     * For proper operation the following condition must be respected:
     *      loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount().
     *
     * If the loop period (loopEnd - loopStart) is too small for the implementation to support,
     * setLoop() will return BAD_VALUE.  loopCount must be >= -1.
     *
     */

    /*启用循环并设置循环的起点和终点。

*仅支持静态缓冲区模式。

*

*参数：

*

*loopStart：帧中相对于缓冲区开始的循环开始。

*loopEnd：帧中相对于缓冲区开始的循环结束。

*loopCount：要执行的循环数。调用loopCount==0的setLoop（）将取消任何

*挂起或活动循环。loopCount==-1表示无限循环。

*

*为了正确操作，必须遵守以下条件：

*loopCount！=0表示0 <= loopStart < loopEnd <= frameCount（）。

*

*如果循环周期（loopEnd-loopStart）对于实现来说太小而无法支持，

*setLoop（）将返回BAD_VALUE。loopCount必须>=-1。

*

*/

            status_t    setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);

    /* Sets marker position. When playback reaches the number of frames specified, a callback with
     * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
     * notification callback.  To set a marker at a position which would compute as 0,
     * a workaround is to set the marker at a nearby position such as ~0 or 1.
     * If the AudioTrack has been opened with no callback function associated, the operation will
     * fail.
     *
     * Parameters:
     *
     * marker:   marker position expressed in wrapping (overflow) frame units,
     *           like the return value of getPosition().
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - INVALID_OPERATION: the AudioTrack has no callback installed.
     */

    /*设置标记位置。当播放达到指定的帧数时，带有

*调用事件类型event_MARKER。调用标记为==0的setMarkerPosition取消标记

*通知回调。为了在将计算为0的位置处设置标记，

*解决方法是将标记设置在附近的位置，例如~0或1。

*如果打开AudioTrack时没有关联回调函数，则操作将

*失败。

*

*参数：

*

*标记：以换行（溢出）帧为单位表示的标记位置，

*比如getPosition（）的返回值。

*

*返回的状态（来自utils/Errors.h）可以是：

*-NO_ERROR：操作成功

*-INVALID_OPERATION：AudioTrack没有安装回调。

*/
            status_t    setMarkerPosition(uint32_t marker);
            status_t    getMarkerPosition(uint32_t *marker) const;

    /* Sets position update period. Every time the number of frames specified has been played,
     * a callback with event type EVENT_NEW_POS is called.
     * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
     * callback.
     * If the AudioTrack has been opened with no callback function associated, the operation will
     * fail.
     * Extremely small values may be rounded up to a value the implementation can support.
     *
     * Parameters:
     *
     * updatePeriod:  position update notification period expressed in frames.
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - INVALID_OPERATION: the AudioTrack has no callback installed.
     */

    /*设置位置更新周期。每次播放指定帧数时，

*调用事件类型为EVENT_NEW_POS的回调。

*调用updatePeriod == 0的setPositionUpdatePeriod将取消新的位置通知

*回调。

*如果打开AudioTrack时没有关联回调函数，则操作将

*失败。

*极小的值可以四舍五入到实现可以支持的值。

*

*参数：

*

*updatePeriod：以帧表示的位置更新通知周期。

*

*返回的状态（来自utils/Errors.h）可以是：

*-NO_ERROR：操作成功

*-INVALID_OPERATION：AudioTrack没有安装回调。

*/
            status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
            status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;

    /* Sets playback head position.
     * Only supported for static buffer mode.
     *
     * Parameters:
     *
     * position:  New playback head position in frames relative to start of buffer.
     *            0 <= position <= frameCount().  Note that end of buffer is permitted,
     *            but will result in an immediate underrun if started.
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode.
     *  - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack
     *               buffer
     */
    /*设置播放头的位置。

*仅支持静态缓冲区模式。

*

*参数：

*

*position：帧中相对于缓冲区起点的新播放头位置。

*0<=位置<=帧计数（）。注意，允许缓冲区结束，

*但如果启动，将立即导致欠载。

*

*返回的状态（来自utils/Errors.h）可以是：

*-NO_ERROR：操作成功

*-INVALID_OPERATION：AudioTrack未停止或暂停，或处于流模式。

*-BAD_VALUE:指定的位置超出了AudioTrack中的帧数

*缓冲器

*/
            status_t    setPosition(uint32_t position);

    /* Return the total number of frames played since playback start.
     * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
     * It is reset to zero by flush(), reload(), and stop().
     *
     * Parameters:
     *
     *  position:  Address where to return play head position.
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - BAD_VALUE:  position is NULL
     */
            status_t    getPosition(uint32_t *position);

    /* For static buffer mode only, this returns the current playback position in frames
     * relative to start of buffer.  It is analogous to the position units used by
     * setLoop() and setPosition().  After underrun, the position will be at end of buffer.
     */

    /*仅对于静态缓冲区模式，这将以帧为单位返回当前播放位置

*相对于缓冲区的开始。它类似于使用的位置单位

*setLoop（）和setPosition（）。欠载后，该位置将位于缓冲区的末端。

*/
            status_t    getBufferPosition(uint32_t *position);

    /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
     * rewriting the buffer before restarting playback after a stop.
     * This method must be called with the AudioTrack in paused or stopped state.
     * Not allowed in streaming mode.
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode.
     */

/*
强制AudioTrack缓冲区处于满状态。当播放静态缓冲区时，此方法避免
*在停止之后重新开始播放之前重写缓冲区。

*必须在AudioTrack处于暂停或停止状态的情况下调用此方法。

*在流媒体模式下不允许。

*

*

*返回的状态（来自utils/Errors.h）可以是：

*-NO_ERROR：操作成功

*-INVALID_OPERATION：AudioTrack未停止或暂停，或处于流模式。

*/

            status_t    reload();

    /**
     * @param transferType
     * @return text string that matches the enum name
     */
            static const char * convertTransferToText(transfer_type transferType);

public:
    /* Returns a handle on the audio output used by this AudioTrack.
     *
     * Parameters:
     *  none.
     *
     * Returned value:
     *  handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
     *  track needed to be re-created but that failed
     */
    /*返回此AudioTrack使用的音频输出的句柄。
*参数：
*没有。
*返回值：
*处理音频硬件输出，或者audio_IO_handle_NONE，如果
*需要重新创建轨道，但失败了

*/
            audio_io_handle_t    getOutput() const;

    /* Selects the audio device to use for output of this AudioTrack. A value of
     * AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
     *
     * Parameters:
     *  The device ID of the selected device (as returned by the AudioDevicesManager API).
     *
     * Returned value:
     *  - NO_ERROR: successful operation
     *    TODO: what else can happen here?
     */

    /*选择用于输出此AudioTrack的音频设备。的值
*AUDIO_PORT_HANDLE_NONE表示默认（AudioPolicyManager）路由。
*参数：
*所选设备的设备ID（由AudioDevicesManager API返回）。
*返回值：
*-NO_ERROR：操作成功
*TODO：这里还能发生什么？
*/
            status_t    setOutputDevice(audio_port_handle_t deviceId);

    /* Returns the ID of the audio device selected for this AudioTrack.
     * A value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
     *
     * Parameters:
     *  none.
     */
     audio_port_handle_t getOutputDevice();

     /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
      * attached.
      * When the AudioTrack is inactive, the device ID returned can be either:
      * - AUDIO_PORT_HANDLE_NONE if the AudioTrack is not attached to any output.
      * - The device ID used before paused or stopped.
      * - The device ID selected by audio policy manager of setOutputDevice() if the AudioTrack
      * has not been started yet.
      *
      * Parameters:
      *  none.
      */

/*返回此AudioTrack所在的输出实际使用的音频设备的ID
*附件。
*当AudioTrack处于非活动状态时，返回的设备ID可以是：
*-如果AudioTrack未连接到任何输出，则为AUDIO_PORT_HANDLE_NONE。
*-暂停或停止之前使用的设备ID。
*-如果AudioTrack
*尚未启动。
*参数：
*没有。
*/
     
     audio_port_handle_t getRoutedDeviceId();

    /* Returns the unique session ID associated with this track.
     *
     * Parameters:
     *  none.
     *
     * Returned value:
     *  AudioTrack session ID.
     */
            audio_session_t getSessionId() const { return mSessionId; }

    /* Attach track auxiliary output to specified effect. Use effectId = 0
     * to detach track from effect.
     *
     * Parameters:
     *
     * effectId:  effectId obtained from AudioEffect::id().
     *
     * Returned status (from utils/Errors.h) can be:
     *  - NO_ERROR: successful operation
     *  - INVALID_OPERATION: the effect is not an auxiliary effect.
     *  - BAD_VALUE: The specified effect ID is invalid
     */


    /*将轨道辅助输出附加到指定效果。使用effectId=0
*将轨迹与效果分离。
*参数：
*effectId:从AudioEffect:：id（）获得的effectId。
*返回的状态（来自utils/Errors.h）可以是：
*-NO_ERROR：操作成功
*-INVALID_OPERATION：该效果不是辅助效果。
*-BAD_VALUE:指定的效果ID无效
*/
            status_t    attachAuxEffect(int effectId);

    /* Public API for TRANSFER_OBTAIN mode.
     * Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
     * After filling these slots with data, the caller should release them with releaseBuffer().
     * If the track buffer is not full, obtainBuffer() returns as many contiguous
     * [empty slots for] frames as are available immediately.
     *
     * If nonContig is non-NULL, it is an output parameter that will be set to the number of
     * additional non-contiguous frames that are predicted to be available immediately,
     * if the client were to release the first frames and then call obtainBuffer() again.
     * This value is only a prediction, and needs to be confirmed.
     * It will be set to zero for an error return.
     *
     * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
     * regardless of the value of waitCount.
     * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
     * maximum timeout based on waitCount; see chart below.
     * Buffers will be returned until the pool
     * is exhausted, at which point obtainBuffer() will either block
     * or return WOULD_BLOCK depending on the value of the "waitCount"
     * parameter.
     *
     * Interpretation of waitCount:
     *  +n  limits wait time to n * WAIT_PERIOD_MS,
     *  -1  causes an (almost) infinite wait time,
     *   0  non-blocking.
     *
     * Buffer fields
     * On entry:
     *  frameCount  number of [empty slots for] frames requested
     *  size        ignored
     *  raw         ignored
     *  sequence    ignored
     * After error return:
     *  frameCount  0
     *  size        0
     *  raw         undefined
     *  sequence    undefined
     * After successful return:
     *  frameCount  actual number of [empty slots for] frames available, <= number requested
     *  size        actual number of bytes available
     *  raw         pointer to the buffer
     *  sequence    IAudioTrack instance sequence number, as of obtainBuffer()
     */



/*TRANSFER_OBTAIN模式的公共API。
*获得最多为“audioBuffer->frameCount”空插槽的帧缓冲区。
*在用数据填充这些槽之后，调用者应该使用releaseBuffer（）来释放它们。
*如果轨道缓冲区未满，则获取缓冲区（）返回尽可能多的连续
*[空插槽用于]帧，因为它们立即可用。
*如果nonContig为非NULL，则它是一个输出参数，将被设置为
*被预测为立即可用的附加非连续帧，
*如果客户端要释放第一帧，然后再次调用获取缓冲区（）。
*这个值只是一个预测，需要确认。
*它将被设置为零以返回错误。
*如果轨道缓冲区已满且轨道已停止，则获取缓冲区（）返回WOULD_BLOCK
*而不管waitCount的值如何。
*如果轨道缓冲区已满且轨道未停止，则获取缓冲区（）会用
*基于waitCount的最大超时；见下图。
*缓冲区将一直返回到池
*已用尽，此时obtainBuffer（）将阻塞
*或根据“waitCount”的值返回WOULD_BLOCK
*参数。
*waitCount的解释：
*+n将等待时间限制为n * WAIT_PERIOD_MS，
*-1导致（几乎）无限的等待时间，
*0非阻塞。
*缓冲区字段
*进入时：
*frameCount请求的[空插槽数]帧数
*忽略大小
*已忽略原始
*已忽略序列
*返回错误后：
*frameCount 0
*大小0
*原始未定义
*序列未定义
*成功返回后：
*frameCount可用[空插槽]帧的实际数量，<=请求的数量
*size实际可用字节数
*缓冲区的原始指针
*sequence IAudioTrack实例序列号，截至获取缓冲区（）
*/


            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
                                size_t *nonContig = NULL);

private:
    /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
     * additional non-contiguous frames that are predicted to be available immediately,
     * if the client were to release the first frames and then call obtainBuffer() again.
     * This value is only a prediction, and needs to be confirmed.
     * It will be set to zero for an error return.
     * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
     * in case the requested amount of frames is in two or more non-contiguous regions.
     * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
     */
            status_t    obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
                                     struct timespec *elapsed = NULL, size_t *nonContig = NULL);
public:

    /* Public API for TRANSFER_OBTAIN mode.
     * Release a filled buffer of frames for AudioFlinger to process.
     *
     * Buffer fields:
     *  frameCount  currently ignored but recommend to set to actual number of frames filled
     *  size        actual number of bytes filled, must be multiple of frameSize
     *  raw         ignored
     */
            void        releaseBuffer(const Buffer* audioBuffer);

    /* As a convenience we provide a write() interface to the audio buffer.
     * Input parameter 'size' is in byte units.
     * This is implemented on top of obtainBuffer/releaseBuffer. For best
     * performance use callbacks. Returns actual number of bytes written >= 0,
     * or one of the following negative status codes:
     *      INVALID_OPERATION   AudioTrack is configured for static buffer or streaming mode
     *      BAD_VALUE           size is invalid
     *      WOULD_BLOCK         when obtainBuffer() returns same, or
     *                          AudioTrack was stopped during the write
     *      DEAD_OBJECT         when AudioFlinger dies or the output device changes and
     *                          the track cannot be automatically restored.
     *                          The application needs to recreate the AudioTrack
     *                          because the audio device changed or AudioFlinger died.
     *                          This typically occurs for direct or offload tracks
     *                          or if mDoNotReconnect is true.
     *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
     * Default behavior is to only return when all data has been transferred. Set 'blocking' to
     * false for the method to return immediately without waiting to try multiple times to write
     * the full content of the buffer.
     */

    /*为了方便起见，我们为音频缓冲区提供了一个write（）接口。

*输入参数“size”以字节为单位。

*这是在获得缓冲区/释放缓冲区之上实现的。为了最好

*性能使用回调。返回实际写入的字节数>=0，

*或以下否定状态代码之一：

*INVALID_OPERATION AudioTrack配置为静态缓冲区或流模式

*BAD_VALUE大小无效

*当获取缓冲区（）返回相同值时，WOULD_BLOCK，或

*AudioTrack在写入过程中停止

*DEAD_OBJECT：当AudioFlinger失效或输出设备发生变化，且

*轨道不能自动恢复。

*应用程序需要重新创建AudioTrack

*因为音频设备发生更改或AudioFlinger死亡。

*这种情况通常发生在直达或卸载轨道上

*或者如果mDoNotReconnect为true。

*或IAudioTrack:：start（）或restoreTrack_l（）返回的任何其他错误代码。

*默认行为是仅在传输完所有数据后返回。将“阻止”设置为

*false表示方法立即返回而不等待多次尝试写入

*缓冲区的全部内容。

*/

            ssize_t     write(const void* buffer, size_t size, bool blocking = true);

    /*
     * Dumps the state of an audio track.
     * Not a general-purpose API; intended only for use by media player service to dump its tracks.
     */
            status_t    dump(int fd, const Vector<String16>& args) const;

    /*
     * Return the total number of frames which AudioFlinger desired but were unavailable,
     * and thus which resulted in an underrun.  Reset to zero by stop().
     */
            uint32_t    getUnderrunFrames() const;

    /* Return the audio output flags for this track.
     * Takes mLock so the value is read consistently. */
            audio_output_flags_t getFlags() const
            {
                AutoMutex guard(mLock);
                return mFlags;
            }

    /* Set parameters - only possible when using direct output */
            status_t    setParameters(const String8& keyValuePairs);

    /* Sets the volume shaper object */
            media::VolumeShaper::Status applyVolumeShaper(
                    const sp<media::VolumeShaper::Configuration>& configuration,
                    const sp<media::VolumeShaper::Operation>& operation);

    /* Gets the volume shaper state */
            sp<media::VolumeShaper::State> getVolumeShaperState(int id);

    /* Selects the presentation (if available) */
            status_t    selectPresentation(int presentationId, int programId);

    /* Get parameters */
            String8     getParameters(const String8& keys);

    /* Poll for a timestamp on demand.
     * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
     * or if you need to get the most recent timestamp outside of the event callback handler.
     * Caution: calling this method too often may be inefficient;
     * if you need a high resolution mapping between frame position and presentation time,
     * consider implementing that at application level, based on the low resolution timestamps.
     * Returns NO_ERROR    if timestamp is valid.
     *         WOULD_BLOCK if called in STOPPED or FLUSHED state, or if called immediately after
     *                     start/ACTIVE, when the number of frames consumed is less than the
     *                     overall hardware latency to physical output. In WOULD_BLOCK cases,
     *                     one might poll again, or use getPosition(), or use 0 position and
     *                     current time for the timestamp.
     *         DEAD_OBJECT if AudioFlinger dies or the output device changes and
     *                     the track cannot be automatically restored.
     *                     The application needs to recreate the AudioTrack
     *                     because the audio device changed or AudioFlinger died.
     *                     This typically occurs for direct or offload tracks
     *                     or if mDoNotReconnect is true.
     *         INVALID_OPERATION  wrong state, or some other error.
     *
     * The timestamp parameter is undefined on return, if status is not NO_ERROR.
     */

    /*按需轮询时间戳。

*如果EVENT_NEW_TIMESTAMP的交付频率不足以满足您的需求，请使用，

*或者如果您需要获取事件回调处理程序之外的最新时间戳。

*注意：过于频繁地调用此方法可能效率低下；

*如果您需要帧位置和呈现时间之间的高分辨率映射，

*考虑在应用程序级别基于低分辨率时间戳来实现它。

*如果时间戳有效，则返回NO_ERROR。

*如果在STOPPED或FLUSHED状态下调用，则返回WOULD_BLOCK；或者在之后立即调用

*start/ACTIVE，当消耗的帧数小于

*到物理输出的总体硬件延迟。在WOULD_BLOCK的情况下，

*可以再次轮询，或者使用getPosition（），或者使用0位置和

*时间戳的当前时间。

*DEAD_OBJECT：如果AudioFlinger失效或输出设备发生变化，且

*轨道不能自动恢复。

*应用程序需要重新创建AudioTrack

*因为音频设备发生更改或AudioFlinger死亡。

*这种情况通常发生在直达或卸载轨道上

*或者如果mDoNotReconnect为true。

*INVALID_OPERATION状态错误或其他错误。

*

*如果状态不是NO_ERROR，则返回时未定义时间戳参数。

*/
            status_t    getTimestamp(AudioTimestamp& timestamp);
private:
            status_t    getTimestamp_l(AudioTimestamp& timestamp);
public:

    /* Return the extended timestamp, with additional timebase info and improved drain behavior.
     *
     * This is similar to the AudioTrack.java API:
     * getTimestamp(@NonNull AudioTimestamp timestamp, @AudioTimestamp.Timebase int timebase)
     *
     * Some differences between this method and the getTimestamp(AudioTimestamp& timestamp) method
     *
     *   1. stop() by itself does not reset the frame position.
     *      A following start() resets the frame position to 0.
     *   2. flush() by itself does not reset the frame position.
     *      The frame position advances by the number of frames flushed,
     *      when the first frame after flush reaches the audio sink.
     *   3. BOOTTIME clock offsets are provided to help synchronize with
     *      non-audio streams, e.g. sensor data.
     *   4. Position is returned with 64 bits of resolution.
     *
     * Parameters:
     *  timestamp: A pointer to the caller allocated ExtendedTimestamp.
     *
     * Returns NO_ERROR    on success; timestamp is filled with valid data.
     *         BAD_VALUE   if timestamp is NULL.
     *         WOULD_BLOCK if called immediately after start() when the number
     *                     of frames consumed is less than the
     *                     overall hardware latency to physical output. In WOULD_BLOCK cases,
     *                     one might poll again, or use getPosition(), or use 0 position and
     *                     current time for the timestamp.
     *                     If WOULD_BLOCK is returned, the timestamp is still
     *                     modified with the LOCATION_CLIENT portion filled.
     *         DEAD_OBJECT if AudioFlinger dies or the output device changes and
     *                     the track cannot be automatically restored.
     *                     The application needs to recreate the AudioTrack
     *                     because the audio device changed or AudioFlinger died.
     *                     This typically occurs for direct or offloaded tracks
     *                     or if mDoNotReconnect is true.
     *         INVALID_OPERATION  if called on a offloaded or direct track.
     *                     Use getTimestamp(AudioTimestamp& timestamp) instead.
     */
            status_t getTimestamp(ExtendedTimestamp *timestamp);
private:
            status_t getTimestamp_l(ExtendedTimestamp *timestamp);
public:

    /* Add an AudioDeviceCallback. The caller will be notified when the audio device to which this
     * AudioTrack is routed is updated.
     * Replaces any previously installed callback.
     * Parameters:
     *  callback:  The callback interface
     * Returns NO_ERROR if successful.
     *         INVALID_OPERATION if the same callback is already installed.
     *         NO_INIT or PREMISSION_DENIED if AudioFlinger service is not reachable
     *         BAD_VALUE if the callback is NULL
     */

    /*添加AudioDeviceCallback。呼叫方将在收到此消息的音频设备

*AudioTrack已路由并已更新。

*替换以前安装的任何回调。

*参数：

*callback：回调接口

*如果成功，则返回NO_ERROR。

*INVALID_OPERATION（如果已安装相同的回调）。

*如果无法访问AudioFlinger服务，则为NO_INIT或PREMISSION_DENIED

*如果回调为NULL，则返回BAD_VALUE

*/
            status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);

    /* remove an AudioDeviceCallback.
     * Parameters:
     *  callback:  The callback interface
     * Returns NO_ERROR if successful.
     *         INVALID_OPERATION if the callback is not installed
     *         BAD_VALUE if the callback is NULL
     */
            status_t removeAudioDeviceCallback(
                    const sp<AudioSystem::AudioDeviceCallback>& callback);

            // AudioSystem::AudioDeviceCallback> virtuals
            virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                             audio_port_handle_t deviceId);

    /* Obtain the pending duration in milliseconds for playback of pure PCM
     * (mixable without embedded timing) data remaining in AudioTrack.
     *
     * This is used to estimate the drain time for the client-server buffer
     * so the choice of ExtendedTimestamp::LOCATION_SERVER is default.
     * One may optionally request to find the duration to play through the HAL
     * by specifying a location ExtendedTimestamp::LOCATION_KERNEL; however,
     * INVALID_OPERATION may be returned if the kernel location is unavailable.
     *
     * Returns NO_ERROR  if successful.
     *         INVALID_OPERATION if ExtendedTimestamp::LOCATION_KERNEL cannot be obtained
     *                   or the AudioTrack does not contain pure PCM data.
     *         BAD_VALUE if msec is nullptr or location is invalid.
     */

/*获取纯PCM播放的挂起持续时间（以毫秒为单位）
*AudioTrack中剩余的（可混合，无需嵌入计时）数据。
*这用于估计客户端服务器缓冲区的消耗时间
*因此默认选择ExtendedTimestamp:：LOCATION_SERVER。
*可以选择性地请求通过HAL找到播放的持续时间
*通过指定位置ExtendedTimestamp:：location_KERNEL；然而
*如果内核位置不可用，则可能返回INVALID_OPERATION。
*如果成功，则返回NO_ERROR。
*INVALID_OPERATION如果无法获得ExtendedTimestamp:：LOCATION_KERNEL
*或者AudioTrack不包含纯PCM数据。
*如果msec为nullptr或位置无效，则BAD_VALUE。
*/


            status_t pendingDuration(int32_t *msec,
                    ExtendedTimestamp::Location location = ExtendedTimestamp::LOCATION_SERVER);

    /* hasStarted() is used to determine if audio is now audible at the device after
     * a start() command. The underlying implementation checks a nonzero timestamp position
     * or increment for the audible assumption.
     *
     * hasStarted() returns true if the track has been started() and audio is audible
     * and no subsequent pause() or flush() has been called.  Immediately after pause() or
     * flush() hasStarted() will return false.
     *
     * If stop() has been called, hasStarted() will return true if audio is still being
     * delivered or has finished delivery (even if no audio was written) for both offloaded
     * and normal tracks. This property removes a race condition in checking hasStarted()
     * for very short clips, where stop() must be called to finish drain.
     *
     * In all cases, hasStarted() may turn false briefly after a subsequent start() is called
     * until audio becomes audible again.
     */

/*hasStarted（）用于确定在
*start（）命令。底层实现检查非零时间戳位置
*或针对可听假设的增量。
*如果曲目已经启动（）并且可以听到音频，hasStarted（）将返回true
*并且没有调用后续的pause（）或flush（）。紧接在pause（）或之后
*flush（）hasStarted（）将返回false。
*如果调用了stop（），那么如果音频仍在播放，hasStarted（）将返回true
*已交付或已完成交付（即使未写入音频）
*和正常轨道。此属性删除检查hasStarted（）中的竞争条件
*对于非常短的剪辑，必须调用stop（）来完成排空。
*在所有情况下，调用后续的start（）后，hasStarted（）可能会短暂变为false
*直到音频再次变得可听。
*/

            bool hasStarted(); // not const

            /* True while the track is actively playing or draining after stop().
             * Takes mLock to read mState consistently. */
            bool isPlaying() {
                AutoMutex guard(mLock);
                const bool active   = (mState == STATE_ACTIVE);
                const bool stopping = (mState == STATE_STOPPING);
                return active || stopping;
            }

    /* Get the unique port ID assigned to this AudioTrack instance by audio policy manager.
     * The ID is unique across all audioserver clients and can change during the life cycle
     * of a given AudioTrack instance if the connection to audioserver is restored.
     */

/*获取音频策略管理器分配给此AudioTrack实例的唯一端口ID。
*ID在所有音频服务器客户端中都是唯一的，并且可以在生命周期中更改
*如果恢复了与音频服务器的连接，则为给定AudioTrack实例的。
*/

            audio_port_handle_t getPortId() const { return mPortId; };

            // Store the client-supplied IAudioTrackCallback by forwarding it to
            // the internal mAudioTrackCallback holder.
            void setAudioTrackCallback(const sp<media::IAudioTrackCallback>& callback)
            {
                mAudioTrackCallback->setAudioTrackCallback(callback);
            }

 protected:
    /* copying audio tracks is not allowed */
                        AudioTrack(const AudioTrack& other);
            AudioTrack& operator = (const AudioTrack& other);

    /* A small internal worker thread owned by AudioTrack.  Its threadLoop()
     * repeatedly invokes AudioTrack::processAudioBuffer() on the receiver
     * (see the NS_* comment below this class). */
    class AudioTrackThread : public Thread
    {
    public:
        explicit AudioTrackThread(AudioTrack& receiver);

        // Do not call Thread::requestExitAndWait() without first calling requestExit().
        // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
        virtual void        requestExit();

                void        pause();    // suspend thread from execution at next loop boundary
                void        resume();   // allow thread to execute, if not requested to exit
                void        wake();     // wake to handle changed notification conditions.

    private:
                void        pauseInternal(nsecs_t ns = 0LL);
                                        // like pause(), but only used internally within thread

        friend class AudioTrack;
        virtual bool        threadLoop();
        AudioTrack&         mReceiver;  // the AudioTrack this thread services
        virtual ~AudioTrackThread();
        Mutex               mMyLock;    // Thread::mLock is private
        Condition           mMyCond;    // Thread::mThreadExitedCondition is private
        bool                mPaused;    // whether thread is requested to pause at next loop entry
        bool                mPausedInt; // whether thread internally requests pause
        nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
        bool                mIgnoreNextPausedInt;   // skip any internal pause and go immediately
                                        // to processAudioBuffer() as state may have changed
                                        // since pause time calculated.
    };

            // body of AudioTrackThread::threadLoop()
            // returns the maximum amount of time before we would like to run again, where:
            //      0           immediately
            //      > 0         no later than this many nanoseconds from now
            //      NS_WHENEVER still active but no particular deadline
            //      NS_INACTIVE inactive so don't run again until re-started
            //      NS_NEVER    never again
            static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
            nsecs_t processAudioBuffer();

            // caller must hold lock on mLock for all _l methods

            void updateLatency_l(); // updates mAfLatency and mLatency from AudioSystem cache

            status_t createTrack_l();

            // can only be called when mState != STATE_ACTIVE
            void flush_l();

            void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);

            // FIXME enum is faster than strcmp() for parameter 'from'
            status_t restoreTrack_l(const char *from);

            uint32_t    getUnderrunCount_l() const;

            bool     isOffloaded() const;
            bool     isDirect() const;
            bool     isOffloadedOrDirect() const;

            // Caller must hold mLock: true when the compress-offload output flag is set.
            bool     isOffloaded_l() const {
                return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0;
            }

            // Caller must hold mLock: true for compress-offload or direct outputs.
            bool     isOffloadedOrDirect_l() const {
                return (mFlags
                        & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) != 0;
            }

            // True when the track output is direct.
            // Caller must hold mLock (see the _l convention above).
            bool     isDirect_l() const
            {
                return static_cast<bool>(mFlags & AUDIO_OUTPUT_FLAG_DIRECT);
            }

            // pure pcm data is mixable (which excludes HW_AV_SYNC, with embedded timing)
            // Caller must hold mLock (see the _l convention above).
            bool     isPurePcmData_l() const
            {
                const bool hasHwAvSync = (mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0;
                return !hasHwAvSync && audio_is_linear_pcm(mFormat);
            }

            // increment mPosition by the delta of mServer, and return new value of mPosition
            Modulo<uint32_t> updateAndGetPosition_l();

            // check sample rate and speed is compatible with AudioTrack
            bool     isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed);

            // NOTE(review): presumably re-starts a track the server disabled
            // (e.g. after underrun) — confirm against the implementation.
            void     restartIfDisabled();

            void     updateRoutedDeviceId_l();

    // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
    sp<IAudioTrack>         mAudioTrack;
    sp<IMemory>             mCblkMemory;

    audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
    // Output stream handle this track is attached to.
    audio_io_handle_t       mOutput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getOutputForAttr()

    sp<AudioTrackThread>    mAudioTrackThread;      // dedicated callback thread, if any
    bool                    mThreadCanCallJava;

    float                   mVolume[2];             // per-channel volume (left, right)
    float                   mSendLevel;             // auxiliary effect send level
    mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it
    uint32_t                mOriginalSampleRate;    // sample rate as requested at set() time
    AudioPlaybackRate       mPlaybackRate;
    float                   mMaxRequiredSpeed;      // use PCM buffer size to allow this speed

    // Corresponds to current IAudioTrack, value is reported back by AudioFlinger to the client.
    // This allocated buffer size is maintained by the proxy.
    size_t                  mFrameCount;            // maximum size of buffer

    size_t                  mReqFrameCount;         // frame count to request the first or next time
                                                    // a new IAudioTrack is needed, non-decreasing

    // The following AudioFlinger server-side values are cached in createTrack_l().
    // These values can be used for informational purposes until the track is invalidated,
    // whereupon restoreTrack_l() calls createTrack_l() to update the values.
    uint32_t                mAfLatency;             // AudioFlinger latency in ms
    size_t                  mAfFrameCount;          // AudioFlinger frame count
    uint32_t                mAfSampleRate;          // AudioFlinger sample rate

    // constant after constructor or set()
    audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
    audio_stream_type_t     mStreamType;            // mStreamType == AUDIO_STREAM_DEFAULT implies
                                                    // this AudioTrack has valid attributes
    uint32_t                mChannelCount;
    // e.g. AUDIO_CHANNEL_OUT_STEREO (stereo output)
    // or AUDIO_CHANNEL_IN_STEREO (stereo input)
    audio_channel_mask_t    mChannelMask;
    sp<IMemory>             mSharedBuffer;
    transfer_type           mTransfer;
    audio_offload_info_t    mOffloadInfoCopy;       // local storage referenced by mOffloadInfo
    const audio_offload_info_t* mOffloadInfo;
    audio_attributes_t      mAttributes;

    size_t                  mFrameSize;             // frame size in bytes

    status_t                mStatus;

    // can change dynamically when IAudioTrack invalidated
    uint32_t                mLatency;               // in ms

    // Indicates the current track state.  Protected by mLock.
    enum State {
        STATE_ACTIVE,
        STATE_STOPPED,
        STATE_PAUSED,
        STATE_PAUSED_STOPPING,
        STATE_FLUSHED,
        STATE_STOPPING,
    }                       mState;

    // Returns a human-readable name for a State value (for logging);
    // values outside the enum map to "UNKNOWN".
    static constexpr const char *stateToString(State state)
    {
        if (state == STATE_ACTIVE)          return "STATE_ACTIVE";
        if (state == STATE_STOPPED)         return "STATE_STOPPED";
        if (state == STATE_PAUSED)          return "STATE_PAUSED";
        if (state == STATE_PAUSED_STOPPING) return "STATE_PAUSED_STOPPING";
        if (state == STATE_FLUSHED)         return "STATE_FLUSHED";
        if (state == STATE_STOPPING)        return "STATE_STOPPING";
        return "UNKNOWN";
    }

    // for client callback handler
    callback_t              mCbf;                   // callback handler for events, or NULL
    void*                   mUserData;              // client context, presumably passed to mCbf

    // for notification APIs

    // next 2 fields are const after constructor or set()
    uint32_t                mNotificationFramesReq; // requested number of frames between each
                                                    // notification callback,
                                                    // at initial source sample rate
    uint32_t                mNotificationsPerBufferReq;
                                                    // requested number of notifications per buffer,
                                                    // currently only used for fast tracks with
                                                    // default track buffer size

    uint32_t                mNotificationFramesAct; // actual number of frames between each
                                                    // notification callback,
                                                    // at initial source sample rate
    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
                                                    // mRemainingFrames and mRetryOnPartialBuffer

                                                    // used for static track cbf and restoration
    int32_t                 mLoopCount;             // last setLoop loopCount; zero means disabled
    uint32_t                mLoopStart;             // last setLoop loopStart
    uint32_t                mLoopEnd;               // last setLoop loopEnd
    int32_t                 mLoopCountNotified;     // the last loopCount notified by callback.
                                                    // mLoopCountNotified counts down, matching
                                                    // the remaining loop count for static track
                                                    // playback.

    // These are private to processAudioBuffer(), and are not protected by a lock
    uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
    bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
    uint32_t                mObservedSequence;      // last observed value of mSequence

    Modulo<uint32_t>        mMarkerPosition;        // in wrapping (overflow) frame units
    bool                    mMarkerReached;         // marker callback already delivered
    Modulo<uint32_t>        mNewPosition;           // in frames
    uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS

    Modulo<uint32_t>        mServer;                // in frames, last known mProxy->getPosition()
                                                    // which is count of frames consumed by server,
                                                    // reset by new IAudioTrack,
                                                    // whether it is reset by stop() is TBD
    Modulo<uint32_t>        mPosition;              // in frames, like mServer except continues
                                                    // monotonically after new IAudioTrack,
                                                    // and could be easily widened to uint64_t
    Modulo<uint32_t>        mReleased;              // count of frames released to server
                                                    // but not necessarily consumed by server,
                                                    // reset by stop() but continues monotonically
                                                    // after new IAudioTrack to restore mPosition,
                                                    // and could be easily widened to uint64_t
    int64_t                 mStartFromZeroUs;       // the start time after flush or stop,
                                                    // when position should be 0.
                                                    // only used for offloaded and direct tracks.
    int64_t                 mStartNs;               // the time when start() is called.
    ExtendedTimestamp       mStartEts;              // Extended timestamp at start for normal
                                                    // AudioTracks.
    AudioTimestamp          mStartTs;               // Timestamp at start for offloaded or direct
                                                    // AudioTracks.

    bool                    mPreviousTimestampValid;// true if mPreviousTimestamp is valid
    bool                    mTimestampStartupGlitchReported;      // reduce log spam
    bool                    mTimestampRetrogradePositionReported; // reduce log spam
    bool                    mTimestampRetrogradeTimeReported;     // reduce log spam
    bool                    mTimestampStallReported;              // reduce log spam
    bool                    mTimestampStaleTimeReported;          // reduce log spam
    AudioTimestamp          mPreviousTimestamp;     // used to detect retrograde motion

    ExtendedTimestamp::Location mPreviousLocation;  // location used for previous timestamp

    uint32_t                mUnderrunCountOffset;   // updated when restoring tracks

    int64_t                 mFramesWritten;         // total frames written. reset to zero after
                                                    // the start() following stop(). It is not
                                                    // changed after restoring the track or
                                                    // after flush.
    int64_t                 mFramesWrittenServerOffset; // An offset to server frames due to
                                                    // restoring AudioTrack, or stop/start.
                                                    // This offset is also used for static tracks.
    int64_t                 mFramesWrittenAtRestore; // Frames written at restore point (or frames
                                                    // delivered for static tracks).
                                                    // -1 indicates no previous restore point.

    audio_output_flags_t    mFlags;                 // same as mOrigFlags, except for bits that may
                                                    // be denied by client or server, such as
                                                    // AUDIO_OUTPUT_FLAG_FAST.  mLock must be
                                                    // held to read or write those bits reliably.
    audio_output_flags_t    mOrigFlags;             // as specified in constructor or set(), const

    bool                    mDoNotReconnect;        // NOTE(review): presumably suppresses track
                                                    // restoration on invalidation — confirm

    audio_session_t         mSessionId;
    int                     mAuxEffectId;
    audio_port_handle_t     mPortId;                    // Id from Audio Policy Manager

    mutable Mutex           mLock;

    int                     mPreviousPriority;          // before start()
    SchedPolicy             mPreviousSchedulingGroup;
    bool                    mAwaitBoost;    // thread should wait for priority boost before running

    // The proxy should only be referenced while a lock is held because the proxy isn't
    // multi-thread safe, especially the SingleStateQueue part of the proxy.
    // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
    // provided that the caller also holds an extra reference to the proxy and shared memory to keep
    // them around in case they are replaced during the obtainBuffer().
    sp<StaticAudioTrackClientProxy> mStaticProxy;   // for type safety only
    sp<AudioTrackClientProxy>       mProxy;         // primary owner of the memory

    bool                    mInUnderrun;            // whether track is currently in underrun state
    uint32_t                mPausedPosition;        // NOTE(review): position snapshot taken at
                                                    // pause — units presumably frames; confirm

    // For Device Selection API
    //  a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
    audio_port_handle_t    mSelectedDeviceId; // Device requested by the application.
    audio_port_handle_t    mRoutedDeviceId;   // Device actually selected by audio policy manager:
                                              // May not match the app selection depending on other
                                              // activity and connected devices.

    sp<media::VolumeHandler>       mVolumeHandler;

    const std::string      mOpPackageName;

private:
    // Receives binder death notifications for the server-side object so the
    // owning AudioTrack can react when the media server process dies.
    class DeathNotifier : public IBinder::DeathRecipient {
    public:
        explicit DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { }
    protected:
        virtual void        binderDied(const wp<IBinder>& who);
    private:
        // weak reference avoids a strong-pointer cycle with the owning AudioTrack
        const wp<AudioTrack> mAudioTrack;
    };

    sp<DeathNotifier>       mDeathNotifier;
    uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
    uid_t                   mClientUid;             // uid of the client that created this track
    pid_t                   mClientPid;             // pid of the client that created this track

    wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;

private:
    // Collects media metrics for this track into a single item and
    // self-records it on destruction, unless nothing was ever gathered.
    class MediaMetrics {
      public:
        MediaMetrics() : mMetricsItem(mediametrics::Item::create("audiotrack")) {
        }
        ~MediaMetrics() {
            // mMetricsItem alloc failure will be flagged in the constructor
            // don't log empty records
            if (mMetricsItem->count() > 0) {
                mMetricsItem->selfrecord();
            }
        }
        // snapshot the track's state into mMetricsItem
        void gather(const AudioTrack *track);
        // returns a copy of the item; NOTE(review): caller presumably owns it — confirm
        mediametrics::Item *dup() { return mMetricsItem->dup(); }
      private:
        std::unique_ptr<mediametrics::Item> mMetricsItem;
    };
    MediaMetrics mMediaMetrics;
    std::string mMetricsId;  // GUARDED_BY(mLock), could change in createTrack_l().
    std::string mCallerName; // for example "aaudio"
    bool                    mTrackOffloaded;

private:
    // Binder callback object registered with the server; receives codec
    // format change notifications (onCodecFormatChanged).
    class AudioTrackCallback : public media::BnAudioTrackCallback {
    public:
        binder::Status onCodecFormatChanged(const std::vector<uint8_t>& audioMetadata) override;

        // install or replace the client-supplied callback
        void setAudioTrackCallback(const sp<media::IAudioTrackCallback>& callback);
    private:
        Mutex mAudioTrackCbLock;                    // presumably guards mCallback — confirm
        wp<media::IAudioTrackCallback> mCallback;   // client callback, held weakly
    };
    sp<AudioTrackCallback> mAudioTrackCallback;
};

}; // namespace android

#endif // ANDROID_AUDIOTRACK_H
