﻿using FFmpeg.AutoGen;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;

namespace FFmpegWinui3.MediaFolder
{
    /// <summary>
    /// Audio stream wrapper that decodes frames and resamples them to the fixed
    /// <see cref="AudioParams.Output"/> specification (48 kHz, stereo, S16).
    /// </summary>
    public unsafe class AudioMedia : MediaBase
    {
        /// <summary>Channel count reported by the stream's codec parameters.</summary>
        public int Channels { get; private set; }

        /// <summary>Sample rate (Hz) reported by the stream's codec parameters.</summary>
        public long SampleRate { get; private set; }

        /// <summary>Bits per sample of the decoder's native sample format.</summary>
        public long BitsPerSample { get; private set; }

        /// <summary>FFmpeg channel-layout bitmask of the stream.</summary>
        public ulong ChannelLayout { get; private set; }

        public override AVMediaType Type { get; protected set; } = AVMediaType.AVMEDIA_TYPE_AUDIO;

        // Resampler converting decoded frames to the output spec; reconfigured when
        // the source format changes.
        // NOTE(review): never released with swr_free — leaks when this media is torn
        // down. MediaBase is not visible here; confirm where cleanup should hook in.
        SwrContext* Scaler = null;

        public AudioMedia(AVFormatContext* format, int streamIndex) : base(format, streamIndex)
        {
            Channels = Stream->codecpar->channels;
            ChannelLayout = Stream->codecpar->channel_layout;
            SampleRate = Stream->codecpar->sample_rate;
            BitsPerSample = ffmpeg.av_samples_get_buffer_size(null, 1, 1, CodecContext->sample_fmt, 1) * 8;
            FrameRate = ffmpeg.av_q2d(Stream->r_frame_rate);
        }

        // Source spec of the last frame fed to the resampler; used to detect
        // mid-stream format changes that require re-initializing the scaler.
        AudioParams LastSourceSpec = null;

        /// <summary>
        /// Decodes the next frame at <paramref name="time"/> and returns its samples
        /// resampled to the target spec, or null when no new frame is available or
        /// the conversion fails.
        /// </summary>
        public override byte[] GetNextFrameData(TimeSpan time)
        {
            var tempFrame = GetNextFrame(time);
            if (tempFrame == CurrentFrame)
                return null;

            DisposeFrame();
            CurrentFrame = tempFrame;
            if (CurrentFrame == null)
                return null;

            var sourceSpec = AudioParams.CreateSource(CurrentFrame.frame);
            var targetSpec = AudioParams.CreateTarget(CurrentFrame.frame);
            if (Scaler == null || LastSourceSpec == null || !AudioParams.AreCompatible(LastSourceSpec, sourceSpec))
            {
                // (Re)configure the resampler. BUG FIX: the output sample rate must be
                // the target's rate — the original passed sourceSpec.SampleRate, which
                // disabled resampling while targetSpec.SamplesPerChannel still assumed
                // the 48 kHz output rate.
                Scaler = ffmpeg.swr_alloc_set_opts(Scaler, targetSpec.ChannelLayout, targetSpec.Format, targetSpec.SampleRate,
                    sourceSpec.ChannelLayout, sourceSpec.Format, sourceSpec.SampleRate, 0, null);

                ffmpeg.swr_init(Scaler);
                LastSourceSpec = sourceSpec;
            }

            // BUG FIX: the unmanaged buffer was never released; free it on every path.
            var audioBuffer = Marshal.AllocHGlobal(targetSpec.BufferLength + 1);
            try
            {
                var bufferPtr = (byte*)audioBuffer;

                var outputSamplesPerChannel = ffmpeg.swr_convert(Scaler, &bufferPtr, targetSpec.SamplesPerChannel,
                    CurrentFrame.frame->extended_data, CurrentFrame.frame->nb_samples);

                // BUG FIX: validate the conversion result BEFORE using it as a sample
                // count (the original computed the buffer size with a possibly
                // negative value first).
                if (outputSamplesPerChannel < 0)
                    return null;

                // Number of bytes actually produced by the conversion.
                var outputBufferLength =
                    ffmpeg.av_samples_get_buffer_size(null, targetSpec.ChannelCount, outputSamplesPerChannel, targetSpec.Format, 1);
                if (outputBufferLength < 0)
                    return null;

                byte[] bytes = new byte[outputBufferLength];
                Marshal.Copy(audioBuffer, bytes, 0, outputBufferLength);
                return bytes;
            }
            finally
            {
                Marshal.FreeHGlobal(audioBuffer);
            }
        }
    }

    /// <summary>
    /// Describes an audio buffer layout (channel count/layout, sample rate, sample
    /// format, buffer size) and exposes the fixed output specification that all
    /// decoded frames are resampled to.
    /// </summary>
    public unsafe class AudioParams
    {
        /// <summary>The fixed target spec: 48 kHz, stereo, signed 16-bit samples.</summary>
        public static readonly AudioParams Output;

        /// <summary>Extra samples per channel reserved in conversion buffers.</summary>
        public const int BufferPadding = 256;

        /// <summary>Bit depth of the S16 output format.</summary>
        public const int OutputBitsPerSample = 16;

        public int ChannelCount { get; private set; }
        public long ChannelLayout { get; private set; }
        public int SamplesPerChannel { get; private set; }
        public int SampleRate { get; private set; }
        public AVSampleFormat Format { get; private set; }
        public int BufferLength { get; private set; }

        static AudioParams()
        {
            Output = new AudioParams();
            Output.ChannelCount = 2;
            Output.SampleRate = 48000;
            Output.Format = AVSampleFormat.AV_SAMPLE_FMT_S16;
            // BUG FIX: the original cast the layout through (int), truncating the
            // 64-bit channel-layout bitmask returned by FFmpeg. Harmless for stereo
            // only by coincidence.
            Output.ChannelLayout = ffmpeg.av_get_default_channel_layout(Output.ChannelCount);

            // Worst-case buffer: one second of output audio plus padding.
            Output.SamplesPerChannel = Output.SampleRate;
            Output.BufferLength = ffmpeg.av_samples_get_buffer_size(null, Output.ChannelCount, Output.SamplesPerChannel + BufferPadding, Output.Format, 1);
        }

        private AudioParams() { }

        /// <summary>Captures the spec of a decoded frame.</summary>
        public AudioParams(AVFrame* frame)
        {
            ChannelCount = frame->channels;
            ChannelLayout = (long)frame->channel_layout;
            Format = (AVSampleFormat)frame->format;
            SamplesPerChannel = frame->nb_samples;
            BufferLength = ffmpeg.av_samples_get_buffer_size(null, ChannelCount, SamplesPerChannel, Format, 1);
            SampleRate = frame->sample_rate;
        }

        /// <summary>
        /// Builds the source spec for <paramref name="frame"/>, filling in a default
        /// channel layout when the frame does not carry one.
        /// </summary>
        internal static AudioParams CreateSource(AVFrame* frame)
        {
            var spec = new AudioParams(frame);
            if (spec.ChannelLayout == 0)
                spec.ChannelLayout = ffmpeg.av_get_default_channel_layout(spec.ChannelCount);

            return spec;
        }

        /// <summary>
        /// Builds the target spec for resampling <paramref name="frame"/>: the fixed
        /// output format, with the expected sample count scaled by the rate ratio.
        /// </summary>
        internal static AudioParams CreateTarget(AVFrame* frame)
        {
            var spec = new AudioParams
            {
                ChannelCount = Output.ChannelCount,
                Format = Output.Format,
                SampleRate = Output.SampleRate,
                ChannelLayout = Output.ChannelLayout
            };

            // The expected output sample count is the source count scaled by the
            // output/input sample-rate ratio.
            spec.SamplesPerChannel = (int)Math.Round((double)frame->nb_samples * spec.SampleRate / frame->sample_rate, 0);
            spec.BufferLength = ffmpeg.av_samples_get_buffer_size(null, spec.ChannelCount, spec.SamplesPerChannel + BufferPadding, spec.Format, 1);
            return spec;
        }

        /// <summary>
        /// Returns true when two specs share format, channel count, channel layout
        /// and sample rate, i.e. an existing resampler can be reused.
        /// </summary>
        internal static bool AreCompatible(AudioParams a, AudioParams b)
        {
            if (a.Format != b.Format) return false;
            if (a.ChannelCount != b.ChannelCount) return false;
            if (a.ChannelLayout != b.ChannelLayout) return false;
            if (a.SampleRate != b.SampleRate) return false;

            return true;
        }
    }
}
