package main

import (
	"bytes"
	"fmt"
	"github.com/pterm/pterm"
	"io"
	"os"
	"time"
	"xmediaEmu/pkg/decoder"
	"xmediaEmu/pkg/decoder/codec"
	"xmediaEmu/pkg/decoder/codec/aacparser"
	"xmediaEmu/pkg/decoder/codec/amrparser"
	"xmediaEmu/pkg/decoder/codec/h264parser"
	"xmediaEmu/pkg/decoder/fdkaac"
	"xmediaEmu/pkg/format/rtmp"
	"xmediaEmu/pkg/media/amrwriter"
)

// main is an AMR transcoding test: it pulls an AAC audio stream over RTMP,
// decodes each ADTS frame to PCM with the fdk-aac decoder, re-encodes the
// PCM as AMR-WB, and writes the result to a local "testwb.amr" file (the
// raw PCM is also dumped to "testaac.pcm" for inspection). It runs for 10
// seconds, then drains any frames still buffered in the AAC decoder and exits.
func main() {
	// Pull the AAC stream directly over RTMP.
	demuxer, err := rtmp.Dial("rtmp://10.153.90.4:1935/live/guangdong")
	if err != nil {
		pterm.FgLightRed.Printfln("Unable to Dial, error: %s\n", err.Error())
		os.Exit(-1)
	}
	defer demuxer.Close()

	var streams []codec.CodecData
	if streams, err = demuxer.Streams(); err != nil && err != io.EOF {
		pterm.FgLightRed.Println(err)
		return
	}
	if err = rtmpStreamer.WriteHeader(streams); err != nil {
		pterm.FgRed.Println(err)
		return
	}

	// Locate the audio stream and report its parameters.
	var adecodec codec.AudioCodecData
	for _, stream := range streams {
		if stream.Type() == codec.H264 {
			pterm.FgYellow.Println("found dest stream type:", stream.Type(), " ")
		} else {
			adecodec = stream.(codec.AudioCodecData)
			pterm.FgWhite.Println(stream.Type(), " ", adecodec.ChannelLayout(), " ", adecodec.SampleRate(), " ", adecodec.SampleFormat())
		}
	}

	// Source AAC decoder (set up further below).
	aacDecoder, err := decoder.NewAudioDecoder(adecodec)
	if err != nil {
		panic(err)
	}

	// Destination AMR-WB encoder. AMR-WB input must be mono / 16 kHz / s16.
	// NOTE: renamed from the original's misleading "amrDecoder" — it encodes.
	amrCodecData := amrparser.AmrWbCodecData{}
	amrEncoder, err := decoder.NewAudioEncoder(amrCodecData)
	if err != nil {
		panic(err)
	}
	// BUG FIX: Close was deferred twice in the original; once is enough.
	defer amrEncoder.Close()

	_ = amrEncoder.SetChannelLayout(codec.CH_MONO)
	_ = amrEncoder.SetBitrate(24000)
	_ = amrEncoder.SetSampleRate(16000)
	_ = amrEncoder.SetSampleFormat(codec.S16)
	if err = amrEncoder.Setup(); err != nil {
		panic(err)
	}

	// Initialize the source decoder.
	if err = aacDecoder.Setup(); err != nil {
		panic(err)
	}
	defer aacDecoder.Close()

	// Resampler configured for the AMR-WB input layout (mono/s16/16 kHz).
	// Currently unused in the pipeline (the resample call is disabled) but
	// kept configured for future use.
	var resample decoder.Resampler
	resample.OutChannelLayout = codec.CH_MONO
	resample.OutSampleFormat = codec.S16
	resample.OutSampleRate = 16000
	defer resample.Close()

	// Create the destination AMR-WB file and write the "#!AMR-WB\n" magic.
	amrWbFile, err := amrwriter.NewWb("testwb.amr")
	if err != nil {
		panic(err)
	}
	defer amrWbFile.Close()
	amrhead := []byte{'#', '!', 'A', 'M', 'R', '-', 'W', 'B', '\n'}
	_ = amrWbFile.WriteFile(amrhead)

	// Stop the whole transcode after 10 seconds.
	ticker := time.NewTicker(time.Second * 10)
	// BUG FIX: the ticker was never stopped in the original.
	defer ticker.Stop()

	d := fdkaac.NewAacDecoder()
	if err = d.InitAdts(); err != nil {
		fmt.Println("init decoder failed, err is", err)
		return
	}
	defer d.Close()
	f, err := os.Create("testaac.pcm")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Transcode packet by packet until the ticker fires or the stream ends.
loop:
	for {
		select {
		case <-ticker.C:
			pterm.FgLightYellow.Printfln("ticker stop!.")
			// Drain frames still buffered inside the AAC decoder before exiting.
			got, frameResult, _ := aacDecoder.ReceiveReserved()
			for len(frameResult.Data) > 0 {
				pkts, err := amrEncoder.Encode(frameResult)
				if len(pkts) <= 0 {
					pterm.FgRed.Printfln("ReceiveReserved Encode amr result 0: len(pkts):%v err:%v\n", len(pkts), err)
				} else {
					// Write the encoded frames straight into the local amr-wb file.
					for index, pkt := range pkts {
						_ = amrWbFile.WriteFile(pkt)
						pterm.FgGreen.Printfln("ReceiveReserved Encode amr result length is: got:%d pkts[%d]: length:%d\n", got, index, len(pkt))
					}
				}
				got, frameResult, err = aacDecoder.ReceiveReserved()
			}
			return
		default:
			// BUG FIX: the original discarded ReadPacket's error with
			// `pkt, _ :=` and then tested a stale outer err, and its bare
			// `break` only exited the select, spinning forever on EOF.
			pkt, err := demuxer.ReadPacket()
			if err == io.EOF {
				pterm.FgYellow.Println("demuxer: end EOF")
				break loop
			} else if err != nil {
				pterm.FgLightRed.Println("demuxer error:", err)
				return
			}

			// Skip anything that is not the audio stream.
			streamId := int(pkt.Idx)
			if demuxer.AudioStreamId() != streamId {
				pterm.FgWhite.Printfln("mockLocalEmuProcess stream[%d] id not audio: AudioStreamId[%d]", streamId, demuxer.AudioStreamId())
				continue
			}

			// Pace packets in real time (like ffmpeg's -re flag).
			ModifyPacket(&pkt)

			// Extract the AAC payload (ADTS header prepended) from the packet.
			audioData := rtmpStreamer.GetH264OrAacFromPacket(&pkt)
			if len(audioData) > 0 && len(audioData[0]) > 0 {
				pterm.FgGreen.Printfln("GetH264OrAacFromPacket aac result length is: audioData[0]:length:%d audioData:length:%d err:%v\n", len(audioData[0]), len(audioData), err)
			} else {
				pterm.FgRed.Println("GetH264OrAacFromPacket aac audioData is nil \n")
				continue
			}

			// Decode one ADTS frame to PCM.
			pcmFrame := decoderADTSFrames(d, audioData[0])
			if pcmFrame == nil {
				// BUG FIX: the original only guarded the file write and then
				// dereferenced *pcmFrame unconditionally below, panicking on
				// a failed decode.
				continue
			}
			_, _ = f.Write(pcmFrame.Data[0]) // mono: single data plane only.

			// AMR-WB encode.
			pkts, err := amrEncoder.Encode(*pcmFrame)
			if len(pkts) <= 0 {
				pterm.FgRed.Printfln("Encode amr result 0: len(pkts):%v err:%v\n", len(pkts), err)
				continue
			}

			// Write the encoded frames straight into the local amr-wb file.
			for index, pkt := range pkts {
				_ = amrWbFile.WriteFile(pkt)
				pterm.FgGreen.Printfln("Encode amr result length is: pkts[%d]:length:%d\n", index, len(pkt))
			}
		}
	}
}

// decoderADTSFrames feeds one ADTS-framed AAC payload to the fdk-aac
// decoder and wraps the resulting PCM bytes in a codec.AudioFrame.
// It returns nil when decoding fails. The frame carries the PCM bytes as
// a single plane in Data[0], tagged S16 with the decoder's reported
// sample rate, channel config, and samples-per-frame.
func decoderADTSFrames(d *fdkaac.AacDecoder, aacData []byte) (resultFrame *codec.AudioFrame) {
	pcm, err := d.Decode(aacData)
	if err != nil {
		pterm.FgRed.Println("decoderADTSFrames: decode failed, err is", err)
		return nil
	}

	// Dump the decoder's view of the stream for diagnostics.
	pterm.FgGreen.Println("decoderADTSFrames: SampleRate:", d.SampleRate(), " FrameSize:", d.FrameSize(), " NumChannels:", d.NumChannels(), " AacSampleRate:", d.AacSampleRate(),
		" Bitrate:", d.Bitrate(), " ChannelConfig:", d.ChannelConfig())
	pterm.FgGreen.Println("decoderADTSFrames: AacSamplesPerFrame:", d.AacSamplesPerFrame(), " AacNumChannels:", d.AacNumChannels(), " NumTotalBytes:", d.NumTotalBytes(), " NumBadBytes:",
		d.NumBadBytes(), " SampleBits:", d.SampleBits())
	pterm.FgGreen.Println("PCM0 length:", len(pcm))

	frame := codec.AudioFrame{
		SampleRate:    d.AacSampleRate(),
		ChannelLayout: codec.ChannelLayout(d.ChannelConfig()),
		SampleCount:   d.AacSamplesPerFrame(),
		SampleFormat:  codec.S16,
		Data:          [][]byte{pcm},
	}
	return &frame

	// Typical output for a 44.1 kHz stereo stream:
	// SampleRate: 44100
	// FrameSize: 1024
	// NumChannels: 2
	// AacSampleRate: 44100
	// Profile: 1
	// AudioObjectType: 2
	// ChannelConfig: 2
	// Bitrate: 32730
	// AacSamplesPerFrame: 1024
	// AacNumChannels: 2
	// ExtensionAudioObjectType: 0
	// ExtensionSamplingRate: 0
	// NumLostAccessUnits: 0
	// NumTotalBytes: 193
	// NumBadBytes: 0
	// NumTotalAccessUnits: 2
	// NumBadAccessUnits: 0
	// SampleBits: 16
	// PCM0: 4096
	// PCM1: 4096
}

// firsttime records the wall-clock instant the first packet of stream 0
// was seen; pacing below is computed relative to it.
var firsttime time.Time

// ModifyPacket paces packets of stream index 0 in real time (like ffmpeg's
// -re flag): it sleeps until the packet's timestamp, measured from the
// first such packet, has elapsed on the wall clock. Packets of other
// streams pass through untouched.
func ModifyPacket(pkt *codec.Packet) {
	if pkt.Idx != 0 {
		return
	}
	if firsttime.IsZero() {
		firsttime = time.Now()
	}
	// Sleep off any remaining time until this packet is "due".
	// (time.Until replaces the original pkttime.Sub(time.Now()) idiom.)
	if delta := time.Until(firsttime.Add(pkt.Time)); delta > 0 {
		time.Sleep(delta)
	}
}

// rtmpStreamer is the package-wide streamer that turns RTMP packets into
// raw H264/AAC payloads, configured with the fixed capability table below.
var rtmpStreamer = NewRtmpStreamer(Capabilities["audio"], Capabilities["video"])

// Capabilities hard-codes the supported codec/feedback configuration.
// (Composite literals simplified per `gofmt -s`: the redundant
// &Capability/&RtcpFeedback element types were removed.)
var Capabilities = map[string]*Capability{
	"audio": {
		Codecs: []string{"AAC"},
		Rtcpfbs: []*RtcpFeedback{
			{ID: "nack"},
		},
	},
	"video": {
		Codecs: []string{"H264"},
		Rtx:    true,
		Rtcpfbs: []*RtcpFeedback{
			{ID: "transport-cc"},
			{
				ID:     "ccm",
				Params: []string{"fir"},
			},
			{ID: "nack"},
		},
		Extensions: []string{
			"urn:3gpp:video-orientation",
			"http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01",
			"http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time",
		},
	},
}

// RtmpStream is the streamer structure for RTMP parsing. It keeps the
// demuxed stream list plus cached codec data so packets can be turned
// into raw H264 (Annex-B) or AAC (ADTS) payloads.
type RtmpStream struct {
	// streams holds the codec data of every demuxed stream, indexed by packet Idx.
	streams        []codec.CodecData
	// videoCodecData caches the H264 codec data (source of SPS/PPS).
	videoCodecData h264parser.CodecData
	// audioCodecData caches the AAC codec data (source of the ADTS config).
	audioCodecData aacparser.CodecData

	// spspps records whether the SPS/PPS prefix has already been emitted once.
	spspps bool

	// adtsheader is the 7-byte AAC ADTS header scratch buffer, refilled per packet.
	adtsheader []byte

	// TODO: the fields below are used to build the SDP; skip them if not needed.
	// When communicating with external media later, we should adjust our own
	// codec properties to match the peer and avoid transcoding where possible
	// (e.g. h265).
	audioCapability *Capability
	videoCapability *Capability
}

// RtcpFeedback describes a single RTCP feedback mechanism (e.g. "nack",
// "ccm", "transport-cc") together with its optional parameters (e.g. "fir").
type RtcpFeedback struct {
	ID     string   `json:"id,omitempty"`
	Params []string `json:"params,omitempty"`
}

// Capability describes one media capability set: the supported codecs plus
// RTX, RTCP feedback, RTP header extension, and simulcast settings.
type Capability struct {
	Codecs     []string        `json:"codecs"`
	Rtx        bool            `json:"rtx,omitempty"`
	Rtcpfbs    []*RtcpFeedback `json:"rtcpfbs,omitempty"`
	Extensions []string        `json:"extensions,omitempty"`
	Simulcast  bool            `json:"simulcast,omitempty"`
}

// NewRtmpStreamer creates an RTMP stream parser bound to the given audio
// and video capabilities. (The original doc comment named the wrong
// function, "NewMediaTransform"; Go doc comments must start with the
// declared identifier.)
func NewRtmpStreamer(audio *Capability, video *Capability) *RtmpStream {
	return &RtmpStream{
		audioCapability: audio,
		videoCapability: video,
	}
}

// WriteHeader records the demuxed streams and caches per-type codec data:
// the H264 codec data for video (SPS/PPS source) and the AAC codec data
// for audio, plus a 7-byte ADTS header scratch buffer. It always returns nil.
func (rs *RtmpStream) WriteHeader(streams []codec.CodecData) error {
	rs.streams = streams

	for _, s := range streams {
		switch s.Type() {
		case codec.H264:
			rs.videoCodecData = s.(h264parser.CodecData)
		case codec.AAC:
			rs.audioCodecData = s.(aacparser.CodecData)
			// Scratch space for the ADTS header prepended to each AAC packet.
			rs.adtsheader = make([]byte, 7)
		}
	}

	return nil
}

// GetH264OrAacFromPacket extracts the elementary-stream payload from an
// RTMP packet's data chunk.
// For H264: each NALU is returned as its own Annex-B buffer (0x00000001
// start code), with a combined SPS/PPS buffer prepended once per stream.
// For AAC: a single buffer is returned, the packet data with a freshly
// filled ADTS header in front — no type prefix needs to be added.
func (rs *RtmpStream) GetH264OrAacFromPacket(packet *codec.Packet) (result [][]byte) {
	stream := rs.streams[packet.Idx]
	startCode := []byte{0, 0, 0, 1}

	switch stream.Type() {
	case codec.H264:
		// Emit SPS/PPS once, ahead of the first NALUs.
		if !rs.spspps {
			prefix := bytes.Join([][]byte{
				startCode, rs.videoCodecData.SPS(),
				startCode, rs.videoCodecData.PPS(),
			}, nil)
			result = append(result, prefix)
			rs.spspps = true
		}

		nalus, _ := h264parser.SplitNALUs(packet.Data)
		for _, nalu := range nalus {
			result = append(result, bytes.Join([][]byte{startCode, nalu}, nil))
		}

	case codec.AAC:
		// Refill the ADTS header scratch for this packet, then prepend it.
		aacparser.FillADTSHeader(rs.adtsheader, rs.audioCodecData.Config, 1024, len(packet.Data))
		frame := make([]byte, 0, len(rs.adtsheader)+len(packet.Data))
		frame = append(frame, rs.adtsheader...)
		frame = append(frame, packet.Data...)
		result = append(result, frame)
	}
	return
}
