use std::{net::SocketAddr, time::Duration};

use bytes::{Bytes, BytesMut};
use tokio::{net::UdpSocket, sync::mpsc};
use tools::{utils::gen_id::get_now_duration, ResultEx};
use tracing::{error, trace};

use crate::{
	audio_codes::{rfc2833::Rfc2833, AudioCodes},
	empty_sockaddr,
	sip2::dialog::DialogMessage,
	wav::file::WavBuf,
	Codec,
};

use super::{data::RtpData, head::RtpHead};

///One leg of a bridged call: the RTP manager connects two legs, and each
/// `RtpLeg` carries the per-direction RTP state (sequencing, timestamps,
/// peer addresses, DTMF de-duplication and pending playback buffers) for
/// one endpoint of the call.
#[derive(Debug)]
pub(crate) struct RtpLeg {
	///Id of the owning dialog; used to tell which dialog a message came from.
	pub(crate) id: u32,
	///Peer's RTP media address. An "empty" address (see `empty_sockaddr`)
	/// means the peer has not been negotiated yet — see `is_prepare_complete`.
	pub(crate) peer_rtp_addr: SocketAddr,
	///Peer's RTCP address (conventionally the RTP port + 1).
	pub(crate) peer_rtcp_addr: SocketAddr,
	///Channel for pushing events (DTMF, playback control) to the SIP dialog.
	pub(crate) to_sip_dialog: mpsc::Sender<DialogMessage>,
	///RTP sequence number used when generating/rewriting outgoing packets.
	pub(crate) seq_num: u16,
	///RTP timestamp used when generating/rewriting outgoing packets.
	pub(crate) timestamp: u32,
	///Synchronization source identifier stamped on outgoing packets.
	pub(crate) ssrc: u32,
	///Outgoing timestamp pinned for the telephone-event currently in progress
	/// (all packets of one event must carry the same RTP timestamp).
	curr_event_timestamp: u32,
	///Last incoming event timestamp, compared against to detect the start of
	/// a new telephone-event.
	save_event_timestamp: u32,
	///Queue of buffers to play back; bytes-backed so they can be stored and
	/// shared cheaply.
	pub(crate) play_bufs: Vec<WavBuf>,
	///Time point of the next wav packet send; `Duration::ZERO` means nothing
	/// is scheduled.
	next_send_wav_timestamp: Duration,
	///Timestamp of the last forwarded DTMF packet, used to de-duplicate the
	/// repeated packets that make up a single key press.
	last_dtmf_timestamp: u32,
}

impl RtpLeg {
	pub(crate) fn new(
		dialog_id: u32,
		to_sip_dialog: mpsc::Sender<DialogMessage>,
		peer_rtp_addr: SocketAddr,
	) -> Self {
		let t: u32 = (get_now_duration().as_secs() & 0x00ffffff) as u32;
		let peer_rtcp_addr = if peer_rtp_addr.port() == 0 {
			empty_sockaddr()
		} else {
			SocketAddr::new(peer_rtp_addr.ip(), peer_rtp_addr.port() + 1)
		};

		Self {
			id: dialog_id,
			to_sip_dialog,
			seq_num: (t & 0x00ff) as u16,
			timestamp: t,
			peer_rtp_addr,
			peer_rtcp_addr,
			ssrc: t,
			curr_event_timestamp: 0,
			save_event_timestamp: 0,
			play_bufs: Vec::with_capacity(1),
			next_send_wav_timestamp: Duration::ZERO,
			last_dtmf_timestamp: 0
		}
	}

	///处理收到的事件.因为收到多个事件才代表一个按键.因此需要当前进行判断
	///
	/// 根据情况向远端发送按键事件消息
	pub(crate) fn handle_tel_event(&mut self, mut buf: Bytes) -> ResultEx<()> {
		let rtp = RtpData::from_bytes(&mut buf)?;
		if self.last_dtmf_timestamp == rtp.head.timestamp {
			Ok(())
		} else {
			self.last_dtmf_timestamp = rtp.head.timestamp;
			let tel_event = Rfc2833::from_bytes(&mut rtp.body.clone())?;
			self
				.to_sip_dialog
				.try_send(DialogMessage::Dtmf(tel_event.dtmf))?;
			Ok(())
		}
	}

	///转发一个数据至对应的端点的音频数据
	/// 调用此方法的原因是转发过程需要修改rtp包头部分数据
	/// 目前这个处理方式都是根据固定间隔采样的数据来进行
	/// 使用 wrapping_add_signed是因为可能溢出,而可以循环使用
	pub(crate) async fn send_buf_to_rtp(
		&mut self,
		rtp_sock: &UdpSocket,
		audio_codec: (u8, AudioCodes),
		buf: &mut BytesMut,
	) {
		let (_, codec) = audio_codec;

		//直接修改对应头部数据,seq
		buf[3] = (self.seq_num & 0xff) as u8;
		buf[2] = (self.seq_num >> 8) as u8;
		self.seq_num = self.seq_num.wrapping_add_signed(1);

		buf[7] = (self.timestamp & 0xff) as u8;
		buf[6] = (self.timestamp >> 8 & 0xff) as u8;
		buf[5] = (self.timestamp >> 16 & 0xff) as u8;
		buf[4] = (self.timestamp >> 24) as u8;
		self.timestamp = self
			.timestamp
			.wrapping_add_signed(codec.packet_size() as i32);

		buf[11] = (self.ssrc & 0xff) as u8;
		buf[10] = (self.ssrc >> 8 & 0xff) as u8;
		buf[9] = (self.ssrc >> 16 & 0xff) as u8;
		buf[8] = (self.ssrc >> 24) as u8;

		let _ = rtp_sock.send_to(&buf, self.peer_rtp_addr).await;
	}

	///转发一个数据至对应的端点的事件数据.telephone_event
	/// 调用此方法的原因是转发过程需要修改rtp包头部分数据
	/// 目前这个处理方式都是根据固定间隔采样的数据来进行
	pub(crate) async fn send_tele_event_buf_to_rtp(
		&mut self,
		rtp_sock: &UdpSocket,
		tele_event_codec: &Option<(u8, AudioCodes)>,
		buf: &mut BytesMut,
	) {
		let Some((_, codec)) = tele_event_codec else {
			error!("未设置音频编码格式.无法转发数据");
			return;
		};

		buf[3] = (self.seq_num & 0xff) as u8;
		buf[2] = (self.seq_num >> 8) as u8;
		self.seq_num = self.seq_num.wrapping_add_signed(1);

		let curr_s = u32::from_ne_bytes([buf[7], buf[6], buf[5], buf[4]]);
		if curr_s != self.save_event_timestamp {
			self.save_event_timestamp = curr_s;
			self.curr_event_timestamp = self.timestamp;
			self.timestamp = self
				.timestamp
				.wrapping_add_signed(codec.packet_size() as i32);
		}
		//如果这次的与保存一样,代表同一个事件内,因此要减回去再处理
		buf[7] = (self.curr_event_timestamp & 0xff) as u8;
		buf[6] = (self.curr_event_timestamp >> 8 & 0xff) as u8;
		buf[5] = (self.curr_event_timestamp >> 16 & 0xff) as u8;
		buf[4] = (self.curr_event_timestamp >> 24) as u8;

		buf[11] = (self.ssrc & 0xff) as u8;
		buf[10] = (self.ssrc >> 8 & 0xff) as u8;
		buf[9] = (self.ssrc >> 16 & 0xff) as u8;
		buf[8] = (self.ssrc >> 24) as u8;

		let _ = rtp_sock.send_to(&buf, self.peer_rtp_addr).await;
	}

	pub(crate) fn is_prepare_complete(&self) -> bool {
		self.peer_rtp_addr != empty_sockaddr()
	}

	///停止正在播放的数据
	#[inline]
	pub(crate) fn stop_play(&mut self) {
		self.play_bufs.clear();
		let _ = self.to_sip_dialog.try_send(DialogMessage::StopPlay);
	}

	///尝试播放缓存区内数据
	/// 返回需要下次播放的时间点..如果不再播放,返回Duration::ZERO
	pub(crate) async fn try_play_buf(
		&mut self,
		rtp_sock: &UdpSocket,
		codes: (u8, AudioCodes),
		ptime: &Duration,
	) -> Duration {
		//如果还没有到达需要播放的时间点.直接返回播放时间点
		if self.next_send_wav_timestamp > get_now_duration() {
			return self.next_send_wav_timestamp;
		}

		//取第一个缓存区数据.如果没取到,代表播放完成了
		let Some(wav_buf) = self.play_bufs.get_mut(0) else {
			return Duration::ZERO;
		};

		let (payload_type, codec) = codes;

		//TODO 现在仅是判断一下是否一致....不一致直接退出.不发送.后续可以考虑转换数据
		if codec != wav_buf.data_codes {
			error!("音频编码格式不一致,无法发送数据");
			self.stop_play();
			return Duration::ZERO;
		}

		let body_len = (wav_buf.sample_rate / (1000 / ptime.as_millis() as u32)) as usize;
		let rtp_pak = RtpData::new(
			RtpHead::new(
				payload_type,
				self.seq_num,
				self.ssrc == self.timestamp, //初始时候这两值相等,过后就不相等了
				self.timestamp,
				self.ssrc,
			),
			wav_buf.data.split_to(body_len),
		);

		self.timestamp += body_len as u32;
		self.seq_num += 1;
		let mut buf = BytesMut::with_capacity(rtp_pak.body.len() + 12);
		rtp_pak.write_to(&mut buf);
		if let Err(e) = rtp_sock.send_to(&buf, self.peer_rtp_addr).await {
			trace!("发送rtp数据失败:{}", e);
		}

		if wav_buf.data.is_empty() {
			self.play_bufs.remove(0);
		}

		if self.play_bufs.is_empty() {
			Duration::ZERO
		} else {
			self.next_send_wav_timestamp += *ptime;
			self.next_send_wav_timestamp
		}
	}

	///是否需要播放缓存区内数据
	pub(crate) fn need_play_buf(&self) -> bool {
		!self.play_bufs.is_empty()
	}

	///开始播放指定的声音,如果已经存在播放,会将之前的播放停止,重新播放
	pub(crate) fn start_play_buf(&mut self, buf: WavBuf) {
		self.next_send_wav_timestamp = get_now_duration();
		self.play_bufs.clear();
		self.play_bufs.push(buf);
	}

	///添加播放声音...传入一个buf的数组..如果已经在播放中,则添加在后面,
	pub(crate) fn add_play_buf(&mut self, mut bufs: Vec<WavBuf>) {
		if self.play_bufs.is_empty() {
			self.play_bufs = bufs;
			self.next_send_wav_timestamp = get_now_duration();
		} else {
			self.play_bufs.append(&mut bufs);
		}
	}
}
