﻿#pragma once

#include <memory>
#include <thread>
#include <mutex>
#include <atomic>
#include <queue>
#include <condition_variable>
#include <string.h>
#include <vector>


#ifdef _WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif


#include "../capture/VideoCapture.h"
#include "rtc_base/thread.h"
#include "media/base//adapted_video_track_source.h"
#include "pc/video_track_source.h"
#include "api/video/i420_buffer.h"
#include "modules/desktop_capture/desktop_capturer.h"
#include "modules/desktop_capture/desktop_capture_options.h"

#include "modules/video_capture/video_capture.h"
#include "modules/video_capture/video_capture_factory.h"

#include "libyuv/video_common.h"
#include "libyuv/convert.h"

#include "media/base/codec.h"
#include "media/base/video_common.h"
#include "media/base/video_broadcaster.h"
#include "media/engine/internal_decoder_factory.h"
#include "common_video/h264/h264_common.h"
#include "common_video/h264/sps_parser.h"
#include "api/video_codecs/video_decoder.h"
#include "VideoDecoder.h"
#include "MediaFifo.h"

#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif
// One raw I420 video frame described by per-plane pointers and strides.
// NOTE(review): the struct does not own the plane buffers — the producer that
// fills it must keep the memory alive while the frame sits in frameQueue.
// All members carry default initializers so a default-constructed frame is
// well-defined (the original left them indeterminate).
struct VideoFrame {
	uint8_t* y = nullptr;    // Y (luma) plane
	int strideY = 0;         // bytes per row of the Y plane
	uint8_t* u = nullptr;    // U (chroma) plane
	int strideU = 0;         // bytes per row of the U plane
	uint8_t* v = nullptr;    // V (chroma) plane
	int strideV = 0;         // bytes per row of the V plane
	int nWidth = 0;          // frame width in pixels
	int nHeight = 0;         // frame height in pixels
	int64_t nTimeStamp = 0;  // presentation timestamp (units set by the producer)
};
class VideoTrackSourceInput : public rtc::AdaptedVideoTrackSource
{
public:
	VideoTrackSourceInput();
	~VideoTrackSourceInput();

	static VideoTrackSourceInput* Create(const std::string& videourl, const std::map<std::string, std::string>& opts);

	bool Init(size_t width,
		size_t height,
		size_t target_fps,
		const std::string& videourl);

	bool Init(std::string videourl, std::map<std::string, std::string> opts);

	//修改输入源
	void changeVideoInput(size_t width,
		size_t height,
		size_t target_fps,
		std::string videourl);

	// AdaptedVideoTrackSource implementation.	
	bool is_screencast() const override;

	absl::optional<bool> needs_denoising() const override;

	webrtc::MediaSourceInterface::SourceState state() const override;

	bool remote() const override;

	void InputVideoFrame(const unsigned char* y, const unsigned char* u, const unsigned char* v,
		int width, int height, int frame_rate);

	void InputVideoFrame(uint8_t* y, int strideY, uint8_t* u, int strideU, uint8_t* v, int strideV, int nWidth, int nHeight, int64_t nTimeStamp);

	bool InputVideoFrame(unsigned char* data, size_t size, int nWidth, int nHeigh, int fps);

	//	//直接发送h264的数据
	bool InputVideoFrame(const char* id, unsigned char* buffer, size_t size, int nWidth, int nHeigh, int64_t ts);

	void Run();

	void onH264Data(unsigned char* buffer, ssize_t size, int64_t ts, const std::string& codec) {
		std::vector<webrtc::H264::NaluIndex> indexes = webrtc::H264::FindNaluIndices(buffer, size);
		SPDLOG_LOGGER_INFO(spdlogptr, "LiveVideoSource:onData nbNalu:{}", indexes.size());
		for (const webrtc::H264::NaluIndex& index : indexes) {
			webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(buffer[index.payload_start_offset]);
			RTC_LOG(LS_VERBOSE) << "LiveVideoSource:onData NALU type:" << nalu_type << " payload_size:" << index.payload_size << " payload_start_offset:" << index.payload_start_offset << " start_offset:" << index.start_offset;
			if (nalu_type == webrtc::H264::NaluType::kSps)
			{
				SPDLOG_LOGGER_INFO(spdlogptr, "LiveVideoSource:onData SPS");
				m_cfg.clear();
				m_cfg.insert(m_cfg.end(), buffer + index.start_offset, buffer + index.payload_size + index.payload_start_offset);

				absl::optional<webrtc::SpsParser::SpsState> sps = webrtc::SpsParser::ParseSps(buffer + index.payload_start_offset + webrtc::H264::kNaluTypeSize, index.payload_size - webrtc::H264::kNaluTypeSize);
				if (!sps)
				{
					SPDLOG_LOGGER_ERROR(spdlogptr, "cannot parse sps");
				
				}
				else
				{
					SPDLOG_LOGGER_INFO(spdlogptr, "LiveVideoSource:onData SPS set format:{} x={} "
						, sps->height);
					
				}
			}
			else if (nalu_type == webrtc::H264::NaluType::kPps)
			{
				SPDLOG_LOGGER_INFO(spdlogptr, "LiveVideoSource:onData PPS");
				m_cfg.insert(m_cfg.end(), buffer + index.start_offset, buffer + index.payload_size + index.payload_start_offset);
			}
			else if (nalu_type == webrtc::H264::NaluType::kSei)
			{
			}
			else
			{
				webrtc::VideoFrameType frameType = webrtc::VideoFrameType::kVideoFrameDelta;
				std::vector<uint8_t> content;
				if (nalu_type == webrtc::H264::NaluType::kIdr)
				{
					frameType = webrtc::VideoFrameType::kVideoFrameKey;
					SPDLOG_LOGGER_INFO(spdlogptr, "LiveVideoSource:onData IDR");
					content.insert(content.end(), m_cfg.begin(), m_cfg.end());
				}
				else
				{
					SPDLOG_LOGGER_INFO(spdlogptr, "LiveVideoSource:onData SLICE NALU:{}"
						, (int)nalu_type);
				}
			
				content.insert(content.end(), buffer + index.start_offset, buffer + index.payload_size + index.payload_start_offset);
				//rtc::scoped_refptr<webrtc::EncodedImageBuffer> frame = webrtc::EncodedImageBuffer::Create(content.data(), content.size());

				std::unique_lock<std::mutex> lock(m_mutex);
				videoFifo.push((unsigned char*)content.data(), content.size());
			//	m_fifosize = videoFifo.GetSize();
				m_condition.notify_one();
			}
		}
	}


public:

private:
	VideoCapture* m_vCapture = nullptr;
	std::string m_videourl;
	std::map<std::string, std::string> m_opts;
	std::mutex m_mutex;                                        //互斥锁	
	int64_t next_timestamp_us_ = rtc::kNumMicrosecsPerMillisec;
	int64_t                              m_prevts = 0;//上一帧的时间
	std::atomic<bool>m_bStop;
	CMediaFifo videoFifo;
	std::condition_variable m_condition;              // 条件变量，用于线程等待
	std::shared_ptr<std::thread>  m_thread;
	int m_nWidth, m_nHeigh, m_fps;


	// 定义 VideoFrame 结构体

	std::queue<VideoFrame> frameQueue;
	// 互斥锁
	std::mutex queueMutex;
	std::condition_variable queueCV;
	size_t  m_fifosize = 0;

	std::vector<uint8_t>               m_cfg;
};
