﻿using System;
using System.Collections.Generic;
using System.Text;
using System.Runtime.InteropServices;
using uint8_t = System.Byte;
using int64_t = System.Int64;
using uint64_t = System.UInt64;

namespace ArgusLib.FFmpeg.avutil.Interop
{
	/// <summary>
	/// This structure describes decoded (raw) audio or video data.
	/// 
	/// AVFrame must be allocated using av_frame_alloc(). Note that this only
	/// allocates the AVFrame itself, the buffers for the data must be managed
	/// through other means (see below).
	/// AVFrame must be freed with av_frame_free().
	/// 
	/// AVFrame is typically allocated once and then reused multiple times to hold
	/// different data (e.g. a single AVFrame to hold frames received from a
	/// decoder). In such a case, av_frame_unref() will free any references held by
	/// the frame and reset it to its original clean state before it
	/// is reused again.
	/// 
	/// The data described by an AVFrame is usually reference counted through the
	/// AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
	/// AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
	/// least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
	/// every single data plane must be contained in one of the buffers in
	/// AVFrame.buf or AVFrame.extended_buf. There may be a single buffer for all
	/// the data, or one separate buffer for each plane, or anything in between.
	/// 
	/// sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
	/// to the end with a minor bump.
	/// Similarly fields that are marked as to be only accessed by
	/// av_opt_ptr() can be reordered. This allows 2 forks to add fields
	/// without breaking compatibility with each other.
	///
	/// NOTE(review): the native struct contains additional deprecated fields when
	/// libavutil is built with FF_API_AVFRAME_LAVC enabled. This binding assumes a
	/// build WITHOUT those fields; if the target FFmpeg binary was built with them,
	/// the sequential layout below will not match — verify against the target build.
	/// </summary>
	[StructLayout(LayoutKind.Sequential)]
	public class AVFrameNative
	{
		/// <summary>
		/// pointer to the picture/channel planes.
		/// This might be different from the first allocated byte
		///
		/// Some decoders access areas outside 0,0 - width,height, please
		/// see avcodec_align_dimensions2(). Some filters and swscale can read
		/// up to 16 bytes beyond the planes, if these filters are to be used,
		/// then 16 extra bytes must be allocated.
		/// </summary>
		[MarshalAs(UnmanagedType.ByValArray, SizeConst=Defines.AV_NUM_DATA_POINTERS)]
		public IntPtr[] data;

		/// <summary>
		/// For video, size in bytes of each picture line.
		/// For audio, size in bytes of each plane.
		///
		/// For audio, only linesize[0] may be set. For planar audio, each channel
		/// plane must be the same size.
		///
		/// For video the linesizes should be multiplies of the CPUs alignment
		/// preference, this is 16 or 32 for modern desktop CPUs.
		/// Some code requires such alignment other code can be slower without
		/// correct alignment, for yet other it makes no difference.
		/// </summary>
		[MarshalAs(UnmanagedType.ByValArray, SizeConst=Defines.AV_NUM_DATA_POINTERS)]
		public int[] linesize;

		/// <summary>
		/// pointers to the data planes/channels (native type: uint8_t**).
		///
		/// For video, this should simply point to data[].
		///
		/// For planar audio, each channel has a separate data pointer, and
		/// linesize[0] contains the size of each channel buffer.
		/// For packed audio, there is just one data pointer, and linesize[0]
		/// contains the total size of the buffer for all channels.
		///
		/// Note: Both data and extended_data should always be set in a valid frame,
		/// but for planar audio with more channels that can fit in data,
		/// extended_data must be used in order to access all channels.
		/// </summary>
		public IntPtr extended_data;

		/// <summary>
		/// width and height of the video frame
		/// </summary>
		public int width, height;

		/// <summary>
		/// number of audio samples (per channel) described by this frame
		/// </summary>
		public int nb_samples;

		/// <summary>
		/// format of the frame, -1 if unknown or unset
		/// Values correspond to enum AVPixelFormat for video frames,
		/// enum AVSampleFormat for audio)
		/// </summary>
		public int format;

		/// <summary>
		/// 1 -> keyframe, 0-> not
		/// </summary>
		public int key_frame;

		/// <summary>
		/// Picture type of the frame.
		/// </summary>
		public AVPictureType pict_type;

		// Native field omitted (present only when libavutil is built with
		// FF_API_AVFRAME_LAVC): uint8_t *base[AV_NUM_DATA_POINTERS]; (deprecated)

		/// <summary>
		/// Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
		/// </summary>
		public AVRational sample_aspect_ratio;

		/// <summary>
		/// Presentation timestamp in time_base units (time when frame should be shown to user).
		/// </summary>
		public int64_t pts;

		/// <summary>
		/// PTS copied from the AVPacket that was decoded to produce this frame.
		/// </summary>
		public int64_t pkt_pts;

		/// <summary>
		/// DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used)
		/// This is also the Presentation time of this AVFrame calculated from
		/// only AVPacket.dts values without pts values.
		/// </summary>
		public int64_t pkt_dts;

		/// <summary>
		/// picture number in bitstream order
		/// </summary>
		public int coded_picture_number;
		/// <summary>
		/// picture number in display order
		/// </summary>
		public int display_picture_number;

		/// <summary>
		/// quality (between 1 (good) and FF_LAMBDA_MAX (bad))
		/// </summary>
		public int quality;

		// Native fields omitted (present only when libavutil is built with
		// FF_API_AVFRAME_LAVC), all deprecated:
		//   int reference;                       // frame reference flag
		//   int8_t *qscale_table;                // QP table
		//   int qstride;                         // QP store stride
		//   int qscale_type;
		//   uint8_t *mbskip_table;               // mbskip_table[mb]>=1 if MB didn't change;
		//                                        // stride = mb_width = (width+15)>>4
		//   int16_t (*motion_val[2])[2];         // motion vector table
		//   uint32_t *mb_type;                   // macroblock type table
		//   short *dct_coeff;                    // DCT coefficients
		//   int8_t *ref_index[2];                // motion reference frame index

		/// <summary>
		/// for some private data of the user
		/// </summary>
		public IntPtr opaque;

		/// <summary>
		/// error
		/// </summary>
		[MarshalAs(UnmanagedType.ByValArray, SizeConst=Defines.AV_NUM_DATA_POINTERS)]
		public uint64_t[] error;

		// Native field omitted (present only when libavutil is built with
		// FF_API_AVFRAME_LAVC): int type; (deprecated)

		/// <summary>
		/// When decoding, this signals how much the picture must be delayed.
		/// extra_delay = repeat_pict / (2*fps)
		/// </summary>
		public int repeat_pict;

		/// <summary>
		/// The content of the picture is interlaced.
		/// </summary>
		public int interlaced_frame;

		/// <summary>
		/// If the content is interlaced, is top field displayed first.
		/// </summary>
		public int top_field_first;

		/// <summary>
		/// Tell user application that palette has changed from previous frame.
		/// </summary>
		public int palette_has_changed;

		// Native fields omitted (present only when libavutil is built with
		// FF_API_AVFRAME_LAVC), all deprecated:
		//   int buffer_hints;
		//   struct AVPanScan *pan_scan;          // Pan scan.

		/// <summary>
		/// reordered opaque 64bit (generally an integer or a double precision float
		/// PTS but can be anything).
		/// The user sets AVCodecContext.reordered_opaque to represent the input at
		/// that time,
		/// the decoder reorders values as needed and sets AVFrame.reordered_opaque
		/// to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
		/// @deprecated in favor of pkt_pts
		/// </summary>
		public int64_t reordered_opaque;

		// Native fields omitted (present only when libavutil is built with
		// FF_API_AVFRAME_LAVC), all deprecated:
		//   void *hwaccel_picture_private;       // unused
		//   struct AVCodecContext *owner;
		//   void *thread_opaque;
		//   uint8_t motion_subsample_log2;       // log2 of the size of the block which a
		//                                        // single vector in motion_val represents:
		//                                        // (4->16x16, 3->8x8, 2->4x4, 1->2x2)

		/// <summary>
		/// Sample rate of the audio data.
		/// </summary>
		public int sample_rate;

		/// <summary>
		/// Channel layout of the audio data.
		/// </summary>
		public uint64_t channel_layout;

		/// <summary>
		/// AVBuffer references backing the data for this frame. If all elements of
		/// this array are NULL, then this frame is not reference counted.
		///
		/// There may be at most one AVBuffer per data plane, so for video this array
		/// always contains all the references. For planar audio with more than
		/// AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
		/// this array. Then the extra AVBufferRef pointers are stored in the
		/// extended_buf array.
		/// </summary>
		[MarshalAs(UnmanagedType.ByValArray, SizeConst=Defines.AV_NUM_DATA_POINTERS)]
		public IntPtr[] buf;

		/// <summary>
		/// For planar audio which requires more than AV_NUM_DATA_POINTERS
		/// AVBufferRef pointers, this array will hold all the references which
		/// cannot fit into AVFrame.buf (native type: AVBufferRef**).
		///
		/// Note that this is different from AVFrame.extended_data, which always
		/// contains all the pointers. This array only contains the extra pointers,
		/// which cannot fit into AVFrame.buf.
		///
		/// This array is always allocated using av_malloc() by whoever constructs
		/// the frame. It is freed in av_frame_unref().
		/// </summary>
		public IntPtr extended_buf;

		/// <summary>
		/// Number of elements in extended_buf.
		/// </summary>
		public int nb_extended_buf;

		/// <summary>
		/// Array of side data attached to the frame (native type: AVFrameSideData**).
		/// </summary>
		public IntPtr side_data;
		/// <summary>
		/// Number of elements in side_data.
		/// </summary>
		public int nb_side_data;

		/// <summary>
		/// frame timestamp estimated using various heuristics, in stream time base
		/// Code outside libavcodec should access this field using:
		/// av_frame_get_best_effort_timestamp(frame)
		/// - encoding: unused
		/// - decoding: set by libavcodec, read by user.
		/// </summary>
		public int64_t best_effort_timestamp;

		/// <summary>
		/// reordered pos from the last AVPacket that has been input into the decoder
		/// Code outside libavcodec should access this field using:
		/// av_frame_get_pkt_pos(frame)
		/// - encoding: unused
		/// - decoding: Read by user.
		/// </summary>
		public int64_t pkt_pos;

		/// <summary>
		/// duration of the corresponding packet, expressed in
		/// AVStream->time_base units, 0 if unknown.
		/// Code outside libavcodec should access this field using:
		/// av_frame_get_pkt_duration(frame)
		/// - encoding: unused
		/// - decoding: Read by user.
		/// </summary>
		public int64_t pkt_duration;

		/// <summary>
		/// metadata (native type: AVDictionary*).
		/// Code outside libavcodec should access this field using:
		/// av_frame_get_metadata(frame)
		/// - encoding: Set by user.
		/// - decoding: Set by libavcodec.
		/// </summary>
		public IntPtr metadata;

		/// <summary>
		/// decode error flags of the frame, set to a combination of
		/// FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
		/// were errors during the decoding.
		/// Code outside libavcodec should access this field using:
		/// av_frame_get_decode_error_flags(frame)
		/// - encoding: unused
		/// - decoding: set by libavcodec, read by user.
		/// </summary>
		public FF_DECODE_ERROR decode_error_flags;

		/// <summary>
		/// number of audio channels, only used for audio.
		/// Code outside libavcodec should access this field using:
		/// av_frame_get_channels(frame)
		/// - encoding: unused
		/// - decoding: Read by user.
		/// </summary>
		public int channels;

		/// <summary>
		/// size of the corresponding packet containing the compressed
		/// frame. It must be accessed using av_frame_get_pkt_size() and
		/// av_frame_set_pkt_size().
		/// It is set to a negative value if unknown.
		/// - encoding: unused
		/// - decoding: set by libavcodec, read by user.
		/// </summary>
		public int pkt_size;

		/// <summary>
		/// YUV colorspace type.
		/// It must be accessed using av_frame_get_colorspace() and
		/// av_frame_set_colorspace().
		/// - encoding: Set by user
		/// - decoding: Set by libavcodec
		/// </summary>
		public AVColorSpace colorspace;

		/// <summary>
		/// MPEG vs JPEG YUV range.
		/// It must be accessed using av_frame_get_color_range() and
		/// av_frame_set_color_range().
		/// - encoding: Set by user
		/// - decoding: Set by libavcodec
		/// </summary>
		public AVColorRange color_range;


		/// <summary>
		/// Not to be accessed directly from outside libavutil
		/// (native type: AVBufferRef*).
		/// </summary>
		public IntPtr qp_table_buf;
	}
}