#ifndef _AUDIO_H_
#define _AUDIO_H_

// Samples per audio chunk: 320 (presumably an 8000 Hz sample rate divided
// into 25 chunks per second — TODO confirm against the capture code).
// Parenthesized so expressions like `x / AUDIO_SIZE` or `x % AUDIO_SIZE`
// expand correctly; the unparenthesized form made `x / AUDIO_SIZE`
// expand to `x / 8000 / 25`.
#define AUDIO_SIZE (8000 / 25)

// FFmpeg-based audio capture/transcode pipeline.
//
// Reads audio from an input device (input_format_context / input_codec_context),
// converts the samples with swresample, buffers them in an AVAudioFifo, then
// pulls fixed-size frames back out of the FIFO and encodes them with
// output_codec_context. The private helpers mirror the structure of FFmpeg's
// transcode_aac example. The main loop runs on its own std::thread.
class cls_audio
{
public:
	// p_app: the owning application object; stored as a non-owning back-pointer.
	cls_audio(cls_spkcam *p_app);
	~cls_audio();

	cls_spkcam *app; // non-owning back-pointer to the application

	// Run flag for audio_main_loop(); toggled by audio_loop_start()/
	// audio_loop_stop(). Initialized here — it was previously indeterminate
	// until first assigned. NOTE(review): if the flag is polled by the loop
	// thread while another thread clears it, it should be std::atomic<bool>
	// — confirm against the .cpp.
	bool audio_running = false;

	std::unique_ptr<std::thread> main_loop_thread; // runs audio_main_loop()

	void audio_main_loop();  // thread body
	void audio_loop_start(); // start the loop (presumably spawns main_loop_thread — confirm in .cpp)
	void audio_loop_stop();  // stop the loop

private:
	// Member pointers initialized with nullptr (type-safe C++11 idiom) rather
	// than the macro NULL; the file already relies on C++11 features.
	AVFormatContext *input_format_context = nullptr, *output_format_context = nullptr; // if not save audio no need for output format context
	AVCodecContext *input_codec_context = nullptr, *output_codec_context = nullptr;
	SwrContext *resample_context = nullptr;
	AVAudioFifo *fifo = nullptr;

	int64_t pts = 0; // running presentation timestamp for encoded audio frames

	void open_audio_device(); // init input format context and input codec context
	void close_audio_device();

	void init_codec(); // init output codec context (the output format context is NOT initialized here)
	void free_codec();

	void init_resampler(); // init resample context
	void free_resampler();

	void init_fifo(); // init audio fifo
	void free_fifo();

	// One capture step: read a packet, decode it, convert the samples and
	// store them in the FIFO.
	int read_decode_convert_and_store();
	// One encode step: load one frame's worth of samples from the FIFO,
	// encode it and queue the resulting packet.
	int load_encode_and_add_queue();

	void init_packet(AVPacket *packet);

	int init_input_frame(AVFrame **frame);
	int decode_audio_frame(AVFrame *frame,
						   AVFormatContext *input_format_context,
						   AVCodecContext *input_codec_context,
						   int *data_present);
	int init_converted_samples(uint8_t ***converted_input_samples,
							   AVCodecContext *output_codec_context,
							   int frame_size);
	int convert_samples(const uint8_t **input_data,
						uint8_t **converted_data, const int frame_size,
						SwrContext *resample_context);
	int add_samples_to_fifo(AVAudioFifo *fifo,
							uint8_t **converted_input_samples,
							const int frame_size);

	int init_output_frame(AVFrame **frame,
						  AVCodecContext *output_codec_context,
						  int frame_size);

	int encode_audio_frame(AVFrame *frame,
						   AVFormatContext *output_format_context,
						   AVCodecContext *output_codec_context,
						   int *data_present);
};

#endif //  _AUDIO_H_