#include "inference.h"

#include <cstring>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>

#include <opencv2/dnn.hpp>

namespace yolo {

// Constructor to initialize the model with default input shape
// Constructor using the default 640x640 input size (safe for models whose
// shapes are dynamic). Class names fall back to COCO inside InitializeModel().
Inference::Inference(const std::string &model_path, const float &model_confidence_threshold, const float &model_NMS_threshold)
		: model_input_shape_(640, 640),
		  model_confidence_threshold_(model_confidence_threshold),
		  model_NMS_threshold_(model_NMS_threshold) {
	InitializeModel(model_path);
}

// Constructor to initialize the model with specified input shape
// Constructor using a caller-supplied input size; class names default to COCO.
Inference::Inference(const std::string &model_path, const cv::Size model_input_shape, const float &model_confidence_threshold, const float &model_NMS_threshold)
		: model_input_shape_(model_input_shape),
		  model_confidence_threshold_(model_confidence_threshold),
		  model_NMS_threshold_(model_NMS_threshold) {
	InitializeDefaultClasses();
	InitializeModel(model_path);
}

// Constructor with custom class names
// Constructor with caller-supplied input size AND class names (overrides COCO).
Inference::Inference(const std::string &model_path, const cv::Size model_input_shape, const float &model_confidence_threshold, const float &model_NMS_threshold, const std::vector<std::string> &class_names)
		: model_input_shape_(model_input_shape),
		  model_confidence_threshold_(model_confidence_threshold),
		  model_NMS_threshold_(model_NMS_threshold) {
	model_config_.class_names = class_names;
	model_config_.num_classes = class_names.size();
	InitializeModel(model_path);
}

// Reads the model, attaches an OpenVINO preprocessing graph, picks a device,
// compiles the network, and caches the model's input/output shapes.
void Inference::InitializeModel(const std::string &model_path) {
	ov::Core core; // OpenVINO core object
	std::shared_ptr<ov::Model> model = core.read_model(model_path); // Read the model from file

	// Detect model version before the preprocessing graph rewrites the model
	model_config_.version = DetectModelVersion(*model);

	// Preprocessing setup: feed u8 NHWC BGR frames straight from OpenCV; the
	// graph converts them to f32 RGB and rescales each channel to [0, 1].
	ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
	ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::BGR);
	ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255, 255, 255});
	ppp.input().model().set_layout("NCHW");
	ppp.output().tensor().set_element_type(ov::element::f32);
	model = ppp.build(); // Build the preprocessed model

	// Detect available devices; default to CPU
	std::vector<std::string> available_devices = core.get_available_devices();
	std::string device = "CPU";

	// Prefer GPU when present (GPU is more stable than NPU)
	for (const auto &dev : available_devices) {
		if (dev.find("GPU") != std::string::npos) {
			device = "GPU";
			break;
		}
	}

	// Note: NPU support is experimental and may have compatibility issues.
	// To opt in, add an equivalent scan for "NPU" above.

	device_info_ = device;
	std::cerr << "[INFO] Using device: " << device << std::endl;
	std::cerr << "[INFO] Available devices: ";
	for (const auto &dev : available_devices) {
		std::cerr << dev << " ";
	}
	std::cerr << std::endl;

	// Compile the model and create a reusable inference request
	compiled_model_ = core.compile_model(model, device);
	inference_request_ = compiled_model_.create_infer_request();

	// Input shape. After ppp.build() the input TENSOR layout is NHWC, so a
	// static rank-4 shape reads [N, H, W, C]. (The previous code assumed NCHW
	// and stored the dims in `short`, which reads W into height and the channel
	// count 3 into width, and overflows for dims > 32767.)
	const ov::PartialShape input_partial_shape = model->inputs()[0].get_partial_shape();
	if (input_partial_shape.is_static() && input_partial_shape.rank().get_length() == 4) {
		const ov::Shape input_shape = input_partial_shape.get_shape();
		const size_t input_height = input_shape[1]; // NHWC: N, H, W, C
		const size_t input_width = input_shape[2];
		model_input_shape_ = cv::Size2f(static_cast<float>(input_width), static_cast<float>(input_height));
	} else {
		model_input_shape_ = cv::Size2f(640, 640); // Fallback for dynamic shapes
	}

	// Output shape. YOLOv8-style outputs are rank 3 ([1, 84, 8400]); the old
	// code indexed dims [2] and [3] unconditionally, which reads past the end
	// of a rank-3 ov::Shape (undefined behavior). Use the last two dims instead.
	const ov::PartialShape output_partial_shape = model->outputs()[0].get_partial_shape();
	if (output_partial_shape.is_static() && output_partial_shape.rank().get_length() >= 2) {
		const ov::Shape output_shape = output_partial_shape.get_shape();
		const size_t output_height = output_shape[output_shape.size() - 2];
		const size_t output_width = output_shape[output_shape.size() - 1];
		model_output_shape_ = cv::Size(static_cast<int>(output_width), static_cast<int>(output_height));
	} else {
		model_output_shape_ = cv::Size(640, 640); // Fallback for dynamic shapes
	}

	// Ensure class names are populated even when the caller supplied none
	InitializeDefaultClasses();
}

// Method to run inference on an input frame
// Runs the full detection pipeline (preprocess -> infer -> postprocess) on
// `frame` in place. Any failure is logged to stderr and rethrown to the caller.
void Inference::RunInference(cv::Mat &frame) {
	try {
		Preprocessing(frame);       // Resize the frame and bind it as the input tensor
		inference_request_.infer(); // Synchronous forward pass
		PostProcessing(frame);      // Decode detections back onto the frame
	} catch (const std::exception &e) {
		std::cerr << "Error in RunInference: " << e.what() << std::endl;
		throw;
	} catch (...) {
		std::cerr << "Unknown error in RunInference" << std::endl;
		throw std::runtime_error("Unknown error in RunInference");
	}
}

// Method to preprocess the input frame
// Resizes `frame` to the model input size, records the scale factors needed to
// map detections back to the original frame, and binds the pixels as the
// network's input tensor. Throws std::runtime_error on empty/invalid input.
void Inference::Preprocessing(const cv::Mat &frame) {
	try {
		if (frame.empty()) {
			throw std::runtime_error("Input frame is empty");
		}

		// Convert Size2f to an integer Size for cv::resize
		cv::Size resize_size(static_cast<int>(model_input_shape_.width), static_cast<int>(model_input_shape_.height));
		cv::Mat resized_frame;
		cv::resize(frame, resized_frame, resize_size, 0, 0, cv::INTER_AREA);

		if (resized_frame.empty()) {
			throw std::runtime_error("Resized frame is empty");
		}
		// The fixed-size copy below assumes 3-channel (BGR) pixels
		if (resized_frame.channels() != 3) {
			throw std::runtime_error("Expected a 3-channel BGR frame");
		}

		// Scale factors for mapping detections back to the original resolution.
		// Cast BEFORE dividing so the division is done in floating point even if
		// the shape members are ever integral.
		scale_factor_.x = static_cast<float>(frame.cols) / model_input_shape_.width;
		scale_factor_.y = static_cast<float>(frame.rows) / model_input_shape_.height;

		// Model expects NHWC u8 data: [1, H, W, 3].
		// BUGFIX: the tensor must OWN its pixel data. The previous code wrapped
		// resized_frame.data directly, but an ov::Tensor built over a host
		// pointer does not copy — resized_frame is destroyed when this function
		// returns, so the later infer() call read freed memory.
		ov::Shape input_shape = {1, static_cast<size_t>(resized_frame.rows), static_cast<size_t>(resized_frame.cols), 3};
		ov::Tensor input_tensor(ov::element::u8, input_shape);
		std::memcpy(input_tensor.data(), resized_frame.data, input_tensor.get_byte_size());

		inference_request_.set_input_tensor(input_tensor);
	} catch (const std::exception &e) {
		std::cerr << "Error in Preprocessing: " << e.what() << std::endl;
		throw;
	}
}

// Method to postprocess the inference results
// Postprocessing stub: fetches the output tensor and sanity-checks its rank,
// but does NOT yet decode detections (intentional — see TODO below). `frame`
// is currently left untouched.
void Inference::PostProcessing(cv::Mat &frame) {
	// Get the output tensor from the inference request
	try {
		ov::Tensor output_tensor = inference_request_.get_output_tensor();

		// Get the actual output shape from the tensor (may differ from the
		// statically-declared shape for models with dynamic dimensions)
		ov::Shape tensor_shape = output_tensor.get_shape();

		// Bail out on degenerate shapes; a decodable detection tensor needs at
		// least two dimensions
		if (tensor_shape.size() < 2) {
			std::cerr << "Invalid output tensor shape" << std::endl;
			return;
		}

		// TODO: Implement proper YOLOv8 output parsing.
		// Deliberately returning without processing for now to avoid
		// segmentation faults. Expected output format:
		// - Shape: [1, 84, 8400] for YOLOv8
		// - Each detection column: [x, y, w, h, class_scores...] — decode with
		//   confidence/NMS thresholds and GetBoundingBox() when implemented.

	} catch (const std::exception &e) {
		// Postprocessing errors are logged but not rethrown: a failed decode
		// should not abort the capture loop in RunInference's caller.
		std::cerr << "Error in PostProcessing: " << e.what() << std::endl;
	}
}

// Method to get the bounding box in the correct scale
// Converts a centre-based box in model-input coordinates to a corner-based box
// in original-frame coordinates, using the scale factors set by Preprocessing().
cv::Rect Inference::GetBoundingBox(const cv::Rect &src) const {
	const int left = static_cast<int>((src.x - src.width / 2) * scale_factor_.x);
	const int top = static_cast<int>((src.y - src.height / 2) * scale_factor_.y);
	const int scaled_width = static_cast<int>(src.width * scale_factor_.x);
	const int scaled_height = static_cast<int>(src.height * scale_factor_.y);
	return cv::Rect(left, top, scaled_width, scaled_height);
}

// Draws `detection` onto `frame`: a coloured bounding box plus a filled
// "<class> <confidence>" label above it.
// FIX: the colour is now derived deterministically from the class id. The old
// code constructed a std::random_device-seeded generator on every call, which
// is needlessly expensive and gave every detection a NEW random colour each
// frame, making boxes flicker; now each class keeps one stable colour.
void Inference::DrawDetectedObject(cv::Mat &frame, const Detection &detection) const {
	const cv::Rect &box = detection.box;
	const float confidence = detection.confidence;
	const int class_id = detection.class_id;

	// Stable pseudo-random colour per class, kept bright (120-255 per channel)
	std::mt19937 gen(static_cast<std::mt19937::result_type>(class_id) + 1u);
	std::uniform_int_distribution<int> dis(120, 255);
	const cv::Scalar color(dis(gen), dis(gen), dis(gen));

	// Draw the bounding box around the detected object
	cv::rectangle(frame, cv::Point(box.x, box.y), cv::Point(box.x + box.width, box.y + box.height), color, 3);

	// Resolve the class name; out-of-range ids fall back to "unknown"
	// (explicit cast avoids a signed/unsigned comparison warning)
	std::string class_name = "unknown";
	if (class_id >= 0 && class_id < static_cast<int>(model_config_.class_names.size())) {
		class_name = model_config_.class_names[class_id];
	}

	// Label text: class name plus confidence truncated to "0.xx" form
	std::string classString = class_name + " " + std::to_string(confidence).substr(0, 4);

	// Filled background sized to the rendered text, placed above the box
	cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 0.75, 2, 0);
	cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
	cv::rectangle(frame, textBox, color, cv::FILLED);

	// Class label and confidence in black over the filled background
	cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 0.75, cv::Scalar(0, 0, 0), 2, 0);
}

// Detect model version based on model structure
// Guesses the YOLO generation from the model's friendly name: "11" anywhere in
// the name means YOLOv11; everything else (including "8") maps to YOLOv8.
ModelVersion Inference::DetectModelVersion(const ov::Model &model) {
	// Name-based heuristic only — calling get_shape() here could throw for
	// models whose shapes are still dynamic.
	const std::string name = model.get_friendly_name();
	if (name.find("11") != std::string::npos) {
		return ModelVersion::YOLOv11;
	}
	// "8", unnamed, or anything else: YOLOv8 for backward compatibility
	return ModelVersion::YOLOv8;
}

// Initialize default COCO classes
// Populates model_config_ with the built-in COCO class list, but only when the
// caller has not already supplied custom class names.
void Inference::InitializeDefaultClasses() {
	if (!model_config_.class_names.empty()) {
		return; // Custom classes already installed — leave them untouched
	}
	model_config_.class_names = default_classes_;
	model_config_.num_classes = default_classes_.size();
}
} // namespace yolo
