#include "segment_anything_model.h"

#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

#include <filesystem>
#include <iomanip>
#include <sstream>

// Helper functions
namespace
{
	std::string image_to_string(const cv::Mat& image)
	{
		// Serialize the raw pixel bytes of an image into a hex string, used as
		// a cache key for image embeddings.
		//
		// Iterates row by row so that non-continuous Mats (e.g. ROIs / views
		// with row padding) are serialized correctly; a single linear scan from
		// ptr<uchar>() would read padding bytes for such inputs. For continuous
		// Mats the output is identical to the linear scan.
		std::ostringstream oss;
		oss << std::hex << std::setfill('0');
		const size_t row_bytes = image.cols * image.elemSize();
		for (int row = 0; row < image.rows; ++row)
		{
			const uchar* row_ptr = image.ptr<uchar>(row);
			for (size_t i = 0; i < row_bytes; ++i)
			{
				oss << std::setw(2) << static_cast<unsigned>(row_ptr[i]);
			}
		}
		return oss.str();
	}

	void compute_scale_to_resize_image(int image_size, const cv::Mat& image, double& scale, int& new_height,
	                                   int& new_width)
	{
		// Determine the scale factor and target dimensions that fit `image`
		// into `image_size` while preserving aspect ratio: the longest side
		// becomes exactly image_size, the other is rounded proportionally.
		const int h = image.rows;
		const int w = image.cols;
		const bool landscape = w > h;
		const int longest = landscape ? w : h;

		scale = static_cast<double>(image_size) / longest;
		if (landscape)
		{
			new_width = image_size;
			new_height = static_cast<int>(std::round(h * scale));
		}
		else
		{
			new_height = image_size;
			new_width = static_cast<int>(std::round(w * scale));
		}
	}

	cv::Mat resize_image(int image_size, const cv::Mat& image, double& scale)
	{
		// Resize so that the longest side equals `image_size` (aspect ratio
		// preserved) and convert to 3-channel float. `scale` receives the
		// factor that was applied.
		int target_height = 0;
		int target_width = 0;
		compute_scale_to_resize_image(image_size, image, scale, target_height, target_width);

		cv::Mat shrunk;
		cv::resize(image, shrunk, cv::Size(target_width, target_height), 0, 0, cv::INTER_AREA);

		cv::Mat as_float;
		shrunk.convertTo(as_float, CV_32FC3);
		return as_float;
	}

	cv::Mat compute_image_embedding(int image_size, Ort::Session& encoder_session, const cv::Mat& image)
	{
		// Run the SAM image encoder on `image` and return the embedding as an
		// owning cv::Mat copy of the encoder output.
		//
		// Preprocessing mirrors the SAM pipeline: 3-channel conversion,
		// aspect-preserving resize (longest side == image_size), per-channel
		// normalization, zero padding to image_size x image_size, and
		// HWC -> CHW layout for the "x" input tensor.

		// Ensure a 3-channel image.
		cv::Mat rgb_image;
		if (image.channels() == 4)
		{
			cv::cvtColor(image, rgb_image, cv::COLOR_RGBA2RGB);
		}
		else if (image.channels() == 1)
		{
			cv::cvtColor(image, rgb_image, cv::COLOR_GRAY2RGB);
		}
		else
		{
			rgb_image = image.clone();
		}

		double scale;
		cv::Mat x = resize_image(image_size, rgb_image, scale);

		// SAM pixel mean / std, per channel.
		// NOTE: the previous version computed `x = (x - mean) / std` with mean
		// and std as 1x3 CV_32F Mats — an invalid element-wise operation
		// against an HxW 3-channel image (size mismatch, throws at runtime).
		// Normalization is now done per split channel with scalar arithmetic.
		const float mean[3] = {123.675f, 116.28f, 103.53f};
		const float stddev[3] = {58.395f, 57.12f, 57.375f};

		const int pad_bottom = image_size - x.rows;
		const int pad_right = image_size - x.cols;

		// HWC -> CHW: split into channels, normalize each, zero-pad AFTER
		// normalization (so padded pixels contribute exactly 0, matching the
		// original pad value), and append each plane contiguously.
		// NOTE: the previous version split channels but then flattened the HWC
		// image `x` into the tensor, producing the wrong memory layout.
		std::vector<cv::Mat> channels(3);
		cv::split(x, channels);

		std::vector<float> input_tensor_values;
		input_tensor_values.reserve(static_cast<size_t>(3) * image_size * image_size);
		for (int c = 0; c < 3; ++c)
		{
			cv::Mat plane = (channels[c] - mean[c]) / stddev[c]; // scalar ops on single-channel Mat
			cv::copyMakeBorder(plane, plane, 0, pad_bottom, 0, pad_right, cv::BORDER_CONSTANT, cv::Scalar(0));
			input_tensor_values.insert(input_tensor_values.end(), plane.begin<float>(), plane.end<float>());
		}

		std::vector<int64_t> input_shape = {1, 3, image_size, image_size};

		Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
		Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
			memory_info, input_tensor_values.data(), input_tensor_values.size(),
			input_shape.data(), input_shape.size()
		);

		// Run the encoder, explicitly requesting its first output by name.
		// NOTE: the previous call passed (nullptr, 0) for the outputs, which
		// makes Session::Run return an EMPTY vector — the subsequent .front()
		// was undefined behavior.
		const char* input_names[] = {"x"};
		Ort::AllocatorWithDefaultOptions allocator;
		Ort::AllocatedStringPtr embedding_name = encoder_session.GetOutputNameAllocated(0, allocator);
		const char* output_names[] = {embedding_name.get()};
		auto output_tensors = encoder_session.Run(
			Ort::RunOptions{nullptr}, input_names, &input_tensor, 1, output_names, 1
		);

		// Copy the output into an owning cv::Mat (the tensor's buffer dies with
		// output_tensors, hence the clone()).
		Ort::Value& output_tensor = output_tensors.front();
		float* output_data = output_tensor.GetTensorMutableData<float>();
		std::vector<int64_t> output_shape = output_tensor.GetTensorTypeAndShapeInfo().GetShape();

		std::vector<int> output_shape_int(output_shape.begin(), output_shape.end());
		cv::Mat image_embedding(output_shape_int, CV_32F, output_data);
		return image_embedding.clone();
	}

	cv::Mat compute_mask_from_points(
		int image_size, Ort::Session& decoder_session, const cv::Mat& image, const cv::Mat& image_embedding,
		const std::vector<cv::Point2f>& points, const std::vector<int>& point_labels
	)
	{
		// Run the SAM decoder with point prompts and return a binary mask
		// (CV_8U, values 0/1) at original image resolution, with small
		// connected components removed.

		// Point prompts as flat (x, y) pairs, plus the (0, 0) / label -1
		// padding point the SAM ONNX decoder expects when no box is given.
		std::vector<float> input_point_values;
		for (const auto& pt : points)
		{
			input_point_values.push_back(pt.x);
			input_point_values.push_back(pt.y);
		}
		input_point_values.push_back(0.0f); // Adding [0.0, 0.0]
		input_point_values.push_back(0.0f);

		std::vector<int64_t> point_coords_shape = {1, static_cast<int64_t>(points.size() + 1), 2};

		std::vector<float> input_label_values(point_labels.begin(), point_labels.end());
		input_label_values.push_back(-1.0f); // Adding [-1] for the padding point
		std::vector<int64_t> point_labels_shape = {1, static_cast<int64_t>(point_labels.size() + 1)};

		// Map prompt coordinates from original image space into the resized
		// (longest side == image_size) space the decoder expects.
		double scale;
		int new_height, new_width;
		compute_scale_to_resize_image(image_size, image, scale, new_height, new_width);

		for (size_t i = 0; i < input_point_values.size(); i += 2)
		{
			input_point_values[i] *= static_cast<float>(new_width) / image.cols;
			input_point_values[i + 1] *= static_cast<float>(new_height) / image.rows;
		}

		// No prior low-res mask is supplied, so mask_input is all zeros.
		std::vector<float> mask_input_values(1 * 1 * 256 * 256, 0.0f);
		std::vector<int64_t> mask_input_shape = {1, 1, 256, 256};

		// NOTE(review): the official SAM ONNX example passes 0.0 for
		// has_mask_input when no mask is provided; -1.0 is kept here to
		// preserve existing behavior — confirm against the exported model.
		float has_mask_input_value = -1.0f;
		std::vector<int64_t> has_mask_input_shape = {1};

		// Original image size as (rows, cols); the decoder uses it to upscale
		// the mask back to input resolution.
		std::vector<float> orig_im_size = {static_cast<float>(image.rows), static_cast<float>(image.cols)};
		std::vector<int64_t> orig_im_size_shape = {2};

		// Create tensors (all CPU-backed, borrowing the buffers above).
		Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);

		// Convert image_embedding shape to std::vector<int64_t>
		std::vector<int64_t> embedding_shape;
		for (int i = 0; i < image_embedding.dims; ++i)
		{
			embedding_shape.push_back(static_cast<int64_t>(image_embedding.size.p[i]));
		}

		Ort::Value image_embeddings_tensor = Ort::Value::CreateTensor<float>(
			memory_info, (float*)image_embedding.data, image_embedding.total(),
			embedding_shape.data(), embedding_shape.size()
		);

		Ort::Value point_coords_tensor = Ort::Value::CreateTensor<float>(
			memory_info, input_point_values.data(), input_point_values.size(),
			point_coords_shape.data(), point_coords_shape.size()
		);

		Ort::Value point_labels_tensor = Ort::Value::CreateTensor<float>(
			memory_info, input_label_values.data(), input_label_values.size(),
			point_labels_shape.data(), point_labels_shape.size()
		);

		Ort::Value mask_input_tensor = Ort::Value::CreateTensor<float>(
			memory_info, mask_input_values.data(), mask_input_values.size(),
			mask_input_shape.data(), mask_input_shape.size()
		);

		Ort::Value has_mask_input_tensor = Ort::Value::CreateTensor<float>(
			memory_info, &has_mask_input_value, 1,
			has_mask_input_shape.data(), has_mask_input_shape.size()
		);

		Ort::Value orig_im_size_tensor = Ort::Value::CreateTensor<float>(
			memory_info, orig_im_size.data(), orig_im_size.size(),
			orig_im_size_shape.data(), orig_im_size_shape.size()
		);

		// Input names must match the SAM ONNX decoder signature and the tensor
		// order below.
		const char* input_names[] = {
			"image_embeddings", "point_coords", "point_labels", "mask_input", "has_mask_input", "orig_im_size"
		};
		std::array<Ort::Value, 6> input_tensors = {
			std::move(image_embeddings_tensor),
			std::move(point_coords_tensor),
			std::move(point_labels_tensor),
			std::move(mask_input_tensor),
			std::move(has_mask_input_tensor),
			std::move(orig_im_size_tensor)
		};

		// Run the decoder, explicitly requesting its first output ("masks").
		// NOTE: the previous call passed (nullptr, 0) for the outputs, which
		// makes Session::Run return an EMPTY vector — the subsequent .front()
		// was undefined behavior.
		Ort::AllocatorWithDefaultOptions allocator;
		Ort::AllocatedStringPtr masks_name = decoder_session.GetOutputNameAllocated(0, allocator);
		const char* output_names[] = {masks_name.get()};
		auto output_tensors = decoder_session.Run(
			Ort::RunOptions{nullptr}, input_names, input_tensors.data(), input_tensors.size(), output_names, 1
		);

		// The masks tensor is [1, num_masks, H, W]; take the first mask plane.
		Ort::Value& masks_tensor = output_tensors.front();
		float* masks_data = masks_tensor.GetTensorMutableData<float>();
		std::vector<int64_t> masks_shape = masks_tensor.GetTensorTypeAndShapeInfo().GetShape();

		int H = static_cast<int>(masks_shape[2]);
		int W = static_cast<int>(masks_shape[3]);
		cv::Mat mask(H, W, CV_32F, masks_data);
		// Threshold logits at 0 -> binary {0, 1} mask.
		cv::Mat binary_mask;
		cv::threshold(mask, binary_mask, 0.0, 1.0, cv::THRESH_BINARY);

		// Components smaller than 5% of the total foreground area are dropped.
		double min_size_ratio = 0.05;
		int min_size = static_cast<int>(cv::countNonZero(binary_mask) * min_size_ratio);

		// Convert to 8-bit for connectedComponentsWithStats / findContours.
		binary_mask.convertTo(binary_mask, CV_8U);

		// Remove small objects using connected components.
		cv::Mat labels, stats, centroids;
		int num_labels = cv::connectedComponentsWithStats(binary_mask, labels, stats, centroids);

		for (int i = 1; i < num_labels; ++i) // label 0 is the background
		{
			int area = stats.at<int>(i, cv::CC_STAT_AREA);
			if (area < min_size)
			{
				binary_mask.setTo(0, labels == i);
			}
		}

		return binary_mask;
	}
} // namespace

// Class implementation

SegmentAnythingModel::SegmentAnythingModel(const std::string& encoder_path, const std::string& decoder_path)
	: image_size_(1024), env_(ORT_LOGGING_LEVEL_WARNING, "SegmentAnythingModel"), session_options_(),
	  encoder_session_(nullptr), decoder_session_(nullptr), embedding_computed_(false)
{
	// Create the ONNX Runtime sessions for the SAM encoder and decoder.
	session_options_.SetIntraOpNumThreads(1);

	// std::filesystem::path::c_str() yields wchar_t* on Windows (which the
	// Ort::Session ctor requires there) and char* elsewhere, with a proper
	// narrow -> native conversion. The previous byte-widening
	// std::wstring(begin, end) was Windows-only and corrupted any non-ASCII
	// characters in the model paths.
	const std::filesystem::path encoder_model(encoder_path);
	const std::filesystem::path decoder_model(decoder_path);

	encoder_session_ = Ort::Session(env_, encoder_model.c_str(), session_options_);
	decoder_session_ = Ort::Session(env_, decoder_model.c_str(), session_options_);
}

SegmentAnythingModel::~SegmentAnythingModel()
{
	// Block until any in-flight embedding computation finishes; joining a
	// non-joinable thread would be invalid, hence the guard.
	if (worker_thread_.joinable())
		worker_thread_.join();
}

void SegmentAnythingModel::set_image(const cv::Mat& image_input)
{
	// Set the working image and make its embedding available: reuse a cached
	// embedding when possible, otherwise compute one on a background thread.
	//
	// Join any previous worker FIRST. The old thread holds a clone of the
	// previous image, and the original code only joined on a cache miss: on a
	// cache hit it returned while the stale worker could still overwrite
	// image_embedding_ / embedding_computed_ with the OLD image's embedding.
	if (worker_thread_.joinable())
	{
		worker_thread_.join();
	}

	{
		std::lock_guard<std::mutex> lock(mtx_);
		image_ = image_input.clone();
		std::string image_key = image_to_string(image_);
		auto it = image_embedding_cache_.find(image_key);
		if (it != image_embedding_cache_.end())
		{
			// Cache hit: publish the cached embedding immediately.
			image_embedding_ = it->second;
			embedding_computed_ = true;
			return;
		}
		// Cache miss: mark stale so get_image_embedding() blocks until the
		// worker publishes the fresh embedding.
		embedding_computed_ = false;
	}

	worker_thread_ = std::thread(&SegmentAnythingModel::compute_and_cache_image_embedding, this);
}

void SegmentAnythingModel::compute_and_cache_image_embedding()
{
	cv::Mat image_copy;
	{
		std::lock_guard<std::mutex> lock(mtx_);
		image_copy = image_.clone();
	}

	std::cout << "Computing image embedding..." << std::endl;
	cv::Mat embedding = compute_image_embedding(image_size_, encoder_session_, image_copy);

	{
		std::lock_guard<std::mutex> lock(mtx_);
		image_embedding_ = embedding;
		std::string image_key = image_to_string(image_);
		if (image_embedding_cache_.size() > 10)
		{
			image_embedding_cache_.erase(image_embedding_cache_.begin());
		}
		image_embedding_cache_[image_key] = embedding;
		embedding_computed_ = true;
		cv_.notify_all();
	}
	std::cout << "Done computing image embedding." << std::endl;
}

cv::Mat SegmentAnythingModel::get_image_embedding()
{
	// Block until the worker has published an embedding, then return it.
	// The explicit wait loop is equivalent to the predicate overload of
	// condition_variable::wait and guards against spurious wake-ups.
	std::unique_lock<std::mutex> guard(mtx_);
	while (!embedding_computed_)
	{
		cv_.wait(guard);
	}
	return image_embedding_;
}

cv::Mat SegmentAnythingModel::predict_mask_from_points(const std::vector<cv::Point2f>& points,
                                                       const std::vector<int>& point_labels)
{
	// Wait for the image embedding to be ready, then run the decoder on the
	// given point prompts to produce a binary mask.
	const cv::Mat embedding = get_image_embedding();
	return compute_mask_from_points(image_size_, decoder_session_, image_, embedding, points, point_labels);
}

cv::Mat SegmentAnythingModel::predict_polygon_from_points(const std::vector<cv::Point2f>& points,
                                                          const std::vector<int>& point_labels)
{
	// Predict a mask for the prompts, then rasterize its outer contours into
	// a filled binary image (255 = inside the polygon, 0 = outside).
	cv::Mat mask = predict_mask_from_points(points, point_labels);

	std::vector<std::vector<cv::Point>> outlines;
	cv::findContours(mask, outlines, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

	cv::Mat filled = cv::Mat::zeros(mask.size(), CV_8UC1);
	cv::drawContours(filled, outlines, -1, cv::Scalar(255), cv::FILLED);
	return filled;
}
