/*
 * Copyright 2025 Snowflake Inc.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "postgres.h"
#include "fmgr.h"
#include "miscadmin.h"
#include "libpq-fe.h"

#include "access/relation.h"
#include "commands/dbcommands.h"
#include "pg_lake/copy/remote_query.h"
#include "pg_lake/csv/csv_options.h"
#include "pg_lake/pgduck/client.h"
#include "pg_lake/pgduck/read_data.h"
#include "pg_lake/util/parallel_workers.h"
#include "postmaster/bgworker_internals.h"
#include "pg_extension_base/attached_worker.h"
#include "pg_extension_base/base_workers.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"

#define DEFAULT_JOB_SLEEP 500

/* this should probably live in extension_base */

static bool JobIsComplete(AttachedWorker * worker);

/*
 * RunCommandsInParallel runs a list of commands in parallel, using at
 * most requestedWorkerCount attached workers at a time.
 *
 * Each command is started in an attached worker connected to the given
 * database as the given user.  If launching a worker fails (e.g. no
 * background worker slots available), the job is pushed back on the
 * queue and retried with exponential backoff.  If maxFailures is not -1
 * and the number of launch failures exceeds it, we error out.  Errors
 * raised by the background jobs themselves are not counted here.
 */
void
RunCommandsInParallel(List *commands,
					  char *dbname, char *user,
					  int requestedWorkerCount, int maxFailures)
{
	if (commands == NIL)
		return;

	Assert(requestedWorkerCount >= 1);

	AttachedWorker **workers = palloc0(sizeof(AttachedWorker *) * requestedWorkerCount);

	int			jobCount = list_length(commands);
	int			nextJobIndex = 0;
	int			completedJobCount = 0;
	int			sleepTime = DEFAULT_JOB_SLEEP;
	int			failureCount = 0;

	/*
	 * Remember the caller's memory context.  CopyErrorData() must not be
	 * called with CurrentMemoryContext == ErrorContext (it asserts against
	 * that, and FlushErrorState() resets ErrorContext), so in PG_CATCH()
	 * below we switch back to this context before copying the error.
	 */
	MemoryContext callerContext = CurrentMemoryContext;

	/* main loop for our parallel job */
	while (completedJobCount < jobCount)
	{
		CHECK_FOR_INTERRUPTS();

		for (int workerIndex = 0; workerIndex < requestedWorkerCount; workerIndex++)
		{
			/*
			 * Check for completion first so we can potentially reassign in
			 * this same pass.
			 */
			if (workers[workerIndex] != NULL && JobIsComplete(workers[workerIndex]))
			{
				/* cleanup */
				EndAttachedWorker(workers[workerIndex]);
				workers[workerIndex] = NULL;
				completedJobCount++;
			}

			/* check for available worker slot */
			if (workers[workerIndex] == NULL)
			{
				/* are there any more jobs to assign? */
				if (nextJobIndex < jobCount)
				{
					/* start job */
					char	   *command = list_nth(commands, nextJobIndex);

					nextJobIndex++;

					PG_TRY();
					{
						/* run in the given database with the given username */
						workers[workerIndex] =
							StartAttachedWorkerInDatabase(command,
														  dbname,
														  user);

						/*
						 * If we successfully started our job, we can reset
						 * our sleep time to the default.
						 */
						sleepTime = DEFAULT_JOB_SLEEP;
					}
					PG_CATCH();
					{
						/*
						 * If there was an error in the invocation (most
						 * likely no available workers), then push the job
						 * back on the queue and adjust our sleep timeout to
						 * wait additional time.
						 *
						 * This does not count errors generated by the
						 * background job, only those with launching.
						 */

						ErrorData  *errdata;

						/*
						 * Copy the error out of ErrorContext into the
						 * caller's context, then clear the error state
						 * before doing anything that could itself error
						 * out (canonical PG_CATCH pattern).
						 */
						MemoryContextSwitchTo(callerContext);
						errdata = CopyErrorData();
						FlushErrorState();

						ereport(LOG, (errmsg("error starting job in attached worker: %s (sqlerrcode: %s)",
											 errdata->message,
											 unpack_sql_state(errdata->sqlerrcode))));

						/*
						 * If maxFailures is disabled, we always keep going,
						 * otherwise see if we've exceeded the limit.
						 */
						if (maxFailures != -1 && ++failureCount > maxFailures)
							ereport(ERROR, (errmsg("failed jobs more than %d times", maxFailures)));

						/*
						 * Our own cleanup; theoretically we should analyze
						 * the specific sql state to ensure we are catching
						 * only the one we care about and PG_RE_THROW()
						 * otherwise.
						 */

						nextJobIndex--;

						/*
						 * Double the backoff, but cap it at one minute so
						 * repeated failures cannot overflow sleepTime or
						 * stall the loop indefinitely.
						 */
						if (sleepTime <= 60 * 1000 / 2)
							sleepTime *= 2;

						/* cleanup error inspection */

						FreeErrorData(errdata);

						/*
						 * Since we just failed, sleep a bit; subsequent
						 * attempts will reset sleepTime if successful.
						 */

						LightSleep(sleepTime);

						/*
						 * It is possible we want to have some overall retry
						 * failures or overall time limit here.
						 */
					}
					PG_END_TRY();
				}
				else
				{
					/* no job to start, just waiting for all to be complete */
				}
			}
		}

		/* sleep for some amount of time if we still have jobs processing */
		if (completedJobCount < jobCount)
			LightSleep(sleepTime);
	}

	pfree(workers);
}


/*
 * JobIsComplete() - check for completion
 *
 * Drains any pending messages from the worker without blocking, then
 * reports whether the worker has stopped running.
 */
static bool
JobIsComplete(AttachedWorker * worker)
{
	/* consume any messages for now; pass false so we never block here */
	ReadFromAttachedWorker(worker, false);

	if (IsAttachedWorkerRunning(worker))
		return false;

	return true;
}
