. $LKP_SRC/lib/upload.sh
. $LKP_SRC/lib/job.sh
. $LKP_SRC/lib/reproduce-log.sh
. $LKP_SRC/lib/install_depends.sh

job_install_depend_packages()
{
	# Install the job's OS, pip and gem package dependencies, as collected
	# into the install_*_packages_all variables by the job generator.
	# All installs go through log_cmd so they land in the reproduce log.
	export PATH="$PATH:/root/.local/bin" # for commands installed by pip install --user
	test -n "$install_os_packages_all" && log_cmd install_depends "${install_os_packages_all}"
	test -n "$install_pip_packages_all" && log_cmd pip install --user --quiet $install_pip_packages_all -i https://repo.huaweicloud.com/repository/pypi/simple
	test -n "$install_gem_packages_all" && log_cmd gem install --quiet $install_gem_packages_all

	# Don't let an empty gem list (the last failing 'test -n') leak out as
	# a failure status for the whole function.
	return 0
}

mount_cgroup()
{
	# Mount each cgroup-v1 subsystem referenced by the job file on its own
	# hierarchy under $CGROUP_MNT.  Returns early (success) when a v1
	# hierarchy already appears mounted; returns 1 when the kernel exposes
	# no cgroup support at all.

	# A 'tasks' file at the mount root is a v1 marker: already mounted.
	[ -f "$CGROUP_MNT/tasks" ] && return

	[ -e '/proc/cgroups' ] || {
		echo "/proc/cgroups not found, skip cgroup mount."
		return 1
	}

	# Build one grep pattern per kernel-known subsystem, e.g. '\s\+cpu\.',
	# skipping the /proc/cgroups header line.
	# NOTE(review): 'availble' is a typo, but it is spelled the same in
	# both uses, so the temp file name still matches.
	awk 'NR > 1 {print "\\s\\+" $1 "\\."}' /proc/cgroups > $TMP/availble-cgroup_subsys
	# Keep only the subsystems the job file actually mentions.
	[ -f "$job" ] && cgroup_subsys=$(grep -o -f $TMP/availble-cgroup_subsys $job| sort | uniq)
	[ -n "$cgroup_subsys" ] || return
	# Strip the trailing '.' the patterns matched, leaving bare names.
	cgroup_subsys=$(echo $cgroup_subsys | sed -e 's/\. / /g' -e 's/\.$/ /')
	log_cmd mkdir -p $CGROUP_MNT
	# Bind each subsystem to an individual hierarchy
	for item in $cgroup_subsys
	do
		log_cmd mkdir -p $CGROUP_MNT/$item
		log_cmd mount -t cgroup -o $item $item $CGROUP_MNT/$item
	done
}

validate_result_root()
{
	# Accept either spelling: fall back to the lower-case $result_root
	# when $RESULT_ROOT is unset or empty.
	if [ -z "$RESULT_ROOT" ]; then
		RESULT_ROOT=$result_root
	fi

	# Without any result root the job cannot report back at all.
	if [ -z "$RESULT_ROOT" ]; then
		echo 'No RESULT_ROOT' >&2
		run_job_failed=1
		return 1
	fi

	return 0
}

supports_netfs()
{
	# A network filesystem is usable only when its userspace mount helper
	# (mount.$1, e.g. mount.nfs) is installed...
	has_cmd mount.$1 || return
	# ...and the kernel supports the filesystem itself.  Delegate to
	# supports_fs instead of duplicating its check/modprobe/recheck body.
	supports_fs $1
}

supports_fs()
{
	local fs=$1

	# Fast path: the filesystem is already registered with the kernel.
	grep -q -w $fs /proc/filesystems && return 0

	# Otherwise try loading its module, then re-check the registration.
	modprobe $fs >/dev/null || return
	grep -q -w $fs /proc/filesystems
}

supports_http_upload()
{
	# HTTP upload needs both network access and the curl client.
	[ -n "$NO_NETWORK" ] && return 1
	has_cmd curl
}

setup_result_root()
{
	# Decide how results reach the server and set result_fs /
	# TMP_RESULT_ROOT / UPLOAD_BY_COPY_TO accordingly.  Transports are
	# tried in order: HTTP upload, 9p virtfs (VM guests), bind-mounted
	# directory (docker).  Returns 1 when no transport is available.
	validate_result_root || return 1

	export JOB_RESULT_ROOT=$RESULT_ROOT
	unset UPLOAD_BY_COPY_TO
	unset result_fs

	echo RESULT_ROOT=$RESULT_ROOT
	echo job=$job

	supports_http_upload && {
		export result_fs=http_upload # so that post-run can read it
		# Results are staged locally under $TMP and uploaded over HTTP.
		export TMP_RESULT_ROOT=$TMP/result
		mkdir -p $TMP_RESULT_ROOT

		return
	}

	# VM guest: write results directly into the host's dir over a 9p mount.
	[ "$tbox_type" = 'vm' ] && supports_fs '9p' && {
		result_fs=virtfs
		export TMP_RESULT_ROOT=$RESULT_ROOT
		export UPLOAD_BY_COPY_TO=$RESULT_ROOT

		mkdir -p -m 02775 $RESULT_ROOT
		# NOTE(review): the '9p/result_root' mount tag presumably matches
		# the host-side virtfs device name — verify against the qemu setup.
		local cmd="mount -t 9p -o trans=virtio 9p/result_root $RESULT_ROOT -oversion=9p2000.L,posixacl,cache=loose"
		echo 
		echo "$cmd"
		$cmd || {
			set_job_state 'error_mount'
			return 1
		}
		return 0
	}

	# Docker: $RESULT_ROOT is expected to be bind-mounted from the host.
	is_docker && is_mount_point $RESULT_ROOT && {
		result_fs=virtfs
		export TMP_RESULT_ROOT=$RESULT_ROOT
		export UPLOAD_BY_COPY_TO=$RESULT_ROOT
		return 0
	}

	return 1
}

wait_on_renew_deadline()
{
	# Honor a one-shot deadline (epoch seconds) written to /tmp/renew;
	# sleep until it passes, consuming the file.  No-op when the file is
	# missing or empty.
	local renew_file=/tmp/renew
	[ -s "$renew_file" ] || return 0

	local now=$(date +%s)
	local deadline=$(cat "$renew_file")
	rm "$renew_file"

	# Only sleep when the deadline is still in the future.
	[ "$now" -ge "$deadline" ] || sleep $((deadline - now))
}

# in case someone is logged in, give him at most 10 hours to do manual checks
wait_on_manual_check()
{
	# extra sleep for user login after a failed job,
	# if "wait_debug_on_fail" is defined in the job file
	[ "$run_job_failed" != 0 ] && [ -n "$wait_debug_on_fail" ] && {
		sleep $wait_debug_on_fail
	}

	# Initialize explicitly: the variable was previously never set on the
	# undisturbed path, so a stray inherited $disturbed (or an empty
	# 'return $disturbed') could report a bogus status to the caller.
	local disturbed=0

	# Poll roughly once a minute, for at most 600 iterations (~10 hours).
	for i in $(seq 600)
	do
		if [ -f $TMP/disturbed ]; then
			:
		elif is_user_logged_in; then
			:
		elif ! has_cmd 'users'; then
			break
		# t100 has XWindow auto login
		# sof-minnow-1 and sof-minnow-2 have a root user on their local rootfs
		elif [ "$HOSTNAME" = 't100' ] || [ "$HOSTNAME" = 'sof-minnow-1' ] || [ "$HOSTNAME" = 'sof-minnow-2' ]; then
			# On these boxes one session is always present; only a
			# second logged-in user (output contains a space) counts.
			local users="$(users)"
			[ "${users#* }" != "$users" ] || break
		else
			[ -n "$(users)" ] || break
		fi

		disturbed=1
		[ "$i" = 1 ] && set_job_state 'manual_check'

		if [ -f /tmp/renew ]; then
			wait_on_renew_deadline
		else
			sleep 60
		fi
	done
	return $disturbed
}

clean_job_resource()
{
	# Terminate the background 'tail' loggers recorded in the pid file,
	# then drop the file itself.  Nothing to do when no logger was started.
	local pidfile=/tmp/pid-tail

	[ -f "$pidfile" ] || return
	kill $(cat "$pidfile")
	rm "$pidfile"
}

job_done() {
	# Mark the job finished, stop log capture, optionally hold the box for
	# manual debugging, then hand off to server-side post processing.
	${WAIT_BIN_PATH}wakeup job-finished
	touch $TMP/job-finished
	clean_job_resource
	wait_on_manual_check
	set_job_state 'finish'

	# The randconfig VM boot/trinity tests often cannot reliably finish and
	# may not have the network to run trigger_post_process.
	# The host side monitor will upload qemu.time/dmesg/kmsg files and then
	# trigger_post_process for the test job in VM.
	[ -n "$nr_vm" ] && return

	trigger_post_process
}

refresh_lkp_tmp()
{
	# Recreate the per-job scratch directory, wiping anything left over
	# from a previous job, and publish its path via $TMP.
	export TMP=/tmp/lkp
	rm -rf "$TMP"
	mkdir -p "$TMP"
}

job_redirect_one()
{
	# Follow one or more source files into $1 with a background tail
	# (-n 0: only lines appended from now on), and record the tail's pid
	# so clean_job_resource() can terminate it later.
	local target=$1
	shift

	tail -n 0 -f "$@" > "$target" &
	echo $! >> /tmp/pid-tail
}

job_redirect_stdout_stderr()
{
	# /tmp/stdout and /tmp/stderr are prepared by the boot environment;
	# without both of them there is nothing to capture.
	[ -e /tmp/stdout ] || return
	[ -e /tmp/stderr ] || return

	# Mirror each stream into the result dir, plus a combined 'output'.
	local stream
	for stream in stdout stderr
	do
		job_redirect_one $TMP_RESULT_ROOT/$stream /tmp/$stream
	done
	job_redirect_one $TMP_RESULT_ROOT/output /tmp/stdout /tmp/stderr
}

job_env()
{
	if echo $job_file | grep -q '\.sh$'; then
		. $job_file
	else
		. ${job_file%.yaml}.sh
	fi

	has_cmd export_top_env && export_top_env
	has_cmd read_job_vars && read_job_vars
}

# per-job initiation; should be invoked before run a job
job_init()
{
	# Order matters: a fresh $TMP must exist before job_env / result setup
	# write into it, and the result root must be configured before
	# stdout/stderr are redirected under $TMP_RESULT_ROOT.
	refresh_lkp_tmp

	job_env
	read_secret_vars

	# for use by upload_one, which is mostly called from post-run and program run script
	export job_id
	export job_token

	# Without a usable result transport the job cannot report anything:
	# finish it immediately and bail out.
	setup_result_root || { job_done; exit 1; }
	job_redirect_stdout_stderr
	# Snapshot /proc/uptime as the boot-time record for this job.
	cp /proc/uptime $TMP_RESULT_ROOT/boot-time
}

# vim:set ts=4 sw=4 et:
