package main

import (
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"math/rand"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	kubernetes "k8s.io/client-go/kubernetes"
	clientcmd "k8s.io/client-go/tools/clientcmd"
)

// charset is the alphabet random job-name suffixes are drawn from.
var charset = []byte("abcdefghijklmnopqrstuvwxyz")

// randStr returns a string of n characters picked uniformly at random
// from charset.
func randStr(n int) string {
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = charset[rand.Intn(len(charset))]
	}
	return string(out)
}

// connectToK8s builds a clientset from the kubeconfig at $HOME/.kube/config.
// It panics (via log.Panicln) if the config cannot be loaded or the clientset
// cannot be constructed.
func connectToK8s() *kubernetes.Clientset {
	// Fall back to /root when HOME is unset (e.g. in a minimal container).
	home, exists := os.LookupEnv("HOME")
	if !exists {
		home = "/root"
	}

	configPath := filepath.Join(home, ".kube", "config")

	config, err := clientcmd.BuildConfigFromFlags("", configPath)
	if err != nil {
		// Include err: the original message discarded the underlying cause,
		// making config failures undiagnosable.
		log.Panicln("failed to create K8s config:", err)
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Panicln("failed to create K8s clientset:", err)
	}

	return clientset
}

// launchK8sJob creates a Job in the "default" namespace running the given
// image and command, then blocks until the job's pod logs have been streamed
// by processJobPods (or its 60s watch window expires).
func launchK8sJob(clientset *kubernetes.Clientset, image *string, cmd *string) {
	jobs := clientset.BatchV1().Jobs("default")
	// No retries: a failed pod is not recreated.
	var backOffLimit int32 = 0

	// Random suffix so repeated invocations don't collide on the job name.
	jobName := "job-" + randStr(5)

	jobSpec := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      jobName,
			Namespace: "default",
		},
		Spec: batchv1.JobSpec{
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  jobName,
							Image: *image,
							// Previously commented out, which left the -command
							// flag silently ignored. Split on spaces; quoted
							// arguments are not supported.
							Command: strings.Split(*cmd, " "),
						},
					},
					RestartPolicy: v1.RestartPolicyNever,
				},
			},
			BackoffLimit: &backOffLimit,
		},
	}

	_, err := jobs.Create(context.TODO(), jobSpec, metav1.CreateOptions{})
	if err != nil {
		log.Fatalln("Failed to create K8s job.", err)
	}

	var wg sync.WaitGroup
	// Add must precede the goroutine launch: otherwise Wait can observe a
	// zero counter and return before the watcher registers itself.
	wg.Add(1)
	go processJobPods(&wg, &jobName, clientset)
	wg.Wait()
}

// processJobPods watches the pods of the job named *jobName in the "default"
// namespace and, once a pod reaches the Running or Succeeded phase, streams
// its logs to stdout. wg is signalled exactly once, whether logs were
// streamed or the 60-second watch window expired.
func processJobPods(wg *sync.WaitGroup, jobName *string, clientset *kubernetes.Clientset) {
	// Done exactly once, on every exit path. (The original also called
	// wg.Done() explicitly after streaming, driving the counter negative
	// and panicking when the deferred Done fired.)
	defer wg.Done()

	fmt.Printf("Getting pods\n")
	// Bound the watch so a pod that never starts cannot block forever.
	podCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	pods, err := clientset.CoreV1().Pods("default").Watch(podCtx, metav1.ListOptions{LabelSelector: "job-name=" + *jobName})
	if err != nil {
		panic(err.Error())
	}
	defer pods.Stop()

	for event := range pods.ResultChan() {
		pod, ok := event.Object.(*v1.Pod)
		if !ok {
			// watch.Error events carry *metav1.Status, not *v1.Pod; the
			// original's unchecked assertion would panic here.
			continue
		}
		if event.Type != watch.Modified {
			continue
		}
		fmt.Printf("%s - %s\n", pod.Name, pod.Status.Phase)
		if pod.Status.Phase != v1.PodRunning && pod.Status.Phase != v1.PodSucceeded {
			continue
		}
		streamPodLogs(clientset, pod)
		// Logs streamed once; returning prevents duplicate output on later
		// Modified events and lets the deferred cleanup run.
		return
	}
}

// streamPodLogs copies the pod's container logs to stdout.
func streamPodLogs(clientset *kubernetes.Clientset, pod *v1.Pod) {
	req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{})
	podLogs, err := req.Stream(context.Background())
	if err != nil {
		panic(err)
	}
	defer podLogs.Close()
	// io.Copy terminates cleanly on io.EOF. The original hand-rolled loop
	// checked numBytes == 0 before the EOF check, so a Read returning
	// (0, io.EOF) made it spin forever.
	if _, err := io.Copy(os.Stdout, podLogs); err != nil {
		fmt.Print(err)
	}
}

func main() {
	containerImage := flag.String("image", "ubuntu:latest", "Name of the container image")
	entryCommand := flag.String("command", "ls", "The command to run inside the container")

	flag.Parse()

	clientset := connectToK8s()
	launchK8sJob(clientset, containerImage, entryCommand)
}
