/*
Copyright 2019 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package parallel

import (
	"context"
	"fmt"

	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	duckapis "knative.dev/pkg/apis/duck"

	"knative.dev/pkg/kmeta"
	"knative.dev/pkg/logging"
	pkgreconciler "knative.dev/pkg/reconciler"

	duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
	eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
	"knative.dev/eventing/pkg/apis/feature"
	v1 "knative.dev/eventing/pkg/apis/flows/v1"
	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
	"knative.dev/eventing/pkg/auth"
	clientset "knative.dev/eventing/pkg/client/clientset/versioned"
	parallelreconciler "knative.dev/eventing/pkg/client/injection/reconciler/flows/v1/parallel"
	eventingv1alpha1listers "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
	listers "knative.dev/eventing/pkg/client/listers/flows/v1"
	messaginglisters "knative.dev/eventing/pkg/client/listers/messaging/v1"
	ducklib "knative.dev/eventing/pkg/duck"
	"knative.dev/eventing/pkg/reconciler/parallel/resources"
	"knative.dev/pkg/kmp"
)

// Reconciler reconciles flows.knative.dev Parallel objects: it materializes the
// Channels, Subscriptions and (when OIDC is enabled) EventPolicies that implement
// a Parallel's branches.
type Reconciler struct {
	// listers index properties about resources
	parallelLister     listers.ParallelLister
	channelableTracker ducklib.ListableTracker
	subscriptionLister messaginglisters.SubscriptionLister

	// eventingClientSet allows us to configure Eventing objects
	eventingClientSet clientset.Interface

	// dynamicClientSet allows us to configure pluggable Build objects
	dynamicClientSet dynamic.Interface

	// eventPolicyLister indexes the EventPolicies used to guard the Parallel's channels.
	eventPolicyLister eventingv1alpha1listers.EventPolicyLister
}

// Check that our Reconciler implements parallelreconciler.Interface
var _ parallelreconciler.Interface = (*Reconciler)(nil)

// ReconcileKind reconciles a Parallel. It does the following:
//  1. Creates a channel fronting the whole parallel and one filter channel per branch.
//  2. For each of the Branches:
//     2.1 creates a Subscription to the fronting Channel, subscribing the filter and sending the reply to the filter Channel;
//     2.2 creates a Subscription to the filter Channel, subscribing the subscriber and sending the reply to
//         either the branch Reply. If not present, send reply to the global Reply. If not present, do not send reply.
//  3. Removes leftover Channels and Subscriptions when the number of branches decreased.
//  4. Reconciles the EventPolicies for the Parallel and propagates them into its status.
func (r *Reconciler) ReconcileKind(ctx context.Context, p *v1.Parallel) pkgreconciler.Event {
	featureFlags := feature.FromContext(ctx)

	if p.Status.BranchStatuses == nil {
		p.Status.BranchStatuses = make([]v1.ParallelBranchStatus, 0)
	}

	gvr, _ := meta.UnsafeGuessKindToResource(p.Spec.ChannelTemplate.GetObjectKind().GroupVersionKind())
	channelResourceInterface := r.dynamicClientSet.Resource(gvr).Namespace(p.Namespace)
	if channelResourceInterface == nil {
		return fmt.Errorf("unable to create dynamic client for: %+v", p.Spec.ChannelTemplate)
	}

	// i == -1 reconciles the ingress (fronting) channel; i >= 0 reconciles the i-th branch channel.
	var ingressChannel *duckv1.Channelable
	channels := make([]*duckv1.Channelable, 0, len(p.Spec.Branches))
	for i := -1; i < len(p.Spec.Branches); i++ {
		var channelName string
		if i == -1 {
			channelName = resources.ParallelChannelName(p.Name)
		} else {
			channelName = resources.ParallelBranchChannelName(p.Name, i)
		}

		channelObjRef := corev1.ObjectReference{
			Kind:       p.Spec.ChannelTemplate.Kind,
			APIVersion: p.Spec.ChannelTemplate.APIVersion,
			Name:       channelName,
			Namespace:  p.Namespace,
		}

		channelable, err := r.reconcileChannel(ctx, channelResourceInterface, p, channelObjRef)
		if err != nil {
			err = fmt.Errorf("failed to reconcile channel %s at step %d: %w", channelName, i, err)
			p.Status.MarkChannelsNotReady("ChannelsNotReady", err.Error())
			return err
		}
		logging.FromContext(ctx).Infof("Reconciled Channel Object: %s/%s %+v", p.Namespace, channelName, channelable)

		if i == -1 {
			ingressChannel = channelable
		} else {
			channels = append(channels, channelable)
		}
	}
	p.Status.PropagateChannelStatuses(ingressChannel, channels)

	filterSubs := make([]*messagingv1.Subscription, 0, len(p.Spec.Branches))
	subs := make([]*messagingv1.Subscription, 0, len(p.Spec.Branches))
	for i := 0; i < len(p.Spec.Branches); i++ {
		filterSub, sub, err := r.reconcileBranch(ctx, i, p)
		if err != nil {
			// Wrap with %w so callers can unwrap the underlying API error.
			return fmt.Errorf("failed to reconcile Subscription Objects for branch: %d : %w", i, err)
		}
		subs = append(subs, sub)
		filterSubs = append(filterSubs, filterSub)
		logging.FromContext(ctx).Debugf("Reconciled Subscription Objects for branch: %d: %+v, %+v", i, filterSub, sub)
	}
	p.Status.PropagateSubscriptionStatuses(filterSubs, subs)

	// If a parallel instance is modified resulting in the number of steps decreasing, there will be
	// leftover channels and subscriptions that need to be removed.
	if err := r.removeUnwantedChannels(ctx, channelResourceInterface, p, append(channels, ingressChannel)); err != nil {
		return fmt.Errorf("error removing unwanted Channels: %w", err)
	}

	if err := r.removeUnwantedSubscriptions(ctx, p, append(filterSubs, subs...)); err != nil {
		return fmt.Errorf("error removing unwanted Subscriptions: %w", err)
	}

	// Reconcile EventPolicies for the parallel.
	if err := r.reconcileEventPolicies(ctx, p, ingressChannel, channels, filterSubs, featureFlags); err != nil {
		return fmt.Errorf("failed to reconcile EventPolicies for Parallel: %w", err)
	}

	// Wrap with %w (was %v) so the policy error stays inspectable via errors.Is/As.
	if err := auth.UpdateStatusWithEventPolicies(featureFlags, &p.Status.AppliedEventPoliciesStatus, &p.Status, r.eventPolicyLister, v1.SchemeGroupVersion.WithKind("Parallel"), p.ObjectMeta); err != nil {
		return fmt.Errorf("could not update parallel status with EventPolicies: %w", err)
	}

	return nil
}

// reconcileChannel ensures the physical Channel identified by channelObjRef exists and
// returns it converted to the Channelable duck type. When the Channel is not found it is
// created from the Parallel's channel template, owned by p; any other lookup error is
// returned wrapped.
func (r *Reconciler) reconcileChannel(ctx context.Context, channelResourceInterface dynamic.ResourceInterface, p *v1.Parallel, channelObjRef corev1.ObjectReference) (*duckv1.Channelable, error) {
	logger := logging.FromContext(ctx)
	c, err := r.trackAndFetchChannel(ctx, p, channelObjRef)
	if err != nil {
		if apierrs.IsNotFound(err) {
			newChannel, err := ducklib.NewPhysicalChannel(
				p.Spec.ChannelTemplate.TypeMeta,
				metav1.ObjectMeta{
					Name:      channelObjRef.Name,
					Namespace: p.Namespace,
					// Owned by the Parallel so the Channel is garbage-collected with it.
					OwnerReferences: []metav1.OwnerReference{
						*kmeta.NewControllerRef(p),
					},
				},
				ducklib.WithPhysicalChannelSpec(p.Spec.ChannelTemplate.Spec),
			)
			if err != nil {
				return nil, fmt.Errorf("failed to create Channel resource %v: %w", channelObjRef, err)
			}
			created, err := channelResourceInterface.Create(ctx, newChannel, metav1.CreateOptions{})
			if err != nil {
				return nil, fmt.Errorf("failed to create channel %v: %w", channelObjRef, err)
			}
			logger.Debugw("Created Channel", zap.Any("channel", newChannel))
			// Convert to Channel duck so that we can treat all Channels the same.
			channelable := &duckv1.Channelable{}
			if err = duckapis.FromUnstructured(created, channelable); err != nil {
				return nil, fmt.Errorf("failed to convert Channelable %v: %w", created, err)
			}
			return channelable, nil
		}
		return nil, fmt.Errorf("failed to get channel %v: %w", channelObjRef, err)
	}
	logger.Debugw("Found Channel", zap.Any("channel", channelObjRef))
	channelable, ok := c.(*duckv1.Channelable)
	if !ok {
		// This path is only reached when trackAndFetchChannel succeeded, so err is nil
		// here — there is nothing to wrap (the old "%w" formatted a nil error).
		return nil, fmt.Errorf("failed to convert to Channelable Object %+v", c)
	}
	return channelable, nil
}

// reconcileBranch reconciles the two Subscriptions making up a single branch: the one
// wiring the ingress channel to the branch filter, and the one wiring the filter channel
// to the branch subscriber. It returns (filterSubscription, subscriberSubscription, error).
func (r *Reconciler) reconcileBranch(ctx context.Context, branchNumber int, p *v1.Parallel) (*messagingv1.Subscription, *messagingv1.Subscription, error) {
	// Subscription from the fronting channel into this branch's filter.
	fSub, err := r.reconcileSubscription(ctx, branchNumber, resources.NewFilterSubscription(branchNumber, p))
	if err != nil {
		return nil, nil, err
	}

	// Subscription from the filter channel to the branch subscriber.
	sSub, err := r.reconcileSubscription(ctx, branchNumber, resources.NewSubscription(branchNumber, p))
	if err != nil {
		return nil, nil, err
	}

	return fSub, sSub, nil
}

// reconcileSubscription brings the Subscription described by expected into the desired
// state: it is created when missing, deleted and re-created when an immutable field
// (spec.channel) changed, updated in place when only mutable spec fields differ, and
// returned as-is when it already matches.
func (r *Reconciler) reconcileSubscription(ctx context.Context, branchNumber int, expected *messagingv1.Subscription) (*messagingv1.Subscription, error) {
	sub, err := r.subscriptionLister.Subscriptions(expected.Namespace).Get(expected.Name)

	// If the resource doesn't exist, we'll create it.
	if apierrs.IsNotFound(err) {
		sub = expected
		logging.FromContext(ctx).Infof("Creating subscription: %+v", sub)
		newSub, err := r.eventingClientSet.MessagingV1().Subscriptions(sub.Namespace).Create(ctx, sub, metav1.CreateOptions{})
		if err != nil {
			// TODO: Send events here, or elsewhere?
			//r.Recorder.Eventf(p, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Parallel's subscription failed: %v", err)
			// Wrap with %w so callers can unwrap the underlying API error.
			return nil, fmt.Errorf("failed to create Subscription Object for branch: %d : %w", branchNumber, err)
		}
		return newSub, nil
	} else if err != nil {
		logging.FromContext(ctx).Errorw("Failed to get Subscription", zap.Error(err))
		// TODO: Send events here, or elsewhere?
		//r.Recorder.Eventf(p, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Parallels's subscription failed: %v", err)
		return nil, fmt.Errorf("failed to get Subscription: %w", err)
	} else if immutableFieldsChanged := expected.CheckImmutableFields(ctx, sub); immutableFieldsChanged != nil {
		// Given that spec.channel is immutable, we cannot just update the subscription. We delete
		// it instead, and re-create it.
		err = r.eventingClientSet.MessagingV1().Subscriptions(sub.Namespace).Delete(ctx, sub.Name, metav1.DeleteOptions{})
		if err != nil {
			logging.FromContext(ctx).Infow("Cannot delete Subscription", zap.Error(err))
			return nil, err
		}
		newSub, err := r.eventingClientSet.MessagingV1().Subscriptions(sub.Namespace).Create(ctx, expected, metav1.CreateOptions{})
		if err != nil {
			logging.FromContext(ctx).Infow("Cannot create Subscription", zap.Error(err))
			return nil, err
		}
		return newSub, nil
	} else if equal, err := kmp.SafeEqual(sub.Spec, expected.Spec); !equal || err != nil {
		// Mutable spec drift (or an undecidable comparison): push the expected spec.
		updatedSub, err := r.eventingClientSet.MessagingV1().Subscriptions(sub.Namespace).Update(ctx, expected, metav1.UpdateOptions{})
		if err != nil {
			logging.FromContext(ctx).Infow("Cannot update subscription", zap.Error(err))
			return nil, err
		}
		return updatedSub, nil
	}
	// Already up to date.
	return sub, nil
}

// trackAndFetchChannel registers interest in the Channel referenced by ref (so the
// Parallel is reconciled again when the Channel changes) and then fetches it through the
// dynamically resolved lister for the ref's GVK.
func (r *Reconciler) trackAndFetchChannel(ctx context.Context, p *v1.Parallel, ref corev1.ObjectReference) (runtime.Object, error) {
	// Track the channel using the channelableTracker.
	// We don't need the explicitly set a channelInformer, as this will dynamically generate one for us.
	// This code needs to be called before checking the existence of the `channel`, in order to make sure the
	// subscription controller will reconcile upon a `channel` change.
	if err := r.channelableTracker.TrackInNamespace(ctx, p)(ref); err != nil {
		return nil, fmt.Errorf("unable to track changes to Channel ref %+v: %w", ref, err)
	}
	chLister, err := r.channelableTracker.ListerFor(ref)
	if err != nil {
		return nil, fmt.Errorf("failed to get lister for channel ref %v: %w", ref, err)
	}
	obj, err := chLister.ByNamespace(p.Namespace).Get(ref.Name)
	if err != nil {
		return nil, fmt.Errorf("failed to get channel from lister for ref %v: %w", ref, err)
	}
	// err is nil here; return a literal nil for clarity.
	return obj, nil
}

// removeUnwantedChannels deletes every Channel that is controlled by the Parallel p but
// is no longer part of its Spec (i.e. not in wanted). Channels already terminating or
// controlled by someone else are left untouched.
func (r *Reconciler) removeUnwantedChannels(ctx context.Context, channelResourceInterface dynamic.ResourceInterface, p *v1.Parallel, wanted []*duckv1.Channelable) error {
	// Only Kind/APIVersion matter here: the tracker resolves a lister per GVK.
	channelObjRef := corev1.ObjectReference{
		Kind:       p.Spec.ChannelTemplate.Kind,
		APIVersion: p.Spec.ChannelTemplate.APIVersion,
	}

	l, err := r.channelableTracker.ListerFor(channelObjRef)
	if err != nil {
		return fmt.Errorf("error getting lister for Channels: %w", err)
	}

	ownedChannels, err := l.ByNamespace(p.GetNamespace()).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("error listing Channels: %w", err)
	}

	ownedSet := sets.String{}
	for _, c := range ownedChannels {
		ch, err := kmeta.DeletionHandlingAccessor(c)
		if err != nil {
			// ch is nil when the accessor fails, so it must not be dereferenced in the
			// message (the old code called ch.GetName() here and would panic).
			return fmt.Errorf("error reading Channel: %w", err)
		}

		// Skip channels that are already terminating or that we do not control.
		if !ch.GetDeletionTimestamp().IsZero() ||
			!metav1.IsControlledBy(ch, p) {
			continue
		}

		ownedSet.Insert(ch.GetName())
	}

	wantedSet := sets.String{}
	for _, cw := range wanted {
		wantedSet.Insert(cw.Name)
	}

	// Anything we own but no longer want gets deleted.
	for _, c := range ownedSet.Difference(wantedSet).List() {
		err = channelResourceInterface.Delete(ctx, c, metav1.DeleteOptions{})
		if err != nil {
			return fmt.Errorf("error deleting Channel %q: %w", c, err)
		}
	}

	return nil
}

// removeUnwantedSubscriptions deletes every Subscription controlled by the Parallel p
// that is not part of the wanted list. Subscriptions already terminating or owned by a
// different controller are ignored.
func (r *Reconciler) removeUnwantedSubscriptions(ctx context.Context, p *v1.Parallel, wanted []*messagingv1.Subscription) error {
	existing, err := r.subscriptionLister.Subscriptions(p.Namespace).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("error listing Subscriptions: %w", err)
	}

	// Names of the live Subscriptions this Parallel controls.
	controlled := sets.String{}
	for _, s := range existing {
		if s.GetDeletionTimestamp().IsZero() && metav1.IsControlledBy(s, p) {
			controlled.Insert(s.GetName())
		}
	}

	// Names of the Subscriptions that must be kept.
	keep := sets.String{}
	for _, w := range wanted {
		keep.Insert(w.Name)
	}

	// Delete everything controlled but unwanted.
	for _, name := range controlled.Difference(keep).List() {
		if err := r.eventingClientSet.MessagingV1().Subscriptions(p.Namespace).Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
			return fmt.Errorf("error deleting Subscription %q: %w", name, err)
		}
	}

	return nil
}

// reconcileEventPolicies converges the EventPolicies owned by the Parallel to the desired
// set: one policy per branch channel (granting its filter Subscription access) plus the
// policies derived for the ingress channel. When OIDC authentication is disabled, all
// owned policies are removed instead.
//
// channels and filterSubs are indexed in parallel: filterSubs[i] is the filter
// Subscription for channels[i] — callers must keep them the same length.
func (r *Reconciler) reconcileEventPolicies(ctx context.Context, p *v1.Parallel, ingressChannel *duckv1.Channelable,
	channels []*duckv1.Channelable, filterSubs []*messagingv1.Subscription, featureFlags feature.Flags) error {

	if !featureFlags.IsOIDCAuthentication() {
		return r.cleanupAllEventPolicies(ctx, p)
	}
	// list all the existing event policies for the parallel.
	existingPolicies, err := r.listEventPoliciesForParallel(p)
	if err != nil {
		return fmt.Errorf("failed to list existing event policies for parallel: %w", err)
	}
	// make a map of existing event policies for easy and efficient lookup.
	// Entries are removed as they are matched below; whatever remains is stale.
	existingPolicyMap := make(map[string]*eventingv1alpha1.EventPolicy)
	for _, policy := range existingPolicies {
		existingPolicyMap[policy.Name] = policy
	}

	// prepare the list of event policies to create, update and delete.
	var policiesToCreate, policiesToUpdate []*eventingv1alpha1.EventPolicy
	policiesToDelete := make([]*eventingv1alpha1.EventPolicy, 0, len(existingPolicyMap))

	// Diff the per-branch-channel policies against the existing set.
	for i, channel := range channels {
		filterSub := filterSubs[i]
		expectedPolicy := resources.MakeEventPolicyForParallelChannel(p, channel, filterSub)
		if existingPolicy, ok := existingPolicyMap[expectedPolicy.Name]; ok {
			// DeepDerivative ignores unset fields in expectedPolicy, so only real drift
			// triggers an update.
			if !equality.Semantic.DeepDerivative(expectedPolicy, existingPolicy) {
				expectedPolicy.SetResourceVersion(existingPolicy.ResourceVersion)
				policiesToUpdate = append(policiesToUpdate, expectedPolicy)
			}
			delete(existingPolicyMap, expectedPolicy.Name)
		} else {
			policiesToCreate = append(policiesToCreate, expectedPolicy)
		}
	}

	// prepare the event policies for the ingress channel.
	ingressChannelEventPolicies, err := r.prepareIngressChannelEventpolicies(p, ingressChannel)
	if err != nil {
		return fmt.Errorf("failed to prepare event policies for ingress channel: %w", err)
	}

	// Same diffing for the ingress-channel policies.
	for _, policy := range ingressChannelEventPolicies {
		if existingIngressChannelPolicy, ok := existingPolicyMap[policy.Name]; ok {
			if !equality.Semantic.DeepDerivative(policy, existingIngressChannelPolicy) {
				policy.SetResourceVersion(existingIngressChannelPolicy.ResourceVersion)
				policiesToUpdate = append(policiesToUpdate, policy)
			}
			delete(existingPolicyMap, policy.Name)
		} else {
			policiesToCreate = append(policiesToCreate, policy)
		}
	}

	// delete the remaining event policies in the map.
	for _, policy := range existingPolicyMap {
		policiesToDelete = append(policiesToDelete, policy)
	}

	// now that we have the list of event policies to create, update and delete, we can perform the operations.
	if err := r.createEventPolicies(ctx, policiesToCreate); err != nil {
		return fmt.Errorf("failed to create event policies: %w", err)
	}
	if err := r.updateEventPolicies(ctx, policiesToUpdate); err != nil {
		return fmt.Errorf("failed to update event policies: %w", err)
	}
	if err := r.deleteEventPolicies(ctx, policiesToDelete); err != nil {
		return fmt.Errorf("failed to delete event policies: %w", err)
	}

	return nil
}

// createEventPolicies creates the given EventPolicies, stopping at the first failure.
func (r *Reconciler) createEventPolicies(ctx context.Context, policies []*eventingv1alpha1.EventPolicy) error {
	for i := range policies {
		ep := policies[i]
		if _, err := r.eventingClientSet.EventingV1alpha1().EventPolicies(ep.Namespace).Create(ctx, ep, metav1.CreateOptions{}); err != nil {
			return err
		}
	}
	return nil
}

// updateEventPolicies updates the given EventPolicies, stopping at the first failure.
func (r *Reconciler) updateEventPolicies(ctx context.Context, policies []*eventingv1alpha1.EventPolicy) error {
	for i := range policies {
		ep := policies[i]
		if _, err := r.eventingClientSet.EventingV1alpha1().EventPolicies(ep.Namespace).Update(ctx, ep, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}

// deleteEventPolicies deletes the given EventPolicies. A policy that is already gone
// (NotFound) counts as successfully deleted.
func (r *Reconciler) deleteEventPolicies(ctx context.Context, policies []*eventingv1alpha1.EventPolicy) error {
	for i := range policies {
		ep := policies[i]
		if err := r.eventingClientSet.EventingV1alpha1().EventPolicies(ep.Namespace).Delete(ctx, ep.Name, metav1.DeleteOptions{}); err != nil && !apierrs.IsNotFound(err) {
			return err
		}
	}
	return nil
}

// prepareIngressChannelEventpolicies derives the EventPolicies for the ingress channel
// from the EventPolicies that apply to the Parallel itself. It returns nil when no
// policy applies to the Parallel.
func (r *Reconciler) prepareIngressChannelEventpolicies(p *v1.Parallel, ingressChannel *duckv1.Channelable) ([]*eventingv1alpha1.EventPolicy, error) {
	parallelPolicies, err := auth.GetEventPoliciesForResource(r.eventPolicyLister, v1.SchemeGroupVersion.WithKind("Parallel"), p.ObjectMeta)
	if err != nil {
		return nil, fmt.Errorf("could not get EventPolicies for Parallel %s/%s: %w", p.Namespace, p.Name, err)
	}

	if len(parallelPolicies) == 0 {
		return nil, nil
	}

	// One ingress-channel policy per policy applying to the Parallel.
	out := make([]*eventingv1alpha1.EventPolicy, 0, len(parallelPolicies))
	for _, pp := range parallelPolicies {
		out = append(out, resources.MakeEventPolicyForParallelIngressChannel(p, ingressChannel, pp))
	}

	return out, nil
}

// cleanupAllEventPolicies removes every EventPolicy associated with the given Parallel.
func (r *Reconciler) cleanupAllEventPolicies(ctx context.Context, p *v1.Parallel) error {
	// Fetch all policies labeled for this Parallel, then delete them in one pass.
	policies, err := r.listEventPoliciesForParallel(p)
	if err != nil {
		return err
	}
	return r.deleteEventPolicies(ctx, policies)
}

// listEventPoliciesForParallel returns all EventPolicies created during ReconcileKind
// that belong to the given Parallel (e.g. the policies for the input channel and the
// intermediate channels), selected via the parallel-name label.
func (r *Reconciler) listEventPoliciesForParallel(p *v1.Parallel) ([]*eventingv1alpha1.EventPolicy, error) {
	selector := labels.SelectorFromSet(labels.Set{
		resources.ParallelChannelEventPolicyLabelPrefix + "parallel-name": p.Name,
	})
	return r.eventPolicyLister.EventPolicies(p.Namespace).List(selector)
}
