package org.slusk.thynwor.feedback;

import org.slusk.thynwor.corestructures.Drone;
import org.slusk.thynwor.corestructures.Environment;
import org.slusk.thynwor.corestructures.Drone.OnlineLearningType;

import com.anji.hyperneat.onlinereinforcement.OnlineLearningNetType;

/**
 * Static factory that selects the {@link AbstractFeedbackProvider} implementation
 * matching a drone's online-learning configuration.
 */
public class FeedbackProviderFactory {

	// Utility class holding only static factory methods; not meant to be instantiated.
	private FeedbackProviderFactory() {
	}

	/**
	 * Returns the feedback provider matching the given learning configuration.
	 *
	 * @param environment        the simulation environment handed to every provider
	 * @param onlineLearningType the drone's online-learning mode
	 * @param learningNetType    the online-learning network type
	 * @return the matching provider; a plain {@link BaseFeedbackProvider} when
	 *         learning is disabled on either axis
	 * @throws IllegalArgumentException if {@code onlineLearningType} is an
	 *         unhandled enum constant (previously this silently returned null)
	 */
	public static AbstractFeedbackProvider getFeedbackProvider(Environment environment, OnlineLearningType onlineLearningType, OnlineLearningNetType learningNetType) {
		// Learning disabled on either axis -> base provider, regardless of the other value.
		if (onlineLearningType == OnlineLearningType.NONE || learningNetType == OnlineLearningNetType.NONE) {
			return new BaseFeedbackProvider(environment);
		}

		switch (onlineLearningType) {
		case SUPERVISED:
			return new SupervisedLearningProvider(environment);
		case BACKPROPREINFORCEMENT:
			return new BackPropReinforcementProvider(environment);
		case REINFORCEMENT:
			// Temporal-difference nets get a dedicated reinforcement provider;
			// every other net type uses the generic one.
			return learningNetType == OnlineLearningNetType.TEMPORALDIFFERENCE
					? new TdReinforcementProvider(environment)
					: new ReinforcementProvider(environment);
		default:
			// Unreachable for the currently known enum constants (NONE handled above);
			// fail fast instead of returning null as the original did.
			throw new IllegalArgumentException("Unhandled OnlineLearningType: " + onlineLearningType);
		}
	}

}
