package com.yangyang.ralearn.client.rlclient;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;

import com.yangyang.ralearn.common.BaseAlgorithm;
import com.yangyang.ralearn.common.Constants;
import com.yangyang.ralearn.common.JEnvironment;
import com.yangyang.ralearn.common.entity.base.AbstractDomainPredicate;
import com.yangyang.ralearn.common.entity.base.ActionLimit;
import com.yangyang.ralearn.common.entity.base.BaseAction;
import com.yangyang.ralearn.common.entity.base.BaseActionModelDef;
import com.yangyang.ralearn.common.entity.base.BasePredicate;
import com.yangyang.ralearn.common.exception.AlgorithmEndException;
import com.yangyang.ralearn.common.exception.ErrorActionException;
import com.yangyang.ralearn.common.exception.ReachGoalException;
import com.yangyang.ralearn.common.framework.BeanManager;
import com.yangyang.ralearn.common.types.JType;
import com.yangyang.ralearn.net.BaseClientSimulatorHandler;
import com.yangyang.ralearn.server.ServerManager;
import com.yangyang.ralearn.util.RLConfig;

@Component
public class Ralearn extends BaseAlgorithm {

	/** Shared logger, fetched once instead of on every log call. */
	private static final Logger LOG = Logger.getLogger(Ralearn.class);

	/** Random source for action / model selection (java.util.Random is thread-safe). */
	private Random random = new Random();

	/**
	 * Builds the action-signature map (action name -> parameter-name/type-name
	 * pairs) from the environment's action limits and delegates to the base
	 * class initializer.
	 *
	 * @param environment the planning environment supplying the action limits
	 */
	public void init(JEnvironment environment) {
		Map<String, Map<String, String>> actions = new HashMap<String, Map<String, String>>();
		for (ActionLimit limit : environment.mapName2ActionLimit.values()) {
			Map<String, String> params = new HashMap<String, String>();
			for (Entry<String, JType> entry : limit.getMapVar2Type()
					.entrySet()) {
				params.put(entry.getKey(), entry.getValue().getName());
			}
			actions.put(limit.getName(), params);
		}
		super.init(environment, actions);
	}

	/**
	 * Main learning loop: repeatedly picks a random action signature, selects
	 * the best-scoring candidate action model for it, executes a grounded
	 * action and folds the observed reward into the model's Q value.
	 * Candidate models whose predictions are contradicted by the environment
	 * are removed on the fly.
	 *
	 * @param handler bridge to the simulator / environment
	 * @throws AlgorithmEndException when the goal state is reached
	 */
	@Override
	public void run(IRLAgentHandler handler) throws AlgorithmEndException {
		LOG.debug(this.mapName2Models);

		int counter = 0;
		List<String> actionSignature = new ArrayList<String>();
		actionSignature.addAll(this.mapName2Models.keySet());
		List<AbstractDomainPredicate> currentStates = handler.observe();
		LOG.info("***************************************");
		while (counter++ < Constants.MAX_ITER_TIME) {
			// Randomly choose an action signature.
			String actionName = actionSignature.get(random
					.nextInt(actionSignature.size()));

			// Select the candidate action model with the highest Q value.
			BaseActionModelDef model = this.getActionModel(actionName);

			// All groundings of the action that are possible in currentStates
			// according to action model: model.
			Set<BaseAction> actions = this.getAllPossiableAction(model,
					currentStates);

			boolean removeFlag = false;
			List<BaseAction> doable = new ArrayList<BaseAction>();
			for (BaseAction action : actions) {
				if (!handler.canDo(action.toString())) {
					// The model claims the action is applicable but the
					// environment rejects it: the model is wrong, drop it.
					if (BaseActionModelDef.canDo(action, model, currentStates)) {
						this.removeActionModel(actionName, model);
						removeFlag = true;
						LOG.debug("remove:" + model);
						break;
					}
				} else {
					doable.add(action);
				}
			}
			if (doable.size() > 0 && !removeFlag) {
				try {
					BaseAction action = doable.get(random.nextInt(doable.size()));
					List<AbstractDomainPredicate> oldStates = currentStates;
					handler.doAction(action.toString());
					currentStates = handler.observe();
					int qValue = Ralearn.getReward(
							BaseActionModelDef.doAction(action, model),
							oldStates, currentStates);
					if (qValue == -Constants.MAX_SCORE) {
						// Observed effects contradict the model's prediction.
						this.removeActionModel(actionName, model);
						LOG.debug("remove:" + model);
					} else {
						// Exponential moving-average Q update.
						model.setQValue((int) (qValue * Constants.alpha + (1 - Constants.alpha)
								* model.getQValue()));
					}
				} catch (ReachGoalException e) {
					LOG.info("goal reached", e);
					throw new AlgorithmEndException("reach the goal!");
				} catch (ErrorActionException ignored) {
					// Best-effort: a rejected action simply yields no Q update.
				}
			}
		}
		LOG.info("***************************************");
		LOG.info(this.mapName2Models);
	}

	/**
	 * Returns one of the action models with the highest Q value for the given
	 * action name; ties are broken uniformly at random.
	 *
	 * @param action action name
	 * @return a best-scoring action model
	 * @throws IllegalStateException if no model is registered for the action
	 */
	public BaseActionModelDef getActionModel(String action) {
		List<BaseActionModelDef> best = new ArrayList<BaseActionModelDef>();
		int maxQValue = -1;
		for (BaseActionModelDef def : this.mapName2Models.get(action)) {
			if (def.getQValue() > maxQValue) {
				best.clear();
				best.add(def);
				maxQValue = def.getQValue();
			} else if (def.getQValue() == maxQValue) {
				best.add(def);
			}
		}
		if (best.isEmpty()) {
			// Previously this logged "error!" and then crashed inside
			// random.nextInt(0); fail with an explicit message instead.
			LOG.error("no action model left for action:" + action);
			throw new IllegalStateException(
					"no action model left for action: " + action);
		}
		return best.get(random.nextInt(best.size()));
	}

	/**
	 * Scores how well the model-predicted effects match the observed state
	 * transition.
	 *
	 * @param expectedPredicates effects predicted by the action model
	 * @param oldStates state before the action was executed
	 * @param observedPredicates state observed after the action
	 * @return a non-negative reward, or -Constants.MAX_SCORE when the
	 *         prediction is contradicted by the observation
	 */
	public static int getReward(
			List<AbstractDomainPredicate> expectedPredicates,
			List<AbstractDomainPredicate> oldStates,
			List<AbstractDomainPredicate> observedPredicates) {
		int reward = 0;
		AbstractDomainPredicate[] predicates = observedPredicates
				.toArray(new AbstractDomainPredicate[0]);
		for (AbstractDomainPredicate predicate : expectedPredicates) {
			// An expected effect contradicts what was actually observed.
			if (predicate.cntraPredicate(predicates)) {
				return -Constants.MAX_SCORE;
			}
		}
		for (AbstractDomainPredicate predicate : expectedPredicates) {
			if (observedPredicates.contains(predicate)) {
				if (oldStates.contains(predicate)) {
					// Predicate already held before the action, so the model
					// mistook a precondition for an effect: penalize.
					return -Constants.MAX_SCORE;
				}
				reward += 100; // newly-added effect observed as predicted
			} else if (predicate.isDeletePreidcate()) {
				BasePredicate negPredicate = predicate.negPredicate();
				if (observedPredicates.contains(negPredicate)) {
					// The effect the model expects to delete is still observed.
					return -Constants.MAX_SCORE;
				} else if (oldStates.contains(negPredicate)) {
					// Deleted as expected: it held before and is gone now.
					reward += 50;
				}
			}
		}
		return reward;
	}

	/** @return the human-readable name and version of this algorithm */
	@Override
	public String getName() {
		return "Ralearn 1.0";
	}

	/**
	 * Prunes unlikely action models once an action has accumulated more than
	 * {@code stopModelCount} candidates. Strategy "least" removes all models
	 * with the minimum Q value, "middle" removes all models below the median
	 * distinct Q value, "none" disables pruning.
	 */
	public void innerFilter() {
		RLConfig config = BeanManager.getRLConfig();
		if (config.getFilterStrategy().equalsIgnoreCase("none"))
			return;
		for (Entry<String, Set<BaseActionModelDef>> entry : this.mapName2Models
				.entrySet()) {
			if (entry.getValue().size() > config.getStopModelCount()) {
				Set<BaseActionModelDef> list = entry.getValue();
				List<BaseActionModelDef> toRemove = new ArrayList<BaseActionModelDef>();
				if (config.getFilterStrategy().equalsIgnoreCase("least")) {
					int minQ = 1000;
					for (BaseActionModelDef def : list) {
						if (def.getQValue() < minQ)
							minQ = def.getQValue();
					}
					for (BaseActionModelDef def : list) {
						if (def.getQValue() == minQ)
							toRemove.add(def);
					}
				} else if (config.getFilterStrategy()
						.equalsIgnoreCase("middle")) {
					// Median over distinct Q values, not over models.
					List<Integer> values = new ArrayList<Integer>();
					for (BaseActionModelDef def : list) {
						if (!values.contains(def.getQValue())) {
							values.add(def.getQValue());
						}
					}
					Collections.sort(values);
					int middleValue = values.get(values.size() / 2);
					for (BaseActionModelDef def : list) {
						if (def.getQValue() < middleValue)
							toRemove.add(def);
					}
				}
				list.removeAll(toRemove);
			}
		}
	}

	/**
	 * Logs the best (highest-Q) model learned for each action and the fraction
	 * of actions whose best model matches the ground-truth model held by the
	 * server environment.
	 */
	public void printFinalActionModel() {
		int rightModelCount = 0;
		for (Entry<String, Set<BaseActionModelDef>> entry : this.mapName2Models
				.entrySet()) {
			int maxQ = -1;
			BaseActionModelDef model = null;
			for (BaseActionModelDef def : entry.getValue()) {
				if (def.getQValue() > maxQ) {
					maxQ = def.getQValue();
					model = def;
				}
			}
			LOG.error("model count:" + entry.getValue().size() + ",result:"
					+ model);
			// model is null when every candidate was pruned; previously this
			// threw a NullPointerException.
			if (model != null
					&& ServerManager.instance().getEnvironmentById(1).mapNameActionModelDef
							.get(entry.getKey()).toStringIngnoreQ()
							.equals(model.toStringIngnoreQ()))
				rightModelCount++;
		}
		LOG.error("rate:" + rightModelCount * 1.0f / this.mapName2Models.size());
	}

	/**
	 * Cheap pre-pass: for one randomly chosen action, removes every candidate
	 * model that claims an action is applicable in the current state while the
	 * environment says it is not.
	 *
	 * @param handler bridge to the simulator / environment
	 */
	@Override
	public void filter(IRLAgentHandler handler) {
		List<String> actionSignature = new ArrayList<String>();
		actionSignature.addAll(this.mapName2Models.keySet());
		// Randomly select an action a.
		List<AbstractDomainPredicate> currentStates = handler.observe();
		String actionName = actionSignature.get(random
				.nextInt(actionSignature.size()));
		Set<BaseActionModelDef> toRemoveModels = new HashSet<BaseActionModelDef>();
		for (BaseActionModelDef model : this.mapName2Models.get(actionName)) {
			LOG.debug(model);
			Set<BaseAction> actions = this.getAllPossiableAction(model,
					currentStates);
			for (BaseAction action : actions) {
				if (BaseActionModelDef.canDo(action, model, currentStates)
						&& !handler.canDo(action.toString()))
					toRemoveModels.add(model);
				// NOTE(review): this break is unconditional, so only the first
				// grounded action of each model is ever checked. Behavior kept
				// as-is — confirm whether the break belongs inside the if.
				break;
			}
		}
		this.removeAllActionModels(actionName, toRemoveModels);
	}

	/**
	 * Entry point: initializes the algorithm from the configured domain file
	 * and trains either a single agent on the current thread or
	 * {@code agentCount} agents on a fixed thread pool, then prints the final
	 * learned models.
	 */
	public static void main(String[] args) {

		final Ralearn algorithm = new Ralearn();
		final RLConfig config = BeanManager.getRLConfig();
		IRLAgentHandler rlAgentHandler = (IRLAgentHandler) BaseClientSimulatorHandler
				.instance();
		algorithm.init(rlAgentHandler.init(config.getDomainFile(), ""));

		System.out.println("client is running ...");
		if (config.getAgentCount() <= 1) {
			run(algorithm, config, rlAgentHandler);
		} else {
			ExecutorService pool = Executors.newFixedThreadPool(config
					.getAgentCount());
			for (int p = 0; p < config.getAgentCount(); p++) {
				final int simulatorId = p + 1;
				pool.execute(new Runnable() {
					@Override
					public void run() {
						Ralearn.run(algorithm, config,
								new RLMultiThreadAgentHandler(1, simulatorId));
					}
				});
			}
			pool.shutdown();
			try {
				// Wait until all training tasks finish.
				while (!pool.awaitTermination(2, TimeUnit.SECONDS)) {
					// keep waiting
				}
			} catch (InterruptedException e) {
				// Restore the interrupt flag instead of swallowing it.
				Thread.currentThread().interrupt();
				LOG.error("interrupted while waiting for agents", e);
			}
		}
		algorithm.printFinalActionModel();
	}

	/**
	 * Trains the algorithm on every configured problem file for the configured
	 * number of episodes, periodically applying {@link #innerFilter()}, and
	 * logs the goal-reaching rate per problem.
	 *
	 * @param algorithm the shared learner instance
	 * @param config runtime configuration
	 * @param rlAgentHandler the (per-agent) environment handler
	 */
	private static void run(final Ralearn algorithm, final RLConfig config,
			IRLAgentHandler rlAgentHandler) {
		int trainCount = 0;
		String[] problems = config.getProblemFiles().split(";");
		for (int i = 0; i < problems.length; i++) {
			int remainTime = config.getTrainTimePerProblem();
			int reachGoalCount = 0;
			while (remainTime-- > 0) {
				rlAgentHandler.nextProblem(problems[i]);
				try {
					algorithm.filter(rlAgentHandler);
					algorithm.run(rlAgentHandler);
					Thread.sleep(1000);
				} catch (InterruptedException e) {
					// Restore the interrupt flag instead of swallowing it.
					Thread.currentThread().interrupt();
					LOG.error("training interrupted", e);
				} catch (AlgorithmEndException e) {
					// Expected control flow: the episode reached the goal.
					LOG.debug("episode ended", e);
					reachGoalCount++;
				}
			}
			trainCount++;
			if (trainCount > config.getFilterLimit())
				algorithm.innerFilter();
			LOG.error("total:" + config.getTrainTimePerProblem() + ",reach goal:"
					+ reachGoalCount);
		}
	}

}
