package com.bff.gaia.optimizer.traversals;

import com.esotericsoftware.minlog.Log;
import com.bff.gaia.api.common.InvalidProgramException;
import com.bff.gaia.api.common.distributions.CommonRangeBoundaries;
import com.bff.gaia.api.common.operators.UnaryOperatorInformation;
import com.bff.gaia.api.common.operators.base.GroupReduceOperatorBase;
import com.bff.gaia.api.common.operators.base.MapPartitionOperatorBase;
import com.bff.gaia.api.common.operators.util.FieldList;
import com.bff.gaia.api.common.typeinfo.TypeInformation;
import com.bff.gaia.api.common.typeutils.TypeComparatorFactory;
import com.bff.gaia.api.java.functions.SampleInCoordinator;
import com.bff.gaia.api.java.functions.SampleInPartition;
import com.bff.gaia.api.java.sampling.IntermediateSampleData;
import com.bff.gaia.api.java.typeutils.TupleTypeInfo;
import com.bff.gaia.api.java.typeutils.TypeExtractor;
import com.bff.gaia.optimizer.costs.Costs;
import com.bff.gaia.optimizer.dag.GroupReduceNode;
import com.bff.gaia.optimizer.dag.MapPartitionNode;
import com.bff.gaia.optimizer.dag.TempMode;
import com.bff.gaia.optimizer.dataproperties.GlobalProperties;
import com.bff.gaia.optimizer.dataproperties.LocalProperties;
import com.bff.gaia.optimizer.plan.*;
import com.bff.gaia.optimizer.util.Utils;
import com.bff.gaia.runtime.io.network.DataExchangeMode;
import com.bff.gaia.runtime.maqy.*;
import com.bff.gaia.runtime.operators.DriverStrategy;
import com.bff.gaia.runtime.operators.shipping.ShipStrategyType;
import com.bff.gaia.runtime.operators.udf.GetBoundaryForPartitioner;
import com.bff.gaia.util.Visitor;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Plan rewriter that adjusts data distribution according to network bandwidth.
 *
 * <p>For every channel that ships data with {@link ShipStrategyType#PARTITION_RANGE}
 * and has no user-supplied data distribution, it injects a sampling/statistics
 * sub-plan in front of the shuffle and replaces the shuffle's partitioner with a
 * bandwidth-aware custom {@code BoundaryPartitioner}.
 */
public class BandwidthRewriter implements Visitor<PlanNode> {

	final static long SEED = 0;
	// Display names of the operators injected by this rewriter (runtime strings — do not change).
	final static String AP_NAME = "BandwidthPartition: AttachLocation";
	final static String SP_NAME = "BandwidthPartition: Statistic";
	final static String SIP_NAME = "BandwidthPartition: LocalSample";
	final static String SIC_NAME = "BandwidthPartition: GlobalSample";
	final static String RB_NAME = "BandwidthPartition: Histogram";
	final static String ARI_NAME = "BandwidthPartition: PreparePartition";
	final static String PR_NAME = "BandwidthPartition: Partition";

	// Number of samples drawn per target partition; the value may need tuning.
	final static int SAMPLES_PER_PARTITION = 1000;

	final OptimizedPlan plan;
	// Iteration nodes whose step functions were already traversed, to avoid visiting them twice.
	final Set<IterationPlanNode> visitedIterationNodes;

	/**
	 * Creates a rewriter for the given optimized plan.
	 *
	 * @param plan the plan whose range-partitioned channels will be rewritten in place
	 */
	public BandwidthRewriter(OptimizedPlan plan) {
		this.plan = plan;
		this.visitedIterationNodes = new HashSet<>();
	}

	@Override
	public boolean preVisit(PlanNode visitable) {
		// Always descend; all rewriting happens in postVisit.
		return true;
	}

	/**
	 * Rewrites every range-partitioned input channel of the visited node that has no
	 * user-supplied data distribution, wiring the new sampling sub-plan into the DAG.
	 *
	 * @throws InvalidProgramException if such a channel lies on a dynamic (iteration) path
	 */
	@Override
	public void postVisit(PlanNode node) {

		// Recurse into the step function of an iteration exactly once.
		if(node instanceof IterationPlanNode) {
			IterationPlanNode iNode = (IterationPlanNode)node;
			if(!visitedIterationNodes.contains(iNode)) {
				visitedIterationNodes.add(iNode);
				iNode.acceptForStepFunction(this);
			}
		}
		// Inspect all input channels of the current plan node.
		final Iterable<Channel> inputChannels = node.getInputs();
		for (Channel channel : inputChannels) {
			ShipStrategyType shipStrategy = channel.getShipStrategy();
			// Make sure we only optimize the DAG for range partition, and do not optimize multi times.
			if (shipStrategy == ShipStrategyType.PARTITION_RANGE) {
				// A non-null data distribution was supplied by the user; leave the channel alone.
				if(channel.getDataDistribution() == null) {
					if (node.isOnDynamicPath()) {
						throw new InvalidProgramException("Bandwidth Partitioning not supported within iterations if users do not supply the data distribution.");
					}
					// Rewrite the range-partition channel, then detach the original channel
					// from its source and attach the newly created channels instead.
					PlanNode channelSource = channel.getSource();
					List<Channel> newSourceOutputChannels = rewriteRangePartitionChannel(channel);
					channelSource.getOutgoingChannels().remove(channel);
					channelSource.getOutgoingChannels().addAll(newSourceOutputChannels);
				}
			}
		}
	}

	/** The three branches of the bandwidth-based task assignment are wired as follows:
	 *                                                           pipelined
	 *          +--> attach location info(source parallelism) ——————————————> group by location and calculate proportions(1)
	 *          |                                                                                                        |
	 *		    |																	                                     | broadcast(proportions)
	 *		    |					   				    pipelined                     batch                              |
	 *          +--> local sample(source parallelism) ————————————> global sample(1) ———————> calculate boundaries(1) <--+
	 *          |                                                                                                 |
	 *          |                                                                                                 | broadcast(boundaries)
	 * 		    |							batch						                                          |
	 * {source} +—————————————————————————————————————————————————————> prepared partition(source parallelism) <--+
	 *                                                                                |
	 *                                                                                +—————————————————————————————————————————————————————————> {target}
	 *
	 * @param channel the PARTITION_RANGE channel to rewrite; it is reused as the final
	 *                custom-partitioned channel into the original target
	 * @return the new outgoing channels that must replace {@code channel} at its source
	 **/
	private List<Channel> rewriteRangePartitionChannel(Channel channel) {
		// New outgoing channels of the operator that used to feed the range partition.
		final List<Channel> sourceNewOutputChannels = new ArrayList<>();
		// sourceNode is the operator preceding the range partition.
		final PlanNode sourceNode = channel.getSource();
		// targetNode is the range-partitioned operator itself.
		final PlanNode targetNode = channel.getTarget();
		final int sourceParallelism = sourceNode.getParallelism();
		final int targetParallelism = targetNode.getParallelism();
		// The injected helper operators should not affect cost-based decisions.
		final Costs defaultZeroCosts = new Costs(0, 0, 0);
		final TypeComparatorFactory<?> comparator = Utils.getShipComparator(channel, this.plan.getOriginalPlan().getExecutionConfig());

		Log.info("hfy", String.format("BandwidthRewriter: source parallelism=%d, target parallelism=%d", sourceParallelism, targetParallelism));

		// ----------------------- statistics branch ------------------------------

		// 1. Attach location info to every record.
		final AttachPartition attachPartition = new AttachPartition();
		final TypeInformation<?> sourceOutputType = sourceNode.getOptimizerNode().getOperator().getOperatorInfo().getOutputType();
		final TypeInformation<?> apTypeInformation = new TupleTypeInfo<>(TypeExtractor.getForClass(HostInfo.class), TypeExtractor.getForClass(Long.class));
		final UnaryOperatorInformation apOperatorInformation = new UnaryOperatorInformation(sourceOutputType, apTypeInformation);
		final MapPartitionOperatorBase apOperatorBase = new MapPartitionOperatorBase(attachPartition, apOperatorInformation, AP_NAME);
		final MapPartitionNode apNode = new MapPartitionNode(apOperatorBase);
		final Channel apChannel = new Channel(sourceNode, TempMode.NONE);
		apChannel.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
		final SingleInputPlanNode apPlanNode = new SingleInputPlanNode(apNode, AP_NAME, apChannel, DriverStrategy.MAP_PARTITION);
		apNode.setParallelism(sourceParallelism);
		apPlanNode.setParallelism(sourceParallelism);
		apPlanNode.initProperties(new GlobalProperties(), new LocalProperties());
		apPlanNode.setCosts(defaultZeroCosts);
		apChannel.setTarget(apPlanNode);
		this.plan.getAllNodes().add(apPlanNode);
		sourceNewOutputChannels.add(apChannel);

		// 2. Count the amount of data in each pipeline (a single statistic task, parallelism 1).
		final StatisticPartition statisticPartition = new StatisticPartition();
		final TypeInformation<?> spTypeInformation = TypeExtractor.getForClass(ArrayList.class);
		final UnaryOperatorInformation spOperatorInformation = new UnaryOperatorInformation(apTypeInformation, spTypeInformation);
		final MapPartitionOperatorBase spOperatorBase = new MapPartitionOperatorBase(statisticPartition, spOperatorInformation, SP_NAME);
		final MapPartitionNode spNode = new MapPartitionNode(spOperatorBase);
		final Channel spChannel = new Channel(apPlanNode, TempMode.NONE);
		spChannel.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
		final SingleInputPlanNode spPlanNode = new SingleInputPlanNode(spNode, SP_NAME, spChannel, DriverStrategy.MAP_PARTITION);
		spNode.setParallelism(1);
		spPlanNode.setParallelism(1);
		spPlanNode.initProperties(new GlobalProperties(), new LocalProperties());
		spPlanNode.setCosts(defaultZeroCosts);
		spChannel.setTarget(spPlanNode);
		apPlanNode.addOutgoingChannel(spChannel);
		this.plan.getAllNodes().add(spPlanNode);

		// ----------------------- sampling branch ------------------------------

		// 1. Fixed size sample in each partitions.
		final int sampleSize = SAMPLES_PER_PARTITION * targetParallelism;
		// SampleInPartition (a RichMapPartitionFunction) draws a per-partition sample.
		final SampleInPartition sampleInPartition = new SampleInPartition(false, sampleSize, SEED);
		// IntermediateSampleData carries the sampled element together with its weight.
		final TypeInformation<IntermediateSampleData> isdTypeInformation = TypeExtractor.getForClass(IntermediateSampleData.class);
		final UnaryOperatorInformation sipOperatorInformation = new UnaryOperatorInformation(sourceOutputType, isdTypeInformation);
		final MapPartitionOperatorBase sipOperatorBase = new MapPartitionOperatorBase(sampleInPartition, sipOperatorInformation, SIP_NAME);
		final MapPartitionNode sipNode = new MapPartitionNode(sipOperatorBase);
		// New channel whose source is the operator preceding the range partition.
		final Channel sipChannel = new Channel(sourceNode, TempMode.NONE);
		sipChannel.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
		final SingleInputPlanNode sipPlanNode = new SingleInputPlanNode(sipNode, SIP_NAME, sipChannel, DriverStrategy.MAP_PARTITION);
		sipNode.setParallelism(sourceParallelism);
		sipPlanNode.setParallelism(sourceParallelism);
		sipPlanNode.initProperties(new GlobalProperties(), new LocalProperties());
		sipPlanNode.setCosts(defaultZeroCosts);
		sipChannel.setTarget(sipPlanNode);
		this.plan.getAllNodes().add(sipPlanNode);
		sourceNewOutputChannels.add(sipChannel);

		// 2. Fixed size sample in a single coordinator.
		// SampleInCoordinator (a GroupReduceFunction) merges the per-partition samples by weight.
		final SampleInCoordinator sampleInCoordinator = new SampleInCoordinator(false, sampleSize, SEED);
		final UnaryOperatorInformation sicOperatorInformation = new UnaryOperatorInformation(isdTypeInformation, sourceOutputType);
		final GroupReduceOperatorBase sicOperatorBase = new GroupReduceOperatorBase(sampleInCoordinator, sicOperatorInformation, SIC_NAME);
		final GroupReduceNode sicNode = new GroupReduceNode(sicOperatorBase);
		final Channel sicChannel = new Channel(sipPlanNode, TempMode.NONE);
		sicChannel.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
		final SingleInputPlanNode sicPlanNode = new SingleInputPlanNode(sicNode, SIC_NAME, sicChannel, DriverStrategy.ALL_GROUP_REDUCE);
		// Parallelism must be 1: the coordinator gathers all weighted samples into one task.
		sicNode.setParallelism(1);
		sicPlanNode.setParallelism(1);
		sicPlanNode.initProperties(new GlobalProperties(), new LocalProperties());
		sicPlanNode.setCosts(defaultZeroCosts);
		sicChannel.setTarget(sicPlanNode);
		sipPlanNode.addOutgoingChannel(sicChannel);
		this.plan.getAllNodes().add(sicPlanNode);

		// 3. Use sampled data to build range boundaries.
		// PercentRangeBoundaryBuilder computes the split boundaries; unlike the stock
		// range-partition rewrite, it also takes the proportions broadcast below.
		final PercentRangeBoundaryBuilder rangeBoundaryBuilder = new PercentRangeBoundaryBuilder(comparator, targetParallelism);
		final TypeInformation<CommonRangeBoundaries> rbTypeInformation = TypeExtractor.getForClass(CommonRangeBoundaries.class);
		final UnaryOperatorInformation rbOperatorInformation = new UnaryOperatorInformation(sourceOutputType, rbTypeInformation);
		final MapPartitionOperatorBase rbOperatorBase = new MapPartitionOperatorBase(rangeBoundaryBuilder, rbOperatorInformation, RB_NAME);
		final MapPartitionNode rbNode = new MapPartitionNode(rbOperatorBase);
		final Channel rbChannel = new Channel(sicPlanNode, TempMode.NONE);
		rbChannel.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.BATCH);
		final SingleInputPlanNode rbPlanNode = new SingleInputPlanNode(rbNode, RB_NAME, rbChannel, DriverStrategy.MAP_PARTITION);
		rbNode.setParallelism(1);
		rbPlanNode.setParallelism(1);
		rbPlanNode.initProperties(new GlobalProperties(), new LocalProperties());
		rbPlanNode.setCosts(defaultZeroCosts);
		rbChannel.setTarget(rbPlanNode);
		sicPlanNode.addOutgoingChannel(rbChannel);
		this.plan.getAllNodes().add(rbPlanNode);

		// Broadcast the per-location proportions from the statistics branch into the
		// boundary builder.
		final NamedChannel rbBroadcastChannel = new NamedChannel("Proportions", spPlanNode);
		rbBroadcastChannel.setShipStrategy(ShipStrategyType.BROADCAST, DataExchangeMode.PIPELINED);
		rbBroadcastChannel.setTarget(rbPlanNode);
		List<NamedChannel> rbBroadcastChannels = new ArrayList<>(1);
		rbBroadcastChannels.add(rbBroadcastChannel);
		rbPlanNode.setBroadcastInputs(rbBroadcastChannels);


		// ------------------ data-processing branch (main branch) ---------------------

		// 1. Take range boundaries as broadcast input and take the tuple of partition id and record as output.
		final GetBoundaryForPartitioner getBoundaryForPartitioner = new GetBoundaryForPartitioner();
		final UnaryOperatorInformation ariOperatorInformation = new UnaryOperatorInformation(sourceOutputType, sourceOutputType);
		final MapPartitionOperatorBase ariOperatorBase = new MapPartitionOperatorBase(getBoundaryForPartitioner, ariOperatorInformation, ARI_NAME);
		final MapPartitionNode ariNode = new MapPartitionNode(ariOperatorBase);
		// New channel whose source is the operator preceding the range partition.
		final Channel ariChannel = new Channel(sourceNode, TempMode.NONE);
		// To avoid deadlock, set the DataExchangeMode of channel between source node and this to Batch.
		ariChannel.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.BATCH);
		final SingleInputPlanNode ariPlanNode = new SingleInputPlanNode(ariNode, ARI_NAME, ariChannel, DriverStrategy.MAP_PARTITION);
		ariNode.setParallelism(sourceParallelism);
		ariPlanNode.setParallelism(sourceParallelism);
		ariPlanNode.initProperties(new GlobalProperties(), new LocalProperties());
		ariPlanNode.setCosts(defaultZeroCosts);
		ariChannel.setTarget(ariPlanNode);
		this.plan.getAllNodes().add(ariPlanNode);
		// This channel replaces the original range-partition channel at the source.
		sourceNewOutputChannels.add(ariChannel);

		// Broadcast the computed boundaries (produced by rbPlanNode) into the
		// prepare-partition operator, which uses them to route each record.
		final NamedChannel ariBroadcastChannel = new NamedChannel("RangeBoundaries", rbPlanNode);
		ariBroadcastChannel.setShipStrategy(ShipStrategyType.BROADCAST, DataExchangeMode.PIPELINED);
		ariBroadcastChannel.setTarget(ariPlanNode);
		List<NamedChannel> ariBroadcastChannels = new ArrayList<>(1);
		ariBroadcastChannels.add(ariBroadcastChannel);
		ariPlanNode.setBroadcastInputs(ariBroadcastChannels);

		// Reuse the original channel: its source becomes the prepare-partition node and
		// it now ships with a custom, boundary-based partitioner instead of range partitioning.
		BoundaryPartitioner boundaryPartitioner = new BoundaryPartitioner(comparator.createComparator());
		final FieldList keys = channel.getShipStrategyKeys();
		channel.setSource(ariPlanNode);
		channel.setShipStrategy(ShipStrategyType.PARTITION_CUSTOM, keys, boundaryPartitioner, DataExchangeMode.PIPELINED);
		ariPlanNode.addOutgoingChannel(channel);

		return sourceNewOutputChannels;
	}

}
