package com.surfilter.massdata.spark.task.ipcheck;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;

import com.act.sparkanalyz.service.impl.SparkService.OutQueueEntity;
import com.act.sparkanalyz.task.ISparkTask;
import com.surfilter.massdata.spark.bean.ipcheck.IpBean;
import com.surfilter.massdata.spark.util.ipcheck.IpCheckAnalyz;
import com.surfilter.massdata.spark.util.ipcheck.IpCheckSync;
import com.surfilter.massdata.spark.util.ipcheck.IpCheckUploadVsActiveIp;
import com.surfilter.massdata.spark.util.ipcheck.IpCheckUtil;

/**
 * ipCheck pipeline business-processing task.
 *
 * @author hutao
 * @date created 2017-06-05 14:57:37
 * @version 1.0
 */
public class IpCheckProcessTask implements ISparkTask {
	private static final Log log = LogFactory.getLog(IpCheckProcessTask.class);
	private static final long serialVersionUID = 1L;
	// Output-queue names for the emitted DataFrames (presumably injected by the
	// task framework — TODO confirm how these fields are populated).
	private String source_data;
	private String allocate_data;
	// Lookup keys for the input DataFrames in the dataFrames map.
	private String dfKey1; // registered ("beian") source table
	private String dfKey2; // registered ("beian") allocation table
	private String dfKey3; // reported ("upload") source table
	private String dfKey4; // reported ("upload") allocation table
	private String dfKey5; // active-IP table; the comparison path is currently disabled (see NOTE below)

	/**
	 * Runs the ipCheck business flow:
	 * <ol>
	 *   <li>resets the working/conflict tables (BR7003/BR7004/BR7006/BR7007);</li>
	 *   <li>splits each of the four input tables into valid and invalid records;</li>
	 *   <li>persists all invalid records to the validation-conflict table;</li>
	 *   <li>runs the conflict analysis on the registered and reported libraries,
	 *       then synchronizes the two;</li>
	 *   <li>emits the synchronized source and allocation tables as DataFrames.</li>
	 * </ol>
	 *
	 * @param dataFrames input DataFrames keyed by {@code dfKey1}..{@code dfKey4}
	 * @param commandMap task parameters (not used by this task)
	 * @return output entities carrying the synchronized source/allocation
	 *         DataFrames; empty when processing fails (the error is logged,
	 *         matching the task framework's best-effort contract)
	 */
	@Override
	public List<OutQueueEntity> execute(Map<String, DataFrame> dataFrames, Map<String, String> commandMap) {
		log.info("==============IpCheckProcessTask begin====================");
		List<OutQueueEntity> outList = new ArrayList<OutQueueEntity>();
		try {
			// Reset the result/conflict tables before this run.
			IpCheckUtil.initPartition("BR7003");
			IpCheckUtil.initPartition("BR7004");
			IpCheckUtil.truncateTable("BR7003");
			IpCheckUtil.truncateTable("BR7004");
			IpCheckUtil.truncateTable("BR7006");
			IpCheckUtil.truncateTable("BR7007");

			DataFrame beianSourceDF = dataFrames.get(dfKey1);    // registered source table
			DataFrame beianAllocateDF = dataFrames.get(dfKey2);  // registered allocation table
			DataFrame uploadSourceDF = dataFrames.get(dfKey3);   // reported source table
			DataFrame uploadAllocateDF = dataFrames.get(dfKey4); // reported allocation table
			SQLContext sc = beianSourceDF.sqlContext();

			// Registered source: split into valid / invalid records.
			JavaRDD<IpBean> beianSourceRdd = IpCheckUtil.getIpBeanRdd(beianSourceDF);
			JavaRDD<IpBean> rightBeianSourceRdd = IpCheckUtil.getFilterIpBeanRdd(beianSourceRdd, true);
			JavaRDD<IpBean> errBeianSourceRdd = IpCheckUtil.getFilterIpBeanRdd(beianSourceRdd, false);

			// Registered allocation: split into valid / invalid records.
			// repartition(20) on the valid half — presumably to balance the
			// downstream analysis/join work; TODO confirm the chosen parallelism.
			JavaRDD<IpBean> beianAllocateRdd = IpCheckUtil.getIpBeanRdd(beianAllocateDF);
			JavaRDD<IpBean> rightBeianAllocateRdd = IpCheckUtil.getFilterIpBeanRdd(beianAllocateRdd, true).repartition(20);
			JavaRDD<IpBean> errBeianAllocateRdd = IpCheckUtil.getFilterIpBeanRdd(beianAllocateRdd, false);

			if (log.isDebugEnabled()) {
				log.debug("registered source partitions=" + rightBeianSourceRdd.getNumPartitions()
						+ ", registered allocation partitions=" + rightBeianAllocateRdd.getNumPartitions());
			}

			// Reported source: split into valid / invalid records.
			JavaRDD<IpBean> uploadSourceRdd = IpCheckUtil.getIpBeanRdd(uploadSourceDF);
			JavaRDD<IpBean> righUploadSourceRdd = IpCheckUtil.getFilterIpBeanRdd(uploadSourceRdd, true);
			JavaRDD<IpBean> errUploadSourceRdd = IpCheckUtil.getFilterIpBeanRdd(uploadSourceRdd, false);

			// Reported allocation: split into valid / invalid records (same
			// repartition(20) as the registered allocation side).
			JavaRDD<IpBean> uploadAllcoateRdd = IpCheckUtil.getIpBeanRdd(uploadAllocateDF);
			JavaRDD<IpBean> rightUploadAllocateRdd = IpCheckUtil.getFilterIpBeanRdd(uploadAllcoateRdd, true).repartition(20);
			JavaRDD<IpBean> errUploadAllocateRdd = IpCheckUtil.getFilterIpBeanRdd(uploadAllcoateRdd, false);

			if (log.isDebugEnabled()) {
				log.debug("reported source partitions=" + righUploadSourceRdd.getNumPartitions()
						+ ", reported allocation partitions=" + rightUploadAllocateRdd.getNumPartitions());
			}

			// ---- Persist all IP-validation failures to the conflict table ----
			JavaRDD<IpBean> allErrorIpBeanRdd = errBeianSourceRdd
					.union(errBeianAllocateRdd)
					.union(errUploadSourceRdd)
					.union(errUploadAllocateRdd);
			if (log.isDebugEnabled()) {
				log.debug("validation-error RDD partitions=" + allErrorIpBeanRdd.getNumPartitions());
			}
			DataFrame errDF = sc.createDataFrame(allErrorIpBeanRdd, IpBean.class);
			IpCheckUtil.writeValidateConflictToTable(errDF);

			// ---- Conflict analysis per library ----
			// Each library's valid records go through source-conflict comparison,
			// allocation-conflict comparison, and the cross scans
			// (source-vs-allocation / allocation-vs-source).
			JavaRDD<IpBean> beianRdd = IpCheckAnalyz.dealBusiness(
					rightBeianSourceRdd.union(rightBeianAllocateRdd), sc);
			JavaRDD<IpBean> uploadRdd = IpCheckAnalyz.dealBusiness(
					righUploadSourceRdd.union(rightUploadAllocateRdd), sc);
			if (log.isDebugEnabled()) {
				log.debug("post-analysis partitions: beianRdd=" + beianRdd.getNumPartitions()
						+ ", uploadRdd=" + uploadRdd.getNumPartitions());
				// count() forces a full evaluation — only pay for it when debugging.
				log.debug("record total before sync=" + beianRdd.union(uploadRdd).count());
			}

			// ---- Synchronize the registered and reported libraries ----
			// (registered-vs-reported source and allocation comparisons in both directions)
			JavaRDD<IpBean> syncRdd = IpCheckSync.dealBusiness(beianRdd.union(uploadRdd), sc);
			if (log.isDebugEnabled()) {
				log.debug("syncRdd partitions=" + syncRdd.getNumPartitions()
						+ ", records=" + syncRdd.count());
			}

			// Split the synchronized result back into source and allocation tables.
			JavaRDD<IpBean> sourceRdd = IpCheckUtil.getFilterUploadSourceIpBeanRdd(syncRdd);
			JavaRDD<IpBean> allocateRdd = IpCheckUtil.getFilterUploadAllocateIpBeanRdd(syncRdd);
			if (log.isDebugEnabled()) {
				log.debug("sourceRdd partitions=" + sourceRdd.getNumPartitions()
						+ ", records=" + sourceRdd.count());
				log.debug("allocateRdd partitions=" + allocateRdd.getNumPartitions()
						+ ", records=" + allocateRdd.count());
			}

			DataFrame sourceDF = sc.createDataFrame(sourceRdd, IpBean.class);
			DataFrame allocateDF = sc.createDataFrame(allocateRdd, IpBean.class);
			outList.add(new OutQueueEntity(source_data, sourceDF));
			outList.add(new OutQueueEntity(allocate_data, allocateDF));

			// NOTE(review): a reported-library-vs-active-IP comparison
			// (IpCheckUploadVsActiveIp.dealBusiness on dfKey5) was previously
			// wired here but is disabled; re-enable it here if that feature returns.
			log.info("==============IpCheckProcessTask end====================");
		} catch (Exception e) {
			// Best-effort task: log and return whatever was produced so the
			// surrounding pipeline keeps running (matches framework convention).
			if (log.isErrorEnabled()) {
				log.error(e.getMessage(), e);
			}
		}

		return outList;
	}

}
