package hit.edu.zjc.DataIntegration;
import hit.edu.zjc.Operator.Build2GramMapOperatorDescriptor;
import hit.edu.zjc.Operator.FrameFileScanOperatorDescriptor;
import hit.edu.zjc.Operator.SourceOperatorDescriptor;
import hit.edu.zjc.Operator.TestOperatorDescriptor;
import hit.edu.zjc.Tool.DataBaseTool;
import hit.edu.zjc.Tool.HyracksJobEx;
import hit.edu.zjc.marshalling.ByteSerializerDeserializer;
import hit.edu.zjc.parsers.ByteParserFactory;
import hit.edu.zjc.parsers.ByteTupleParserFactory;
import hit.edu.zjc.DataClean.DataCleaner;
import java.io.*;
import java.sql.*;
import java.net.*;
import edu.uci.ics.hyracks.api.constraints.PartitionConstraintHelper;
import edu.uci.ics.hyracks.api.dataflow.IConnectorDescriptor;
import edu.uci.ics.hyracks.api.dataflow.IOperatorDescriptor;
import edu.uci.ics.hyracks.api.dataflow.value.IBinaryComparatorFactory;
import edu.uci.ics.hyracks.api.dataflow.value.IBinaryHashFunctionFactory;
import edu.uci.ics.hyracks.api.dataflow.value.ISerializerDeserializer;
import edu.uci.ics.hyracks.api.dataflow.value.RecordDescriptor;
import edu.uci.ics.hyracks.api.io.FileReference;
import edu.uci.ics.hyracks.api.job.JobSpecification;
import edu.uci.ics.hyracks.data.std.accessors.PointableBinaryComparatorFactory;
import edu.uci.ics.hyracks.data.std.accessors.PointableBinaryHashFunctionFactory;
import edu.uci.ics.hyracks.data.std.primitive.IntegerPointable;
import edu.uci.ics.hyracks.data.std.primitive.UTF8StringPointable;
import edu.uci.ics.hyracks.data.std.primitive.LongPointable;
import edu.uci.ics.hyracks.dataflow.common.data.marshalling.IntegerSerializerDeserializer;
import edu.uci.ics.hyracks.dataflow.common.data.marshalling.UTF8StringSerializerDeserializer;
import edu.uci.ics.hyracks.dataflow.common.data.parsers.IValueParserFactory;
import edu.uci.ics.hyracks.dataflow.common.data.parsers.IntegerParserFactory;
import edu.uci.ics.hyracks.dataflow.common.data.parsers.UTF8StringParserFactory;
import edu.uci.ics.hyracks.dataflow.common.data.partition.FieldHashPartitionComputerFactory;
import edu.uci.ics.hyracks.dataflow.std.connectors.MToNPartitioningConnectorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.connectors.MToNPartitioningMergingConnectorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.connectors.OneToOneConnectorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.file.ConstantFileSplitProvider;
import edu.uci.ics.hyracks.dataflow.std.file.DelimitedDataTupleParserFactory;
import edu.uci.ics.hyracks.dataflow.std.file.FileScanOperatorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.file.FileSplit;
import edu.uci.ics.hyracks.dataflow.std.file.FrameFileWriterOperatorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.file.IFileSplitProvider;
import edu.uci.ics.hyracks.dataflow.std.file.LineFileWriteOperatorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.file.PlainFileWriterOperatorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.file.RecordFileScanOperatorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.misc.SplitOperatorDescriptor;
import edu.uci.ics.hyracks.dataflow.std.sort.ExternalSortOperatorDescriptor;


public class DataInteDis// Integrates the source data and distributes it across the cluster via Hyracks
{
	public static final String DataDir="/home/chinahitzjc/Hyracks_Data";
	
	public static final String filename="DBData.txt";// name of the file holding the complete integrated data
	public static final String split_filename="sp_Data.txt";// name of the per-node file produced by partitioning
	public static final String SumValid_filename="CompletedValidData.txt";// summary file after outlier / missing-value cleaning
	public static int Col_Number;// number of source data columns + 2 (added surrogate-key and provenance columns)
	/*
	 * Ensures that DataDir exists as a directory: if a regular file occupies
	 * the path it is deleted and replaced by a directory; if nothing exists
	 * there, the directory (including any missing parents) is created.
	 */
	public static void validateDataDir()
	{
		File dir = new File(DataDir);
		if (!dir.exists()) {
			dir.mkdirs();
		} else if (dir.isFile()) {
			// A plain file is squatting on the path; replace it with a directory.
			dir.delete();
			dir.mkdirs();
		}
	}
	
	
	/*
	 * Integrates the records of every configured source database into one
	 * delimited text file (DataDir/filename).  Each output row has the form:
	 *   newId | original columns ... | sourceIP:sourcePort |
	 * The first column is a fresh long surrogate key (avoids primary-key
	 * clashes after integration); the last column records the originating
	 * database as "ip:port".  Sets Col_Number to the source column count
	 * plus the two added columns.  Returns true on success, false on error.
	 */
	public static boolean DataIntergrate()
	{
		String lineSep = System.getProperty("line.separator");
		try
		{
			validateDataDir();
			long newId = 1;
			File outfile = new File(DataDir + "/" + filename);
			// try-with-resources guarantees the writer is closed even when an
			// exception is thrown mid-download (the original leaked it).
			try (FileWriter fw = new FileWriter(outfile))
			{
				DBConInfo[] dbci_array = DBConInfo.GetConInfo();// validates the databases and tests the connections
				for (int i = 0; i < dbci_array.length; i++)
				{
					System.out.println("downloading the " + (i + 1) + "th database");
					DataBaseTool dbt = new DataBaseTool(dbci_array[i]);
					if (dbt.connected())
					{
						ResultSet rs = dbt.exesql("select * from " + DBConInfo.TableName);
						int colnum = rs.getMetaData().getColumnCount();
						Col_Number = colnum;
						while (rs.next())
						{
							fw.write(newId + "|");// new first column: surrogate key
							newId++;
							for (int j = 1; j <= colnum; j++)
							{
								String datastr = rs.getString(j);
								if (datastr != null)
								{
									// BUG FIX: the original called replaceAll() and discarded the
									// result (String is immutable), so nothing was ever stripped;
									// it also used "|" as a regex, which matches the empty string
									// rather than the pipe.  Strip literally and reassign, since
									// '|' is the system's column separator.
									datastr = datastr.replace(lineSep, "").replace("|", "");
									fw.write(datastr.trim());
								}
								fw.write("|");
							}
							// New last column: data provenance, formatted as ip:port.
							fw.write(dbci_array[i].dbip + ":" + dbci_array[i].dbport + "|");
							fw.write(lineSep);
						}
					}
				}
				Col_Number = Col_Number + 2;// account for the two newly added columns
				fw.flush();
			}
			return true;
		}
		catch (Exception e)
		{
			e.printStackTrace();
			return false;
		}
	}
	
	/*
	 * The database download is assumed to land on the first node (NC1).
	 * Scans the integrated file on node NC, hash-partitions the tuples on
	 * the surrogate-key column (field 0, hashed as a string) and writes one
	 * partition file named targetfilename on every cluster node.
	 * Returns the job's success flag.
	 */
	public static boolean DataDispath(String NC,String sourcefilename,String targetfilename)
	{
		JobSpecification spec = new JobSpecification();
		HyracksJobEx.initNC();

		// Single source file, located on node NC.
		FileSplit sourceSplit = new FileSplit(NC, new FileReference(new File(DataDir, sourcefilename)));
		IFileSplitProvider inputSplits = new ConstantFileSplitProvider(new FileSplit[] { sourceSplit });

		// Every field is handled as a UTF-8 string.
		ISerializerDeserializer[] fieldSerde = new ISerializerDeserializer[Col_Number];
		IValueParserFactory[] fieldParsers = new IValueParserFactory[Col_Number];
		for (int f = 0; f < Col_Number; f++) {
			fieldSerde[f] = UTF8StringSerializerDeserializer.INSTANCE;
			fieldParsers[f] = UTF8StringParserFactory.INSTANCE;
		}
		RecordDescriptor recDesc = new RecordDescriptor(fieldSerde);

		FileScanOperatorDescriptor scanner = new FileScanOperatorDescriptor(
				spec, inputSplits, new DelimitedDataTupleParserFactory(fieldParsers, '|'), recDesc);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, scanner, NC);

		// One target file per node, all sharing the same file name.
		FileSplit[] targetSplits = new FileSplit[HyracksJobEx.NC_ID_array.length];
		for (int n = 0; n < targetSplits.length; n++) {
			targetSplits[n] = new FileSplit(HyracksJobEx.NC_ID_array[n],
					new FileReference(new File(DataDir, targetfilename)));
		}
		IFileSplitProvider outSplits = new ConstantFileSplitProvider(targetSplits);
		IOperatorDescriptor writer = new PlainFileWriterOperatorDescriptor(spec, outSplits, "|");
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, writer, HyracksJobEx.NC_ID_array);

		// Partition on the new first column (the surrogate id), hashed as a string.
		int[] hashKeys = new int[] { 0 };
		IBinaryHashFunctionFactory[] hashFactories = new IBinaryHashFunctionFactory[] {
				PointableBinaryHashFunctionFactory.of(UTF8StringPointable.FACTORY) };
		IConnectorDescriptor conn = new MToNPartitioningConnectorDescriptor(spec,
				new FieldHashPartitionComputerFactory(hashKeys, hashFactories));

		spec.connect(conn, scanner, 0, writer, 0);
		spec.addRoot(writer);
		return HyracksJobEx.exe_job(spec, "DataDispatchJob");
	}
	
	/*
	 * Driver: integrate the source databases into one file, hash-partition it
	 * across the cluster, then replicate the summary file to every node.
	 */
	public static void main(String[] args)
	{
		DataIntergrate();
		// Partition the integrated file across the cluster nodes.
		DataDispath(HyracksJobEx.NC_ID_array[0], filename, split_filename);
		// Fan the summary file out to the remaining nodes.
		DataSplit();
	}

	
	/*
	 * Replicates the complete data file to every node: scans
	 * DataDir/SumValid_filename on the first node, duplicates the stream via
	 * a SplitOperatorDescriptor with one output per remaining node, and
	 * writes a full copy (same file name) on each of those nodes.
	 */
	public static void DataSplit()  {
		HyracksJobEx.initNC();
		final int outputArity = HyracksJobEx.NC_ID_array.length - 1;
		JobSpecification spec = new JobSpecification();

		File[] outputFile = new File[outputArity];
		for (int i = 0; i < outputArity; i++) {
			outputFile[i] = new File(DataDir, SumValid_filename);
		}

		// The source file is read on the first node only.
		IFileSplitProvider splitProvider = new ConstantFileSplitProvider(new FileSplit[] {
				new FileSplit(HyracksJobEx.NC_ID_array[0],
						new FileReference(new File(DataDir, SumValid_filename))) });

		// All fields are treated as UTF-8 strings.
		ISerializerDeserializer[] serde = new ISerializerDeserializer[Col_Number];
		IValueParserFactory[] parsers = new IValueParserFactory[Col_Number];
		for (int f = 0; f < Col_Number; f++) {
			serde[f] = UTF8StringSerializerDeserializer.INSTANCE;
			parsers[f] = UTF8StringParserFactory.INSTANCE;
		}
		RecordDescriptor desc = new RecordDescriptor(serde);

		FileScanOperatorDescriptor scanner = new FileScanOperatorDescriptor(
				spec, splitProvider, new DelimitedDataTupleParserFactory(parsers, '|'), desc);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, scanner, HyracksJobEx.NC_ID_array[0]);

		// Duplicate the scanned stream once per target node.
		SplitOperatorDescriptor splitOp = new SplitOperatorDescriptor(spec, desc, outputArity);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, splitOp, HyracksJobEx.NC_ID_array[0]);

		IOperatorDescriptor[] outputOp = new IOperatorDescriptor[outputFile.length];
		for (int i = 0; i < outputArity; i++) {
			// Writer i lives on node i+1 (node 0 already holds the source).
			outputOp[i] = new PlainFileWriterOperatorDescriptor(spec,
					new ConstantFileSplitProvider(new FileSplit[] { new FileSplit(
							HyracksJobEx.NC_ID_array[i + 1], outputFile[i].getAbsolutePath()) }),
					"|");
			PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, outputOp[i],
					HyracksJobEx.NC_ID_array[i + 1]);
		}

		spec.connect(new OneToOneConnectorDescriptor(spec), scanner, 0, splitOp, 0);
		for (int i = 0; i < outputArity; i++) {
			spec.connect(new OneToOneConnectorDescriptor(spec), splitOp, i, outputOp[i], 0);
		}
		for (int i = 0; i < outputArity; i++) {
			spec.addRoot(outputOp[i]);
		}

		HyracksJobEx.exe_job(spec, "DataSplitJob");
	}
	
	/*
	 * Collects each node's cleaned data file back into a single file on the
	 * first node (DataDir/SumValid_filename).  Field 0 is hashed/compared as
	 * a string; the resulting row order is not guaranteed to be sorted.
	 */
	public static void ValidDataCollect()
	{
		JobSpecification spec = new JobSpecification();
		HyracksJobEx.initNC();
		final int nodeCount = HyracksJobEx.NC_ID_array.length;

		// One input split per node: that node's cleaned-value file.
		FileSplit[] inputFiles = new FileSplit[nodeCount];
		for (int n = 0; n < nodeCount; n++) {
			inputFiles[n] = new FileSplit(HyracksJobEx.NC_ID_array[n],
					new FileReference(new File(DataDir, DataCleaner.ValueValidFileName)));
		}
		IFileSplitProvider splitProvider = new ConstantFileSplitProvider(inputFiles);

		// All fields handled as UTF-8 strings.
		ISerializerDeserializer[] serde = new ISerializerDeserializer[Col_Number];
		IValueParserFactory[] parsers = new IValueParserFactory[Col_Number];
		for (int f = 0; f < Col_Number; f++) {
			serde[f] = UTF8StringSerializerDeserializer.INSTANCE;
			parsers[f] = UTF8StringParserFactory.INSTANCE;
		}
		RecordDescriptor desc = new RecordDescriptor(serde);

		FileScanOperatorDescriptor scanner = new FileScanOperatorDescriptor(
				spec, splitProvider, new DelimitedDataTupleParserFactory(parsers, '|'), desc);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, scanner, HyracksJobEx.NC_ID_array);

		// Single output file, written back on the first node.
		FileSplit[] outputFiles = new FileSplit[] { new FileSplit(HyracksJobEx.NC_ID_array[0],
				new FileReference(new File(DataDir, SumValid_filename))) };
		IFileSplitProvider outSplits = new ConstantFileSplitProvider(outputFiles);
		IOperatorDescriptor printer = new PlainFileWriterOperatorDescriptor(spec, outSplits, "|");
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, printer, HyracksJobEx.NC_ID_array[0]);

		// M:N merging connector keyed on field 0 (string hash + string compare).
		IConnectorDescriptor conn = new MToNPartitioningMergingConnectorDescriptor(spec,
				new FieldHashPartitionComputerFactory(new int[] { 0 },
						new IBinaryHashFunctionFactory[] {
								PointableBinaryHashFunctionFactory.of(UTF8StringPointable.FACTORY) }),
				new int[] { 0 },
				new IBinaryComparatorFactory[] {
						PointableBinaryComparatorFactory.of(UTF8StringPointable.FACTORY) },
				true);
		spec.connect(conn, scanner, 0, printer, 0);
		spec.addRoot(printer);
		HyracksJobEx.exe_job(spec, "DataCollect");
	}
	 
	/*
	 * Collects each node's cleaned data file back onto the first node,
	 * with the merge keyed on the first column parsed as an integer —
	 * intended to keep rows ordered by the surrogate id.
	 */
	public static void ValidDataCollectOrdered()
	{
		JobSpecification spec = new JobSpecification();
		HyracksJobEx.initNC();
		final int nodeCount = HyracksJobEx.NC_ID_array.length;

		// One input split per node: that node's cleaned-value file.
		FileSplit[] inputFiles = new FileSplit[nodeCount];
		for (int n = 0; n < nodeCount; n++) {
			inputFiles[n] = new FileSplit(HyracksJobEx.NC_ID_array[n],
					new FileReference(new File(DataDir, DataCleaner.ValueValidFileName)));
		}
		IFileSplitProvider splitProvider = new ConstantFileSplitProvider(inputFiles);

		// Field 0 is the integer surrogate key; all remaining fields are strings.
		ISerializerDeserializer[] serde = new ISerializerDeserializer[Col_Number];
		IValueParserFactory[] parsers = new IValueParserFactory[Col_Number];
		serde[0] = IntegerSerializerDeserializer.INSTANCE;
		parsers[0] = IntegerParserFactory.INSTANCE;
		for (int f = 1; f < Col_Number; f++) {
			serde[f] = UTF8StringSerializerDeserializer.INSTANCE;
			parsers[f] = UTF8StringParserFactory.INSTANCE;
		}
		RecordDescriptor desc = new RecordDescriptor(serde);

		FileScanOperatorDescriptor scanner = new FileScanOperatorDescriptor(
				spec, splitProvider, new DelimitedDataTupleParserFactory(parsers, '|'), desc);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, scanner, HyracksJobEx.NC_ID_array);

		// Single output file, written back on the first node.
		FileSplit[] outputFiles = new FileSplit[] { new FileSplit(HyracksJobEx.NC_ID_array[0],
				new FileReference(new File(DataDir, SumValid_filename))) };
		IFileSplitProvider outSplits = new ConstantFileSplitProvider(outputFiles);
		IOperatorDescriptor printer = new PlainFileWriterOperatorDescriptor(spec, outSplits, "|");
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, printer, HyracksJobEx.NC_ID_array[0]);

		// M:N merging connector keyed on field 0 (integer hash + integer compare).
		IConnectorDescriptor conn = new MToNPartitioningMergingConnectorDescriptor(spec,
				new FieldHashPartitionComputerFactory(new int[] { 0 },
						new IBinaryHashFunctionFactory[] {
								PointableBinaryHashFunctionFactory.of(IntegerPointable.FACTORY) }),
				new int[] { 0 },
				new IBinaryComparatorFactory[] {
						PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY) });
		spec.connect(conn, scanner, 0, printer, 0);
		spec.addRoot(printer);
		HyracksJobEx.exe_job(spec, "DataCollect");
	}
	 
	/*
	 * Test-only driver: scans the cleaned data on every node and feeds it
	 * into a Build2GramMapOperatorDescriptor.  Not part of the main pipeline.
	 */
	private static void testBuild2GramOperator()
	{
		JobSpecification spec = new JobSpecification();
		HyracksJobEx.initNC();
		final int nodeCount = HyracksJobEx.NC_ID_array.length;

		// One input split per node: that node's cleaned-value file.
		FileSplit[] inputFiles = new FileSplit[nodeCount];
		for (int n = 0; n < nodeCount; n++) {
			inputFiles[n] = new FileSplit(HyracksJobEx.NC_ID_array[n],
					new FileReference(new File(DataDir, DataCleaner.ValueValidFileName)));
		}
		IFileSplitProvider splitProvider = new ConstantFileSplitProvider(inputFiles);

		// All fields handled as UTF-8 strings.
		ISerializerDeserializer[] serde = new ISerializerDeserializer[Col_Number];
		IValueParserFactory[] parsers = new IValueParserFactory[Col_Number];
		for (int f = 0; f < Col_Number; f++) {
			serde[f] = UTF8StringSerializerDeserializer.INSTANCE;
			parsers[f] = UTF8StringParserFactory.INSTANCE;
		}
		RecordDescriptor desc = new RecordDescriptor(serde);

		FileScanOperatorDescriptor scanner = new FileScanOperatorDescriptor(
				spec, splitProvider, new DelimitedDataTupleParserFactory(parsers, '|'), desc);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, scanner, HyracksJobEx.NC_ID_array);

		// The 2-gram builder runs on every node; trailing args are unused here.
		Build2GramMapOperatorDescriptor builder = new Build2GramMapOperatorDescriptor(spec, serde, null, null);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, builder, HyracksJobEx.NC_ID_array);

		spec.connect(new OneToOneConnectorDescriptor(spec), scanner, 0, builder, 0);
		spec.addRoot(builder);
		HyracksJobEx.exe_job(spec, "BuildTest");
	}
	 
	 
	 
	/*
	 * Test-only driver: copies the raw hash-map byte file from the second
	 * node (NC[1]) to the first node as "DataFromNC2.dat".  Not part of the
	 * main pipeline.
	 */
	private static void ByteDataCollectTest()
	{
		JobSpecification spec = new JobSpecification();
		HyracksJobEx.initNC();

		// Single byte-stream input, located on the second node.
		FileSplit[] inputFiles = new FileSplit[] { new FileSplit(HyracksJobEx.NC_ID_array[1],
				new FileReference(new File(DataDir, DataCleaner.HashMapBytesFileName))) };
		IFileSplitProvider splitProvider = new ConstantFileSplitProvider(inputFiles);

		// Tuples carry a single byte-typed field.
		ISerializerDeserializer[] serde = new ISerializerDeserializer[] { ByteSerializerDeserializer.INSTANCE };
		RecordDescriptor desc = new RecordDescriptor(serde);

		IOperatorDescriptor scanner = new FileScanOperatorDescriptor(
				spec, splitProvider, new ByteTupleParserFactory(), desc);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, scanner, HyracksJobEx.NC_ID_array[1]);

		// Single output file on the first node.
		FileSplit[] outputFiles = new FileSplit[] { new FileSplit(HyracksJobEx.NC_ID_array[0],
				new FileReference(new File(DataDir, "DataFromNC2.dat"))) };
		IFileSplitProvider outSplits = new ConstantFileSplitProvider(outputFiles);
		IOperatorDescriptor printer = new FrameFileWriterOperatorDescriptor(spec, outSplits);
		PartitionConstraintHelper.addAbsoluteLocationConstraint(spec, printer, HyracksJobEx.NC_ID_array[0]);

		spec.connect(new OneToOneConnectorDescriptor(spec), scanner, 0, printer, 0);
		spec.addRoot(printer);
		HyracksJobEx.exe_job(spec, "ByteDataCollectTest");
	}
	 
	 

	
	
}
