package fda.hash;

import java.util.LinkedList;
import java.util.List;

import fda.base.Block;
import fda.base.Constants;
import fda.base.DiskFile2;
import fda.base.Memory2;
import fda.base.Tuple;
import fda.main.Main;
import fda.schema.Schema;
import fda.schema.TableInfo;

public class JoinHelper {

	/**
	 * Output file of the most recent {@link #doJoin(List, List)} call.
	 * NOTE(review): mutable public static state — read it immediately after
	 * {@code doJoin()} returns; not thread-safe.
	 */
	public static DiskFile2 JoinDiskFile = null;

	/**
	 * Sums the tuple counts of all bucket files in the list.
	 *
	 * @param bucketList1 bucket files to total; must not be {@code null}
	 * @return total number of tuples across all buckets
	 */
	public static int getTotal(List<DiskFile2> bucketList1) {
		int result = 0;
		for (DiskFile2 df : bucketList1) {
			result += df.getTupleCount();
		}
		return result;
	}

	/**
	 * Joins two bucketed relations bucket-by-bucket and writes the result to a
	 * new disk file, which is also published via {@link #JoinDiskFile}.
	 * Buckets at the same index are assumed to hold tuples with the same hash
	 * value, so only same-index bucket pairs need to be merged.
	 *
	 * @param bl1 buckets of the first relation
	 * @param bl2 buckets of the second relation; must have the same size as {@code bl1}
	 * @throws IllegalStateException if the two lists differ in size
	 */
	public static void doJoin(List<DiskFile2> bl1, List<DiskFile2> bl2) {
		if (bl1 == null || bl2 == null || bl1.isEmpty() || bl2.isEmpty())
			return;

		if (bl1.size() != bl2.size())
			throw new IllegalStateException("Number of buckets MUST be the same");

		// create a Join TableInfo describing the combined output schema
		TableInfo tabInfoOut = Schema.joinTables(bl1.get(0).getTableInfo(), bl2.get(0).getTableInfo(), "sub/joinHashResult.txt");
		DiskFile2 diskOut = new DiskFile2(tabInfoOut, false);
		JoinDiskFile = diskOut;

		// each sublist holds an unsorted set of tuples grouped by hash,
		// so merging same-index buckets covers all matching key pairs
		for (int i = 0; i < bl1.size(); ++i) {
			mergeBuckets(bl1.get(i), bl2.get(i), diskOut);
		}
	}

	/**
	 * Merges one bucket pair: loads the smaller bucket fully into memory and
	 * streams the larger one block-by-block, emitting a joined tuple for every
	 * key match (classic in-memory hash-join probe, here as a nested loop).
	 */
	private static void mergeBuckets(DiskFile2 df1, DiskFile2 df2, DiskFile2 diskOut) {
		final TableInfo ti = diskOut.getTableInfo();
		final Block joinBlock = diskOut.getEmptyBlock();
		Tuple[] mem = null;
		DiskFile2 df = null;

		// keep the smaller tuple set in memory, stream the larger one from disk
		if (df1.getTupleCount() < df2.getTupleCount()) {
			mem = Memory2.getInstance().fillMemory(df1);
			df = df2;
		} else {
			mem = Memory2.getInstance().fillMemory(df2);
			df = df1;
		}

		int blockCount = df.getBlockCount();
		for (int i = 0; i < blockCount; ++i) {
			Block bl = df.getBlock(i);

			for (Tuple t1 : bl.getRecords()) {
				for (Tuple t2 : mem) {
					// NOTE(review): '==' assumes getKeyValue() returns a primitive;
					// if it ever returns a boxed type this silently becomes a
					// reference comparison — verify against Tuple.
					if (t1.getKeyValue() == t2.getKeyValue()) {
						Tuple joinedTuple = ti.joinTuples(t1, t2);
						joinBlock.addRecord(joinedTuple);
						if (joinBlock.isFull()) {
							saveToFile(diskOut, joinBlock);
						}
					}
				}
			}
		}

		// flush the partially filled last block
		saveToFile(diskOut, joinBlock);
	}

	/**
	 * Appends the block's records to the output file and clears the block so it
	 * can be refilled. No-op for an empty block.
	 */
	private static void saveToFile(DiskFile2 diskOut, final Block joinBlock) {
		if (joinBlock.getRecords().isEmpty())
			return;

		diskOut.appendBlock(joinBlock.getRecords());
		joinBlock.clear();
	}

	/**
	 * Recursively re-partitions any bucket pair that is too large to merge in
	 * memory. An oversized pair (same index in both lists) is split into
	 * {@code hash} sub-buckets using the next smaller prime as the modulus; the
	 * originals are removed and the sub-buckets appended to the end of both
	 * lists, keeping them index-aligned. Recurses with the new modulus until no
	 * bucket exceeds {@code Constants.MAX_BLOCKS_IN_BUCKET}.
	 *
	 * @param oldHash hash modulus used by the previous partitioning pass
	 * @param list1 buckets of the first relation (modified in place)
	 * @param list2 buckets of the second relation (modified in place)
	 * @throws IllegalStateException if the lists differ in size, or no smaller
	 *         prime modulus remains (re-partitioning phases exhausted)
	 */
	public void analyzeBuckets(int oldHash, List<DiskFile2> list1, List<DiskFile2> list2) {
		// same invariant doJoin() enforces; without it list2.get(i) below
		// could throw an uninformative IndexOutOfBoundsException
		if (list1.size() != list2.size())
			throw new IllegalStateException("Number of buckets MUST be the same");

		int hash = getNextHash(oldHash);
		if (hash < 2)
			throw new IllegalStateException("Limit of Phases is OVER!!!");

		final List<DiskFile2> toRemove1 = new LinkedList<DiskFile2>();
		final List<DiskFile2> toRemove2 = new LinkedList<DiskFile2>();
		final List<DiskFile2> toKeep1 = new LinkedList<DiskFile2>();
		final List<DiskFile2> toKeep2 = new LinkedList<DiskFile2>();

		List<DiskFile2> sublist1 = null;
		List<DiskFile2> sublist2 = null;

		for (int i = 0; i < list1.size(); ++i) {
			if (list1.get(i).getBlockCount() > Constants.MAX_BLOCKS_IN_BUCKET || list2.get(i).getBlockCount() > Constants.MAX_BLOCKS_IN_BUCKET) {

				sublist1 = new LinkedList<DiskFile2>();
				sublist2 = new LinkedList<DiskFile2>();

				for (int k = 0; k < hash; ++k) {
					// include 'i' in the file name: currentTimeMillis() alone can
					// collide when two oversized buckets are split within the
					// same millisecond (same hash and k on a later i)
					sublist1.add(new DiskFile2(new TableInfo(list1.get(i).getTableInfo(), Constants.PRE_BUCKET_1 + hash + "_" + k + "_" + i + "_" + System.currentTimeMillis()), true));
					sublist2.add(new DiskFile2(new TableInfo(list2.get(i).getTableInfo(), Constants.PRE_BUCKET_2 + hash + "_" + k + "_" + i + "_" + System.currentTimeMillis()), true));
				}

				distributeBucket(hash, sublist1, list1.get(i));
				distributeBucket(hash, sublist2, list2.get(i));

				toKeep1.addAll(sublist1);
				toKeep2.addAll(sublist2);

				toRemove1.add(list1.get(i));
				toRemove2.add(list2.get(i));
			}
		}

		// replace the oversized buckets with their sub-buckets; parallel
		// add/remove order keeps list1 and list2 index-aligned
		list1.removeAll(toRemove1);
		list2.removeAll(toRemove2);

		list1.addAll(toKeep1);
		list2.addAll(toKeep2);

		// something was split — the new sub-buckets may themselves be oversized
		if (!toRemove1.isEmpty() || !toRemove2.isEmpty())
			analyzeBuckets(hash, list1, list2);
	}

	/**
	 * Returns the largest known prime strictly smaller than {@code oldHash},
	 * or 0 when none remains. Each re-partitioning pass therefore uses a
	 * smaller, coprime modulus than the previous one, redistributing the
	 * tuples of an oversized bucket differently.
	 */
	private int getNextHash(int oldHash) {
		// primes scanned from largest to smallest; first one below oldHash wins
		int[] pr = new int[] {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41};
		for (int i = pr.length - 1; i >= 0; --i) {
			if (pr[i] < oldHash)
				return pr[i];
		}
		return 0;
	}

	/**
	 * Distributes every tuple of {@code diskFile2} into one of the sub-bucket
	 * files by its key modulo {@code hash}, then flushes all sub-buckets.
	 *
	 * @param hash number of sub-buckets (the new prime modulus)
	 * @param sublist1 target sub-bucket files, exactly {@code hash} of them
	 * @param diskFile2 the oversized bucket being split
	 */
	private void distributeBucket(int hash, List<DiskFile2> sublist1, DiskFile2 diskFile2) {
		for (int i = 0; i < diskFile2.getBlockCount(); ++i) {
			Block b = diskFile2.getBlock(i);
			for (Tuple t : b.getRecords()) {
				int index = (int) (t.getKeyValue() % hash);
				// '%' is sign-preserving in Java: normalize a negative key's
				// remainder into [0, hash) instead of indexing out of bounds
				if (index < 0) {
					index += hash;
				}
				sublist1.get(index).appendTuple(t);
			}
		}

		// persist all sub-buckets before the caller recurses over them
		for (DiskFile2 df : sublist1) {
			df.flush();
		}
	}

}
