package offline;

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import tlsb.Utils;
import util.Configuration;
import util.IPartition;
import util.Pair;
import util.RedisCluster;
import util.RedisClusterFactory;
import util.TextFileLoader;
import weibo4j.org.json.JSONException;
import weibo4j.org.json.JSONObject;

public class DataLoader {
	// Redis key suffixes / hash field names used to store the social graph
	// and per-user metadata.
	final private static String SETOFFOLLOWEES = ":fle";
	final public static String SETOFFOLLOWERS = ":flr";
	final private static String READFREQFIELD = "rf";
	final private static String WRITEFREQFIELD = "wf";
	final private static String ALLUIDS = "uids";
	final private static String SETOFCOMMUNITIES = ":com";
	final private static String FSTCOMFIELD = "fstc";

	RedisCluster redisCluster = RedisClusterFactory.instance
			.getMetaDataRedisCluster();
	ExecutorService executor = Executors.newFixedThreadPool(20);

	IPartition part = redisCluster.getPartition();

	// Background readers for the four input files; buffer sizes are tuned
	// per expected file size.
	TextFileLoader flwIfr = new TextFileLoader(Configuration.instance()
			.getFlwShipFile(), false, 2000000);
	TextFileLoader rfIfr = new TextFileLoader(Configuration.instance()
			.getReadFreqFile(), false, 500000);
	TextFileLoader wfIfr = new TextFileLoader(Configuration.instance()
			.getWriteFreqFile(), false, 500000);
	TextFileLoader comIfr = new TextFileLoader(Configuration.instance()
			.getCommunityFile(), false, 500000);

	/**
	 * Starts the background readers for all four input files. Must be
	 * called before {@link #load()}.
	 */
	public void init() {
		rfIfr.start();
		wfIfr.start();
		flwIfr.start();
		comIfr.start();
	}

	/**
	 * Releases all resources: file readers, redis connections and the
	 * worker pool.
	 */
	public void close() {
		rfIfr.close();
		wfIfr.close();
		flwIfr.close();
		comIfr.close();
		redisCluster.close();
		// FIX: previously the executor was never shut down, leaving 20
		// non-daemon pool threads alive and preventing JVM exit.
		executor.shutdown();
	}

	/**
	 * Runs all four loading tasks concurrently on the shared pool and
	 * blocks until every task group has finished, printing per-task and
	 * overall timing.
	 */
	public void load() {
		long start = System.currentTimeMillis();
		List<Pair<String, CountDownLatch>> latches = new LinkedList<Pair<String, CountDownLatch>>();

		int threadNum = 10;
		CountDownLatch latch = new CountDownLatch(threadNum);
		latches.add(new Pair<String, CountDownLatch>("loadCommunity", latch));
		for (int i = 0; i < threadNum; i++) {
			executor.execute(new loadCommunity(latch));
		}

		threadNum = 10;
		latch = new CountDownLatch(threadNum);
		latches.add(new Pair<String, CountDownLatch>("loadWF", latch));
		for (int i = 0; i < threadNum; i++) {
			executor.execute(new loadWF(latch));
		}

		threadNum = 10;
		latch = new CountDownLatch(threadNum);
		latches.add(new Pair<String, CountDownLatch>("loadRF", latch));
		for (int i = 0; i < threadNum; i++) {
			executor.execute(new loadRF(latch));
		}

		threadNum = 20;
		latch = new CountDownLatch(threadNum);
		latches.add(new Pair<String, CountDownLatch>("loadFollowshipAndUid",
				latch));
		for (int i = 0; i < threadNum; i++) {
			executor.execute(new loadFollowshipAndUid(latch));
		}

		// Wait for each task group. FIX: the old code swallowed
		// InterruptedException with an empty catch; we keep retrying the
		// await (the load must complete) but record the interrupt and
		// restore the thread's interrupt status afterwards.
		boolean interrupted = false;
		for (Pair<String, CountDownLatch> pair : latches) {
			long waitStart = System.currentTimeMillis();
			while (true) {
				try {
					pair.b.await();
					System.out.println(pair.a + " terminated");
					break;
				} catch (InterruptedException e) {
					interrupted = true; // restored below
				}
			}
			// FIX: label the per-task line with the task name; the old text
			// duplicated the overall summary line below and was ambiguous.
			System.out.println(String.format("%s costs: %f mins", pair.a,
					(System.currentTimeMillis() - waitStart) / 60000.0));
		}
		if (interrupted) {
			Thread.currentThread().interrupt();
		}
		System.out.println(String.format("loading data costs: %f mins",
				(System.currentTimeMillis() - start) / 60000.0));
	}

	/**
	 * Loads followship edges into redis. Input lines are
	 * "followee\tfollower"; every follower is also recorded as following
	 * himself so his own posts appear in his timeline.
	 */
	public class loadFollowshipAndUid implements Runnable {
		CountDownLatch flwLatch;

		public loadFollowshipAndUid(CountDownLatch flwLatch_) {
			flwLatch = flwLatch_;
		}

		public void run() {
			String line = null;
			int counter = 0;
			while ((line = flwIfr.readLine()) != null) {
				counter++;
				String[] tmp = line.split("\t");
				// Users that need a shared-timeline computation.
				redisCluster.getPipeline(part.shard(ALLUIDS)).sadd(ALLUIDS,
						tmp[1]);
				// follower -> followees set (including himself)
				redisCluster.getPipeline(part.shard(tmp[1])).sadd(
						Utils.joinStr(tmp[1], SETOFFOLLOWEES), tmp[0]);
				redisCluster.getPipeline(part.shard(tmp[1])).sadd(
						Utils.joinStr(tmp[1], SETOFFOLLOWEES), tmp[1]);
				// followee -> followers set (including himself)
				redisCluster.getPipeline(part.shard(tmp[0])).sadd(
						Utils.joinStr(tmp[0], SETOFFOLLOWERS), tmp[1]);
				redisCluster.getPipeline(part.shard(tmp[0])).sadd(
						Utils.joinStr(tmp[0], SETOFFOLLOWERS), tmp[0]);

				// Flush pipelines every 200 records to bound memory.
				if (counter % 200 == 0)
					redisCluster.synAllPipelines();

				if (counter % 100000 == 0) {
					System.out.println("loading flw " + counter);
				}
			}
			redisCluster.synAllPipelines();
			System.out
					.println("B counter:"
							+ redisCluster.getJedis(part.shard(ALLUIDS)).scard(
									ALLUIDS));

			flwIfr.close();
			flwLatch.countDown();
		}
	}

	/**
	 * Loads read frequencies. Input lines are "uid\treadProbability"; the
	 * read frequency is later used to decide the max-cut value.
	 */
	public class loadRF implements Runnable {
		CountDownLatch rfLatch;

		public loadRF(CountDownLatch rfLatch_) {
			rfLatch = rfLatch_;
		}

		public void run() {
			String line = null;
			int counter = 0;
			while ((line = rfIfr.readLine()) != null) {
				String[] tmp = line.split("\t");
				redisCluster.getPipeline(part.shard(tmp[0])).hset(tmp[0],
						READFREQFIELD, tmp[1]);
				// FIX: pre-increment so the first sync happens after 200
				// records, not on the very first one (0 % 200 == 0); this
				// also matches the other loaders' batching behavior.
				if (++counter % 200 == 0)
					redisCluster.synAllPipelines();
			}
			redisCluster.synAllPipelines();
			rfIfr.close();
			System.out.println("one thread of loading RF terminated");
			rfLatch.countDown();
		}
	}

	/**
	 * Loads write frequencies. Input lines are "uid\twriteFrequency"; the
	 * write frequency decides which users share a subset.
	 */
	public class loadWF implements Runnable {
		CountDownLatch wfLatch;

		public loadWF(CountDownLatch wfLatch_) {
			wfLatch = wfLatch_;
		}

		public void run() {
			String line = null;
			int counter = 0;
			while ((line = wfIfr.readLine()) != null) {
				String[] tmp = line.split("\t");
				redisCluster.getPipeline(part.shard(tmp[0])).hset(tmp[0],
						WRITEFREQFIELD, tmp[1]);
				// FIX: pre-increment so the first sync happens after 200
				// records, not on the very first one (0 % 200 == 0).
				if (++counter % 200 == 0)
					redisCluster.synAllPipelines();
			}
			redisCluster.synAllPipelines();
			wfIfr.close();
			System.out.println("one thread of loadWF terminated");
			wfLatch.countDown();
		}
	}

	/**
	 * Loads community memberships. Input lines are JSON objects with a
	 * "uid" string and a "labels" object whose keys are community ids.
	 * Malformed lines are skipped.
	 */
	public class loadCommunity implements Runnable {
		CountDownLatch loadLatch;

		public loadCommunity(CountDownLatch loadLatch_) {
			loadLatch = loadLatch_;
		}

		public void run() {
			boolean flag = true;

			String line = "";
			String uid = "";
			String cid = "";
			int counter = 0;
			while ((line = comIfr.readLine()) != null) {
				counter++;
				JSONObject obj;
				JSONObject jLabel;
				try {
					obj = new JSONObject(line);
					uid = obj.getString("uid");
					jLabel = obj.getJSONObject("labels");
				} catch (JSONException e) {
					// Skip unparsable lines; best-effort load.
					continue;
				}
				flag = true;
				for (Iterator<String> iter = jLabel.keys(); iter.hasNext();) {
					cid = iter.next();
					if (flag) {
						// The first community decides the user's stl community.
						redisCluster.getPipeline(part.shard(uid)).hset(uid,
								FSTCOMFIELD, cid);
						flag = false;
					}
					// Record every community the user belongs to.
					redisCluster.getPipeline(part.shard(uid)).sadd(
							Utils.joinStr(uid, SETOFCOMMUNITIES), cid);
				}

				if (counter % 200 == 0)
					redisCluster.synAllPipelines();

				if (counter % 1000000 == 0) {
					System.out.println(counter);
				}
			}
			redisCluster.synAllPipelines();
			System.out.println("one thread of loadCommunity terminated");
			loadLatch.countDown();
		}
	}
}
