package com.ipinyou.hf2redis;

import java.io.IOException;
import java.net.SocketException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;

import com.ipinyou.data.common.util.HdfsUtil;
import com.ipinyou.iredis.IRedisNode;
import com.ipinyou.iredis.IRedisNodeConfig;
import com.ipinyou.iredis.exception.IRedisNodeNotFoundException;
import com.ipinyou.iredis.strategy.ShardingStrategy;
import com.ipinyou.iredis.strategy.factory.RemoteShardingStrategyFactory;
import com.ipinyou.iredis.strategy.factory.ShardingStrategyFactory;

/**
 * Batch incremental update of user profiles into Redis: routes each
 * (pyid, catid, weight) row to the redis node that owns the pyid and
 * applies it as a pipelined ZINCRBY.
 * 
 * @author Administrator
 *
 */

public class BatchIncrUserprofileIntoRedis {

	private static int JOB_PAGE_FILE_NUM = 10;
	private static int SLEEP_1MS_PER_NUM = 100; // 累计多少条休眠1毫秒

	public static class ReadPyidMap extends Mapper<Object, Text, Text, Text> {

		// Sharding-strategy config string, read from the job configuration.
		String strategyConfig = "";

		// iRedis sharding factory used to map a pyid to its redis node.
		ShardingStrategyFactory redisSharding = null;

		static int dbg_cnt = 0; // debug counter (not used in this file)

		// How many times to retry a failed context.write before dropping the record.
		private static final int MAX_WRITE_RETRIES = 5;

		@Override
		protected void setup(Mapper<Object, Text, Text, Text>.Context context)
				throws IOException, InterruptedException {
			if (redisSharding == null) {
				strategyConfig = context.getConfiguration().get("shardingStrategyConfig");
				redisSharding = new RemoteShardingStrategyFactory(strategyConfig);
				System.out.println("setup strategyConfig:" + strategyConfig);
			}
			super.setup(context);
		}

		/**
		 * Each input line has three tab-separated columns: pyid, catid, weight.
		 * Emits (redisNodeKey, originalLine) so rows are grouped by the redis
		 * node that owns the pyid. Rows with an empty or literal "inf" column
		 * are dropped.
		 */
		public void map(Object key, Text value, Context context) throws IOException {

			String[] part = value.toString().split("\t");
			if (part.length < 3) {
				return;
			}

			String pyId = part[0].trim();
			String cate = part[1].trim();
			String weight = part[2].trim();
			if (pyId.isEmpty() || "inf".equals(pyId) || cate.isEmpty() || "inf".equals(cate)
					|| weight.isEmpty() || "inf".equals(weight)) {
				return;
			}

			// Defensive re-init in case setup() was skipped (should not happen).
			if (redisSharding == null) {
				strategyConfig = context.getConfiguration().get("shardingStrategyConfig");
				redisSharding = new RemoteShardingStrategyFactory(strategyConfig);
				System.out.println("(shouldn't display this)strategyConfig:" + strategyConfig);
			}

			ShardingStrategy shardingStrategy = redisSharding.getShardingStrategy();
			if (shardingStrategy == null) {
				return;
			}
			IRedisNodeConfig irCfg = shardingStrategy.getServiceNode(pyId);
			if (irCfg == null) {
				return;
			}

			// The node config's string form ("host:port") becomes the map key.
			Text outKey = new Text(irCfg.toString());
			for (int tryIdx = 0; tryIdx < MAX_WRITE_RETRIES; tryIdx++) {
				try {
					context.write(outKey, value);
					break;
				} catch (IOException | InterruptedException e) {
					e.printStackTrace();
					try {
						Thread.sleep(50);
					} catch (InterruptedException e1) {
						// Restore interrupt status instead of swallowing it.
						Thread.currentThread().interrupt();
					}
				}
			}
		}

	}

	/**
	 * Simple holder pairing a Jedis connection with the pipeline created on it,
	 * so both can be passed around (and torn down) together.
	 */
	public static class JedisPipeline {
		public Jedis m_jedis = null;
		public Pipeline m_pipeline = null;

		JedisPipeline() {
		}

		JedisPipeline(Jedis jedis, Pipeline pipeline) {
			this.m_jedis = jedis;
			this.m_pipeline = pipeline;
		}
	}

	/**
	 * Creates a Jedis connection plus pipeline for the node identified by
	 * {@code redisKey}, which must have the form "host:port".
	 *
	 * @param redisKey target redis node as "host:port"
	 * @return a JedisPipeline wrapping the new connection and its pipeline
	 * @throws IllegalArgumentException if redisKey contains no ':'
	 * @throws NumberFormatException    if the port part is not a number
	 */
	static JedisPipeline GenJedisPipeline(String redisKey) {
		int sep = redisKey.indexOf(':');
		if (sep < 0) {
			// BUGFIX: previously a missing ':' caused an opaque StringIndexOutOfBoundsException.
			throw new IllegalArgumentException("redisKey must be host:port, got: " + redisKey);
		}
		String host = redisKey.substring(0, sep);
		String port = redisKey.substring(sep + 1);
		System.out.println("new redis_host:" + host);
		System.out.println("new redis_port:" + port);

		// 55000 ms connect timeout.
		Jedis newJedis = new Jedis(host, Integer.parseInt(port), 55000);
		Pipeline newPipeline = newJedis.pipelined();
		return new JedisPipeline(newJedis, newPipeline);
	}

	public static class UpdateCombiner extends Reducer<Text, Text, Text, Text> {

		// iRedis sharding classes; initialized in setup() but only consumed by
		// the getIRedisNode helpers below.
		private ShardingStrategyFactory shardingStrategyFactory;
		private ShardingStrategy strategy;
		String strategyConfig = "";

		// Cache of already-created redis nodes, keyed by node config.
		private Map<IRedisNodeConfig, IRedisNode> resources = new ConcurrentHashMap<IRedisNodeConfig, IRedisNode>();

		// Total number of ZINCRBY commands issued by this combiner instance.
		private long rmCount = 0;

		// Pause 1 ms after this many commands, to throttle redis write load.
		private int sleepPerNum = 100;

		@Override
		protected void setup(Reducer<Text, Text, Text, Text>.Context context) throws IOException, InterruptedException {
			if (strategy == null) {
				strategyConfig = context.getConfiguration().get("shardingStrategyConfig");
				shardingStrategyFactory = new RemoteShardingStrategyFactory(strategyConfig);
				strategy = shardingStrategyFactory.getShardingStrategy();
				System.out.println("setup strategyConfig:" + strategyConfig);
			}
			String sleepnum = context.getConfiguration().get("sleep1s.per.number");
			if (sleepnum != null && !sleepnum.isEmpty()) {
				sleepPerNum = Integer.parseInt(sleepnum);
			}
			super.setup(context);
		}

		/**
		 * key is the "host:port" of a target redis node (produced by the map
		 * phase); values are the raw input lines "pyid\tcatid\tweight" routed
		 * to that node. Each valid line is applied as a pipelined ZINCRBY.
		 *
		 * NOTE(review): this class is registered via setCombinerClass (see
		 * genJob). Hadoop may run a combiner zero or several times per record,
		 * so the redis side effects here are not exactly-once — consider
		 * moving this logic into a Reducer.
		 */
		public void reduce(Text key, Iterable<Text> values, Context context) {

			// Defensive re-init in case setup() was skipped (should not happen).
			if (strategy == null) {
				strategyConfig = context.getConfiguration().get("shardingStrategyConfig");
				shardingStrategyFactory = new RemoteShardingStrategyFactory(strategyConfig);
				strategy = shardingStrategyFactory.getShardingStrategy();
				System.out.println("(shouldn't display this)strategyConfig:" + strategyConfig);
			}

			String redisKey = key.toString();
			if (redisKey == null || redisKey.isEmpty()) {
				return;
			}
			// redisKey is the connection string of the target redis node.
			JedisPipeline newJP = GenJedisPipeline(redisKey);

			try {
				for (Text element : values) {
					String[] part = element.toString().split("\t");
					if (part.length < 3) {
						continue;
					}

					String pyId = part[0].trim();
					String catId = part[1].trim();
					double weight;
					try {
						weight = Double.parseDouble(part[2].trim());
					} catch (Exception e) {
						continue; // malformed weight column — skip the row
					}
					if (pyId.isEmpty()) {
						continue;
					}

					try {
						newJP.m_pipeline.zincrby(pyId, weight, catId);
						rmCount++;
						if (rmCount % sleepPerNum == 0) {
							Thread.sleep(1); // throttle: pause 1 ms every sleepPerNum commands
						}
					} catch (Exception e) {
						e.printStackTrace();
						if (e instanceof SocketException) {
							// Connection dropped: reconnect and keep going.
							newJP = GenJedisPipeline(redisKey);
						} else {
							try {
								Thread.sleep(50);
							} catch (InterruptedException e1) {
								// BUGFIX: restore interrupt status instead of swallowing it.
								Thread.currentThread().interrupt();
							}
						}
					}
				}
			} finally {
				System.out.println("updated pyid in redis count is " + rmCount);
				// BUGFIX: always flush the pipeline and drop the connection,
				// even if iterating the values failed (previously a failure
				// here leaked the Jedis connection).
				newJP.m_pipeline.sync();
				newJP.m_jedis.disconnect();
			}

			System.out.println("FINISH ALL");
		}

		/** Resolves the redis node owning the given pyid via the sharding strategy. */
		private IRedisNode getIRedisNode(String pyid) {
			IRedisNodeConfig nodeConfig = strategy.getServiceNode(pyid);
			if (nodeConfig == null) {
				throw new IRedisNodeNotFoundException("无法定位key[" + pyid + "]对应的redis节点.");
			}
			return getIRedisNode(nodeConfig);
		}

		/** Returns a cached IRedisNode for the config, creating it on first use. */
		private IRedisNode getIRedisNode(IRedisNodeConfig nodeConfig) {
			IRedisNode node = resources.get(nodeConfig);
			if (node == null) {
				node = new IRedisNode(nodeConfig);
				resources.put(nodeConfig, node);
			}
			return node;
		}

	}

	/**
	 * Lists the full paths of all regular files directly under the given HDFS
	 * directory, e.g. hdfs://hadoop220:54310/tmp/userInfo/.../part-m-00000.
	 * Returns an empty list if the directory is missing, empty, or the
	 * listing fails.
	 */
	static List<String> ListFilesUnderHdfsDir(String hdfsDir) throws IOException {
		List<String> pathList = new ArrayList<String>();

		FileSystem fs = getFs();
		Path inputFolder = new Path(hdfsDir);

		FileStatus[] inputFiles;
		try {
			if (!fs.exists(inputFolder)) {
				return pathList;
			}
			inputFiles = fs.listStatus(inputFolder);
		} catch (IOException e) {
			e.printStackTrace();
			// BUGFIX: previously execution fell through with inputFiles == null
			// and threw a NullPointerException in the loop below.
			return pathList;
		}
		if (inputFiles == null || inputFiles.length == 0) {
			return pathList;
		}

		for (FileStatus status : inputFiles) {
			if (status.isFile()) {
				pathList.add(status.getPath().toString());
			}
		}

		return pathList;
	}

	/**
	 * Builds an HDFS FileSystem handle from a fresh default Configuration,
	 * pinning the job queue name to "normal".
	 */
	private static FileSystem getFs() throws IOException {
		Configuration conf = new Configuration();
		conf.set("mapreduce.job.queuename", "normal");
		return FileSystem.get(conf);
	}

	/**
	 * Entry point. Expects 5 args: sharding config, input HDFS dir, debug
	 * output dir, files-per-job, and the sleep throttle threshold. Splits the
	 * input files into pages and runs one MapReduce job per page.
	 */
	public static void main(String[] args) throws IOException {
		long preTime;
		String outputPath = "/tmp/test/";
		String inputDataPath;
		String shardingStrategyConfig = "";

		if (args.length == 5) {
			// Current calling convention (2014-09-15).
			shardingStrategyConfig = args[0]; // connection config of the target redis cluster
			inputDataPath = args[1]; // HDFS path of the data to write
			outputPath = args[2]; // debug output path
			JOB_PAGE_FILE_NUM = Integer.parseInt(args[3]); // number of input files per job
			SLEEP_1MS_PER_NUM = Integer.parseInt(args[4]); // commands per 1 ms pause
		} else {
			System.out.println("param error! need 5 params.");
			return;
		}
		System.out.println(new Date().toString() + ": begin the job of decay weight to the redis db.");
		System.out.println("JOB_PAGE_FILE_NUM: " + JOB_PAGE_FILE_NUM);
		preTime = System.currentTimeMillis();

		List<String> pathList = new ArrayList<String>();

		FileSystem fs = getFs();
		Path inputFolder = new Path(inputDataPath);
		FileStatus[] inputFiles = null;
		// List every file under the input directory.
		try {
			inputFiles = fs.listStatus(inputFolder);
			if (!fs.exists(inputFolder) || inputFiles == null || inputFiles.length == 0) {
				System.out.println("there is no log data to parase for the " + inputDataPath);
				return;
			}
		} catch (IOException e) {
			e.printStackTrace();
			// BUGFIX: previously execution continued with inputFiles == null
			// and threw a NullPointerException in the loop below.
			return;
		}

		// Collect the full paths of the data files, skipping the _SUCCESS marker.
		for (int i = 0; i < inputFiles.length; i++) {
			try {
				if (fs.isFile(inputFiles[i].getPath())) {
					String tmpPath = inputFiles[i].getPath().toString();
					if (!tmpPath.endsWith("_SUCCESS")) {
						pathList.add(tmpPath);
						System.out.println("hdfs file : " + tmpPath);
					}
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}

		int totalSize = pathList.size();
		System.out.println("Total num of part files:" + totalSize);
		// Number of jobs needed: ceiling of totalSize / JOB_PAGE_FILE_NUM.
		int pageNum = (totalSize + JOB_PAGE_FILE_NUM - 1) / JOB_PAGE_FILE_NUM;

		System.out.println("-->>there is " + pageNum + " job-------to be finished.");

		syncData2Redis_multiProcesses(shardingStrategyConfig, outputPath, pathList, totalSize, pageNum);

		long curTime = System.currentTimeMillis();
		float seconds = (float) (curTime - preTime) / 1000;

		System.out.println(new Date().toString() + ": finish the job of decay weight to the redis db, total cost "
				+ seconds + " seconds.");

	}

	/**
	 * Builds one MapReduce job per page of input files and runs them two at a
	 * time: submit up to two jobs, then wait for both before starting the
	 * next pair. Job failures are logged, not propagated.
	 */
	private static void syncData2Redis_multiProcesses(String shardingStrategyConfig, String outputPath,
			List<String> pathList, int totalSize, int pageNum) {
		ArrayList<Job> jobArr = new ArrayList<Job>();

		for (int page = 1; page <= pageNum; page++) {

			int start = (page - 1) * JOB_PAGE_FILE_NUM;
			int end = Math.min(start + JOB_PAGE_FILE_NUM, totalSize);
			System.out.println("Cur Job Index Range:" + start + " - " + end);

			// Comma-separated list of the input files belonging to this page.
			StringBuilder path = new StringBuilder();
			for (int m = start; m < end; m++) {
				if (path.length() > 0) {
					path.append(',');
				}
				path.append(pathList.get(m));
			}

			if (path.length() > 0) {
				jobArr.add(genJob(shardingStrategyConfig, path.toString(), outputPath + "/" + page, page, pageNum));
			} else {
				System.out.println("Cur job index range is empty. Skip it");
			}
		}

		// Run jobs in pairs: submit two, then block until both complete.
		int idx = 0;
		int numJobs = jobArr.size();
		while (idx < numJobs) {
			Job job1 = jobArr.get(idx++);
			submitJob(job1);

			Job job2 = (idx < numJobs) ? jobArr.get(idx++) : null;
			if (job2 != null) {
				submitJob(job2);
			}

			awaitJob(job1);
			if (job2 != null) {
				awaitJob(job2);
			}
		}
	}

	/** Submits the job asynchronously; failures are logged, not propagated. */
	private static void submitJob(Job job) {
		try {
			System.out.println("-->>start the sync job:" + job.getJobName());
			job.submit();
		} catch (IOException | InterruptedException | ClassNotFoundException e) {
			e.printStackTrace();
		}
	}

	/** Blocks until the job completes; failures are logged, not propagated. */
	private static void awaitJob(Job job) {
		try {
			job.waitForCompletion(true);
			System.out.println("Sync job:" + job.getJobName() + " finished");
		} catch (IOException | InterruptedException | ClassNotFoundException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Builds and configures the MapReduce job for one page of input files.
	 *
	 * NOTE(review): any exception during job construction is only logged and
	 * null is returned; callers dereference the result without a null check —
	 * confirm this is the intended failure mode.
	 */
	private static Job genJob(String shardingStrategyConfig, String hadoopPath, String outputPath, int jobPage,
			int totalPage) {
		Job job = null;
		try {
			Configuration conf = new Configuration();

			// Long task timeout (10800000 ms = 3 hours).
			conf.setInt("mapred.task.timeout", 10800000);
			conf.set("hadoopPath", hadoopPath);
			conf.set("shardingStrategyConfig", shardingStrategyConfig);
			conf.set("mapred.job.queue.name", "normal");
			conf.set("sleep1s.per.number", Integer.toString(SLEEP_1MS_PER_NUM));
			System.out.println("hadoopPath:" + hadoopPath);
			System.out.println("shardingStrategyConfig:" + shardingStrategyConfig);

			job = Job.getInstance(conf, "Userprofile_redis_decay: job No " + jobPage + ", total job " + totalPage);
			job.setJarByClass(BatchIncrUserprofileIntoRedis.class);
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(Text.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(Text.class);

			// The combiner performs the redis writes; no reducer is configured.
			job.setMapperClass(ReadPyidMap.class);
			job.setCombinerClass(UpdateCombiner.class);

			FileInputFormat.addInputPaths(job, hadoopPath);
			FileOutputFormat.setOutputPath(job, new Path(outputPath));
		} catch (Exception e) {
			e.printStackTrace();
		}
		return job;
	}

}
