package com.servlet;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.beans.UserInfo;
import com.beans.WordCountInfo;

import net.sf.json.JSONArray;


@WebServlet("/MR_uniqueServlet")
public class MR_uniqueServlet extends HttpServlet {

	private static final long serialVersionUID = 1L;

	// Base URI of the HDFS cluster, hoisted so the address lives in one place
	// instead of being repeated in four string literals.
	private static final String HDFS_BASE = "hdfs://slave1:9000";

	/**
	 * Runs a "unique words" MapReduce job over the HDFS file named by the
	 * {@code filePath} request parameter, then streams the distinct words back
	 * to the client as a JSON array of {@link WordCountInfo}.
	 *
	 * <p>Output is written to {@code /<userName>/uniquettmp} on HDFS (the
	 * directory is deleted first, because the job fails if it already exists).
	 *
	 * @param request  must carry a {@code filePath} parameter and a
	 *                 {@code session_user} session attribute of type {@link UserInfo}
	 * @param response receives the JSON result, UTF-8 encoded
	 */
	protected void service(HttpServletRequest request, HttpServletResponse response)
			throws ServletException, IOException {
		try {
			// Create and configure the job.
			Job job = Job.getInstance();

			// Map phase: emit (word, 0) pairs.
			job.setMapperClass(uniqueMapper.class);
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(LongWritable.class);

			// NOTE(review): filePath comes straight from the client; consider
			// validating it so a caller cannot point the job at arbitrary HDFS paths.
			String filePath = request.getParameter("filePath");
			FileInputFormat.setInputPaths(job, new Path(HDFS_BASE + "/" + filePath));

			// Reduce phase: collapse duplicates to one (word, 0) record per word.
			job.setReducerClass(uniqueReducer.class);
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(LongWritable.class);

			UserInfo user = (UserInfo) request.getSession().getAttribute("session_user");
			String userRoot = user.getUserName();

			URI uri = new URI(HDFS_BASE);
			Configuration conf = new Configuration();
			// try-with-resources: the original closed fs only on the happy path
			// and leaked it whenever an exception occurred mid-way.
			try (FileSystem fs = FileSystem.get(uri, conf)) {
				// The job fails if the output directory already exists, so remove it first.
				fs.delete(new Path("/" + userRoot + "/uniquettmp"), true);

				// Where the computed result is written.
				FileOutputFormat.setOutputPath(job, new Path(HDFS_BASE + "/" + userRoot + "/uniquettmp"));

				// Submit and block; true = print progress while the job runs.
				// BUG FIX: the original ignored the completion status and tried to
				// read output that may not exist after a failed job.
				if (!job.waitForCompletion(true)) {
					response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
							"unique MapReduce job failed");
					return;
				}
				System.out.println("unique Mapreduce 任务执行完毕");

				// Read the single reducer's output back and parse it.
				Path resultPath = new Path(HDFS_BASE + "/" + userRoot + "/uniquettmp/part-r-00000");
				List<WordCountInfo> wordCountList = new ArrayList<>();
				try (FSDataInputStream fsInput = fs.open(resultPath);
						BufferedReader br = new BufferedReader(
								new InputStreamReader(fsInput, StandardCharsets.UTF_8))) {
					String line;
					while ((line = br.readLine()) != null) {
						System.out.println(line);
						// Reducer output format: word <TAB> count
						String[] data = line.split("\t");
						String word = data[0];
						int count = 0;
						if (data.length > 1) {
							try {
								count = Integer.parseInt(data[1]);
							} catch (NumberFormatException ignored) {
								// Malformed count column: keep 0 rather than abort the page.
							}
						}
						wordCountList.add(new WordCountInfo(count, word));
					}
				}

				response.setContentType("text/html;charset=utf-8");
				JSONArray jsonobj = JSONArray.fromObject(wordCountList);
				response.getWriter().println(jsonobj);
			}
		} catch (Exception e) {
			// Boundary catch: the original printed the trace and returned an empty
			// 200 response; report the failure to the client instead.
			e.printStackTrace();
			if (!response.isCommitted()) {
				response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, e.getMessage());
			}
		}
	}

	/**
	 * Mapper: splits each input line on single spaces and emits (word, 0).
	 *
	 * <p>KEYIN  {@code LongWritable} — byte offset of the line in the input file.
	 * <br>VALUEIN {@code Text} — the line being processed.
	 * <br>KEYOUT/VALUEOUT — the types written downstream to the reducer.
	 */
	static class uniqueMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

		// Reused across map() calls to avoid a per-record allocation in the hot loop.
		private final Text outKey = new Text();
		private static final LongWritable ZERO = new LongWritable();

		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			for (String word : value.toString().split(" ")) {
				outKey.set(word);
				context.write(outKey, ZERO);
			}
		}
	}

	/** Reducer: writes exactly one (word, 0) record per distinct word. */
	static class uniqueReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

		private static final LongWritable ZERO = new LongWritable();

		@Override
		protected void reduce(Text key, Iterable<LongWritable> values, Context context)
				throws IOException, InterruptedException {
			context.write(key, ZERO);
		}
	}

}
