package com.shuting.Analysis_PersonClassifyPaper;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;

public class WeightPaper_Num {

	public static class MyMapper extends Mapper<Text, Text, Text, Text> {
 
		public void map(Text key, Text value, Context context) throws IOException, InterruptedException {
  
			String personID=key.toString().split("->")[0];
			String classifyTnf=key.toString().split("->")[1];
			String paperID=value.toString().split("<=>")[0];
			String sortnum=value.toString().split("<=>")[1];
			String journal=value.toString().split("<=>")[2];
			//权威文献必须有期刊！！！
			if(journal.equals("null")){
				return;
			}			
			ArrayList<String> classifySet=new ArrayList<String>();
			for(String classify:classifyTnf.split("#")){
				classifySet.add(classify);				
			}
			for(String sn : sortnum.split(";")){
				for(String classify:classifySet){
					Boolean isOk=true;
					String[] split=classify.split(";");
					for(int i=0;i<split.length;i++){
						if(!sn.contains(split[i])){
							isOk=false;
							break;
						}
					}
					if(isOk){
						String keyStr=personID+"->"+classify;
						String valueStr=paperID+"<=>"+journal;
						context.write(new Text(keyStr), new Text(valueStr));
					}				
				}		
			}
		}
	}

	// Reducer: sums journal weights per (person, classification) key.
	public static class MyReducer extends TableReducer<Text, Text, ImmutableBytesWritable> {

		protected void setup(Context context) throws IOException, InterruptedException {

			Configuration conf = context.getConfiguration();
			String journalPath = conf.get("journalPath");
			ConfigInf.initJournalMap(journalPath);
		}		
		
		public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {

			String personID = key.toString().split("->")[0];
			String classify = key.toString().split("->")[1];			
			ArrayList<String> paperIDSet = new ArrayList<String>();
			ArrayList<String> journalSet = new ArrayList<String>();
			for (Text value : values) {
				String valueStr = value.toString();
				String paperID=valueStr.split("<=>")[0];
				String journal=valueStr.split("<=>")[1];				
				paperIDSet.add(paperID);
				journalSet.add(journal);
			}
			
			double paperNumD=0.0;			
			for(String journal : journalSet){				
				Double weight=ConfigInf.journalSet.get(journal);
				if(weight==null){
					continue;
				}
				paperNumD+=weight.doubleValue();			
			}
			
			//四舍五入 
			long paperNumL=Math.round(paperNumD);			
			String paperNumS=String.valueOf(paperNumL);
			
			Put put = new Put(personID.getBytes());
			put.addColumn(Bytes.toBytes("weightPaper"), Bytes.toBytes(classify), Bytes.toBytes(paperNumS));
			context.write(null, put);
		}
	}

	// Driver: configures and launches the MapReduce job.
	/**
	 * Configures and runs the weighted-paper-count job.
	 *
	 * @param ResultTable name of the HBase table the reducer writes to
	 * @param hdfsPath    HDFS path of the SequenceFile input
	 * @param journalPath path of the journal-weight table, forwarded to the
	 *                    reducers via the job configuration
	 * @throws Exception if job setup fails or the job does not complete
	 *                   successfully
	 */
	public void start(String ResultTable, String hdfsPath, String journalPath) throws Exception {

		Configuration hbaseConf = HBaseConfiguration.create();
		hbaseConf.set("journalPath", journalPath);

		Job job = Job.getInstance(hbaseConf, "weightPaper_num");
		// Bug fix: was Paper_Num.class (copy-paste from a sibling job);
		// the jar should be located from this job's own class.
		job.setJarByClass(WeightPaper_Num.class);
		job.setNumReduceTasks(6);

		job.setMapperClass(MyMapper.class);
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(Text.class);

		TableMapReduceUtil.initTableReducerJob(ResultTable, MyReducer.class, job);

		job.setInputFormatClass(SequenceFileInputFormat.class);
		FileInputFormat.setInputPaths(job, new Path(hdfsPath));

		// Surface job failure to the caller instead of silently returning.
		if (!job.waitForCompletion(true)) {
			throw new IOException("weightPaper_num job failed");
		}
	}

}
