import java.io.IOException;
import java.util.*;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
public class wordcount {

    public static class Mapnew extends Mapper<Object, Text, Text, Text> {
        private Text keyInfo = new Text(); // 存储单词和URL组合,url+word
        private Text valueInfo = new Text(); // 存储词频
        private FileSplit split; // 存储Split对象
        // 实现map函数

        public void map(Object key, Text value, Context context)

                throws IOException, InterruptedException {
            // 获得<key,value>对所属的FileSplit对象
            split = (FileSplit) context.getInputSplit();
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                // key值由单词和URL组成，如"file1.txt:loveyou"
                // 获取文件的完整路径
                // keyInfo.set(itr.nextToken()+":"+split.getPath().toString());
                // 这里为了好看，只获取文件的名称。
                //int splitIndex = split.getPath().toString().indexOf("file");
                String tmp=split.getPath().toString();
                //-第二次出现的位置
                int splitIndex = StringUtils.ordinalIndexOf(tmp,"-",2);
                String token=itr.nextToken();
                //转换成只含有英文字母
                token=token.replaceAll("[^a-zA-Z]","");
                if (token!=null)
                {
                    //big to small
                    token=token.toLowerCase();
                    //judge whether
                    String [] stopwordarray={"a", "about", "above", "across", "after", "afterwards", "again",
                            "against", "all", "almost", "alone", "along", "already", "also", "although", "always",
                            "am", "among", "amongst", "amoungst", "amount", "an", "and", "another", "any", "anyhow",
                            "anyone", "anything", "anyway", "anywhere", "are", "around", "as", "at", "back", "be",
                            "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand",
                            "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both",
                            "bottom", "but", "by", "call", "can", "cannot", "cant", "co", "computer", "con", "could",
                            "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during",
                            "each", "eg", "eight", "either", "eleven", "else", "elsewhere", "empty", "enough", "etc",
                            "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few",
                            "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly",
                            "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had",
                            "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein",
                            "hereupon", "hers", "herse", "him", "himse", "his", "how", "however", "hundred", "i", "ie",
                            "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itse", "keep",
                            "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me",
                            "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move",
                            "much", "must", "my", "myse", "name", "namely", "neither", "never", "nevertheless",
                            "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere",
                            "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others",
                            "otherwise", "our", "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps",
                            "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems",
                            "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty",
                            "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere",
                            "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them",
                            "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein",
                            "thereupon", "these", "they", "thick", "thin", "third", "this", "those", "though",
                            "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward",
                            "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very",
                            "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever",
                            "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether",
                            "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will",
                            "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves"};
                    List<String> stopwordList = Arrays.asList(stopwordarray);
                    if(stopwordList.contains(token)==false)
                    {

                        if(token.length()>3)
                        {
                            //keyInfo.set(token + ":" + split.getPath().toString().substring(splitIndex));
                            keyInfo.set(token);
                            // 词频初始化为1
                            valueInfo.set("1");
                            context.write(keyInfo, valueInfo);
                        }
                    }
                }
                /*keyInfo.set(itr.nextToken() + ":"
                        + split.getPath().toString().substring(splitIndex));
                // 词频初始化为1
                valueInfo.set("1");
                context.write(keyInfo, valueInfo);*/
            }
        }
    }

    public static class Combine extends Reducer<Text, Text, Text, Text> {

        // Reused output value holder ("word:count").
        private final Text info = new Text();

        /**
         * Local aggregation step: sums the "1" counts the mapper emitted for a
         * single word and re-emits the pair under the constant key "1" with
         * value "word:count", so the final reducer sees every word under one
         * key and can rank them globally.
         *
         * NOTE(review): rewriting the key inside a combiner violates the
         * Hadoop combiner contract — the framework may apply a combiner zero,
         * one, or several times, and a second pass over already-combined
         * "word:count" values would blow up in Integer.parseInt. It happens to
         * work when the combiner runs exactly once per map output; confirm
         * before reusing this pattern.
         */
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Accumulate the per-word count; each value is a textual integer.
            int total = 0;
            for (Text count : values) {
                total += Integer.parseInt(count.toString());
            }
            // Emit as "word:count" BEFORE clobbering the key below.
            info.set(key.toString() + ":" + total);
            // Collapse every word onto the single key "1" for global ranking.
            key.set(Integer.toString(1));
            context.write(key, info);
        }
    }




    public static class Reduce extends Reducer<Text, Text, Text, Text> {

        /** Maximum number of ranked words included in the output report. */
        private static final int TOP_N = 100;

        private final Text result = new Text();

        /**
         * Final aggregation: every value is a "word:count" string produced by
         * the combiner, all arriving under the single key "1". Re-aggregates
         * the counts per word, sorts descending by count, and writes one
         * report listing the top {@value #TOP_N} words.
         */
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            Map<String, Integer> counts = new HashMap<String, Integer>();
            for (Text value : values) {
                String pair = value.toString();
                int sep = pair.lastIndexOf(":");
                String word = pair.substring(0, sep);       // part before ':' is the word
                int number = Integer.parseInt(pair.substring(sep + 1)); // part after ':' is its count
                // Accumulate rather than put(): the combiner runs once per map
                // task, so the same word can arrive several times here; a
                // plain put() silently dropped all but the last count.
                Integer prev = counts.get(word);
                counts.put(word, prev == null ? number : prev + number);
            }

            // Sort entries by count, descending. Integer.compare avoids the
            // int-overflow hazard of the subtraction idiom (o2 - o1).
            List<Map.Entry<String, Integer>> ranked =
                    new ArrayList<Map.Entry<String, Integer>>(counts.entrySet());
            Collections.sort(ranked, new Comparator<Map.Entry<String, Integer>>() {
                @Override
                public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
                    return Integer.compare(o2.getValue(), o1.getValue());
                }
            });

            // Build the report with StringBuilder instead of String += in a
            // loop (the original was O(n^2) in report length).
            StringBuilder report = new StringBuilder("\n");
            int limit = Math.min(TOP_N, ranked.size());
            for (int i = 0; i < limit; i++) {
                // Map.Entry.toString() renders as "word=count", matching the
                // original output format.
                report.append(i + 1).append('\t').append(ranked.get(i)).append('\n');
            }
            report.append("----------------------------------------------------------------");

            result.set(report.toString());
            key.set("wordcount-all\n");
            context.write(key, result);
        }
    }
    /**
     * Job driver: configures and submits the word-count job.
     * Input/output paths may be supplied on the command line; when none are
     * given, the historical defaults under /user/root are used (the original
     * ignored {@code args} entirely and always ran on the hard-coded paths).
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // JobTracker address (classic MR1 property, deprecated under YARN).
        conf.set("mapred.job.tracker", "192.168.1.2:9001");
        // Fall back to the default HDFS paths only when the caller passed no
        // arguments at all.
        String[] ioArgs = args.length > 0
                ? args
                : new String[] { "/user/root/input", "/user/root/output" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Inverted Index <in> <out>");
            System.exit(2);
        }

        // new Job(conf, name) is deprecated in modern Hadoop in favor of
        // Job.getInstance(conf, name); kept for compatibility with the
        // cluster's API version.
        Job job = new Job(conf, "Inverted Index");
        job.setJarByClass(wordcount.class);

        // Map, combine and reduce stages.
        job.setMapperClass(Mapnew.class);
        job.setCombinerClass(Combine.class);
        job.setReducerClass(Reduce.class);

        // Map output types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        // Final (reduce) output types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Input and output directories.
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}