package itemCF.step1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Builds the rating matrix from the user action list.
 * Input:  userID, itemID, score
 * Output: itemID (row)    userID (column)_score
 *
 * @author legolas
 * @date 2020/3/20 00:20
 */
public class Step1Job {
    /** Default HDFS input path: the user action list (userID, itemID, score). */
    private static final String IN_PATH = "/itemCF/step1_input/action.txt";
    /** Default HDFS output path for the rating matrix. */
    private static final String OUT_PATH = "/itemCF/step1_output";
    /** HDFS NameNode address used as the default filesystem. */
    private static final String HDFS = "hdfs://hadoopmaster:9000";

    /**
     * Configures and submits the step-1 MapReduce job that builds the
     * rating matrix from the raw user action list.
     *
     * <p>The process exit code reflects the job outcome: 0 on success,
     * 1 on job failure or on any setup exception.
     *
     * @param args optional overrides: {@code args[0]} = input path,
     *             {@code args[1]} = output path; the hard-coded defaults
     *             are used when omitted, preserving the old behavior
     */
    public static void main(String[] args) {
        // Allow command-line overrides while keeping the original
        // hard-coded paths as backward-compatible defaults.
        String input = args.length > 0 ? args[0] : IN_PATH;
        String output = args.length > 1 ? args[1] : OUT_PATH;
        try {
            // Configuration required to create the job.
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", HDFS);
            // Named job so it is identifiable in the cluster UI.
            Job job = Job.getInstance(conf, "itemCF-step1");

            // Required so the cluster can locate this driver class in the jar.
            job.setJarByClass(Step1Job.class);

            // Input / output paths.
            FileInputFormat.setInputPaths(job, new Path(input));
            FileOutputFormat.setOutputPath(job, new Path(output));

            // Map phase.
            job.setMapperClass(Mapper1.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);

            // Reduce phase.
            job.setReducerClass(Reducer1.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);

            // Submit and block until completion. Propagate success/failure
            // as the process exit code; previously the return value was
            // ignored and the driver always exited 0.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            // Surface setup/submission failures to the caller instead of
            // silently exiting 0.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
