package org.example.ItemCF.src.recommend;


import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Mapper that splits a user preference vector into one record per item.
 *
 * <p>Input key/value: line offset, line of the form
 * {@code "userID<delim>itemID1:pref1,itemID2:pref2,..."}.
 * Output key/value: itemID, {@code "userID:pref"}.
 *
 * <p>Example input:
 * <pre>
 * 1	101:5.0,102:3.0,103:2.5
 * 2	101:2.0,102:2.5,103:5.0,104:2.0
 * 3	107:5.0,105:4.5,104:4.0,101:2.0
 * 4	106:4.0,103:3.0,101:5.0,104:4.5
 * 5	104:4.0,105:3.5,106:4.0,101:4.0,102:3.0,103:2.0
 * </pre>
 */
public class Step3UserVectorSplitterMapper extends Mapper<LongWritable, Text, IntWritable, Text> {
    // Reusable Writable instances to avoid per-record allocation (standard Hadoop idiom).
    // Instance fields rather than static: mutable output buffers must not be shared
    // across mapper instances in the same JVM.
    private final IntWritable k = new IntWritable();
    private final Text v = new Text();

    @Override
    public void map(LongWritable key, Text values, Context context) throws IOException, InterruptedException {
        // Split the line on the project-wide delimiter; tokens[0] is the user ID,
        // the remaining tokens are "itemID:pref" pairs.
        String[] tokens = Recommend.DELIMITER.split(values.toString());
        if (tokens.length == 0) {
            return; // blank line — nothing to emit
        }
        String userID = tokens[0]; // loop-invariant: hoisted out of the loop

        for (int i = 1; i < tokens.length; i++) {
            // Split "itemID:pref" into its two parts.
            String[] vector = tokens[i].split(":");
            if (vector.length < 2) {
                continue; // skip malformed entries instead of throwing ArrayIndexOutOfBounds
            }
            int itemID = Integer.parseInt(vector[0]);
            String pref = vector[1];

            k.set(itemID);              // key: item ID
            v.set(userID + ":" + pref); // value: "userID:pref"
            context.write(k, v);
        }
    }
}
   
