package org.wyz.mapreduce.pagerank;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.Arrays;

/**
 * Mapper for one iteration of PageRank.
 *
 * <p>Each input line encodes both a page's current rank and its adjacency list:
 * {@code "page-i pagerank(i) outpage1 outpage2 ..."}. For every out-link the mapper
 * emits the rank contribution {@code pagerank(i) / outDegree(i)} keyed by the target
 * page (flag 0), and it re-emits the original line body keyed by the page itself
 * (flag 1) so the reducer can reconstruct the graph structure for the next iteration.
 */
public class PageRankMapper extends Mapper<LongWritable, Text, Text, PageWritable> {
    // Total number of pages in the graph (used by the PageRank formula below).
    private static final int N_PAGE = 4;

    // Damping factor of the PageRank formula.
    private static final double RATIO = 0.85;

    /**
     * PAGERANK(i) = (1-RATIO)/N_PAGE + RATIO * SUM(PAGERANK(j)/OUT(j))
     */

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // The input line carries both the page's rank and its out-link structure,
        // which is why a plain Text line is used instead of a structured key.
        // FIX: split on "\\s+" (one or more whitespace chars) — the original "\\s"
        // produced empty tokens whenever fields were separated by runs of spaces/tabs,
        // corrupting the parsed fields.
        String[] row = value.toString().split("\\s+");

        // Robustness: skip blank or malformed lines instead of throwing
        // ArrayIndexOutOfBoundsException on row[1].
        if (row.length < 2) {
            return;
        }

        String currentPage = row[0];
        double currentRank = Double.parseDouble(row[1]);
        int outDegree = row.length - 2;

        // Distribute this page's rank evenly over its out-links (flag 0 = rank share).
        // NOTE(review): a dangling page (outDegree == 0) emits no contributions, so its
        // rank mass is dropped here — confirm this matches the intended PageRank variant.
        for (int i = 2; i < row.length; i++) {
            context.write(new Text(row[i]), new PageWritable(String.valueOf(currentRank / outDegree), 0));
        }

        // Re-emit the original rank + adjacency list unchanged (flag 1 = structure),
        // so the reducer can rebuild the line for the next iteration.
        context.write(new Text(currentPage), new PageWritable(concatRest(row), 1));
    }

    /**
     * Joins {@code row[1..]} with single spaces, i.e. {@code "rank out1 out2 ..."}.
     *
     * @param row the whitespace-split input line; {@code row[0]} (the page id) is skipped
     * @return the rank and out-links re-joined with single spaces
     */
    private String concatRest(String[] row) {
        return String.join(" ", Arrays.asList(row).subList(1, row.length));
    }
}
