# -*- coding: UTF-8 -*-
from pyspark import SparkContext


# ********** Begin **********#
path = '/root/friend.txt'


def dealWith(x):
    """Expand one line of the friend list into weighted pair keys.

    The first token on a line is a person; the remaining tokens are that
    person's direct friends.  Every unordered pair of tokens on the line
    becomes a key ``"A_B"``, ordered canonically by ``hash`` so the same
    pair always collapses onto one key.  Pairs involving the line's
    owner (direct friendships) carry weight 10000; pairs among the
    friends themselves (candidate indirect friendships) carry weight 1.
    Downstream, summing weights and keeping totals < 10 leaves only
    pairs that were never directly connected.
    """
    tokens = x.split()
    pairs = []
    for i, left in enumerate(tokens[:-1]):
        for right in tokens[i + 1:]:
            # Canonical order: the token with the larger hash goes first,
            # so "a_b" and "b_a" map to the same key.
            if hash(str(left)) > hash(str(right)):
                key = str(left) + '_' + str(right)
            else:
                key = str(right) + '_' + str(left)
            # i == 0 means `left` is the line's owner -> direct friendship.
            weight = 10000 if i == 0 else 1
            pairs.append((key, weight))
    return pairs

# ********** End **********#

if __name__ == "__main__":
    """
        需求：对本地文件系统URI为：/root/friend.txt 的数据统计间接好友的数量
    """
    # ********** Begin **********#
    sc = SparkContext('local', 'myapp')
    oriFile = sc.textFile(path)
    rdd = oriFile.map(lambda x: dealWith(x))
    # rdd = oriFile.map(lambda x: x.split())
    # print(type(rdd))
    # print(rdd.collect())
    rdd_add = rdd.flatMap(lambda x: x)
    # print(rdd_add.collect())
    # rdd_filter = rdd_add.filter(lambda x: filterx(x))
    rdd_add = rdd_add.reduceByKey(lambda x, y: x + y)
    # print(rdd_add.collect())
    rdd_filter = rdd_add.filter(lambda x: x[1] < 10)
    print(rdd_filter.collect())

    # print("[('tom_world', 2), ('mr_hadoop', 1), ('mr_tom', 1), ('mr_world', 2), ('cat_world', 1), ('hive_tom', 3), ('mr_cat', 1), ('cat_hadoop', 2)]")
    # ********** End **********#
