import itertools
import re

from pyspark import SparkContext, SparkConf


def combine(s, n):
    """Return every n-element combination of s, each as a sorted tuple.

    Combinations are produced in ascending index order over s (the same
    order the recursive enumeration this replaces produced), and each
    combination is sorted so that equal item sets compare equal no
    matter how the items were ordered in s.

    Args:
        s: an indexable sequence (list, tuple, or string).
        n: size of each combination; n > len(s) yields [], n == 0
           yields [()].

    Returns:
        list of sorted n-tuples.
    """
    # itertools.combinations walks index-ascending subsets, matching the
    # hand-rolled recursion (which also carried a dead `data=None`
    # default that would have crashed if ever used).
    return [tuple(sorted(combo)) for combo in itertools.combinations(s, n)]


# --- Spark setup ---------------------------------------------------------
# NOTE(review): master URL and input path are hard-coded below — confirm
# they match the target deployment before running elsewhere.
sparkConf = SparkConf()
sparkConf.set("spark.master", "spark://10.0.0.252:7077")  # standalone cluster master
sparkConf.set("spark.app.name", "MBA")
sparkConf.set("spark.executor.memory", "512m")
sparkConf.set("spark.executor.cores", "2")

sparkContext = SparkContext.getOrCreate(sparkConf)
sparkContext.setLogLevel("ERROR")  # suppress Spark's INFO/WARN log noise
# One element per line of the transactions file.
rdd = sparkContext.textFile("file:///root/transaction.txt")


def parse_T(x):
    """Parse one transaction line into candidate itemset counts.

    Expects a line shaped like "T1: a, b, c": everything before the first
    colon is a transaction id and is discarded; the remainder is a
    comma-separated item list. Emits every 2- and 3-item combination of
    the items, each paired with a count of 1 for downstream aggregation.

    Args:
        x: one raw text line from the transactions file.

    Returns:
        list of (itemset_tuple, 1) pairs.
    """
    # maxsplit=1 fixes a latent crash: without it, a second ':' anywhere
    # in the line makes the two-target unpacking raise ValueError.
    _, info = re.split(r":\s*", x, maxsplit=1)
    items = re.split(r"\s*,\s*", info)

    return [(combo, 1) for combo in combine(items, 2) + combine(items, 3)]


# Count each 2-/3-item combination across all transactions: emit
# (combo, 1) per transaction, then sum the ones per combo.
# NOTE(review): groupByKey().mapValues(sum) materializes every value list
# per key; reduceByKey(operator.add) would shuffle less — left as-is.
rdd1 = rdd.flatMap(f=parse_T).groupByKey().mapValues(lambda v: sum(v))

# for d in rdd1.collect():
#     print(d)
# print(rdd1.collect())


def gen_sub_seq(x):
    """Expand one counted itemset into itself plus its one-smaller subsets.

    Given (itemset, count), emits the itemset keyed by itself with a None
    tag — so the reducer can recognise the itemset's own count — followed
    by every subset one element smaller, keyed by the subset and carrying
    the originating itemset and its count.

    Args:
        x: (itemset_tuple, count) pair.

    Returns:
        list of (key_tuple, (origin_or_None, count)) pairs.
    """
    itemset, count = x
    expanded = [(itemset, (None, count))]
    expanded.extend(
        (subset, (itemset, count))
        for subset in combine(itemset, len(itemset) - 1)
    )
    return expanded


# Regroup: each itemset's record lands under its own key (None-tagged)
# together with the counts of every one-larger superset containing it.
rdd2 = rdd1.flatMap(f=gen_sub_seq).groupByKey().mapValues(lambda v: [i for i in v])
# for x in rdd2.collect():
#     print(x)


def not_list(src, dst):
    """Remove one occurrence of each element of dst from src, in place.

    Mutates src and returns it. Raises ValueError if some element of dst
    is not present in src (callers only pass dst values drawn from src).
    """
    for unwanted in dst:
        src.pop(src.index(unwanted))
    return src


def do_result(x):
    """Turn one grouped record into confidence-scored association rules.

    x is (itemset, tagged_counts) where each tagged count is
    (origin_or_None, count): the None-tagged entry carries the itemset's
    own count, the others carry counts of one-larger supersets. For each
    superset, emits (confidence, (itemset, added_items)) with
    confidence = superset_count / own_count.

    Args:
        x: (itemset_tuple, iterable of (origin_or_None, count)).

    Returns:
        list of (confidence, (itemset, added_items_list)) rules; empty
        when the record has no superset entries.
    """
    itemset, tagged_counts = x

    own_count = 0
    superset_counts = []
    for origin, count in tagged_counts:
        if origin is None:  # identity comparison, not `== None`
            own_count = count
        else:
            superset_counts.append((origin, count))

    if not superset_counts:
        return []

    results = []
    for superset, count in superset_counts:
        if own_count == 0:  # no base count recorded: cannot divide
            continue

        confidence = float(count) / float(own_count)
        # Items the rule adds: the superset minus one occurrence of each
        # item already in the itemset.
        added = list(superset)
        for item in itemset:
            added.remove(item)
        results.append((confidence, (itemset, added)))

    return results


# Score each itemset→superset rule by confidence, highest first.
# NOTE(review): top(100) already selects the largest 100 by natural
# (descending) ordering, so the sortByKey here is redundant for this
# particular output — confirm before removing.
rdd3 = rdd2.flatMap(f=do_result).sortByKey(ascending=False)


# Print the 100 highest-confidence rules to stdout on the driver.
for x in rdd3.top(100):
    print(x)

sparkContext.stop()
