# Run with: /opt/spark/bin/spark-submit test.py

import pb, mm, os, pyspark, torch
from pyspark import SparkContext, SparkConf

# Spark setup: run locally on 4 worker threads and silence non-error logs.
conf = SparkConf().setMaster("local[4]").setAppName("test")
sc = SparkContext(conf=conf)
sc.setLogLevel("ERROR")

def mm_(ms):
    """Multiply one pair of matrices shipped to an executor.

    ms is a two-element sequence [a, b] of nested lists; the result
    (a @ b) is returned as nested lists of floats so it can be
    pickled back to the driver.
    """
    left, right = ms
    product = torch.Tensor(left) @ torch.Tensor(right)
    return product.tolist()

def task(size, blocks, cores):
    """Run one distributed block matrix multiplication on Spark.

    Parameters
    ----------
    size : int
        Side length of the two square random matrices (from mm.rand).
    blocks : int
        Number of blocks the matrices are split into (via mm.split).
    cores : int
        Degree of parallelism; used as the RDD partition count.

    Returns
    -------
    int
        size * size * size — presumably the nominal operation count
        used by pb.benchmark for throughput reporting (TODO confirm).
    """
    m1 = mm.rand(size)
    m2 = mm.rand(size)
    mats = mm.split(m1, m2, blocks)
    # Convert tensors to plain nested lists so the pairs can be
    # pickled and shipped to the Spark executors.
    mats = [[m[0].tolist(), m[1].tolist()] for m in mats]
    # BUG FIX: the partition count was hard-coded to 4, so the `cores`
    # argument (which pb.benchmark sweeps over) had no effect. Use it
    # as the number of partitions.
    rdd = sc.parallelize(mats, cores)
    result = rdd.map(mm_).collect()
    result = [torch.Tensor(m) for m in result]
    mm.merge(result, blocks)
    return size * size * size

if __name__ == '__main__':
    # NOTE(review): to run with a larger `size`, increase the JVM heap
    # available to Spark (e.g. driver/executor memory settings).
    # FIX: body previously used tab indentation while the rest of the
    # file uses 4 spaces — normalized to spaces.
    # Sweep block counts at fixed size and core count.
    pb.benchmark(task, size=1024, blocks=[1, 2, 4], cores=4)
    # Sweep matrix sizes at fixed block and core counts.
    pb.benchmark(task, size=[256, 512, 1024], blocks=2, cores=4)
    # Sweep core counts at fixed size and block count.
    pb.benchmark(task, size=1024, blocks=4, cores=[1, 2, 3, 4, 5, 6, 7, 8])