from pyspark import SparkConf,SparkContext

# Driver setup: run locally (single worker thread) under the app name "stu_1".
conf = SparkConf().setMaster("local").setAppName("stu_1")
# Shared SparkContext used by the whole job below.
sc = SparkContext(conf = conf)

def average(x):
    """Return the arithmetic mean of the numbers in *x*.

    Uses true division, so the result is a float even for int inputs.
    Raises ZeroDivisionError if *x* is empty (never happens after
    groupByKey, which only emits keys that have at least one value).
    """
    return sum(x) / len(x)

def seg(x):
    """Map a numeric score *x* to its grade-band label.

    Bands: "90-100", "80-89", "70-79", "60-69", "<60".

    Bug fixed: the original used closed ranges (e.g. 80<=x<=89) with a
    single trailing else, so fractional averages in the gaps — 89.5,
    79.2, 69.9 — fell through every test and were mislabeled "<60".
    Chaining on lower bounds alone covers the whole number line.
    """
    if x >= 90:
        return "90-100"
    elif x >= 80:
        return "80-89"
    elif x >= 70:
        return "70-79"
    elif x >= 60:
        return "60-69"
    else:
        return "<60"

def _to_pair(line):
    """Parse one CSV record into a ((field0 + " " + field1), score) pair.

    Splits the line once instead of three times as the original lambda
    did. fields[0]/fields[1] are presumably student name and id, and
    fields[4] the score column — TODO confirm against the input file.
    """
    fields = line.split(",")
    return (fields[0] + " " + fields[1], int(fields[4]))


# Job: average each student's compulsory-course scores, then count
# how many students fall into each grade band.
textData = sc.textFile("/student_score.txt")
# Keep only compulsory courses (records containing "必修").
filterData = textData.filter(lambda line: "必修" in line)
mapData = filterData.map(_to_pair)
# Collect all scores per student, then average them.
# NOTE(review): groupByKey materializes every score list; aggregateByKey
# with (sum, count) would shuffle less — left as-is to preserve lineage.
groupData = mapData.groupByKey()
remapData = groupData.mapValues(list)
calData = remapData.mapValues(average)
calData.saveAsTextFile("/stu_1")
# Band each average and count students per band.
segData = calData.map(lambda line:(seg(line[1]), 1))
segData = segData.reduceByKey(lambda x,y:x+y)
segData.saveAsTextFile("/stu_2")
