# encoding=utf-8

from pyspark import SparkContext
import os

from pyspark import storagelevel

# Force both the workers and the driver to run under the python3 interpreter.
os.environ['PYSPARK_PYTHON'] = "python3"
os.environ['PYSPARK_DRIVER_PYTHON'] = "python3"

sc = SparkContext(appName="JobAnalyzer")
# One JSON document per line.  local-filesystem URI — every executor must be
# able to read this path (fine for local mode; verify for a real cluster).
rdd = sc.textFile("file:///home/bigdata/Workspace/P1905/spark_example/jobs.txt")

# Suppress INFO/WARN noise from Spark's logger.
sc.setLogLevel("ERROR")

def str_to_json(line):
    """Deserialize one JSON-encoded text line into a Python object.

    Imported locally so the function is self-contained when shipped
    to Spark executors.
    """
    import json
    return json.loads(line)


def map_address_and_salary(job_info):
    """Project a job dict to (address, average monthly salary in 千/month).

    Expects ``job_info`` to carry an ``'address'`` key and a ``'salary'``
    string such as ``"1-1.5万/月"`` or ``"10-20万/年"``.  The midpoint of the
    min/max bounds is normalized to thousands of CNY per month.

    Returns ``None`` (falsy) when the salary string does not match the
    expected pattern or the record is malformed, so callers can drop bad
    records with a simple truthiness filter.
    """
    import re

    try:
        address = job_info['address']
        # Raw string: the original non-raw literal made `\d` an invalid
        # string escape (SyntaxWarning on modern Python).
        m = re.match(r'(?P<min_salary>[\d\.]+)-(?P<max_salary>[0-9\.]+)(?P<unit>(万|千))/(?P<time>(月|年))',
                     job_info['salary'])
        if not m:
            return None

        min_salary = float(m.group("min_salary"))
        max_salary = float(m.group("max_salary"))

        # 1 万 (10,000) == 10 千 (1,000): normalize everything to 千.
        if m.group("unit") == "万":
            min_salary *= 10
            max_salary *= 10

        # Yearly figures become monthly.
        if m.group("time") == "年":
            min_salary /= 12
            max_salary /= 12

        avg_salary = (min_salary + max_salary) / 2
        return address, avg_salary
    except (KeyError, TypeError, ValueError):
        # Malformed record (missing key, non-string salary, unparsable
        # number): drop it rather than fail the whole job.  The original
        # bare `except:` also swallowed SystemExit/KeyboardInterrupt.
        return None


job_rdd = rdd.map(str_to_json).map(map_address_and_salary).filter(lambda x: True if x else False)


def get_avg_and_len(obj):
    """Reduce a (key, salaries) pair to (key, (count, mean salary)).

    ``obj`` is a grouped pair whose second element is a sized iterable of
    numeric salaries (e.g. the ResultIterable produced by groupByKey).
    """
    key, salaries = obj
    n = len(salaries)
    mean_salary = sum(salaries) / n
    return (key, (n, mean_salary))


# (address, (job_count, avg_salary)) per address.
rdd3 = job_rdd.groupByKey().map(get_avg_and_len)

# Cache the RDD.
# NOTE(review): persist() immediately followed by unpersist() means the
# cache is never actually in effect when rdd3 is computed below — this
# looks like leftover demo code; confirm whether caching was intended.
rdd3.persist()

# Cancel the caching.
rdd3.unpersist()
# rdd3.cache()

# rdd3.sortByKey().foreach(lambda x:print(x))

# Top 3 addresses by job count (x[1][0] is the per-address count);
# repartition(1) forces a single partition so the global sort is trivial.
p = rdd3.repartition(1).sortBy(lambda x: x[1][0], ascending=False).take(3)
print(p)
