from pyspark import SparkContext

# Local Spark context using 4 worker threads.
sc = SparkContext(master="local[4]", appName='JobAnalyzer')
# Raw string: the Windows path's backslashes (\p, \l, \j) would otherwise be
# treated as (invalid) escape sequences and warn on modern Python.
rdd = sc.textFile(r'file:///E:\python_pro\P1905\lizhi\practice\jobs.txt')

def str_to_json(line):
    """Parse one line of the jobs file as JSON.

    Returns the decoded object (normally a dict of job fields), or None when
    the line is not valid JSON, so one malformed record cannot abort the whole
    Spark job — downstream mapping/filtering already drops falsy records.
    """
    import json
    try:
        return json.loads(line)
    except json.JSONDecodeError:
        return None

def map_address_and_salary(job_info):
    """Map a job dict to (address, average monthly salary in units of 1000 CNY).

    Expects ``job_info`` to carry 'address' and a 'salary' string of the form
    "min-max万/月", "min-max千/月", or the /年 variants. Returns a falsy value
    (None or ()) when the salary does not match the pattern or required fields
    are missing, so the caller can filter such records out.
    """
    import re

    try:
        address = job_info['address']

        # Fixes over the earlier pattern: max_salary needs '+' so multi-char
        # numbers like "1.5" match; '[万|千]' / '[年|月]' wrongly included a
        # literal '|' inside the character class; raw string for the regex.
        m = re.match(r'(?P<min_salary>[\d.]+)-(?P<max_salary>[\d.]+)(?P<unit>[万千])/(?P<time>[年月])',
                     job_info['salary'])

        if not m:
            return None

        min_salary = float(m.group('min_salary'))
        max_salary = float(m.group('max_salary'))

        # Normalize everything to thousands (千) per month: 1万 == 10千.
        if m.group('unit') == '万':
            min_salary *= 10
            max_salary *= 10
        if m.group('time') == '年':
            min_salary /= 12
            max_salary /= 12

        avg_salary = (min_salary + max_salary) / 2
        return address, avg_salary
    except (KeyError, TypeError, ValueError):
        # Missing fields or malformed numeric values: treat as unparsable.
        return ()


def get_avg_and_len(obj):
    """Turn one grouped pair (key, salaries) into (key, (count, mean salary)).

    ``obj`` is an element of a grouped RDD: the key together with the
    collection of salary values gathered under it.
    """
    key, salaries = obj
    how_many = len(salaries)
    total = sum(salaries)
    return key, (how_many, total / how_many)


# Parse each line, project to (address, avg_salary), and drop records the
# parser could not handle (falsy results: None / empty tuple).
# bool replaces the redundant `lambda x: True if x else False`.
job_rdd = rdd.map(str_to_json).map(map_address_and_salary).filter(bool)

# Aggregate per address into (address, (job_count, average_salary)).
rdd3 = job_rdd.groupByKey().map(get_avg_and_len)

# Print the top-5 addresses ranked by number of job postings.
for i in rdd3.sortBy(lambda x: x[1][0], ascending=False).take(5):
    print(i)