# Submit this demo to run on a YARN cluster
#coding:utf8

from pyspark import SparkContext, SparkConf
import json
import os
from pyspark.sql.functions import json_tuple, json_array_length
from defs_19 import city_with_catagory

# 1. Export HADOOP_CONF_DIR so that a client-side submit (e.g. straight from
#    PyCharm) knows where to read the YARN configuration from.
os.environ['HADOOP_CONF_DIR'] = '/opt/module/hadoop-3.2.4/etc/hadoop'
if __name__ == '__main__':

    # 2. Set the master to "yarn" instead of local mode.
    conf = SparkConf().setAppName("demo_02_yarn").setMaster("yarn")
    # When the job depends on extra local files, they must be shipped to the
    # cluster. The value may be a single .py file or a .zip bundling several
    # .py files.
    conf.set("spark.submit.pyFiles", 'defs_19.py')
    sc = SparkContext(conf=conf)

    try:
        # Input must live on HDFS so every executor can read it.
        text_rdd = sc.textFile("hdfs://node1:8020/input/order.txt")

        # Each input line contains several JSON objects separated by '|'.
        json_rdd = text_rdd.flatMap(lambda line: line.split("|"))

        # Parse each JSON string into a Python dict.
        dict_rdd = json_rdd.map(lambda s: json.loads(s))

        # Keep only the records whose areaName is Beijing.
        beijing_rdd = dict_rdd.filter(lambda d: d['areaName'] == '北京')

        print(beijing_rdd.map(city_with_catagory).distinct().collect())
    finally:
        # Always release the cluster resources, even if the job fails;
        # the original script leaked the SparkContext.
        sc.stop()