from pyspark.sql import SparkSession
# from pyspark import SparkConf, SparkContext

import json
import os
# import subprocess

# Build a local-mode Spark session with Hive support for this demo job.
spark = (
    SparkSession.builder
    .appName("demo")
    .master("local")
    .enableHiveSupport()
    .getOrCreate()
)

# Sanity-check Hive connectivity by counting rows in the target table.
spark.sql(
    "select count(1) from turing.algo_dc_ml_fill_null_102_1663066966911_0"
).show()

# Locate the task-parameter JSON via the TASK_INFO_PATH environment variable.
param_path = os.environ.get("TASK_INFO_PATH")
if param_path is None:
    # Fail fast with a clear message instead of the opaque TypeError that
    # open(None) would raise when the variable is missing.
    raise RuntimeError("TASK_INFO_PATH environment variable is not set")

model_category = ""  # reserved for later use; not read in this script
model_info = {}      # reserved for later use; not read in this script

# Read and parse the task parameters.
with open(param_path, "r") as f:
    param_json = f.read()
    param = json.loads(param_json)

# Destination path for the job's output; printed for run-log visibility.
outputFilePath = param.get("outputFilePath")
print(outputFilePath)

# Local demo JSON file, addressed as a file:// URI so Spark readers can load it.
json_path = "file:///home/admin/interpret/demo.json"

# NOTE(review): alternative ways of publishing the demo file to
# outputFilePath, kept for reference from earlier experiments:
#
# textFile = sc.textFile(json_path)
# textFile.saveAsTextFile(outputFilePath)
# subprocess.call('hadoop fs -put /hadoop/TestSpark/output',shell=True)
# spark_submit_str = "hadoop fs -put "+json_path+" "+outputFilePath
# process = subprocess.Popen(spark_submit_str, stdout=subprocess.PIPE ,stderr=subprocess.PIPE, universal_newlines=True, shell=True)
# process.communicate()
#
# df = spark.read.format('json').load(json_path)
# df.write.format('json').mode('overwrite').save(outputFilePath)
# df.coalesce(1).write.format('json').save(outputFilePath)