package com.fudan;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.UUID;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.MapType;
import org.apache.spark.sql.types.StringType;
import org.apache.spark.sql.types.StructField;

import com.fudan.cfg.RunConfiguration;
import com.fudan.run.JobRunner;

import scala.Function1;
import scala.collection.immutable.HashMap;
import scala.collection.immutable.List;
import scala.collection.mutable.StringBuilder;

/**
 * Entry point for launching a Spark job pipeline driven by a JSON run configuration.
 *
 * <p>The pipeline is always the same four stages: build the run environment, build the
 * datasets, apply the transformations, then execute the actions (see {@link JobRunner}).
 * The configuration either comes from a JSON file passed as {@code args[0]} or from the
 * embedded demo configuration below.
 */
public class RunApplication {

	/**
	 * Embedded demo configuration used when no config file path is supplied on the
	 * command line: reads one local CSV dataset and runs a foreach action over it.
	 */
	private static final String DEFAULT_CONFIG_JSON = "{\n" + 
			"	\"properties\": {\n" + 
			"		\"spark\": {\n" + 
			"			\"mode\": \"local\",\n" + 
			"			\"spark.es.nodes\": \"\"\n" + 
			"		}\n" + 
			"	},\n" + 
			"	\"dataset\": [\n" + 
			"		{\n" + 
			"			\"name\": \"a\",\n" + 
			"			\"type\": \"csv\",\n" + 
			"			\"mode\": \"SERVER\",\n" + 
			"			\"path\": \"C:\\\\Users\\\\49271\\\\Desktop\\\\简历\\\\csv_test.csv\"\n" + 
			"		}\n" + 
			"	],\n" + 
			"	\"trans\": [],\n" + 
			"	\"action\": [\n" + 
			"		{\n" + 
			"			\"target\": \"a\",\n" + 
			"			\"type\": \"foreach\"\n" + 
			"		}\n" + 
			"	]\n" + 
			"}";

	/**
	 * Reads the entire text file at {@code path} into a single string.
	 *
	 * <p>Line terminators are NOT preserved: lines are concatenated back to back,
	 * matching the historical behavior of this helper (fine for JSON input).
	 *
	 * @param path local filesystem path of the file to read
	 * @return the file contents with line separators removed
	 * @throws IOException if the file cannot be opened or read
	 */
	public static String localJson(String path) throws IOException {
		StringBuilder json = new StringBuilder();
		// try-with-resources closes the reader even when readLine throws; the previous
		// version leaked the stream on exception. UTF-8 is pinned explicitly so the
		// result no longer depends on the platform default charset.
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(new FileInputStream(new File(path)), StandardCharsets.UTF_8))) {
			String line;
			while ((line = reader.readLine()) != null) {
				json.append(line);
			}
		}
		return json.toString();
	}

	/**
	 * Points {@code hadoop.home.dir} at the bundled {@code hadoop-common} resource
	 * (needed by Hadoop's native-library lookup on Windows) and parses the given
	 * JSON document into a {@link RunConfiguration}.
	 *
	 * @param json run configuration as a JSON string
	 * @return the parsed configuration
	 */
	public static RunConfiguration buildRunConfiguration(String json) {
		configureHadoopHome();
		return RunConfiguration.buildConfigurationFromJson(json);
	}

	/**
	 * Runs the full job pipeline for the given configuration, assigning a random
	 * run id first when the caller did not supply one.
	 *
	 * @param runConfiguration the job configuration to execute
	 */
	public static void start(RunConfiguration runConfiguration) {
		if (runConfiguration.getRunId() == null) {
			// Plain character replacement ("-" is not a regex here), yielding a
			// 32-char hex id that is filesystem- and URL-friendly.
			runConfiguration.setRunId(UUID.randomUUID().toString().replace("-", ""));
		}
		runPipeline(runConfiguration);
	}

	/** Executes the four pipeline stages in order; shared by {@link #start} and {@link #main}. */
	private static void runPipeline(RunConfiguration runConfiguration) {
		new JobRunner()
			.buildRunEnv(runConfiguration)
			.buildDataset(runConfiguration)
			.doTrans(runConfiguration)
			.doAction(runConfiguration);
	}

	/** Sets {@code hadoop.home.dir} from the bundled resource, if it is present on the classpath. */
	private static void configureHadoopHome() {
		URL hadoop = RunApplication.class.getClassLoader().getResource("hadoop-common");
		// Guard against a missing resource: dereferencing a null URL would fail with
		// an uninformative NullPointerException before the job even started.
		if (hadoop != null) {
			System.setProperty("hadoop.home.dir", hadoop.getPath());
		}
	}

	/**
	 * CLI entry point.
	 *
	 * <p>Usage: {@code RunApplication [configPath]}. With an argument, the JSON run
	 * configuration is loaded from that file; without one, the embedded demo
	 * configuration is used and {@code hadoop.home.dir} is set for local runs.
	 *
	 * @param args optional single element: path to a JSON config file
	 * @throws Exception if the config file cannot be read or the job fails
	 */
	public static void main(String[] args) throws Exception {
		if (args.length == 0) {
			// Only needed for the local/demo run; a real deployment is expected to
			// have its Hadoop environment configured externally.
			configureHadoopHome();
		}
		// BUG FIX: the previous version computed the config path from args[0] but then
		// ignored it and always ran the hard-coded demo JSON. An explicit path now wins;
		// the embedded demo config remains the no-argument default.
		String json = (args.length > 0) ? localJson(args[0]) : DEFAULT_CONFIG_JSON;
		RunConfiguration runConfig = RunConfiguration.buildConfigurationFromJson(json);
		runPipeline(runConfig);
		System.out.println("ok");
	}
}
