package com.rainsoft.center.isec.stream.library.service;

import com.rainsoft.center.isec.stream.library.dao.LibDao;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;

/**
 * @Name com.rainsoft.center.isec.stream.library.service.Stat
 * @Description Aggregates login records with Spark SQL (per machine / service / area,
 *              with and without id-type detail) and upserts the daily counts into
 *              Oracle STAT_REAL_* tables via MERGE statements.
 * @Author Elwyn
 * @Version 2017/12/1
 * @Copyright 上海云辰信息科技有限公司
 **/
public class Stat implements java.io.Serializable {

	private final LibDao libDao = new LibDao();

	/**
	 * Aggregates login counts per (service, machine, date, area) from {@code joinTable}
	 * and upserts them into STAT_REAL_MACHINE_DAY; also aggregates the same counts
	 * broken down by idtype into STAT_REAL_MACHINE_DAY_DETAIL.
	 *
	 * @param sparkSession active session whose catalog contains {@code joinTable}
	 * @param joinTable    name of a registered temp view/table with columns
	 *                     serviceId, machineId, loginTime, areaCode, idtype
	 */
	public void saveRealCountByMachine(SparkSession sparkSession, String joinTable) {
		// Aggregate for STAT_REAL_MACHINE_DAY.
		// NOTE: the SELECT column order must match the '?' placeholder order in the
		// MERGE below, because saveToOracle binds Row values positionally.
		//language=sql
		Dataset<Row> countByMachineId = sparkSession.sql("SELECT count(1) count," +
				"serviceId service_id,machineId machine_id,to_date(loginTime) " +
				"on_date,areaCode area_code FROM " + joinTable +
				" GROUP BY serviceId,machineId,to_date(loginTime),areaCode");
		//language=sql
		String groupByMachineIdSql = "MERGE INTO STAT_REAL_MACHINE_DAY a USING " +
				"(SELECT ? AS count ,? AS service_id,? AS machine_id,? AS on_date,? AS area_code FROM dual) b" +
				" ON (a.ON_DATE=b.on_date AND a.MACHINE_ID=b.machine_id " +
				" AND a.SERVICE_ID=b.service_id)" +
				" WHEN MATCHED " +
				" THEN UPDATE SET a.count=a.count+b.count" +
				" WHEN NOT MATCHED" +
				" THEN INSERT (ID,ON_DATE,count,AREA_CODE,SERVICE_ID,MACHINE_ID) VALUES " +
				"(SEQ_STAT_REAL_MACHINE_DAY.nextval,b.on_date,b.count,b.area_code,b.service_id,b.machine_id)";
		List<Tuple2<String, Dataset<Row>>> tuple2List = new ArrayList<>();
		tuple2List.add(new Tuple2<>(groupByMachineIdSql, countByMachineId));

		// Aggregate for STAT_REAL_MACHINE_DAY_DETAIL (adds idtype to the grouping key).
		//language=sql
		Dataset<Row> countByMachineIdDetail = sparkSession.sql("SELECT count(1) count," +
				"serviceId service_id,machineId machine_id,to_date(loginTime) " +
				"on_date,areaCode area_code,idtype FROM " + joinTable +
				" GROUP BY serviceId,machineId,to_date(loginTime),areaCode,idtype");
		// TODO(review): the INSERT branch uses SEQ_STAT_REAL_MACHINE_DAY for the
		// *_DETAIL table — confirm whether a dedicated sequence was intended.
		//language=sql
		String groupByMachineIdDetailSql = "MERGE INTO STAT_REAL_MACHINE_DAY_DETAIL a USING " +
				"(SELECT ? AS count ,? AS service_id,? AS machine_id,? AS on_date,? AS area_code,? AS type FROM dual) b" +
				" ON (a.ON_DATE=b.on_date AND a.MACHINE_ID=b.machine_id " +
				" AND a.SERVICE_ID=b.service_id AND a.TYPE=b.type)" +
				// Fix: a space is required before WHEN MATCHED, otherwise the
				// concatenated SQL reads "...b.type)WHEN MATCHED" and fails to parse.
				" WHEN MATCHED " +
				" THEN UPDATE SET a.count=a.count+b.count" +
				" WHEN NOT MATCHED" +
				" THEN INSERT (ID,ON_DATE,count,AREA_CODE,SERVICE_ID,MACHINE_ID,TYPE) VALUES " +
				"(SEQ_STAT_REAL_MACHINE_DAY.nextval,b.on_date,b.count,b.area_code,b.service_id,b.machine_id,b.type)";
		tuple2List.add(new Tuple2<>(groupByMachineIdDetailSql, countByMachineIdDetail));

		saveToOracle(tuple2List);
	}

	/**
	 * Aggregates login counts per (service, date, area) into STAT_REAL_SERVICE_DAY
	 * and, with an idtype breakdown, into STAT_REAL_SERVICE_DAY_DETAIL.
	 *
	 * @param sparkSession active session whose catalog contains {@code joinTable}
	 * @param joinTable    name of a registered temp view/table with columns
	 *                     serviceId, loginTime, areaCode, idtype
	 */
	public void saveRealCountByService(SparkSession sparkSession, String joinTable) {
		//language=sql
		Dataset<Row> countByService = sparkSession.sql("SELECT count(1) count," +
				"serviceId service_id,to_date(loginTime) " +
				"on_date,areaCode area_code FROM " + joinTable +
				" GROUP BY serviceId,to_date(loginTime),areaCode");
		// TODO(review): ID is drawn from SEQ_STAT_REAL_MACHINE_DAY although the target
		// table is STAT_REAL_SERVICE_DAY — looks like copy-paste; confirm the intended
		// sequence before changing (the named sequence may not exist in the schema).
		//language=sql
		String groupByServiceSql = "MERGE INTO STAT_REAL_SERVICE_DAY a USING " +
				"(SELECT ? AS count ,? AS service_id,? AS on_date,? AS area_code FROM dual) b" +
				" ON (a.ON_DATE=b.on_date " +
				" AND a.SERVICE_ID=b.service_id)" +
				" WHEN MATCHED " +
				" THEN UPDATE SET a.count=a.count+b.count" +
				" WHEN NOT MATCHED" +
				" THEN INSERT (ID,ON_DATE,count,AREA_CODE,SERVICE_ID) VALUES " +
				"(SEQ_STAT_REAL_MACHINE_DAY.nextval,b.on_date,b.count,b.area_code,b.service_id)";
		List<Tuple2<String, Dataset<Row>>> tuple2List = new ArrayList<>();
		tuple2List.add(new Tuple2<>(groupByServiceSql, countByService));

		//language=sql
		Dataset<Row> countByServiceDetail = sparkSession.sql("SELECT count(1) count," +
				"serviceId service_id,to_date(loginTime) on_date,areaCode area_code,idtype FROM " + joinTable +
				" GROUP BY serviceId,to_date(loginTime),areaCode,idtype");
		//language=sql
		String groupByServiceDetailSql = "MERGE INTO STAT_REAL_SERVICE_DAY_DETAIL a USING " +
				"(SELECT ? AS count ,? AS service_id,? AS on_date,? AS area_code,? AS type FROM dual) b" +
				" ON (a.ON_DATE=b.on_date  " +
				" AND a.SERVICE_ID=b.service_id AND a.TYPE=b.type)" +
				// Fix: leading space before WHEN MATCHED (was "...b.type)WHEN MATCHED").
				" WHEN MATCHED " +
				" THEN UPDATE SET a.count=a.count+b.count" +
				" WHEN NOT MATCHED" +
				" THEN INSERT (ID,ON_DATE,count,AREA_CODE,SERVICE_ID,TYPE) VALUES " +
				"(SEQ_STAT_REAL_MACHINE_DAY.nextval,b.on_date,b.count,b.area_code,b.service_id,b.type)";
		tuple2List.add(new Tuple2<>(groupByServiceDetailSql, countByServiceDetail));

		saveToOracle(tuple2List);
	}

	/**
	 * Aggregates login counts per (date, area) into STAT_REAL_AREA_DAY and, with an
	 * idtype breakdown, into STAT_REAL_AREA_DAY_DETAIL.
	 *
	 * @param sparkSession active session whose catalog contains {@code joinTable}
	 * @param joinTable    name of a registered temp view/table with columns
	 *                     loginTime, areaCode, idtype
	 */
	public void saveRealCountByArea(SparkSession sparkSession, String joinTable) {
		//language=sql
		Dataset<Row> countByArea = sparkSession.sql("SELECT count(1) count," +
				"to_date(loginTime) on_date,areaCode area_code FROM " + joinTable +
				" GROUP BY to_date(loginTime),areaCode");
		// TODO(review): ID comes from SEQ_STAT_REAL_MACHINE_DAY although the target is
		// STAT_REAL_AREA_DAY — confirm the intended sequence.
		//language=sql
		String groupByAreaSql = "MERGE INTO STAT_REAL_AREA_DAY a USING " +
				"(SELECT ? AS count ,? AS on_date,? AS area_code FROM dual) b" +
				" ON (a.ON_DATE=b.on_date " +
				" AND a.AREA_CODE=b.area_code)" +
				" WHEN MATCHED " +
				" THEN UPDATE SET a.count=a.count+b.count" +
				" WHEN NOT MATCHED" +
				" THEN INSERT (ID,ON_DATE,count,AREA_CODE) VALUES " +
				"(SEQ_STAT_REAL_MACHINE_DAY.nextval,b.on_date,b.count,b.area_code)";
		List<Tuple2<String, Dataset<Row>>> tuple2List = new ArrayList<>();
		tuple2List.add(new Tuple2<>(groupByAreaSql, countByArea));

		//language=sql
		Dataset<Row> countByAreaDetail = sparkSession.sql("SELECT count(1) count," +
				"to_date(loginTime) on_date,areaCode area_code,idtype FROM " + joinTable +
				" GROUP BY to_date(loginTime),areaCode,idtype");
		//language=sql
		String groupByAreaDetailSql = "MERGE INTO STAT_REAL_AREA_DAY_DETAIL a USING " +
				"(SELECT ? AS count ,? AS on_date,? AS area_code,? AS type FROM dual) b" +
				" ON (a.ON_DATE=b.on_date  " +
				// Fix: match on AREA_CODE as well — it is part of the grouping key, and
				// omitting it merged counts from different areas into one detail row
				// (the non-detail area MERGE above matches on AREA_CODE).
				" AND a.AREA_CODE=b.area_code AND a.TYPE=b.type)" +
				// Fix: leading space before WHEN MATCHED (was "...b.type)WHEN MATCHED").
				" WHEN MATCHED " +
				" THEN UPDATE SET a.count=a.count+b.count" +
				" WHEN NOT MATCHED" +
				" THEN INSERT (ID,ON_DATE,count,AREA_CODE,TYPE) VALUES " +
				"(SEQ_STAT_REAL_MACHINE_DAY.nextval,b.on_date,b.count,b.area_code,b.type)";
		tuple2List.add(new Tuple2<>(groupByAreaDetailSql, countByAreaDetail));

		saveToOracle(tuple2List);
	}


	/**
	 * Executes each (SQL, dataset) pair as a batched JDBC update: every Row is
	 * flattened positionally into an Object[] of bind values, so the Row's column
	 * order must match the SQL's '?' placeholder order.
	 *
	 * <p>Note: {@code collect()} pulls each aggregated dataset to the driver; the
	 * datasets here are daily aggregates and expected to be small.
	 *
	 * @param tuple2List pairs of (parameterized MERGE SQL, dataset of bind rows)
	 */
	public void saveToOracle(List<Tuple2<String, Dataset<Row>>> tuple2List) {
		for (Tuple2<String, Dataset<Row>> pair : tuple2List) {
			List<Object[]> jdbcArgs = pair._2.javaRDD().map((Function<Row, Object[]>) row -> {
				Object[] values = new Object[row.size()];
				for (int i = 0; i < values.length; i++) {
					values[i] = row.get(i);
				}
				return values;
			}).collect();

			libDao.batchUpdate(pair._1, jdbcArgs);
		}
	}
}
