package edu.zju.gis.dbfg.model.loader;

import edu.zju.gis.dbfg.common.exception.ModelFailedException;
import edu.zju.gis.dbfg.common.model.LCRA;
import edu.zju.gis.dbfg.common.util.FileUtil;
import lombok.Getter;
import lombok.Setter;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.locationtech.jts.geom.*;
import org.locationtech.jts.io.WKTReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.nio.file.Paths;
import java.util.*;
import java.util.stream.Collectors;

/**
 * @author Hu
 * @date 2019/9/28
 * Utility class that loads LCRA data into Elasticsearch.
 * TODO after loading completes, write the corresponding metadata record into the database
 * TODO currently only local files or directories are supported, and only shapefile-derived sources
 **/
@Getter
@Setter
public class LCRADataLoader extends ESDataLoader2<LCRA> {

  private final static Logger logger = LoggerFactory.getLogger(LCRADataLoader.class);
  private LCRALoaderArgs mArgs;

  @Override
  protected void init(String[] args) {
    logger.info("Init Arguments");
    this.mArgs = LCRALoaderArgs.initArgs(args);
    if (mArgs == null) {
      String msg = "init args failed, exit";
      logger.error(msg);
      throw new ModelFailedException(LCRALoaderArgs.class, "main()", msg, args);
    }
  }

  @Override
  protected JavaRDD<LCRA> readToRDD(JavaSparkContext jsc) {
    String in = mArgs.getInput();
    return jsc.textFile(in).map(new Function<String, LCRA>() {
      @Override
      public LCRA call(String s) throws Exception {
        LCRA lcra = new LCRA();
        String[] ds = s.split("\t");
        Field[] fields = LCRA.class.getDeclaredFields();
        // F_LABEL 字段的位置是：62
        int ccpIndex = 61;
        for (int i=0; i<fields.length; i++) {
          Field f = fields[i];
          String name = f.getName();
          if (name.equals("id")) {
            continue;
          }
          char c = name.charAt(0);
          String setMethodName = "set" + String.valueOf(c).toUpperCase() + name.substring(1);
          Method m = LCRA.class.getDeclaredMethod(setMethodName, f.getType());
          String v;
          if (i>ccpIndex && ds.length >= fields.length) {
            v = ds[i];
          } else {
            v = ds[i-1];
          }
          v = v.trim();
          if (f.getType().getTypeName().equals(String.class.getTypeName())) {
            m.invoke(lcra, v);
          } else if (f.getType().getTypeName().equals(Long.class.getTypeName())){
            if (v.length() > 0) {
              m.invoke(lcra, Long.valueOf(v));
            }
          } else if (f.getType().getTypeName().equals(Integer.class.getTypeName())) {
            if (v.length() > 0) {
              m.invoke(lcra, Integer.valueOf(v));
            }
          } else if (f.getType().getTypeName().equals(Double.class.getTypeName())) {
            if (v.length() > 0) {
              m.invoke(lcra, Double.valueOf(v));
            }
          } else if (f.getType().getTypeName().equals(Geometry.class.getTypeName())) {
            WKTReader wktReader = new WKTReader();
            Geometry g = wktReader.read(v);
            m.invoke(lcra, g);
          } else {
            logger.error("Unsupport field type: " + f.getType().getTypeName());
          }
        }
        StringBuilder id = new StringBuilder();
        String pac = lcra.getPac().substring(0, 6);
        id.append(pac);
        id.append("_" );
        id.append(lcra.getObjID());
        lcra.setId(id.toString());
        return lcra;
      }
    });
  }


  @Override
  public boolean run(JavaSparkContext jsc) throws Exception {

    this.createIndex(mArgs.getId());

    logger.info("Loader start");

    // Read data from shapefile dirs
    logger.info("Read data to Feature RDD");
    JavaRDD<LCRA> featuresRDD = this.readToRDD(jsc);
    // Write data to elasticsearch cluster
    logger.info(String.format("IndexId = %s", mArgs.getId()));
    this.ingest(featuresRDD, mArgs.getId());

    this.finish();
    return true;
  }

  public static void main(String[] args) throws Exception {

    LCRALoaderArgs mArgs = LCRALoaderArgs.initArgs(args);

    SparkConf conf = new SparkConf();
    conf.setAppName(mArgs.getName());
    conf.set("spark.driver.maxResultSize", "10g");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    LCRADataLoader loader = new LCRADataLoader();
    loader.init(args);
    loader.createIndex(mArgs.getId());
    JavaRDD<LCRA> featuresRDD = loader.readToRDD(jsc);

    SparkConf scc = jsc.getConf();
    int numExecutors = (jsc.sc().getExecutorIds().size() == 0 ? 1 : jsc.sc().getExecutorIds().size());
    int executorCores = scc.getInt("spark.executor.cores", 2);

    featuresRDD.coalesce(numExecutors * executorCores, false);

    IngestResult r = loader.ingest(featuresRDD, mArgs.getId());
    List<String> errors = r.getErrors();
    List<String> aborts = r.getAborts();
    List<String> errorObjs = r.getErrorObj().stream().map(LCRA::toString).collect(Collectors.toList());

    // 将 errors, aborts 写出到文件
    try {
      File fd = new File(mArgs.getErrorOut());
      if (fd.isFile()) {
        String dir = fd.getParent();
        fd = new File(dir);
        logger.warn("errorsOut needs to be set to a directory, default set to " + dir);
      }
      File f = new File(fd.getAbsolutePath() + File.separator + "errors.txt");
      File fo = new File(fd.getAbsolutePath() + File.separator + "error_objs.txt");
      File fa = new File(mArgs.getErrorOut() + File.separator + "aborts.txt");

      if (f.exists()) {
        f.delete();
      }
      if (fo.exists()) {
        fo.delete();
      }
      if (fa.exists()) {
        fa.delete();
      }

      FileUtil.write(Paths.get(f.getAbsolutePath()),errors);
      FileUtil.write(Paths.get(fa.getAbsolutePath()),aborts);
      FileUtil.write(Paths.get(fo.getAbsolutePath()), errorObjs);
    } catch (Exception e) {
      e.printStackTrace();
    }


    loader.finish();
    jsc.stop();
  }

}
