package com.kingsoft.dc.khaos.module.spark.source

import com.alibaba.fastjson.JSON
import com.alibaba.fastjson.serializer.SerializerFeature
import com.kingsoft.dc.khaos.KhaosContext
import com.kingsoft.dc.khaos.metadata.{Dependency, KhaosStructField}
import com.kingsoft.dc.khaos.module.spark.constants.{SchedulerConstants, TechCheckConst}
import com.kingsoft.dc.khaos.module.spark.model.TechCheck.{KsyunErrorCode, KsyunTechCheckException, NewTechCheckInfo}
import com.kingsoft.dc.khaos.module.spark.util.{HdfsUtils, TechCheckUtils}
import com.kingsoft.dc.khaos.util.Logging
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row}
import org.json4s.DefaultFormats

/**
  * Created by 陈子初 on 2019/6/10.
  */
/**
  * Created by 陈子初 on 2019/6/10.
  *
  * Source strategy that reads a technical-check data file from HDFS together with its
  * DDL and XML control files. The control metadata is validated against the actual
  * file (file name, record count, byte size); mismatches are logged as warnings and
  * appended to the check info, which is then stashed in the KhaosContext conf. The
  * raw lines are returned as a single string column named "row".
  */
class HdfsCheckSource extends SourceStrategy with Logging {

  /**
    * 数据抽取 — load the dat file as an RDD of raw lines, cross-check it against the
    * DDL/XML control files, clear previous output files, and return a one-column
    * ("row": String) DataFrame.
    *
    * @param kc         context carrying the SparkSession and scheduler configuration
    * @param module_id  id of the module being executed (unused here)
    * @param config     module config string (unused here; config is read via kc)
    * @param dependence upstream dependency descriptor (unused here)
    * @throws KsyunTechCheckException if the DDL/XML control files cannot be parsed
    */
  override def source(kc: KhaosContext,
                      module_id: String,
                      config: String,
                      dependence: Dependency): DataFrame = {

    implicit val formats: DefaultFormats.type = DefaultFormats
    val hdfsCheckConfig = TechCheckUtils.getHdfsCheckConfigByKC(kc)

    // Hoist the two conf lookups used by every path computation below.
    val namespace = kc.conf.getString(SchedulerConstants.CLUSTER_NAMESPACE)
    val proxyUser = kc.conf.getString(SchedulerConstants.PROXY_USER)

    // 加载DDL 1.根据hdfsConf.path — resolve the data / DDL / XML control file locations.
    val datPath = TechCheckUtils.getTechCheckFilePath(namespace, proxyUser, hdfsCheckConfig, TechCheckConst.FileType.DATA)
    val ddlPath = TechCheckUtils.getTechCheckFilePath(namespace, proxyUser, hdfsCheckConfig, TechCheckConst.FileType.DDL)
    val xmlPath = TechCheckUtils.getTechCheckFilePath(namespace, proxyUser, hdfsCheckConfig, TechCheckConst.FileType.XML)
    logInfo(s"dat路径是：$datPath")
    logInfo(s"ddl路径是：$ddlPath")
    logInfo(s"xml路径是：$xmlPath")

    val fs = FileSystem.get(kc.sparkSession.sparkContext.hadoopConfiguration)

    // Parse the control metadata; both streams are closed whether or not parsing fails.
    val inputStream = HdfsUtils.getFileInputStream(ddlPath, fs)
    val xmlInputStream = HdfsUtils.getFileInputStream(xmlPath, fs)
    val techCheckInfo: NewTechCheckInfo =
      try {
        TechCheckUtils.loadXML(TechCheckUtils.loadDDL(inputStream), xmlInputStream)
      } catch {
        case e: Exception => throw new KsyunTechCheckException("解析ddl，xml文件报错", e)
      } finally {
        inputStream.close()
        xmlInputStream.close()
      }

    // (检查文件名格式) warn if the data file name recorded in the XML differs from the
    // actual file name. todo czc 文件名需剪切 (file name may need trimming first).
    if (!techCheckInfo.getFilename.equals(hdfsCheckConfig.input_dat_name)) {
      val log = KsyunErrorCode.XML_FILE_NAME_CHECK.toString + "! XML filename :" + techCheckInfo.getFilename + ",real filename:" + hdfsCheckConfig.input_dat_name
      logWarning(log)
      techCheckInfo.addLog(log)
    }

    // 加载rdd — read the data file, decoding per the configured source encoding.
    val sc = kc.sparkSession.sparkContext
    // BUG FIX: the original compared with String.eq (reference equality), so the GBK
    // branch was effectively unreachable for a config value parsed at runtime; use
    // value equality instead.
    val rowRDD: RDD[Row] =
      if (hdfsCheckConfig.source_encoding == "GBK")
        sc.hadoopFile(datPath, classOf[TextInputFormat], classOf[LongWritable], classOf[Text], 1)
          .map(p => Row(new String(p._2.getBytes, 0, p._2.getLength, "GBK")))
      else
        sc.textFile(datPath).map(x => Row(x))

    // 实际记录数与控制文件中的的记录数不一致，记录警告日志。
    // Count exactly once: each count() runs a full Spark job over the file.
    val realRecordNum = rowRDD.count()
    if (techCheckInfo.getRecordnum != realRecordNum) {
      val log = KsyunErrorCode.XML_RECORD_COUNT.toString + "! XML record num :" + techCheckInfo.getRecordnum + ",file record num:" + realRecordNum
      logWarning(log)
      techCheckInfo.addLog(log)
    }

    // 实际文件字节数与控制文件中的字节数不一致，记录警告日志。
    val realFilesize = fs.getContentSummary(new Path(datPath)).getLength
    if (techCheckInfo.getFilesize != realFilesize) {
      val log = KsyunErrorCode.XML_FILE_SIZE_CHECK.toString + "! XML file size :" + techCheckInfo.getFilesize + ",real file size:" + realFilesize
      logWarning(log)
      techCheckInfo.addLog(log)
    }

    // 将 techCheckInfo 存储到 kc — downstream stages read it back from the conf.
    kc.conf.set("techCheckInfo", JSON.toJSONString(techCheckInfo, SerializerFeature.QuoteFieldNames))

    // 清空输出文件 log rj — remove stale outputs from a previous run.
    // NOTE(review): the original built and deleted the ".log" path twice; its comment
    // says dat/log/rj should be cleared, yet no dat output was ever deleted. The
    // duplicate delete is removed here (deleting the same path twice is a no-op);
    // confirm whether the first delete was meant to target a ".dat" output file.
    for (suffix <- Seq(".log", ".rj")) {
      val outputDir = TechCheckUtils.getHdfsPath(namespace, proxyUser, hdfsCheckConfig.output_path, hdfsCheckConfig.input_dat_name + suffix)
      HdfsUtils.deletePath(outputDir, fs)
      logInfo(s"清空输出目录：$outputDir")
    }

    // 不切分文本 — expose each raw line unparsed as a single nullable string column.
    val schema = StructType(List(StructField("row", StringType, nullable = true)))
    kc.sparkSession.createDataFrame(rowRDD, schema)
  }

  /** Declares the single string column ("row") produced by `source`. */
  override def schema(kc: KhaosContext, config: String, dependence: Dependency): List[KhaosStructField] =
    List(KhaosStructField("row", "String"))

}
