package com.bd03.streaminglearn.day0328

import java.io.{FileInputStream, FileOutputStream}
import java.net.Socket

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Batch job that reads the DXY COVID-19 area CSV as plain text and appends it
 * to a local output directory. Planned follow-up steps (simulating a stream
 * source on a socket and consuming it with Spark Streaming) are sketched in
 * the comments below but not yet implemented.
 */
object WriteData {
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder()
      .master("local[*]")
      .appName(this.getClass.getSimpleName)
      .getOrCreate()

    try {
      // Goal (from original notes): write the frame out, batch by batch,
      // to port 9999 on host hdp01.
      val path = "D:\\data\\DXY-COVID-19-Data-master\\DXY-COVID-19-Data-master\\csv\\DXYArea.csv"
      // Read each CSV line as a raw string; no schema inference is needed
      // since downstream parsing splits on commas manually.
      val data: Dataset[String] = session.read.textFile(path)

      // Append mode so repeated runs accumulate rather than fail on an
      // existing output directory.
      data.write.mode("append").text("d://aaa/")

      // NOTE(review): the fragment below references an undefined `value`
      // and extracts columns 0, 2, 7, 10 (likely province/city/confirmed/dead).
      // Kept for reference until the streaming pipeline is implemented.
      /* val res: RDD[(String, String, String, String)] = value.map(t => {
        val str = t.toString().split(",")
        (str(0), str(2), str(7), str(10))
      })*/

      // 2. Use Spark to simulate a production environment: write the rows of
      //    DXY-COVID-19-Data-master\csv\DXYArea.csv to the target port.
      //    Each emitted row is treated as a newly added record.

      // 3. Read the data from the port with Spark Streaming.

      // 4. Per-batch statistics: total new cases, total new deaths;
      //    new cases and new deaths per continent.

      // 5. Running totals of cases and deaths per continent outside China,
      //    implemented two ways: via Redis and via updateStateByKey.
    } finally {
      // Always release the SparkSession; the original leaked it, leaving the
      // local Spark context (and its UI/threads) alive after main returned.
      session.stop()
    }
  }

}
