import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object Test1 {
  /**
    * Loads a tab-separated sales file, registers it as the temp view
    * "salelnfo" (sic — misspelling kept because the name is referenced
    * by the SQL below; rename everywhere at once if cleaned up), then
    * runs two demo queries: a full dump and a per-item price spread
    * (max(price) - min(price)).
    *
    * NOTE(review): `price` is kept as a String, so max/min compare
    * lexicographically ("9" > "10") — cast to double in the SQL if
    * numeric ordering is intended; left as-is to preserve behavior.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("app").setMaster("local[*]")
    val session = SparkSession.builder.config(conf)
      .enableHiveSupport.getOrCreate()
    import session.implicits._

    // NOTE(review): hard-coded local Windows path — consider taking it
    // from args so the job is runnable outside this machine.
    val rdd = session.sparkContext.textFile("C:\\Users\\Administrator\\Desktop\\新建文本文档.txt")

    // Parse each line into the 6 expected columns. Using flatMap with a
    // wildcard case skips malformed rows instead of aborting the whole
    // job with a scala.MatchError (the original match was non-exhaustive).
    rdd.flatMap { line =>
      line.split("\t") match {
        case Array(orderid, userip, items, goodsname, price, pcs) =>
          Some((orderid, userip, items, goodsname, price, pcs))
        case _ => None // wrong field count — drop the row
      }
    }.toDF("orderid",
      "userip",
      "items",
      "goodsname",
      "price",
      "pcs").createOrReplaceTempView("salelnfo")

    session.sql("select * from salelnfo").show()

    // Price spread per item: t1 = max price, t2 = min price, joined on items.
    // The join keys come from the same GROUP BY, so the left join never
    // produces null on the right side here.
    session.sql(
      """
        |
        |with
        |t1 as ( select items,max(price)s from salelnfo group by items),
        |t2 as ( select items,min(price)c from salelnfo group by items )
        |select t1.s-t2.c
        |from t1 left join t2 on t2.items=t1.items
        |
        |""".stripMargin).show()

    // Release executors and the underlying SparkContext — the original
    // leaked the session on exit.
    session.stop()
  }
}
