package com.chenjj.bigdata.spark.scala.simple

import org.apache.spark.{SparkConf, SparkContext}

object mapOperator {
  /**
   * Local-mode Spark demo of the `map` and `mapValues` RDD transformations.
   *
   * Each section builds a small RDD, applies a transformation, collects the
   * result to the driver, and prints it to stdout.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("MapOperatorLocal") // was "WordCountLocal" — copy-paste leftover; name the job after what it does
      .setMaster("local")
      .set("spark.testing.memory", "1024000000") // raise testing memory so local mode passes Spark's minimum-memory check
    val sc = new SparkContext(conf)

    try {
      // map: double every element of 1..9
      val a = sc.parallelize(1 to 9)
      val b = a.map(x => x * 2)
      a.collect().foreach(x => print(x + ","))
      b.collect().foreach(x => print(x + ","))

      // map: lift each string into a (word, 1) pair
      val a1 = sc.parallelize(List("dog1", "dog2", "dog3", "dog4"))
      val b1 = a1.map(x => (x, 1))
      b1.collect().foreach(x => print(x + ","))
      b1.collect().foreach(x => print(x._1 + ":" + x._2))

      // mapValues: transform only the value side of a key/value RDD,
      // leaving keys (and partitioning) untouched
      val a2 = sc.parallelize(List("dog1", "dog2", "dog3", "dog4"))
      a2.map(x => (x.length, x)).mapValues("x" + _ + "x").collect().foreach(println)
    } finally {
      sc.stop() // always release the SparkContext, even if a job throws
    }
  }
}
