package HadoopWithSpark.class1st

/**
 * @Author TheKernel
 * @Date 2019/9/29 8:34 PM
 * @Version 1.0
 * @Description 1.7 Currying functions
 */
object CurryingFunction {

  /**
   * Demo entry point: currying, partial application, partial functions,
   * and higher-order functions. Prints each result to stdout.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Currying: turn a function that takes two parameters into a chain of
    // functions that each take a single parameter.
    def subtract(x: Int)(y: Int) = x - y
    def subtract2(x: Int) = (y: Int) => x - y

    // Partial application: `subtract(2) _` eta-expands the remaining
    // parameter list into a function value of type Int => Int.
    // (A space before `_` is the conventional, robust Scala 2 form.)
    val result = subtract(2) _
    val result2 = subtract2(17)
    val s1 = result(1)   // 2 - 1 = 1
    val s2 = result2(9)  // 17 - 9 = 8

    println("s1=" + s1)
    println("s2=" + s2)

    // Partial function literal; the wildcard case makes it total in practice.
    def func: PartialFunction[String, Int] = {
      case "a" => 97
      case _ => 0  // default
    }
    println(func("a"))
    println(func("test"))

    // Partial function over Any: defined only for Int and String inputs.
    // `collect` applies it solely to elements where it is defined.
    def f2: PartialFunction[Any, Int] = {
      case i: Int => i * 10
      case _: String => 0  // `_` instead of an unused binder `i`
    }
    val arr = Array[Int](1, 2, 3, 4)
    val collect = arr.collect(f2)
    println(collect.toBuffer)
    val arr2 = Array[Any](1, 2, 3, 4, "Scala")
    val collect2 = arr2.collect(f2)
    println(collect2.toBuffer)

    // Higher-order function: takes a function parameter and applies it.
    def apply(f: Int => String, v: Int) = f(v)
    def layout(x: Int) = "[" + x.toString + "]"
    println(apply(layout, 8))

  }

}
