/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edu.csl.study.spark.source

import java.io._
import java.lang.invoke.CallSite
import java.lang.management.{LockInfo, ManagementFactory, MonitorInfo, ThreadInfo}
import java.lang.reflect.InvocationTargetException
import java.math.{MathContext, RoundingMode}
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.zip.GZIPInputStream
import java.util.{Locale, Properties, Random, UUID}

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.google.common.io.{ByteStreams, Files => GFiles}
import com.google.common.net.InetAddresses
import edu.csl.study.spark.source.exception.SparkUncaughtExceptionHandler
import org.apache.commons.lang3.SystemUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
import org.apache.spark.status.api.v1.{StackTrace, ThreadStackTrace}

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.control.{ControlThrowable, NonFatal}
import scala.util.matching.Regex
import scala.util.{Failure, Success, Try}

/**
 * Various utility methods used by Spark.
 */
private[spark] object Utils extends Logging {
    // Shared pseudo-random generator; intentionally unseeded, so sequences differ per JVM run.
    val random = new Random()

    // Handler for exceptions that escape thread run() methods; presumably installed as the
    // JVM-wide/thread uncaught-exception handler by code outside this chunk — confirm at call sites.
    private val sparkUncaughtExceptionHandler = new SparkUncaughtExceptionHandler
    // Cached local-directory path; empty string means "not resolved yet". @volatile because it is
    // read/written from multiple threads without a lock.
    @volatile private var cachedLocalDir: String = ""

    /**
     * Define a default value for driver memory here since this value is referenced across the code
     * base and nearly all files already use Utils.scala
     */
    val DEFAULT_DRIVER_MEM_MB = JavaUtils.DEFAULT_DRIVER_MEM_MB.toInt

    // Upper bound on retries in createDirectory() before it gives up with an IOException.
    private val MAX_DIR_CREATION_ATTEMPTS: Int = 10
    // Cached list of local root dirs; null until first computed (the computation is outside this chunk).
    @volatile private var localRootDirs: Array[String] = null

    /**
     * The performance overhead of creating and logging strings for wide schemas can be large. To
     * limit the impact, we bound the number of fields to include by default. This can be overridden
     * by setting the 'spark.debug.maxToStringFields' conf in SparkEnv.
     */
    val DEFAULT_MAX_TO_STRING_FIELDS = 25

    /** Effective field cap: the running SparkEnv's setting, or the default when no env exists (e.g. tests). */
    private[spark] def maxNumToStringFields = {
        if (SparkEnv.get != null) {
            SparkEnv.get.conf.getInt("spark.debug.maxToStringFields", DEFAULT_MAX_TO_STRING_FIELDS)
        } else {
            DEFAULT_MAX_TO_STRING_FIELDS
        }
    }

    /** Whether we have warned about plan string truncation yet. */
    private val truncationWarningPrinted = new AtomicBoolean(false)

    /**
     * Format a sequence with semantics similar to calling .mkString(). Any elements beyond
     * maxNumToStringFields will be dropped and replaced by a "... N more fields" placeholder.
     *
     * @return the trimmed and formatted string.
     */
    def truncatedString[T](seq: Seq[T], start: String, sep: String, end: String, maxNumFields: Int = maxNumToStringFields): String = {
        if (seq.length <= maxNumFields) {
            seq.mkString(start, sep, end)
        } else {
            // Warn only once per JVM; the AtomicBoolean flips exactly once.
            if (truncationWarningPrinted.compareAndSet(false, true)) {
                logWarning(
                    "Truncated the string representation of a plan since it was too large. This " + "behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.")
            }
            // Keep maxNumFields - 1 elements; the rest are folded into the placeholder.
            val numFields = math.max(0, maxNumFields - 1)
            val omitted = seq.length - numFields
            seq.take(numFields).mkString(start, sep, sep + "... " + omitted + " more fields" + end)
        }
    }

    /** Shorthand for calling truncatedString() without start or end strings. */
    def truncatedString[T](seq: Seq[T], sep: String): String = truncatedString(seq, "", sep, "")

    /**
     * Serialize an object using Java serialization.
     *
     * @param o any java.io.Serializable value.
     * @return the serialized bytes; feed them to [[deserialize]] to get the value back.
     */
    def serialize[T](o: T): Array[Byte] = {
        val buffer = new ByteArrayOutputStream()
        val objOut = new ObjectOutputStream(buffer)
        objOut.writeObject(o)
        // Closing flushes the ObjectOutputStream's internal buffer into `buffer`.
        objOut.close()
        buffer.toByteArray
    }
    
    /**
     * Deserialize an object using Java serialization.
     *
     * @param bytes bytes previously produced by [[serialize]].
     * @return the reconstructed value, cast (unchecked) to T.
     */
    def deserialize[T](bytes: Array[Byte]): T = {
        val objIn = new ObjectInputStream(new ByteArrayInputStream(bytes))
        objIn.readObject.asInstanceOf[T]
    }
    
    /**
     * Deserialize an object using Java serialization and the given ClassLoader.
     *
     * @param bytes bytes previously produced by [[serialize]].
     * @param loader ClassLoader used to resolve every class named in the stream (needed when the
     *               serialized classes live in user jars / the REPL rather than the system loader).
     */
    def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
        val objIn = new ObjectInputStream(new ByteArrayInputStream(bytes)) {
            override def resolveClass(desc: ObjectStreamClass): Class[_] = {
                // scalastyle:off classforname
                // initialize = false: resolving a class must not run its static initializers.
                Class.forName(desc.getName, false, loader)
                // scalastyle:on classforname
            }
        }
        objIn.readObject.asInstanceOf[T]
    }
    
    /** Deserialize a Long value (used for [[org.apache.spark.api.python.PythonPartitioner]]) */
    def deserializeLongValue(bytes: Array[Byte]): Long = {
        // The value is encoded in network (big-endian) byte order: byte 0 is the most
        // significant, so fold left while shifting the accumulator up a byte at a time.
        (0 to 7).foldLeft(0L)((acc, i) => (acc << 8) | (bytes(i) & 0xFFL))
    }
    
    /**
     * Serialize via nested stream using specific serializer.
     *
     * Wraps `os` in a pass-through OutputStream (which does NOT override close(), so closing the
     * serialization stream flushes its own buffers without closing the caller-owned `os`).
     *
     * @param os underlying stream to write to; left open on return.
     * @param ser serializer whose stream format is used.
     * @param f callback that writes objects to the provided SerializationStream.
     */
    def serializeViaNestedStream(os: OutputStream, ser: SerializerInstance)(f: SerializationStream => Unit): Unit = {
        val osWrapper = ser.serializeStream(new OutputStream {
            override def write(b: Int): Unit = os.write(b)
            
            override def write(b: Array[Byte], off: Int, len: Int): Unit = os.write(b, off, len)
        })
        try {
            f(osWrapper)
        } finally {
            // Close the wrapper even if f throws, so serializer buffers are flushed to `os`.
            osWrapper.close()
        }
    }
    
    /**
     * Deserialize via nested stream using specific serializer.
     *
     * Wraps `is` in a pass-through InputStream (which does NOT override close(), so closing the
     * deserialization stream releases serializer state without closing the caller-owned `is`).
     *
     * @param is underlying stream to read from; left open on return.
     * @param ser serializer whose stream format is used.
     * @param f callback that reads objects from the provided DeserializationStream.
     */
    def deserializeViaNestedStream(is: InputStream, ser: SerializerInstance)(f: DeserializationStream => Unit): Unit = {
        val isWrapper = ser.deserializeStream(new InputStream {
            override def read(): Int = is.read()
            
            override def read(b: Array[Byte], off: Int, len: Int): Int = is.read(b, off, len)
        })
        try {
            f(isWrapper)
        } finally {
            // Close the wrapper even if f throws.
            isWrapper.close()
        }
    }
    
    /**
     * Get the ClassLoader which loaded Spark.
     */
    def getSparkClassLoader: ClassLoader = getClass.getClassLoader

    /**
     * Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
     * loaded Spark.
     *
     * This should be used whenever passing a ClassLoader to Class.ForName or finding the currently
     * active loader when setting up ClassLoader delegation chains.
     */
    def getContextOrSparkClassLoader: ClassLoader = {
        // getContextClassLoader may legally return null; fall back to Spark's own loader.
        Option(Thread.currentThread().getContextClassLoader).getOrElse(getSparkClassLoader)
    }

    /** Determines whether the provided class is loadable in the current thread. */
    def classIsLoadable(clazz: String): Boolean = {
        // scalastyle:off classforname
        // initialize = false: we only probe for the class, never run its static initializers.
        Try(Class.forName(clazz, false, getContextOrSparkClassLoader)).isSuccess
        // scalastyle:on classforname
    }

    // scalastyle:off classforname
    /** Preferred alternative to Class.forName(className) */
    def classForName(className: String): Class[_] =
        Class.forName(className, true, getContextOrSparkClassLoader)
    // scalastyle:on classforname
    
    /**
     * Run a segment of code using a different context class loader in the current thread.
     *
     * @param ctxClassLoader loader installed for the duration of `fn`.
     * @param fn code to run; its result is returned.
     */
    def withContextClassLoader[T](ctxClassLoader: ClassLoader)(fn: => T): T = {
        val currentThread = Thread.currentThread()
        val previousLoader = currentThread.getContextClassLoader()
        try {
            currentThread.setContextClassLoader(ctxClassLoader)
            fn
        } finally {
            // Always restore the original loader, even when fn throws.
            currentThread.setContextClassLoader(previousLoader)
        }
    }
    
    /**
     * Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.DataOutput]].
     * The buffer's position is unchanged when this returns.
     */
    def writeByteBuffer(bb: ByteBuffer, out: DataOutput): Unit = {
        if (bb.hasArray) {
            // Heap buffer: write straight from the backing array, no copy needed.
            out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
        } else {
            // Direct or read-only buffer: copy the remaining bytes out, then rewind the
            // position so the caller observes an unchanged buffer.
            val mark = bb.position()
            val copied = new Array[Byte](bb.remaining())
            bb.get(copied)
            out.write(copied)
            bb.position(mark)
        }
    }
    
    /**
     * Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.OutputStream]].
     * The buffer's position is unchanged when this returns.
     */
    def writeByteBuffer(bb: ByteBuffer, out: OutputStream): Unit = {
        if (bb.hasArray) {
            // Heap buffer: write straight from the backing array, no copy needed.
            out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
        } else {
            // Direct or read-only buffer: copy the remaining bytes out, then rewind the
            // position so the caller observes an unchanged buffer.
            val mark = bb.position()
            val copied = new Array[Byte](bb.remaining())
            bb.get(copied)
            out.write(copied)
            bb.position(mark)
        }
    }
    
    /**
     * JDK equivalent of `chmod 700 file`: revoke each permission for everyone, then grant it
     * back to the owner only. The && chain short-circuits, so once one step fails the
     * remaining permission changes are skipped.
     *
     * @param file the file whose permissions will be modified
     * @return true if the permissions were successfully changed, false otherwise.
     */
    def chmod700(file: File): Boolean = {
        file.setReadable(false, false) &&
            file.setReadable(true, true) &&
            file.setWritable(false, false) &&
            file.setWritable(true, true) &&
            file.setExecutable(false, false) &&
            file.setExecutable(true, true)
    }
    
    /**
     * Create a directory inside the given parent directory. The directory is guaranteed to be
     * newly created, and is not marked for automatic deletion.
     *
     * @param root parent directory under which the new directory is created.
     * @param namePrefix prefix for the generated directory name.
     * @return the canonical File of the newly created directory.
     * @throws IOException if no fresh directory could be created after
     *                     MAX_DIR_CREATION_ATTEMPTS attempts.
     */
    def createDirectory(root: String, namePrefix: String = "spark"): File = {
        val maxAttempts = MAX_DIR_CREATION_ATTEMPTS
        var attempts = 0
        var created: File = null
        while (created == null) {
            attempts += 1
            if (attempts > maxAttempts) {
                throw new IOException("Failed to create a temp directory (under " + root + ") after " + maxAttempts + " attempts!")
            }
            try {
                // A random UUID suffix makes collision with an existing directory very unlikely.
                val candidate = new File(root, namePrefix + "-" + UUID.randomUUID.toString)
                // Accept only a directory that this call itself created.
                if (!candidate.exists() && candidate.mkdirs()) {
                    created = candidate
                }
            } catch {
                // Treat a security-manager denial like any other failed attempt and retry.
                case _: SecurityException =>
            }
        }

        created.getCanonicalFile
    }

    /**
     * Copy all data from an InputStream to an OutputStream. NIO way of file stream to file stream
     * copying is disabled by default unless explicitly set transferToEnabled as true,
     * the parameter transferToEnabled should be configured by spark.file.transferTo = [true|false].
     *
     * @param in source stream.
     * @param out destination stream.
     * @param closeStreams if true, both streams are closed after copying (success or failure).
     * @param transferToEnabled if true and both ends are file streams, use FileChannel.transferTo.
     * @return number of bytes copied.
     */
    def copyStream(in: InputStream, out: OutputStream, closeStreams: Boolean = false, transferToEnabled: Boolean = false): Long = {
        tryWithSafeFinally {
            if (in.isInstanceOf[FileInputStream] && out.isInstanceOf[FileOutputStream] && transferToEnabled) {
                // When both streams are File stream, use transferTo to improve copy performance.
                val inChannel = in.asInstanceOf[FileInputStream].getChannel()
                val outChannel = out.asInstanceOf[FileOutputStream].getChannel()
                val size = inChannel.size()
                copyFileStreamNIO(inChannel, outChannel, 0, size)
                size
            } else {
                // Fallback: plain buffered copy, 8 KB at a time, until EOF (read returns -1).
                var count = 0L
                val buf = new Array[Byte](8192)
                var n = 0
                while (n != -1) {
                    n = in.read(buf)
                    if (n != -1) {
                        out.write(buf, 0, n)
                        count += n
                    }
                }
                count
            }
        } {
            // Second argument list of tryWithSafeFinally (helper defined elsewhere in this file):
            // cleanup that runs whether or not the copy succeeded.
            if (closeStreams) {
                try {
                    in.close()
                } finally {
                    // Close `out` even if closing `in` throws.
                    out.close()
                }
            }
        }
    }
    
    /**
     * Copy `bytesToCopy` bytes from `input` (starting at `startPosition`) to `output`
     * using FileChannel.transferTo, looping because transferTo may move fewer bytes
     * than requested per call.
     */
    def copyFileStreamNIO(input: FileChannel, output: FileChannel, startPosition: Long, bytesToCopy: Long): Unit = {
        val initialPos = output.position()
        var transferred = 0L
        while (transferred < bytesToCopy) {
            transferred += input.transferTo(startPosition + transferred, bytesToCopy - transferred, output)
        }
        assert(transferred == bytesToCopy, s"request to copy $bytesToCopy bytes, but actually copied $transferred bytes.")
        
        // Check the position after transferTo loop to see if it is in the right position and
        // give user information if not.
        // Position will not be increased to the expected length after calling transferTo in
        // kernel version 2.6.32, this issue can be seen in
        // https://bugs.openjdk.java.net/browse/JDK-7052359
        // This will lead to stream corruption issue when using sort-based shuffle (SPARK-3948).
        val finalPos = output.position()
        val expectedPos = initialPos + bytesToCopy
        assert(finalPos == expectedPos,
            s"""
               |Current position $finalPos do not equal to expected position $expectedPos
               |after transferTo, please check your kernel version to see if it is 2.6.32,
               |this is a kernel bug which will lead to unexpected behavior when using transferTo.
               |You can set spark.file.transferTo = false to disable this NIO feature.
           """.stripMargin)
    }

    /**
     * A file name may contain some invalid URI characters, such as " ". This method will convert the
     * file name to a raw path accepted by `java.net.URI(String)`.
     *
     * Note: the file name must not contain "/" or "\"
     */
    def encodeFileNameToURIRawPath(fileName: String): String = {
        require(!fileName.contains("/") && !fileName.contains("\\"))
        // `file` and `localhost` are not used. Just to prevent URI from parsing `fileName` as
        // scheme or host. The prefix "/" is required because URI doesn't accept a relative path.
        // We should remove it after we get the raw path.
        val encoded = new URI("file", null, "localhost", -1, "/" + fileName, null, null)
        encoded.getRawPath.substring(1)
    }
    
    /**
     * Get the file name from uri's raw path and decode it. If the raw path of uri ends with "/",
     * return the name before the last "/".
     */
    def decodeFileNameInURI(uri: URI): String = {
        // split() drops trailing empty segments, so a path ending in "/" yields the
        // name before that final slash.
        val lastSegment = uri.getRawPath.split("/").last
        // Round-trip through URI to percent-decode the segment.
        new URI("file:///" + lastSegment).getPath.substring(1)
    }

    /** Records the duration of running `body`, returning its result and the elapsed milliseconds. */
    def timeTakenMs[T](body: => T): (T, Long) = {
        val start = System.nanoTime()
        val result = body
        val elapsedMs = NANOSECONDS.toMillis(System.nanoTime() - start)
        // Clamp at zero in case the nano clock is not strictly monotonic across cores.
        (result, math.max(elapsedMs, 0))
    }
    
    /**
     * Download `in` to `tempFile`, then move it to `destFile`.
     *
     * If `destFile` already exists:
     *   - no-op if its contents equal those of `sourceFile`,
     *   - throw an exception if `fileOverwrite` is false,
     *   - attempt to overwrite it otherwise.
     *
     * @param url URL that `sourceFile` originated from, for logging purposes.
     * @param in InputStream to download.
     * @param destFile File path to move `tempFile` to.
     * @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
     *                      `sourceFile`
     */
    private def downloadFile(url: String, in: InputStream, destFile: File, fileOverwrite: Boolean): Unit = {
        // Stage the download in a temp file inside destFile's own parent directory —
        // presumably so the later move in copyFile() stays on one filesystem; confirm.
        val tempFile = File.createTempFile("fetchFileTemp", null, new File(destFile.getParentFile.getAbsolutePath))
        logInfo(s"Fetching $url to $tempFile")
        
        try {
            val out = new FileOutputStream(tempFile)
            // closeStreams = true: copyStream closes both `in` and `out` for us.
            Utils.copyStream(in, out, closeStreams = true)
            // removeSourceFile = true: on success, tempFile is moved (not copied) to destFile.
            copyFile(url, tempFile, destFile, fileOverwrite, removeSourceFile = true)
        } finally {
            // Catch-all for the couple of cases where for some reason we didn't move `tempFile` to
            // `destFile`.
            if (tempFile.exists()) {
                tempFile.delete()
            }
        }
    }
    
    /**
     * Copy `sourceFile` to `destFile`.
     *
     * If `destFile` already exists:
     *   - no-op if its contents equal those of `sourceFile`,
     *   - throw an exception if `fileOverwrite` is false,
     *   - attempt to overwrite it otherwise.
     *
     * @param url URL that `sourceFile` originated from, for logging purposes.
     * @param sourceFile File path to copy/move from.
     * @param destFile File path to copy/move to.
     * @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
     *                      `sourceFile`
     * @param removeSourceFile Whether to remove `sourceFile` after / as part of moving/copying it to
     *                         `destFile`.
     */
    private def copyFile(url: String, sourceFile: File, destFile: File, fileOverwrite: Boolean, removeSourceFile: Boolean = false): Unit = {
        
        if (destFile.exists) {
            // Destination already present: compare contents to decide between no-op,
            // overwrite, or failure.
            if (!filesEqualRecursive(sourceFile, destFile)) {
                if (fileOverwrite) {
                    logInfo(s"File $destFile exists and does not match contents of $url, replacing it with $url")
                    if (!destFile.delete()) {
                        throw new SparkException("Failed to delete %s while attempting to overwrite it with %s"
                            .format(destFile.getAbsolutePath, sourceFile.getAbsolutePath))
                    }
                } else {
                    throw new SparkException(s"File $destFile exists and does not match contents of $url")
                }
            } else {
                // Do nothing if the file contents are the same, i.e. this file has been copied
                // previously.
                logInfo("%s has been previously copied to %s".format(sourceFile.getAbsolutePath, destFile.getAbsolutePath))
                return
            }
        }
        
        // The file does not exist in the target directory. Copy or move it there.
        if (removeSourceFile) {
            Files.move(sourceFile.toPath, destFile.toPath)
        } else {
            logInfo(s"Copying ${sourceFile.getAbsolutePath} to ${destFile.getAbsolutePath}")
            copyRecursive(sourceFile, destFile)
        }
    }
    
    /**
     * Compare two paths for equal contents: directories match when their children (paired by
     * sorted name) all match recursively; regular files match byte-for-byte; a file/directory
     * mismatch (or anything else) is unequal.
     */
    private def filesEqualRecursive(file1: File, file2: File): Boolean = {
        if (file1.isDirectory && file2.isDirectory) {
            val children1 = file1.listFiles()
            val children2 = file2.listFiles()
            if (children1.size != children2.size) {
                false
            } else {
                // Pair children by sorted name so the unspecified listFiles() ordering is irrelevant.
                val pairs = children1.sortBy(_.getName).zip(children2.sortBy(_.getName))
                pairs.forall { case (c1, c2) => filesEqualRecursive(c1, c2) }
            }
        } else if (file1.isFile && file2.isFile) {
            // Byte-for-byte content comparison via Guava.
            GFiles.equal(file1, file2)
        } else {
            false
        }
    }
    
    /**
     * Recursively copy `source` to `dest`. For a directory, `dest` is created (it must not
     * already exist — mkdir() failing raises IOException) and every child is copied beneath it;
     * for a regular file, the bytes are copied with Files.copy.
     */
    private def copyRecursive(source: File, dest: File): Unit = {
        if (!source.isDirectory) {
            Files.copy(source.toPath, dest.toPath)
        } else {
            if (!dest.mkdir()) {
                throw new IOException(s"Failed to create directory ${dest.getPath}")
            }
            source.listFiles().foreach { child =>
                copyRecursive(child, new File(dest, child.getName))
            }
        }
    }

    
    /**
     * Fetch a file or directory from a Hadoop-compatible filesystem.
     *
     * A regular file is downloaded via `downloadFile`; a directory is recreated locally and its
     * entries fetched recursively.
     *
     * @param path remote path to fetch.
     * @param targetDir local directory receiving the file/directory; created if missing.
     * @param fs filesystem handle for `path`.
     * @param conf Spark configuration (only threaded through the recursion here).
     * @param hadoopConf Hadoop configuration (only threaded through the recursion here).
     * @param fileOverwrite whether an existing, differing local file may be replaced.
     * @param filename optional override for the local name; defaults to `path.getName`.
     *
     * Visible for testing
     */
    private[spark] def fetchHcfsFile(path: Path, targetDir: File, fs: FileSystem, conf: SparkConf, hadoopConf: Configuration, fileOverwrite: Boolean,
        filename: Option[String] = None): Unit = {
        if (!targetDir.exists() && !targetDir.mkdir()) {
            throw new IOException(s"Failed to create directory ${targetDir.getPath}")
        }
        val dest = new File(targetDir, filename.getOrElse(path.getName))
        if (fs.isFile(path)) {
            val in = fs.open(path)
            try {
                downloadFile(path.toString, in, dest, fileOverwrite)
            } finally {
                // Close the remote stream even if the download fails.
                in.close()
            }
        } else {
            // Directory: recurse into each child; `dest` becomes the new targetDir.
            fs.listStatus(path).foreach { fileStatus =>
                fetchHcfsFile(fileStatus.getPath(), dest, fs, conf, hadoopConf, fileOverwrite)
            }
        }
    }
    
    /**
     * Validate that a given URI is actually a valid URL as well.
     * @param uri The URI to validate
     */
    @throws[MalformedURLException]("when the URI is an invalid URL") def validateURL(uri: URI): Unit = {
        val scheme = Option(uri.getScheme).getOrElse("file")
        scheme match {
            case "http" | "https" | "ftp" =>
                try {
                    uri.toURL
                } catch {
                    case e: MalformedURLException =>
                        // Re-wrap with a clearer message, keeping the original as the cause.
                        val ex = new MalformedURLException(s"URI (${uri.toString}) is not a valid URL.")
                        ex.initCause(e)
                        throw ex
                }
            case _ => // other schemes will not be turned into a URL anyway
        }
    }

    /**
     * Shuffle the elements of a collection into a random order, returning the
     * result in a new collection. Unlike scala.util.Random.shuffle, this method
     * uses a local random number generator, avoiding inter-thread contention.
     */
    def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
        // toArray copies the input first, so the caller's collection is never mutated.
        randomizeInPlace(seq.toArray)
    }
    
    /**
     * Shuffle the elements of an array into a random order, modifying the
     * original array. Returns the original array.
     *
     * Fisher-Yates: walk from the last slot down to index 1, swapping each slot with a
     * uniformly chosen slot at or before it. The draw order is kept identical to the
     * previous implementation so a seeded Random yields the same permutation.
     */
    def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
        var i = arr.length - 1
        while (i >= 1) {
            val j = rand.nextInt(i + 1)
            val tmp = arr(j)
            arr(j) = arr(i)
            arr(i) = tmp
            i -= 1
        }
        arr
    }
    
    /**
     * Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
     * Note, this is typically not used from within core spark.
     */
    private lazy val localIpAddress: InetAddress = findLocalInetAddress()
    
    /**
     * Resolve the address this host should be known by. Honors the SPARK_LOCAL_IP environment
     * variable; otherwise uses InetAddress.getLocalHost, falling back to scanning network
     * interfaces when that resolves to a loopback address (e.g. 127.0.1.1 on Debian).
     */
    private def findLocalInetAddress(): InetAddress = {
        val defaultIpOverride = System.getenv("SPARK_LOCAL_IP")
        if (defaultIpOverride != null) {
            // Explicit override wins unconditionally.
            InetAddress.getByName(defaultIpOverride)
        } else {
            val address = InetAddress.getLocalHost
            if (address.isLoopbackAddress) {
                // Address resolves to something like 127.0.1.1, which happens on Debian; try to find
                // a better address using the local network interfaces
                // getNetworkInterfaces returns ifs in reverse order compared to ifconfig output order
                // on unix-like system. On windows, it returns in index order.
                // It's more proper to pick ip address following system output order.
                val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq
                val reOrderedNetworkIFs = if (isWindows) activeNetworkIFs else activeNetworkIFs.reverse
                
                for (ni <- reOrderedNetworkIFs) {
                    // Skip link-local and loopback addresses; prefer IPv4 when both families exist.
                    val addresses = ni.getInetAddresses.asScala.filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
                    if (addresses.nonEmpty) {
                        val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
                        // because of Inet6Address.toHostName may add interface at the end if it knows about it
                        // Rebuild from the raw bytes only, dropping any hostname/interface suffix.
                        val strippedAddress = InetAddress.getByAddress(addr.getAddress) // We've found an address that looks reasonable!
                        logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" + " a loopback address: " + address
                            .getHostAddress + "; using " + strippedAddress.getHostAddress + " instead (on interface " + ni.getName + ")")
                        logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
                        // Early return: first interface with a usable address wins.
                        return strippedAddress
                    }
                }
                logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" + " a loopback address: " + address
                    .getHostAddress + ", but we couldn't find any" + " external IP address!")
                logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
            }
            // Either a non-loopback getLocalHost result, or loopback with no better alternative.
            address
        }
    }
    
    // Optional override for this machine's reported hostname, seeded from SPARK_LOCAL_HOSTNAME.
    // Note: a plain var with no synchronization; writes happen via setCustomHostname.
    private var customHostname: Option[String] = sys.env.get("SPARK_LOCAL_HOSTNAME")
    
    /**
     * Allow setting a custom host name because when we run on Mesos we need to use the same
     * hostname it reports to the master.
     */
    def setCustomHostname(hostname: String) {
        // DEBUG code
        Utils.checkHost(hostname)
        customHostname = Some(hostname)
    }
    
    /**
     * Get the local machine's FQDN.
     * Uses the custom hostname override when one has been set.
     */
    def localCanonicalHostName(): String = {
        customHostname.getOrElse(localIpAddress.getCanonicalHostName)
    }
    
    /**
     * Get the local machine's hostname.
     * Note: despite the name, the fallback is the textual IP address (getHostAddress),
     * not a reverse-resolved hostname.
     */
    def localHostName(): String = {
        customHostname.getOrElse(localIpAddress.getHostAddress)
    }
    
    /**
     * Get the local machine's URI.
     * Uses Guava's InetAddresses.toUriString, which brackets IPv6 literals correctly.
     */
    def localHostNameForURI(): String = {
        customHostname.getOrElse(InetAddresses.toUriString(localIpAddress))
    }
    
    /** Asserts that `host` is a bare hostname: non-null and without a ":port" suffix. */
    def checkHost(host: String) {
        assert(host != null && !host.contains(":"), s"Expected hostname (not IP) but got $host")
    }
    
    /** Asserts that `hostPort` is non-null and contains a ":" separating host from port. */
    def checkHostPort(hostPort: String) {
        assert(hostPort != null && hostPort.contains(":"), s"Expected host and port but got $hostPort")
    }
    
    // Typically, this will be of order of number of nodes in cluster
    // If not, we should change it to LRUCache or something.
    private val hostPortParseResults = new ConcurrentHashMap[String, (String, Int)]()
    
    /**
     * Split "host:port" into a (host, port) pair, caching results. A string without a ':'
     * yields port 0. Splits on the LAST ':', so bare IPv6 literals are not handled
     * (hadoop does not support ipv6 at this point anyway).
     */
    def parseHostPort(hostPort: String): (String, Int) = {
        val cached = hostPortParseResults.get(hostPort)
        if (cached != null) {
            cached
        } else {
            val sep = hostPort.lastIndexOf(':')
            if (sep == -1) {
                // No port present; default it to 0.
                val parsed = (hostPort, 0)
                hostPortParseResults.put(hostPort, parsed)
                parsed
            } else {
                // Port is assumed valid if present; toInt throws on non-numeric garbage.
                val parsed = (hostPort.substring(0, sep).trim(), hostPort.substring(sep + 1).trim().toInt)
                hostPortParseResults.putIfAbsent(hostPort, parsed)
                // Return the winning entry in case another thread inserted first.
                hostPortParseResults.get(hostPort)
            }
        }
    }
    
    /**
     * Return the string to tell how long has passed in milliseconds.
     * The result has a leading space, e.g. " 57 ms".
     */
    def getUsedTimeMs(startTimeMs: Long): String = {
        val elapsed = System.currentTimeMillis - startTimeMs
        s" $elapsed ms"
    }

    
    /**
     * Determines if a directory contains any files newer than cutoff seconds.
     *
     * @param dir must be the path to a directory, or IllegalArgumentException is thrown
     * @param cutoff measured in seconds. Returns true if there are any files or directories in the
     *               given directory whose last modified time is later than this many seconds ago
     */
    def doesDirectoryContainAnyNewFiles(dir: File, cutoff: Long): Boolean = {
        if (!dir.isDirectory) {
            throw new IllegalArgumentException(s"$dir is not a directory!")
        }
        val filesAndDirs = dir.listFiles()
        val cutoffTimeInMillis = System.currentTimeMillis - (cutoff * 1000)
        
        filesAndDirs.exists(_.lastModified() > cutoffTimeInMillis) || filesAndDirs.filter(_.isDirectory)
            .exists(subdir => doesDirectoryContainAnyNewFiles(subdir, cutoff))
    }
    
    /**
     * Convert a time parameter such as (50s, 100ms, or 250us) to milliseconds for internal use. If
     * no suffix is provided, the passed number is assumed to be in ms.
     */
    def timeStringAsMs(str: String): Long = {
        // Parsing is delegated to the shared network-common helper.
        JavaUtils.timeStringAsMs(str)
    }
    
    /**
     * Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If
     * no suffix is provided, the passed number is assumed to be in seconds.
     */
    def timeStringAsSeconds(str: String): Long = {
        JavaUtils.timeStringAsSec(str)
    }
    
    /**
     * Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for internal use.
     *
     * If no suffix is provided, the passed number is assumed to be in bytes.
     */
    def byteStringAsBytes(str: String): Long = {
        JavaUtils.byteStringAsBytes(str)
    }
    
    /**
     * Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for internal use.
     *
     * If no suffix is provided, the passed number is assumed to be in kibibytes.
     */
    def byteStringAsKb(str: String): Long = {
        JavaUtils.byteStringAsKb(str)
    }
    
    /**
     * Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for internal use.
     *
     * If no suffix is provided, the passed number is assumed to be in mebibytes.
     */
    def byteStringAsMb(str: String): Long = {
        JavaUtils.byteStringAsMb(str)
    }
    
    /**
     * Convert a passed byte string (e.g. 50b, 100k, or 250m, 500g) to gibibytes for internal use.
     *
     * If no suffix is provided, the passed number is assumed to be in gibibytes.
     */
    def byteStringAsGb(str: String): Long = {
        JavaUtils.byteStringAsGb(str)
    }
    
    /**
     * Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of mebibytes.
     */
    def memoryStringToMb(str: String): Int = {
        // Convert to bytes, rather than directly to MB, because when no units are specified the unit
        // is assumed to be bytes
        (JavaUtils.byteStringAsBytes(str) / 1024 / 1024).toInt
    }
    
    /**
     * Convert a quantity in bytes to a human-readable string such as "4.0 MB".
     */
    def bytesToString(size: Long): String = bytesToString(BigInt(size))
    
    /**
     * Convert a quantity in bytes to a human-readable string such as "4.0 MB",
     * choosing the largest unit whose value would be at least 2, and falling back
     * to scientific notation for quantities beyond 2048 EB.
     */
    def bytesToString(size: BigInt): String = {
        val EB = 1L << 60
        if (size >= BigInt(1L << 11) * EB) {
            // Too large for any unit suffix; show it in scientific notation with 3 digits.
            BigDecimal(size, new MathContext(3, RoundingMode.HALF_UP)).toString() + " B"
        } else {
            // Largest-to-smallest unit table; pick the first unit with value >= 2.
            val units = Seq(
                (EB, "EB"), (1L << 50, "PB"), (1L << 40, "TB"),
                (1L << 30, "GB"), (1L << 20, "MB"), (1L << 10, "KB"))
            val (value, unit) = units
                .collectFirst {
                    case (factor, name) if size >= BigInt(factor) * 2 => (BigDecimal(size) / factor, name)
                }
                .getOrElse((BigDecimal(size), "B"))
            "%.1f %s".formatLocal(Locale.US, value, unit)
        }
    }
    
    /**
     * Returns a human-readable string representing a duration such as "35ms",
     * scaling to seconds, minutes or hours as the duration grows.
     */
    def msDurationToString(ms: Long): String = {
        val second = 1000
        val minute = 60 * second
        val hour = 60 * minute
        val locale = Locale.US
        if (ms < second) {
            "%d ms".formatLocal(locale, ms)
        } else if (ms < minute) {
            "%.1f s".formatLocal(locale, ms.toFloat / second)
        } else if (ms < hour) {
            "%.1f m".formatLocal(locale, ms.toFloat / minute)
        } else {
            "%.2f h".formatLocal(locale, ms.toFloat / hour)
        }
    }
    
    /**
     * Convert a quantity in megabytes to a human-readable string such as "4.0 MB".
     */
    def megabytesToString(megabytes: Long): String = {
        // 1 MiB = 2^20 bytes.
        bytesToString(megabytes * (1L << 20))
    }
    
    /**
     * Execute a command and return the process running the command.
     *
     * @param command command and its arguments
     * @param workingDir directory in which to run the command
     * @param extraEnvironment environment entries added on top of the inherited environment
     * @param redirectStderr when true, the child's stderr is drained to our log
     */
    def executeCommand(command: Seq[String], workingDir: File = new File("."), extraEnvironment: Map[String, String] = Map.empty,
        redirectStderr: Boolean = true): Process = {
        val builder = new ProcessBuilder(command: _*).directory(workingDir)
        extraEnvironment.foreach { case (key, value) => builder.environment().put(key, value) }
        val process = builder.start()
        if (redirectStderr) {
            // Drain stderr on a daemon thread so the child cannot block on a full pipe.
            processStreamByLine("redirect stderr for command " + command(0), process.getErrorStream, line => logInfo(line))
        }
        process
    }
    
    /**
     * Execute a command and get its output, throwing an exception if it yields a code other than 0.
     */
    def executeAndGetOutput(command: Seq[String], workingDir: File = new File("."), extraEnvironment: Map[String, String] = Map.empty,
        redirectStderr: Boolean = true): String = {
        val process = executeCommand(command, workingDir, extraEnvironment, redirectStderr)
        val output = new StringBuilder
        val stdoutThread = processStreamByLine("read stdout for " + command(0), process.getInputStream,
            line => output.append(line).append("\n"))
        val exitCode = process.waitFor()
        // Make sure the reader thread observed EOF before we hand back the buffer.
        stdoutThread.join()
        if (exitCode != 0) {
            logError(s"Process $command exited with code $exitCode: $output")
            throw new SparkException(s"Process $command exited with code $exitCode")
        }
        output.toString
    }
    
    /**
     * Return and start a daemon thread that processes the content of the input stream line by line.
     */
    def processStreamByLine(threadName: String, inputStream: InputStream, processLine: String => Unit): Thread = {
        val reader = new Thread(threadName) {
            override def run(): Unit = {
                Source.fromInputStream(inputStream).getLines().foreach(processLine)
            }
        }
        // Daemon so a stuck stream never keeps the JVM alive.
        reader.setDaemon(true)
        reader.start()
        reader
    }
    
    /**
     * Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
     * default UncaughtExceptionHandler
     *
     * NOTE: This method is to be called by the spark-started JVM process.
     */
    def tryOrExit(block: => Unit): Unit = {
        try {
            block
        } catch {
            // Control throwables implement normal Scala control flow; keep them propagating.
            case ct: ControlThrowable => throw ct
            case t: Throwable => sparkUncaughtExceptionHandler.uncaughtException(t)
        }
    }

    
    /**
     * Execute a block of code that returns a value, re-throwing any non-fatal uncaught
     * exceptions as IOException. This is used when implementing Externalizable and Serializable's
     * read and write methods, since Java's serializer will not report non-IOExceptions properly;
     * see SPARK-4080 for more context.
     */
    def tryOrIOException[T](block: => T): T = {
        try {
            block
        } catch {
            case ioe: IOException =>
                // Already an IOException: log and rethrow unchanged.
                logError("Exception encountered", ioe)
                throw ioe
            case NonFatal(e) =>
                // Wrap so Java serialization reports it properly.
                logError("Exception encountered", e)
                throw new IOException(e)
        }
    }
    
    /** Executes the given block. Log non-fatal errors if any, and only throw fatal errors */
    def tryLogNonFatalError(block: => Unit): Unit = {
        try {
            block
        } catch {
            // Fatal errors (OOM, etc.) fall through and propagate.
            case NonFatal(t) =>
                logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
        }
    }
    
    /**
     * Execute a block of code, then a finally block, but if exceptions happen in
     * the finally block, do not suppress the original exception.
     *
     * This is primarily an issue with `finally { out.close() }` blocks, where
     * close needs to be called to clean up `out`, but if an exception happened
     * in `out.write`, it's likely `out` may be corrupted and `out.close` will
     * fail as well. This would then suppress the original/likely more meaningful
     * exception from the original `out.write` call.
     */
    def tryWithSafeFinally[T](block: => T)(finallyBlock: => Unit): T = {
        // Remembers the exception (if any) thrown by `block`, so the finally clause
        // can attach its own failure as suppressed instead of replacing it.
        var originalThrowable: Throwable = null
        try {
            block
        } catch {
            case t: Throwable => // Purposefully not using NonFatal, because even fatal exceptions
                // we don't want to have our finallyBlock suppress
                originalThrowable = t
                throw originalThrowable
        } finally {
            try {
                finallyBlock
            } catch {
                // Only attach when the finally failure is a *different* throwable than the
                // original (addSuppressed on itself would throw IllegalArgumentException).
                case t: Throwable if (originalThrowable != null && originalThrowable != t) => originalThrowable.addSuppressed(t)
                    logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
                    throw originalThrowable
            }
        }
    }
    // A regular expression to match classes of the internal Spark API's
    // that we want to skip when finding the call site of a method.
    private val SPARK_CORE_CLASS_REGEX = """^org\.apache\.spark(\.api\.java)?(\.util)?(\.rdd)?(\.broadcast)?\.[A-Z]""".r
    private val SPARK_SQL_CLASS_REGEX = """^org\.apache\.spark\.sql.*""".r
    
    /** Default filtering function for finding call sites using `getCallSite`. */
    private def sparkInternalExclusionFunction(className: String): Boolean = {
        // Exclude Spark-internal frames (core and SQL) as well as Scala library frames.
        val isSparkClass =
            Seq(SPARK_CORE_CLASS_REGEX, SPARK_SQL_CLASS_REGEX).exists(_.findFirstIn(className).isDefined)
        isSparkClass || className.startsWith("scala")
    }
    

    
    // Config key controlling how many uncompressed-length results are cached per worker.
    private val UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF = "spark.worker.ui.compressedLogFileLengthCacheSize"
    private val DEFAULT_UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE = 100
    // Lazily-created Guava cache: absolute file path -> uncompressed length of a .gz file.
    // Access is guarded by `this.synchronized` in getCompressedLogFileLengthCache.
    private var compressedLogFileLengthCache: LoadingCache[String, java.lang.Long] = null
    
    /**
     * Returns the shared path -> uncompressed-length cache, creating it on first use.
     * Synchronized on this object so only one cache is ever built; the maximum size is
     * read from the given SparkConf at creation time only (later conf changes are ignored).
     */
    private def getCompressedLogFileLengthCache(sparkConf: SparkConf): LoadingCache[String, java.lang.Long] = this.synchronized {
        if (compressedLogFileLengthCache == null) {
            val compressedLogFileLengthCacheSize = sparkConf
                .getInt(UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF, DEFAULT_UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE)
            compressedLogFileLengthCache = CacheBuilder.newBuilder().maximumSize(compressedLogFileLengthCacheSize)
                .build[String, java.lang.Long](new CacheLoader[String, java.lang.Long]() {
                    // Cache miss: decompress the file once to learn its uncompressed length.
                    override def load(path: String): java.lang.Long = {
                        Utils.getCompressedFileLength(new File(path))
                    }
                })
        }
        compressedLogFileLengthCache
    }
    
    /**
     * Return the file length, if the file is compressed it returns the uncompressed file length.
     * It also caches the uncompressed file size to avoid repeated decompression. The cache size is
     * read from workerConf.
     */
    def getFileLength(file: File, workConf: SparkConf): Long = {
        if (file.getName.endsWith(".gz")) {
            // Compressed log: look up (or compute and cache) the uncompressed size.
            getCompressedLogFileLengthCache(workConf).get(file.getAbsolutePath)
        } else {
            file.length
        }
    }
    
    /** Return uncompressed file length of a compressed file. */
    private def getCompressedFileLength(file: File): Long = {
        var gzInputStream: GZIPInputStream = null
        try {
            // The only way to learn the uncompressed size is to stream the whole file.
            gzInputStream = new GZIPInputStream(new FileInputStream(file))
            val buf = new Array[Byte](1024)
            var total = 0L
            var read = ByteStreams.read(gzInputStream, buf, 0, buf.length)
            while (read > 0) {
                total += read
                read = ByteStreams.read(gzInputStream, buf, 0, buf.length)
            }
            total
        } catch {
            case e: Throwable =>
                logError(s"Cannot get file length of ${file}", e)
                throw e
        } finally {
            if (gzInputStream != null) {
                gzInputStream.close()
            }
        }
    }
    
    /** Return a string containing part of a file from byte 'start' to 'end'. */
    def offsetBytes(path: String, length: Long, start: Long, end: Long): String = {
        // Clamp the requested range to [0, length].
        val effectiveStart = math.max(0, start)
        val effectiveEnd = math.min(length, end)
        val buff = new Array[Byte]((effectiveEnd - effectiveStart).toInt)
        val stream =
            if (path.endsWith(".gz")) new GZIPInputStream(new FileInputStream(new File(path)))
            else new FileInputStream(new File(path))
        try {
            ByteStreams.skipFully(stream, effectiveStart)
            ByteStreams.readFully(stream, buff)
        } finally {
            stream.close()
        }
        Source.fromBytes(buff).mkString
    }
    
    /**
     * Return a string containing data across a set of files. The `startIndex`
     * and `endIndex` is based on the cumulative size of all the files take in
     * the given order. See figure below for more details.
     */
    def offsetBytes(files: Seq[File], fileLengths: Seq[Long], start: Long, end: Long): String = {
        assert(files.length == fileLengths.length)
        val startIndex = math.max(start, 0)
        val endIndex = math.min(end, fileLengths.sum)
        logDebug("Log files: \n" + files.zip(fileLengths).mkString("\n"))
        
        val stringBuffer = new StringBuffer((endIndex - startIndex).toInt)
        var sum = 0L
        // Use the zipped length directly rather than a Map keyed by File: a Map silently
        // collapses duplicate entries, returning the wrong length (and so wrong offsets)
        // for any file that appears more than once in `files`.
        files.zip(fileLengths).foreach { case (file, fileLength) =>
            val startIndexOfFile = sum
            val endIndexOfFile = sum + fileLength
            logDebug(s"Processing file $file, " + s"with start index = $startIndexOfFile, end index = $endIndex")
            
            /*
                                            ____________
             range 1:                      |            |
                                           |   case A   |
      
             files:   |==== file 1 ====|====== file 2 ======|===== file 3 =====|
      
                           |   case B  .       case C       .    case D    |
             range 2:      |___________.____________________.______________|
             */
            
            if (startIndex <= startIndexOfFile && endIndex >= endIndexOfFile) {
                // Case C: read the whole file
                stringBuffer.append(offsetBytes(file.getAbsolutePath, fileLength, 0, fileLength))
            } else if (startIndex > startIndexOfFile && startIndex < endIndexOfFile) {
                // Case A and B: read from [start of required range] to [end of file / end of range]
                val effectiveStartIndex = startIndex - startIndexOfFile
                val effectiveEndIndex = math.min(endIndex - startIndexOfFile, fileLength)
                stringBuffer.append(Utils.offsetBytes(file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
            } else if (endIndex > startIndexOfFile && endIndex < endIndexOfFile) {
                // Case D: read from [start of file] to [end of require range]
                val effectiveStartIndex = math.max(startIndex - startIndexOfFile, 0)
                val effectiveEndIndex = endIndex - startIndexOfFile
                stringBuffer.append(Utils.offsetBytes(file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
            }
            sum += fileLength
            logDebug(s"After processing file $file, string built is ${stringBuffer.toString}")
        }
        stringBuffer.toString
    }
    
    /**
     * Clone an object using a Spark serializer: round-trips the value through
     * serialize/deserialize, producing a deep copy.
     */
    def clone[T: ClassTag](value: T, serializer: SerializerInstance): T =
        serializer.deserialize[T](serializer.serialize(value))
    
    /** Returns true if `c` is one of the whitespace characters recognized by splitCommandString. */
    private def isSpace(c: Char): Boolean = c == ' ' || c == '\t' || c == '\r' || c == '\n'
    
    /**
     * Split a string of potentially quoted arguments from the command line the way that a shell
     * would do it to determine arguments to a command. For example, if the string is 'a "b c" d',
     * then it would be parsed as three arguments: 'a', 'b c' and 'd'.
     */
    def splitCommandString(s: String): Seq[String] = {
        // Completed words, in order.
        val buf = new ArrayBuffer[String]
        // Parser state: currently inside an unquoted word / single quotes / double quotes.
        var inWord = false
        var inSingleQuote = false
        var inDoubleQuote = false
        val curWord = new StringBuilder
        
        // Flush the word accumulated so far into the result buffer.
        def endWord() {
            buf += curWord.toString
            curWord.clear()
        }
        
        var i = 0
        while (i < s.length) {
            val nextChar = s.charAt(i)
            if (inDoubleQuote) {
                if (nextChar == '"') {
                    inDoubleQuote = false
                } else if (nextChar == '\\') {
                    if (i < s.length - 1) {
                        // Append the next character directly, because only " and \ may be escaped in
                        // double quotes after the shell's own expansion
                        curWord.append(s.charAt(i + 1))
                        i += 1
                    }
                } else {
                    curWord.append(nextChar)
                }
            } else if (inSingleQuote) {
                if (nextChar == '\'') {
                    inSingleQuote = false
                } else {
                    curWord.append(nextChar)
                } // Backslashes are not treated specially in single quotes
            } else if (nextChar == '"') {
                // Opening quotes start (or continue) a word even if the quoted text is empty.
                inWord = true
                inDoubleQuote = true
            } else if (nextChar == '\'') {
                inWord = true
                inSingleQuote = true
            } else if (!isSpace(nextChar)) {
                curWord.append(nextChar)
                inWord = true
            } else if (inWord && isSpace(nextChar)) {
                // Whitespace outside quotes terminates the current word.
                endWord()
                inWord = false
            }
            i += 1
        }
        // Flush a trailing word, including one from an unterminated quoted section.
        if (inWord || inDoubleQuote || inSingleQuote) {
            endWord()
        }
        buf
    }
    
    /**
     * Calculates 'x' modulo 'mod' with a non-negative result: Scala's % follows the
     * sign of the dividend, so when x % mod is negative, mod is added to bring the
     * result into [0, mod).
     */
    def nonNegativeMod(x: Int, mod: Int): Int = {
        val r = x % mod
        if (r < 0) r + mod else r
    }
    
    // Handles idiosyncrasies with hash (add more as required)
    // This method should be kept in sync with
    // org.apache.spark.network.util.JavaUtils#nonNegativeHash().
    def nonNegativeHash(obj: AnyRef): Int = {
        if (obj eq null) {
            0
        } else {
            val hash = obj.hashCode
            // math.abs(Int.MinValue) overflows back to Int.MinValue, so map it to 0.
            if (hash == Int.MinValue) 0 else math.abs(hash)
        }
    }
    
    /**
     * NaN-safe version of `java.lang.Double.compare()` which allows NaN values to be compared
     * according to semantics where NaN == NaN and NaN is greater than any non-NaN double.
     */
    def nanSafeCompareDoubles(x: Double, y: Double): Int = {
        val xIsNan = java.lang.Double.isNaN(x)
        val yIsNan = java.lang.Double.isNaN(y)
        if ((xIsNan && yIsNan) || (x == y)) 0
        else if (xIsNan) 1  // NaN sorts above everything else
        else if (yIsNan) -1
        else if (x > y) 1
        else -1
    }
    
    /**
     * NaN-safe version of `java.lang.Float.compare()` which allows NaN values to be compared
     * according to semantics where NaN == NaN and NaN is greater than any non-NaN float.
     */
    def nanSafeCompareFloats(x: Float, y: Float): Int = {
        val xIsNan = java.lang.Float.isNaN(x)
        val yIsNan = java.lang.Float.isNaN(y)
        if ((xIsNan && yIsNan) || (x == y)) 0
        else if (xIsNan) 1  // NaN sorts above everything else
        else if (yIsNan) -1
        else if (x > y) 1
        else -1
    }
    
    /**
     * Returns the system properties map that is thread-safe to iterator over. It gets the
     * properties which have been set explicitly, as well as those for which only a default value
     * has been defined.
     */
    def getSystemProperties: Map[String, String] = {
        // stringPropertyNames() snapshots the key set (including defaulted keys), unlike
        // iterating the live Properties object directly.
        val names = System.getProperties.stringPropertyNames().asScala
        names.map(name => name -> System.getProperty(name)).toMap
    }
    
    /**
     * Method executed for repeating a task for side effects.
     * Unlike a for comprehension, it permits JVM JIT optimization
     */
    def times(numIters: Int)(f: => Unit): Unit = {
        // Deliberately a plain while loop (not a for comprehension) so the JIT can optimize it.
        var remaining = numIters
        while (remaining > 0) {
            f
            remaining -= 1
        }
    }
    
    /**
     * Timing method based on iterations that permit JVM JIT optimization.
     *
     * Uses the monotonic System.nanoTime clock rather than System.currentTimeMillis,
     * so measurements are immune to wall-clock adjustments (NTP corrections, manual
     * clock changes) that can make currentTimeMillis-based deltas wrong or negative.
     *
     * @param numIters number of iterations
     * @param f function to be executed. If prepare is not None, the running time of each call to f
     *          must be an order of magnitude longer than one millisecond for accurate timing.
     * @param prepare function to be executed before each call to f. Its running time doesn't count.
     * @return the total time across all iterations (not counting preparation time) in milliseconds
     */
    def timeIt(numIters: Int)(f: => Unit, prepare: Option[() => Unit] = None): Long = {
        if (prepare.isEmpty) {
            // No per-iteration setup: time the whole batch in one measurement.
            val start = System.nanoTime
            times(numIters)(f)
            NANOSECONDS.toMillis(System.nanoTime - start)
        } else {
            // Time each iteration separately so preparation time is excluded.
            var i = 0
            var sum = 0L
            while (i < numIters) {
                prepare.get.apply()
                val start = System.nanoTime
                f
                sum += System.nanoTime - start
                i += 1
            }
            NANOSECONDS.toMillis(sum)
        }
    }
    
    /**
     * Counts the number of elements of an iterator using a while loop rather than calling
     * [[scala.collection.Iterator#size]] because it uses a for loop, which is slightly slower
     * in the current version of Scala.
     */
    def getIteratorSize(iterator: Iterator[_]): Long = {
        var n = 0L
        while (iterator.hasNext) {
            iterator.next()
            n += 1L
        }
        n
    }
    
    /**
     * Generate a zipWithIndex iterator, avoid index value overflowing problem
     * in scala's zipWithIndex (which uses an Int index).
     */
    def getIteratorZipWithIndex[T](iterator: Iterator[T], startIndex: Long): Iterator[(T, Long)] = {
        require(startIndex >= 0, "startIndex should be >= 0.")
        new Iterator[(T, Long)] {
            // Pre-incremented in next(), so start one below the first index.
            private var currentIndex: Long = startIndex - 1L
            
            def hasNext: Boolean = iterator.hasNext
            
            def next(): (T, Long) = {
                currentIndex += 1L
                (iterator.next(), currentIndex)
            }
        }
    }
    
    /**
     * Creates a symlink.
     *
     * @param src absolute path to the source
     * @param dst relative path for the destination
     * @throws IOException if src is not absolute or dst is not relative
     */
    def symlink(src: File, dst: File): Unit = {
        if (!src.isAbsolute()) throw new IOException("Source must be absolute")
        if (dst.isAbsolute()) throw new IOException("Destination must be relative")
        Files.createSymbolicLink(dst.toPath, src.toPath)
    }
    
    /**
     * Return a Hadoop FileSystem with the scheme encoded in the given path.
     */
    def getHadoopFileSystem(path: URI, conf: Configuration): FileSystem = FileSystem.get(path, conf)
    
    /**
     * Return a Hadoop FileSystem with the scheme encoded in the given path.
     */
    def getHadoopFileSystem(path: String, conf: Configuration): FileSystem =
        getHadoopFileSystem(new URI(path), conf)
    
    /**
     * Whether the underlying operating system is Windows.
     */
    val isWindows = SystemUtils.IS_OS_WINDOWS
    
    /**
     * Whether the underlying operating system is Mac OS X.
     */
    val isMac = SystemUtils.IS_OS_MAC_OSX
    
    /**
     * Pattern for matching a Windows drive, which contains only a single alphabet character.
     * Used as an extractor (e.g. in nonLocalPaths) against a URI "scheme" such as the "C"
     * produced when a Windows path like C:/dir is parsed as a URI.
     */
    val windowsDrive = "([a-zA-Z])".r
    
    /**
     * Indicates whether Spark is currently running unit tests: true when either the
     * SPARK_TESTING environment variable or the spark.testing system property is set.
     */
    def isTesting: Boolean = {
        sys.env.contains("SPARK_TESTING") || sys.props.contains("spark.testing")
    }
    
    /**
     * Terminates a process waiting for at most the specified duration.
     *
     * First attempts a graceful destroy(); if the process is still alive after
     * `timeoutMs` milliseconds, escalates to destroyForcibly() and waits the same
     * duration again.
     *
     * @return the process exit value if it was successfully terminated, else None
     */
    def terminateProcess(process: Process, timeoutMs: Long): Option[Int] = {
        // Politely destroy first
        process.destroy()
        if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
            // Successful exit
            Option(process.exitValue())
        } else {
            // Graceful shutdown timed out: escalate to a forcible kill.
            try {
                process.destroyForcibly()
            } catch {
                case NonFatal(e) => logWarning("Exception when attempting to kill process", e)
            } // Wait, again, although this really should return almost immediately
            if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
                Option(process.exitValue())
            } else {
                logWarning("Timed out waiting to forcibly kill process")
                None
            }
        }
    }
    
    /**
     * Return the stderr of a process after waiting for the process to terminate.
     * If the process does not terminate within the specified timeout, return None.
     */
    def getStderr(process: Process, timeoutMs: Long): Option[String] = {
        if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
            // Process exited: its stderr stream is complete and safe to drain.
            val lines = Source.fromInputStream(process.getErrorStream).getLines()
            Some(lines.mkString("\n"))
        } else {
            None
        }
    }
    
    /**
     * Execute the given block, logging and re-throwing any uncaught exception.
     * This is particularly useful for wrapping code that runs in a thread, to ensure
     * that exceptions are printed, and to avoid having to catch Throwable.
     */
    def logUncaughtExceptions[T](f: => T): T = {
        try {
            f
        } catch {
            // Control throwables implement normal Scala control flow; never log them.
            case ct: ControlThrowable =>
                throw ct
            case t: Throwable =>
                logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
                throw t
        }
    }
    
    /** Executes the given block in a Try, logging any uncaught exceptions. */
    def tryLog[T](f: => T): Try[T] = {
        try {
            Success(f)
        } catch {
            // Control throwables implement normal Scala control flow; keep them propagating.
            case ct: ControlThrowable => throw ct
            case t: Throwable =>
                logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
                Failure(t)
        }
    }
    
    /** Returns true if the given exception was fatal. See docs for scala.util.control.NonFatal. */
    def isFatalError(e: Throwable): Boolean = {
        e match {
            // Spark additionally treats these (which NonFatal considers fatal) as non-fatal.
            case NonFatal(_) => false
            case _: InterruptedException => false
            case _: NotImplementedError => false
            case _: ControlThrowable => false
            case _: LinkageError => false
            case _ => true
        }
    }
    
    /**
     * Return a well-formed URI for the file described by a user input string.
     *
     * If the supplied path does not contain a scheme, or is a relative path, it will be
     * converted into an absolute path with a file:// scheme.
     */
    def resolveURI(path: String): URI = {
        val parsed: Option[URI] =
            try {
                val uri = new URI(path)
                if (uri.getScheme() != null) {
                    // Already has a scheme: use it as-is.
                    Some(uri)
                } else if (uri.getFragment() != null) {
                    // Preserve the fragment (applies to YARN distributed cache entries).
                    val absoluteURI = new File(uri.getPath()).getAbsoluteFile().toURI()
                    Some(new URI(absoluteURI.getScheme(), absoluteURI.getHost(), absoluteURI.getPath(), uri.getFragment()))
                } else {
                    None
                }
            } catch {
                // Not a parseable URI: treat it as a plain local path below.
                case _: URISyntaxException => None
            }
        parsed.getOrElse(new File(path).getAbsoluteFile().toURI())
    }
    
    /** Resolve a comma-separated list of paths. */
    def resolveURIs(paths: String): String = {
        if (paths == null || paths.trim.isEmpty) {
            ""
        } else {
            // Resolve each non-blank entry independently and rejoin.
            paths.split(",").filter(_.trim.nonEmpty).map(p => Utils.resolveURI(p)).mkString(",")
        }
    }
    
    /** Return all non-local paths from a comma-separated list of paths. */
    // "Local" means: a URI with scheme "local" or "file" (no scheme defaults to "file"),
    // or — on Windows (or when testWindows is set) — a scheme that is a single drive letter.
    def nonLocalPaths(paths: String, testWindows: Boolean = false): Array[String] = {
        val windows = isWindows || testWindows
        if (paths == null || paths.trim.isEmpty) {
            Array.empty
        } else {
            paths.split(",").filter { p =>
                val uri = resolveURI(p)
                // NOTE(review): resolveURI gives scheme-less paths a file: scheme, so plain
                // relative/absolute local paths fall into the "file" case and are dropped.
                Option(uri.getScheme).getOrElse("file") match {
                    case windowsDrive(d) if windows => false
                    case "local" | "file" => false
                    case _ => true
                }
            }
        }
    }

    


    
    /** Split a comma-separated string into its trimmed, non-empty elements. */
    def stringToSeq(str: String): Seq[String] = {
        str.split(",").toSeq.map(_.trim).filter(_.nonEmpty)
    }
    
    /**
     * Create instances of extension classes.
     *
     * The classes in the given list must:
     * - Be sub-classes of the given base class.
     * - Provide either a no-arg constructor, or a 1-arg constructor that takes a SparkConf.
     *
     * The constructors are allowed to throw "UnsupportedOperationException" if the extension does not
     * want to be registered; this allows the implementations to check the Spark configuration (or
     * other state) and decide they do not need to be added. A log message is printed in that case.
     * Other exceptions are bubbled up.
     *
     * @param extClass the base class/interface every listed class must implement
     * @param classes fully-qualified names of the extension classes to instantiate
     * @param conf passed to each extension's 1-arg constructor when one exists
     * @return the successfully-created extension instances, in input order
     */
    def loadExtensions[T](extClass: Class[T], classes: Seq[String], conf: SparkConf): Seq[T] = {
        classes.flatMap { name =>
            try {
                val klass = classForName(name)
                require(extClass.isAssignableFrom(klass), s"$name is not a subclass of ${extClass.getName()}.")
                
                // Prefer the SparkConf constructor; fall back to the no-arg constructor.
                val ext = Try(klass.getConstructor(classOf[SparkConf])) match {
                    case Success(ctor) => ctor.newInstance(conf)
                    case Failure(_) => klass.getConstructor().newInstance()
                }
                
                Some(ext.asInstanceOf[T])
            } catch {
                case _: NoSuchMethodException => throw new SparkException(
                    s"$name did not have a zero-argument constructor or a" + " single-argument constructor that accepts SparkConf. Note: if the class is" + " defined inside of another Scala class, then its constructors may accept an" + " implicit parameter that references the enclosing class; in this case, you must" + " define the class as a top-level class in order to prevent this extra" + " parameter from breaking Spark's ability to find a valid constructor.")
                // Reflection wraps constructor exceptions in InvocationTargetException;
                // unwrap it: UnsupportedOperationException means the extension opted out.
                case e: InvocationTargetException => e.getCause() match {
                    case uoe: UnsupportedOperationException => logDebug(s"Extension $name not being initialized.", uoe)
                        logInfo(s"Extension $name not being initialized.")
                        None
                    case null => throw e
                    case cause => throw cause
                }
            }
        }
    }

    
    /**
     * Create a new properties object with the same values as `props`.
     *
     * Only explicitly-set entries are copied; values that `props` resolves through
     * its defaults chain are not carried over.
     *
     * @param props the properties to copy; may be null, in which case null is returned
     *              instead of throwing a NullPointerException
     */
    def cloneProperties(props: Properties): Properties = {
        if (props == null) {
            null
        } else {
            val resultProps = new Properties()
            props.asScala.foreach { case (key, value) => resultProps.put(key, value) }
            resultProps
        }
    }
}

/**
 * A utility class to redirect the child process's stdout or stderr.
 */
private[source] class RedirectThread(in: InputStream, out: OutputStream, name: String, propagateEof: Boolean = false) extends Thread(name) {
    
    // Daemon so a stuck pipe never keeps the JVM alive.
    setDaemon(true)
    
    override def run(): Unit = {
        // An IOException here usually just means the child closed its end of the pipe.
        scala.util.control.Exception.ignoring(classOf[IOException]) {
            // Copy raw bytes (not characters) so no encoding issues can arise.
            Utils.tryWithSafeFinally {
                val buffer = new Array[Byte](1024)
                var bytesRead = in.read(buffer)
                while (bytesRead != -1) {
                    out.write(buffer, 0, bytesRead)
                    out.flush()
                    bytesRead = in.read(buffer)
                }
            } {
                // Propagate EOF downstream when requested (e.g. when feeding a child's stdin).
                if (propagateEof) {
                    out.close()
                }
            }
        }
    }
}

