package test

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types._

object ToParquetUtil {
  /** Loads a delimited text file with the given schema and writes it out as a Parquet table. */
  def convert(sqlContext: SQLContext, filename: String, schema: StructType, tablename: String): Unit = {
    // Read the text-based table into a DataFrame using the spark-csv package
    val df = sqlContext.read
      .format("com.databricks.spark.csv")
      .schema(schema)
      .option("delimiter", ",")
      .load(filename)
    // Write the DataFrame out as Parquet under the hard-coded output root
    df.write.parquet("e:/parquet/" + tablename)
  }

}
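
// A minimal usage sketch, assuming a Spark 1.x driver with the
// com.databricks:spark-csv package on the classpath. The schema fields,
// input path, and table name below are hypothetical examples, not part of
// the original utility.
object ToParquetExample {
  import org.apache.spark.{SparkConf, SparkContext}

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("ToParquetExample").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Hypothetical schema for a two-column CSV file
    val schema = StructType(Seq(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = true)))

    // Hypothetical input path and table name
    ToParquetUtil.convert(sqlContext, "e:/csv/people.csv", schema, "people")
    sc.stop()
  }
}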
