------------------------------- spark-submit run commands -----------------------

# Submit each example class from the fat jar to the HDP Spark2 cluster.
# Same eight spark-submit invocations as before, with the repeated paths
# factored into variables.
SPARK_SUBMIT=/usr/hdp/current/spark2-client/bin/spark-submit
APP_JAR=/root/samuel/column/ColumnStorageSample-1.0-SNAPSHOT-jar-with-dependencies.jar

# CarbonData examples
"$SPARK_SUBMIT" --class com.navinfo.platform.examples.carbondata.DataFrameComplexTypeExample "$APP_JAR"

"$SPARK_SUBMIT" --class com.navinfo.platform.examples.carbondata.HadoopFileExample "$APP_JAR"

"$SPARK_SUBMIT" --class com.navinfo.platform.examples.carbondata.MyCarbondataSample "$APP_JAR"

"$SPARK_SUBMIT" --class com.navinfo.platform.examples.carbondata.CarbonPartitionExample "$APP_JAR"

# Parquet examples
"$SPARK_SUBMIT" --class com.navinfo.platform.examples.parquet.MyParquetSample "$APP_JAR"

"$SPARK_SUBMIT" --class com.navinfo.platform.examples.parquet.MyParquetJavaSchemaSample "$APP_JAR"

"$SPARK_SUBMIT" --class com.navinfo.platform.examples.parquet.MyParquetJavaSample "$APP_JAR"

"$SPARK_SUBMIT" --class com.navinfo.platform.examples.parquet.MyParquetPBJavaSample "$APP_JAR"

---------------- carbondata experiment commands -------------------------
# Launch spark-shell with the CarbonData 1.3.1 assembly jar on the driver/executor classpath.
/usr/hdp/current/spark2-client/bin/spark-shell --jars /usr/hdp/2.6.4.0-91/spark2/carbonlib/apache-carbondata-1.3.1-bin-spark2.2.0-hadoop2.7.2.jar

// Paste into the spark-shell started above: create a CarbonSession backed by an
// HDFS carbon store, then exercise table creation, data load, and queries.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.CarbonSession._

// Reuse the shell's existing SparkContext config; the argument is the CarbonData store path.
val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://sy-lp1.hadoop.com:8020/user/root/data/CarbonData/CarbonStore")

// STORED BY 'carbondata' selects the Carbon table format.
carbon.sql("CREATE TABLE IF NOT EXISTS test_table2( id string, name string, city string, age Int) STORED BY 'carbondata'")

// Local-filesystem load, kept for reference. NOTE: this was disabled with a
// SQL-style "--" prefix, which is not a valid Scala comment and would break
// pasting this block into spark-shell; use "//" instead.
// carbon.sql("LOAD DATA LOCAL INPATH '/root/samuel/testdata/sample.csv' INTO TABLE test_table2")

// Load the sample CSV from HDFS.
carbon.sql("LOAD DATA INPATH '/user/root/sample.csv' INTO TABLE test_table2")

carbon.sql("SELECT * FROM test_table2").show()

carbon.sql("SELECT city, avg(age), sum(age) FROM test_table2 GROUP BY city").show()

carbon.sql("CREATE DATABASE partitionDB")


----------------------- build carbondata with mvn -----------------------
# Alternative build invocations — run ONE of the package commands, not all of them.

# Build against plain Apache Spark 2.2.0 / 2.2.1.
mvn -DskipTests -Pspark-2.2 -Dspark.version=2.2.0 clean package
mvn -DskipTests -Pspark-2.2 -Dspark.version=2.2.1 clean package

# Build against the HDP 2.6.4.0-91 vendor versions of Spark and Hadoop
# (requires the HDP repositories added to the pom — see pom.xml section below).
mvn -DskipTests -Pspark-2.2 -Dspark.version=2.2.0.2.6.4.0-91 -Dhadoop.version=2.7.3.2.6.4.0-91 clean package

# Inspect the resolved dependency tree for the HDP build (debugging version conflicts).
mvn -DskipTests -Pspark-2.2 -Dspark.version=2.2.0.2.6.4.0-91 -Dhadoop.version=2.7.3.2.6.4.0-91 dependency:tree

# Install the HDP-flavored assembly jar into the cluster's carbonlib directory.
cp /root/samuel/carbondata-parent-1.3.1/assembly/target/scala-2.11/apache-carbondata-1.3.1-bin-spark2.2.0.2.6.4.0-91-hadoop2.7.3.2.6.4.0-91.jar /usr/hdp/2.6.4.0-91/spark2/carbonlib

-----------------pom.xml-----------------------------------
    -- Add the following repositories so the HDP flavor of the carbondata build can resolve its dependencies
    <!-- Repositories needed to resolve HDP-specific Spark/Hadoop artifacts.
         All URLs use HTTPS: Maven 3.8.1+ blocks external repositories served
         over plain HTTP, so the original http:// URLs fail on modern Maven. -->
    <repository>
        <id>HDPReleases</id>
        <name>HDP Releases</name>
        <url>https://repo.hortonworks.com/content/repositories/public</url>
        <layout>default</layout>
        <releases>
            <enabled>true</enabled>
            <updatePolicy>always</updatePolicy>
            <checksumPolicy>warn</checksumPolicy>
        </releases>
        <snapshots>
            <enabled>false</enabled>
            <updatePolicy>never</updatePolicy>
            <checksumPolicy>fail</checksumPolicy>
        </snapshots>
    </repository>
    <repository>
        <id>HDPJetty</id>
        <name>Hadoop Jetty</name>
        <url>https://repo.hortonworks.com/content/repositories/jetty-hadoop/</url>
        <layout>default</layout>
        <releases>
            <enabled>true</enabled>
            <updatePolicy>always</updatePolicy>
            <checksumPolicy>warn</checksumPolicy>
        </releases>
        <snapshots>
            <enabled>false</enabled>
            <updatePolicy>never</updatePolicy>
            <checksumPolicy>fail</checksumPolicy>
        </snapshots>
    </repository>
    <!-- jcenter mirror of Maven Central; releases only. -->
    <repository>
        <snapshots>
            <enabled>false</enabled>
        </snapshots>
        <id>central</id>
        <name>bintray</name>
        <url>https://jcenter.bintray.com</url>
    </repository>
    <!-- Pentaho repositories (transitive deps of the carbondata build). -->
    <repository>
    <id>pentaho-releases</id>
    <url>https://repository.pentaho.org/artifactory/repo/</url>
    </repository>
    <repository>
    <id>pentaho-nexus</id>
    <url>https://nexus.pentaho.org/content/groups/omni/</url>
    </repository>


    -- In the parent pom.xml, jackson-databind must be excluded: the version it pulls in is too old and makes spark-shell fail at startup
    <!-- hadoop-hdfs dependency for the carbondata parent pom, with conflicting
         transitive artifacts excluded. The jackson-databind exclusion is the
         key one: the version hadoop-hdfs brings in is too old and breaks
         spark-shell at startup (see note above). -->
    <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
            <scope>${hadoop.deps.scope}</scope>
            <exclusions>
              <!-- Netty conflicts with the version Spark ships. -->
              <exclusion>
                <groupId>io.netty</groupId>
                <artifactId>netty-all</artifactId>
              </exclusion>
              <!-- Servlet/JSP APIs are provided by the Spark runtime. -->
              <exclusion>
                <groupId>javax.servlet</groupId>
                <artifactId>*</artifactId>
              </exclusion>
              <exclusion>
                <groupId>javax.servlet.jsp</groupId>
                <artifactId>*</artifactId>
              </exclusion>
              <!-- Outdated jackson-databind breaks spark-shell startup. -->
              <exclusion>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-databind</artifactId>
              </exclusion>
            </exclusions>
          </dependency>