package com.spark.demo

import com.sequoiadb.hadoop.io.BSONWritable
import com.sequoiadb.hadoop.mapreduce.SequoiadbInputFormat
import org.apache.hadoop.conf.Configuration
import org.bson.BSONObject

import  scala.math.random
import  org.apache.spark._
/**
 * Demo entry point: reads a SequoiaDB collection through the Hadoop
 * `SequoiadbInputFormat` as a Spark RDD and prints its record count.
 * (Despite the object name, the Monte-Carlo Pi estimation is commented out.)
 *
 * Created by chenfool on 15/1/15.
 */
object SparkPi {
  def main(args: Array[String]): Unit = {

    // Run Spark locally; change setMaster for a real cluster deployment.
    val conf = new SparkConf().setAppName("word count").setMaster("local")
    val sparkContext = new SparkContext(conf)

    // SequoiaDB connector settings: coordinator address (host:port) plus the
    // input/output collection space ("foo") and collection ("bar").
    // NOTE(review): key names follow the SequoiaDB Hadoop connector
    // conventions — confirm against the connector version in use.
    val config = new Configuration()
    config.set("sequoiadb.input.url", "192.168.1.155:11810")
    config.set("sequoiadb.in.collectionspace", "foo")
    config.set("sequoiadb.in.collect", "bar")
    config.set("sequoiadb.out.url", "192.168.1.155:11810")
    config.set("sequoiadb.out.collectionspace", "foo")
    config.set("sequoiadb.out.collect", "bar")
    config.set("sequoiadb.out.bulknum", "20")

    // BUG FIX: the original read "sequoiadb.in.collection", a key that was
    // never set, so this always printed null. Read the key set above instead.
    println(config.get("sequoiadb.in.collect"))

/*
    // Monte-Carlo Pi estimation (disabled).
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = 10000 * slices

    val count = sparkContext.parallelize(1 to n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x * x + y * y < 1) 1 else 0
    }.reduce(_ + _)
    println("pi is roughly " + 4.0 * count / n)
*/
    // Load the SequoiaDB collection as (key, BSON document) pairs.
    val sdbRdd = sparkContext.newAPIHadoopRDD(
      config,
      classOf[SequoiadbInputFormat],
      classOf[Object],
      classOf[BSONWritable])

    // count() forces evaluation of the RDD (pulls records from SequoiaDB).
    println("count:" + sdbRdd.count())
    sparkContext.stop()
  }
}
