# -*- coding: utf-8 -*-
# __author__ = 'Yuanjiang Huang'
# yuanjiang.huang@socialcredits.cn

import os
import sys

# Location of the local Spark installation; the bundled pyspark package lives
# under its python/ subdirectory.
# (An earlier deployment used /home/openkai/social-credits/spark/spark-1.5.1/.)
SPARK_HOME = "/home/scdev/spark-1.5.2/"
os.environ['SPARK_HOME'] = SPARK_HOME

# Make the bundled pyspark importable without a system-wide install.
sys.path.append(SPARK_HOME + "python/")

try:
    from pyspark import SparkContext
    from pyspark import SparkConf

    print ("Successfully imported Spark Modules")
except ImportError as e:
    # Without pyspark nothing below can run; fail fast with a non-zero status.
    print ("Can not import Spark Modules", e)
    sys.exit(1)

# Directory containing this script, normalized to end with a slash.
# os.path.dirname(__file__) is '' when the script is run from its own
# directory, in which case no slash is appended.
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
    CURRENT_PATH += '/'

if __name__ == "__main__":
    # Demo job: load a company-name JSON dataset from HDFS and show the
    # record count per province.
    from pyspark.sql import SQLContext

    # Spark master URL: spark://HOST:PORT, mesos://HOST:PORT, yarn-client,
    # or 'local[n]' for an in-process cluster.
    master_url = 'spark://192.168.31.116:7077'
    conf = SparkConf().setMaster(master_url).setAppName('Demo')
    sc = SparkContext(conf=conf)

    try:
        sqlContext = SQLContext(sc)

        # BUG FIX: the original URL embedded the Spark master port as well
        # ("hdfs://192.168.31.116:7077:9000/..."); an HDFS namenode URI takes
        # a single host:port pair.
        data_path = ('hdfs://192.168.31.116:9000'
                     '/user/crawler/organization/companyNameStrict_2015_12.json')

        # DataFrameReader API (Spark >= 1.4) replaces the deprecated
        # SQLContext.load(path, format).
        df = sqlContext.read.json(data_path)

        print('*' * 200)
        df.groupBy("province").count().show()
        print('*' * 200)
    finally:
        # Release executors and cluster resources even when the job fails.
        sc.stop()