# -*- coding: utf-8 -*-
from scpy.logger import get_logger
import os
import sys
import json
import re
import pickle

# Path for spark source folder
# os.environ['SPARK_HOME']="/home/openkai/social-credits/spark/spark-1.5.1/"
os.environ['SPARK_HOME'] = "/home/scdev/spark-2.0.1-bin-hadoop2.7/"

# Append pyspark  to Python Path
# sys.path.append("/home/openkai/social-credits/spark/spark-1.5.1/python/")
sys.path.append("/home/scdev/spark-2.0.1-bin-hadoop2.7/python/")

try:
    from pyspark import SparkContext
    from pyspark import SparkConf
    from pyspark.sql import SQLContext
    from pyspark.sql import SparkSession,Row
    from pyspark.conf import SparkConf
    from pyspark.sql.functions import udf
    print ("Successfully imported Spark Modules")
except ImportError as e:
    print ("Can not import Spark Modules", e)
    sys.exit(1)
from operator import add

logger = get_logger(__file__)

# Directory containing this script, with a trailing slash when non-empty,
# so the data paths below resolve relative to the source file rather than
# the process working directory.
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
    CURRENT_PATH = CURRENT_PATH + "/"

master_url = 'local'  # Spark master URL: run everything in-process
filePath = CURRENT_PATH + 'data/people.txt'  # input for the word-count demo (sample_1)
filePathAws = CURRENT_PATH + 'cc_data/'  # directory of JSON recruitment records (sample_3/4)
mode = re.compile(r'\d+')  # extracts runs of digits from salary strings

def sample_1():
    """Word-count demo on a local Spark cluster.

    Reads `filePath`, splits each line on commas, counts occurrences of
    each token, and prints ``token: count`` lines to stdout.
    """
    conf = SparkConf().setMaster(master_url).setAppName('Demo')
    sc = SparkContext(conf=conf)
    textFile = sc.textFile(filePath)
    # Fix: dropped `_ = textFile.collect()` -- it materialized the whole
    # file on the driver and discarded the result, doing nothing but work.
    counts = textFile.flatMap(lambda x: x.split(',')) \
                     .map(lambda x: (x, 1)) \
                     .reduceByKey(add)
    for (word, count) in counts.collect():
        print("%s: %i" % (word, count))


def sample_2():
    """Partitioning demo: show how a 4-element RDD is split into 2 partitions.

    Prints the per-partition view (``glom``), a separator line, and the
    flat element list.
    """
    conf = SparkConf().setMaster(master_url).setAppName('Demo')
    sc = SparkContext(conf=conf)
    rdd = sc.parallelize([1, 2, 3, 4], 2)
    # Fix: converted Python-2-only `print x` statements to print() calls,
    # consistent with the print() usage already in this file and valid
    # under Python 3.
    print(rdd.glom().collect())
    print('*' * 50)
    print(rdd.collect())


def dumps_info(iterator):
    """Flatten a partition of JSON strings into a single list of elements.

    Each string in *iterator* is parsed with ``json.loads``; the items of
    every parsed container are concatenated into one flat list (for a JSON
    object this yields its keys, for a JSON array its elements).
    """
    return [element
            for raw in iterator
            for element in json.loads(raw)]


def sample_3():
    """Load JSON-line records from `filePathAws` and flatten them via dumps_info."""
    conf = SparkConf().setMaster(master_url).setAppName('Demo')
    sc = SparkContext(conf=conf)
    raw_lines = sc.textFile(filePathAws)
    collected = raw_lines.collect()
    # NOTE(review): round-tripping through the driver (collect + parallelize)
    # looks unnecessary -- mapPartitions could run on `raw_lines` directly.
    flattened = sc.parallelize(collected).mapPartitions(dumps_info)
    sqlContext = SQLContext(sc)

def trans_salary_test(salary):
    """Adapter for RDD.map: wrap the normalized salary in a pyspark Row."""
    return Row(salary=trans_salary(salary))


def trans_salary(salary_before):
    """Normalize a raw salary description to an estimated monthly salary.

    Args:
        salary_before: raw salary text, e.g. u"12k-15k", u"100元/天",
            u"3000-5000", u"面议".

    Returns:
        Estimated monthly salary as a float, or None when no usable figure
        can be derived (negotiable, per-occurrence pay, unparsable input).
    """
    try:
        # Fix: the original called .encode('utf-8') before findall, which
        # raises under Python 3 (bytes data vs str pattern); \d matches
        # ASCII digits on unicode text directly in both Python 2 and 3.
        # Also fixed the Python-2-only `except Exception, e` syntax.
        values = re.findall(r'\d+', salary_before)
    except Exception:
        # Non-string input (e.g. None): no salary recoverable.
        return None
    try:
        # Fix: the original computed these unit conversions but fell
        # through into the generic branch below, which overwrote them;
        # the commented-out "return res" lines showed the intent was to
        # return early.
        if salary_before.find(u"元/天") != -1:
            return float(values[0]) * 30       # per day -> per month
        if salary_before.find(u"元/次") != -1:
            return None                        # per occurrence: unusable
        if salary_before.find(u"元/小时") != -1:
            return float(values[0]) * 8 * 30   # per hour -> per month
        if salary_before.find(u"面谈") != -1 or salary_before.find(u"面议") != -1:
            return None                        # "negotiable": no figure
        if not values:
            return None
        has_k = salary_before.find(u"k") != -1 or salary_before.find(u"K") != -1
        yearly = salary_before.find(u"年") != -1
        if len(values) >= 2:
            low, high = float(values[0]), float(values[-1])
            # Fix: test the yearly+k combination before the plain-k case;
            # in the original the `elif 年 and k` branch was unreachable
            # because any string containing 'k' matched the first branch.
            if yearly and has_k:
                return (low * 1000 + high * 1000) / 2 / 12
            if has_k:
                return (low * 1000 + high * 1000) / 2
            if yearly:
                return (low + high) / 2 / 12
            return (low + high) / 2
        value = float(values[0])
        if has_k:
            return value * 1000
        if yearly:
            return value / 12
        return value
    except Exception:
        # Malformed numbers or a unit token with no digits: unusable.
        return None

def sample_4():
    """Load recruitment JSON with a SparkSession and normalize the salary column.

    Registers the data as a temp view, selects the raw salary column via SQL,
    rewrites the `salary` column through the `trans_salary` UDF, and shows
    both the transformed frame and the raw salary selection.
    """
    session = SparkSession.builder \
        .master("local") \
        .appName("Demo") \
        .config(conf=SparkConf()) \
        .getOrCreate()
    recruitment_df = session.read.json(filePathAws)
    recruitment_df.createTempView('recruitment')
    normalize_salary = udf(trans_salary)
    raw_salary_df = session.sql('select salary from recruitment')
    recruitment_df = recruitment_df.withColumn('salary', normalize_salary(recruitment_df.salary))
    recruitment_df.show()
    raw_salary_df.show()


if __name__ == '__main__':
    # Only the SparkSession-based demo runs by default; each sample creates
    # its own SparkContext, so they are meant to be enabled one at a time.
    # sample_1()
    # sample_2()
    # sample_3()
    sample_4()
    # print trans_salary("12345-100000/月")
