# coding: utf-8

import os
from pyspark.sql import SparkSession, HiveContext
from pyspark import SparkContext, SparkConf


# NOTE(review): HOME_PATH is not referenced anywhere in this file — confirm
# it is used by an importer before removing.
HOME_PATH = os.environ['HOME']
# Directory containing this script; JARS_PATH points at a local 'jar' folder
# (only used by the commented-out spark.jars configuration below).
BASE_PATH = os.path.dirname(__file__)
JARS_PATH = os.path.join(BASE_PATH, 'jar')

# Spark application name and standalone-master URL used by get_spark_context().
SPARK_NAME = 'test-hive'
SPARK_HOST = 'spark://10.0.3.125:7077'


def get_spark_context(level='INFO'):
    """Create a SparkContext bound to the standalone master.

    :param level: log level handed to ``SparkContext.setLogLevel``
                  (e.g. 'INFO', 'WARN', 'ERROR'); defaults to 'INFO'.
    :return: a started :class:`SparkContext` named ``SPARK_NAME``.
    """
    conf = SparkConf()
    context = SparkContext(master=SPARK_HOST, appName=SPARK_NAME, conf=conf)
    context.setLogLevel(level)
    return context


def get_spark_session(spark_context):
    """Return a fresh SparkSession built on top of *spark_context*.

    NOTE(review): ``newSession()`` yields a session with its own temp-view
    namespace, presumably isolated from other sessions (and any HiveContext)
    on the same SparkContext — verify this is intended.
    """
    session = SparkSession(spark_context)
    return session.newSession()


def main():
    """Demo pipeline: aggregate toy (user_id, article_id, score, sim) rows,
    register the result as a temp view and a Hive table, then read it back.

    Prints the Hive table list at several points so the effect of each
    registration step is visible in the output.
    """
    sc = get_spark_context()
    session = get_spark_session(sc)
    hive = HiveContext(sc)
    print(hive.tableNames())
    # Toy input rows: (user_id, article_id, score, sim).
    rows = [
        (1, 11, 0.1, 2.0),
        (1, 11, 0.4, 1.0),
        (1, 12, 0.5, 1.0),
        (1, 11, 0.2, 1.0),
        (2, 13, 0.3, 1.0),
        (3, 11, 0.4, 1.0),
        (4, 11, 0.5, 2.0),
        (2, 11, 0.6, 1.0),
        (3, 13, 0.7, 1.0),
        (4, 12, 0.8, 1.0),
    ]
    df = session.createDataFrame(
        sc.parallelize(rows),
        schema=['user_id', 'article_id', 'score', 'sim'],
    )
    # Sum scores per (user, article); agg names the column 'sum(score)',
    # so rename it back to plain 'score'.
    df = (
        df.groupBy('user_id', 'article_id')
        .agg({'score': 'sum'})
        .withColumnRenamed('sum(score)', 'score')
    )
    df.printSchema()
    df.show(n=10)
    df.createOrReplaceTempView('user_news_score')
    print(hive.tableNames())
    table_name = 'user_news_score_v2'
    hive.registerDataFrameAsTable(df, table_name)
    print(hive.tableNames())
    df.write.saveAsTable(table_name, mode='append')
    fetched = hive.sql('select * from %s' % table_name)
    fetched.show()
    print(hive.tableNames())


# Script entry point: run the Hive demo only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
