# coding:utf-8
from pyspark.sql import HiveContext

from bigdata.Personas import sparkSessionBase


class Hive_process(sparkSessionBase.SparkSessionBase):
    """Thin wrapper around a Hive-enabled SparkSession for reading from and
    writing to Hive tables.

    Class attributes configure the session created by the base class:
    app name, master URL, and Hive support flag.
    """

    SPARK_APP_NAME = 'UserGenderModel'
    SPARK_URL = "local"
    ENABLE_HIVE_SUPPORT = True

    def __init__(self, HADOOP_HOME='D:\\hadoop-2.9.2',
                 HADOOP_CONF_DIR='config/hadoop-conf',
                 YARN_CONF_DIR='config/yarn-conf'):
        """Initialise the underlying SparkSession.

        :param HADOOP_HOME: path to the Hadoop installation
        :param HADOOP_CONF_DIR: path to the Hadoop configuration directory
        :param YARN_CONF_DIR: path to the YARN configuration directory
        """
        # Session construction is delegated to the project base class.
        self.spark = self._create_spark_session(
            HADOOP_HOME=HADOOP_HOME,
            HADOOP_CONF_DIR=HADOOP_CONF_DIR,
            YARN_CONF_DIR=YARN_CONF_DIR,
        )

    def read(self, table_name):
        """Read a Hive table into a Spark DataFrame.

        :param table_name: name of the Hive table to read
        :return: the table contents as a DataFrame
        """
        hcx = HiveContext(self.spark.sparkContext)
        return hcx.table(table_name)

    def write(self, dataframe, table_name, mode='append'):
        """Write a DataFrame into a Hive table.

        :param dataframe: the DataFrame to persist
        :param table_name: target table name; tip: usually 'shopping.<table>'
        :param mode: Spark save mode (e.g. 'append', 'overwrite')
        """
        dataframe.write \
            .format('hive') \
            .mode(mode) \
            .saveAsTable(table_name)


if __name__ == "__main__":
    # Guard the instantiation: building a SparkSession is an expensive side
    # effect that should not run when this module is merely imported.
    a = Hive_process()





