# Connect to Hive from Python code
from pyhive import hive

if __name__ == '__main__':

    # Open a connection to Hive (via the Spark ThriftServer endpoint).
    # NOTE(review): port 10001 and user 'hadoop' are hard-coded — assumes the
    # ThriftServer on host 'node1' was started with these settings; confirm.
    conn = hive.Connection(host='node1', port=10001, username='hadoop')

    try:
        # Get a cursor object to execute SQL statements.
        cursor = conn.cursor()
        try:
            # Execute the query.
            cursor.execute("""SELECT * FROM spark_test """)

            # Fetch all rows; the result is a list of tuples,
            # one tuple per row.
            result = cursor.fetchall()

            print(result)
            # Example output:
            # [(1, 'zhangsan'), (2, 'lisi'), (3, 'wangwu')]
        finally:
            # Always release the cursor, even if the query fails,
            # so the server-side operation handle is not leaked.
            cursor.close()
    finally:
        # Always close the connection to release the ThriftServer session.
        conn.close()