import subprocess

# Names of the external CLI binaries invoked by the helpers below.
# Leading-underscore names are excluded from ``from module import *``.
# NOTE(review): __hadoopbin is not referenced in this file chunk --
# presumably used by code elsewhere; do not remove without checking.
__hivebin = "hive"
__hadoopbin = "hadoop"
__sparksqlbin = "spark-sql"

def exec_hive(hql):
    """Execute *hql* with the Hive CLI on the ``etl`` queue.

    Parameters
    ----------
    hql : str
        HiveQL statement(s) to run.  WARNING: interpolated directly into
        a shell command line (shell=True downstream) -- callers must only
        pass trusted HQL.

    Returns
    -------
    (out, err, code) : tuple[str, str, int]
        Same triple as exec_spark_sql.  (Previously this function
        returned None, inconsistently with its sibling helper.)

    Raises
    ------
    Exception
        With the captured stderr text when hive exits non-zero.
    """
    # Command string is identical to the historical "%(...)s" % vars()
    # template, including the embedded newline and leading spaces.
    cmd = f'''{__hivebin} -e "
          set mapreduce.job.queuename=etl;
          {hql}" '''
    out, err, code = exec_shell(cmd)
    if code != 0:
        raise Exception(err)

    return out, err, code


def exec_spark_sql(hql):
    """Run *hql* through the spark-sql CLI on the ``etl`` queue.

    Returns the ``(stdout, stderr, exit_code)`` triple from exec_shell;
    raises ``Exception`` carrying the captured stderr text when the
    spark-sql process exits with a non-zero status.

    NOTE: *hql* is interpolated into a shell command line -- only pass
    trusted input.
    """
    command = '%s --queue etl -e """%s""" ' % (__sparksqlbin, hql)
    stdout_text, stderr_text, status = exec_shell(command)
    if status != 0:
        raise Exception(stderr_text)

    return stdout_text, stderr_text, status


def exec_shell(cmd):
    """Run *cmd* through the system shell and capture its output.

    Parameters
    ----------
    cmd : str
        Full shell command line.  Executed with ``shell=True``, so the
        caller is responsible for ensuring it contains no untrusted
        interpolation (callers in this module splice HQL straight in).

    Returns
    -------
    (out, err, code) : tuple[str, str, int]
        Decoded stdout, decoded stderr (empty string when the command
        succeeded), and the process exit code.
    """
    print("===cmd:")
    print(cmd)
    # BUG FIX: stderr was previously not redirected, so communicate()
    # returned (stdout, None) and stderr.decode() below raised
    # AttributeError on every failing command -- exactly the path the
    # error reporting relies on.  Pipe stderr so it can be captured.
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE,
                         shell=True)

    # Send the data and get the output
    stdout, stderr = p.communicate()

    # To interpret as text, decode
    out = stdout.decode('utf-8')
    code = p.returncode
    # Preserve the original contract: err is "" unless the command failed.
    err = stderr.decode('utf-8') if code != 0 else ""

    return out, err, code
