#!/usr/bin/env python

import os

# Directory holding this script; the DDL file we parse lives next to it.
BASE_DIR, filename = os.path.split(os.path.abspath(__file__))

fname = os.path.join(BASE_DIR, 'ddl.sql')

# Prefix of the HBase bulk-load command; one "INFO:<col>" entry is appended
# to the -Dimporttsv.columns list for every column found in the DDL.
ImportTsv = (
    "hbase org.apache.hadoop.hbase.mapreduce.ImportTsv"
    " -Dimporttsv.separator='|'"
    " -Dimporttsv.columns='HBASE_ROW_KEY,"
)

# Read the DDL and translate it line by line:
#   line 1            -> CREATE TABLE header; token #3 is the table name
#   line 2            -> first column, used as the primary key
#   lines 3..last-1   -> remaining columns (varchar in the INFO family)
#   last line         -> closing ")" of the DDL, deliberately skipped
# Fix: open the file with a context manager so the handle is released even
# if parsing raises (the original left `f` open until the end of the script).
with open(fname, 'rt') as f:
    sql = f.readlines()

phoenix_sql = ''

for n, line in enumerate(sql, start=1):
    sqlline = line.replace('`', '')  # strip MySQL backtick quoting
    if n == 1:
        phoenix_sql += sqlline
        tbname = sqlline.split()[2]
    elif n == 2:
        # First column doubles as the row key / primary key.
        primary_key = s = sqlline.split()[0]
        ImportTsv += "INFO:" + s + ","
        s += " varchar not null,"
        phoenix_sql += s
        print(s)
    elif n > 2 and n < len(sql):
        s = sqlline.split()[0]
        ImportTsv += "INFO:" + s + ','
        phoenix_sql += "INFO." + s + " varchar,"


# Phoenix table names drop any "db." qualifier: keep only the part after
# the dot when the DDL used a qualified name.
name_parts = tbname.split('.')
short_name = name_parts[0] if len(name_parts) == 1 else name_parts[1]
phoenix_sql += (
    "CONSTRAINT {}_PK PRIMARY KEY ({})) column_encoded_bytes=0;".format(
        short_name, primary_key)
)

print(phoenix_sql)


print("-------------------------------------------------\r\n")
# Drop the trailing comma left behind by the per-column loop.
ImportTsv = ImportTsv[:-1]
# HBase wants "namespace:table"; the dump file is named after the bare
# (lower-cased) table name.
if len(tbname.split('.')) == 1:
    hbase_table = tbname.upper()
    file_stem = tbname.lower()
else:
    hbase_table = tbname.upper().replace('.', ':')
    file_stem = tbname.lower().split('.')[1]
ImportTsv += "' '{}' /tmp/csvFiles/{}.txt".format(hbase_table, file_stem)

print(ImportTsv)

f.close()
