# -*- coding: utf-8 -*-
### gx
### 生成etl转换脚本
import os

import pymysql
import re
import sys


def get_mysql_conn(host, user, password, db, port):
    """Open and return a pymysql connection with utf8 charset.

    Bug fix: the original ignored every argument and always connected with
    hard-coded credentials; the parameters are now actually used, so the
    values passed by callers (e.g. fetch_mysql_table_columns) take effect.
    """
    connection = pymysql.connect(host=host,
                                 user=user,
                                 password=password,
                                 db=db,
                                 port=port,
                                 charset='utf8'
                                 )
    return connection

def fetch_mysql_table_columns(host, user, password, db, port, table):
    """Return (column_definitions, field_names) for a MySQL table.

    column_definitions: list of "name type comment '...'" strings with the
    MySQL type mapped onto one of the four Hive types used by the DDL
    generators in this module (bigint/int/double/string).
    field_names: plain list of the column names.
    Returns None if iterating the rows fails (the error is printed, not
    raised — preserves the original best-effort behaviour).
    """
    connection = get_mysql_conn(host, user, password, db, port)
    try:
        # DictCursor gives rows keyed by Field/Type/Comment etc.
        with connection.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
            # NOTE(review): table name is interpolated directly into the SQL
            # statement — only call this with trusted table names.
            cursor.execute('SHOW FULL FIELDS FROM {0}'.format(table))
            column_rows = []
            fields = []
            try:
                for row in cursor:
                    # Map MySQL types to Hive types. 'bigint' must be tested
                    # before the generic 'int' substring match.
                    if 'bigint' in row['Type']:
                        row['Type'] = "bigint"
                    elif 'int' in row['Type']:
                        # covers int/tinyint/smallint/mediumint/integer —
                        # all contain the substring 'int'
                        row['Type'] = "int"
                    elif 'double' in row['Type'] or 'float' in row['Type'] or 'decimal' in row['Type']:
                        row['Type'] = "double"
                    else:
                        row['Type'] = "string"
                    column_rows.append(row['Field'] + ' ' + row['Type'] + ' comment \'' + row['Comment'] + '\'')
                    fields.append(row['Field'])
                return column_rows, fields
            except Exception:
                # Narrowed from a bare except: no longer swallows
                # SystemExit/KeyboardInterrupt; still prints and returns None.
                print('程序异常!')
    finally:
        connection.close()

'''
生成hive的canal增量临时外部表(固定存成TEXTFILE格式)
mysql_columns 列定义数组
hive_db hive数据库名
mysql_table_name mysql的表名
table_type_prefix 表类型前缀，比如ods,dwd,ads
data_original 数据来源，比如寰游 hy
suffix 表后缀，如果不知道，则填 ''
column_split 列分隔符号，默认\001
'''
def generate_canal_inc_temp_table(mysql_columns, hive_db, mysql_table_name, table_type_prefix, data_original, suffix, column_split=r'\001'):
    """Build the DDL of the external TEXTFILE temp table fed by canal.

    The table gets four leading canal_* audit columns followed by the
    MySQL columns, and is named <prefix>_<source>_<table>[_<suffix>]_temp.

    mysql_columns -- list of "name type comment '...'" column definitions
    column_split  -- field delimiter of the text files (default \\001)
    Returns the CREATE TABLE statement as a string.
    """
    # The 'default' db contributes no segment to the HDFS location.
    # NOTE(review): that leaves a double slash ('/user/data//<table>') in
    # the location path — confirm this is intended.
    db_path = '' if hive_db == 'default' else hive_db
    name_parts = [table_type_prefix, data_original, mysql_table_name]
    if suffix != '':
        name_parts.append(suffix)
    name_parts.append("temp")
    hive_table_name = '_'.join(name_parts)
    inc_table_header = '''
create external table if not exists {0}.{1}(
    canal_operator_id bigint COMMENT '操作id',
    canal_es bigint COMMENT '数据变更的时间戳',
    canal_ts bigint COMMENT '时间戳',
    canal_ddltype string COMMENT '操作类型',
    '''.format(hive_db, hive_table_name)
    # str.join replaces the original index-tracking append loop.
    remain_columns = ',\n    '.join(mysql_columns)
    inc_table_footer = r'''
)
row format delimited fields terminated by '{0}'
STORED AS TEXTFILE
location '/user/data/{1}/{2}';'''.format(column_split, db_path, hive_table_name)
    return inc_table_header + remain_columns + inc_table_footer

'''
hive_db hive数据库名
mysql_table_name mysql的表名
table_type_prefix 表类型前缀，比如ods,dwd,ads
data_original 数据来源，比如寰游 hy
suffix 表名后缀 比如增量表 inc,如果不知道，则填 ''
part_field 分区字段名，默认用part_time
part_type 分区字段类型，默认用int
stored_type hive数据列存类型，默认存成orc格式
'''
def generate_table(mysql_columns, hive_db, mysql_table_name, table_type_prefix, data_original, suffix, part_field='part_time', part_type='int', stored_type='orc'):
    """Build the DDL of a partitioned Hive table.

    The table is named <prefix>_<source>_<table>[_<suffix>], partitioned by
    part_field/part_type and stored (ORC by default) with SNAPPY compression.
    Returns the CREATE TABLE statement as a string.
    """
    name_parts = [table_type_prefix, data_original, mysql_table_name]
    if suffix != '':
        name_parts.append(suffix)
    hive_table_name = '_'.join(name_parts)
    ddl_head = '''
create table if not exists {0}.{1}(
    '''.format(hive_db, hive_table_name)
    column_block = ',\n    '.join(mysql_columns)
    ddl_tail = r'''
)
partitioned by ({0} {1})
STORED AS {2} TBLPROPERTIES ('orc.compression'='SNAPPY');'''.format(part_field, part_type, stored_type)
    return ddl_head + column_block + ddl_tail



'''
hive_db hive数据库名
mysql_table_name mysql的表名
table_type_prefix 表类型前缀，比如ods,dwd,ads
data_original 数据来源，比如寰游 hy
suffix 表名后缀 比如历史拉链表 his
stored_type hive数据列存类型，默认存成orc格式
'''
def generate_history_table(mysql_columns, hive_db, mysql_table_name, table_type_prefix, data_original, suffix, stored_type='orc'):
    """Build the DDL of the history (zipper) table.

    The MySQL columns are followed by dw_start_date / dw_end_date validity
    columns; the table is named <prefix>_<source>_<table>_<suffix> and is
    unpartitioned, stored (ORC by default) with SNAPPY compression.
    Returns the CREATE TABLE statement as a string.
    """
    hive_table_name = '_'.join((table_type_prefix, data_original, mysql_table_name, suffix))
    ddl_head = '''
create table if not exists {0}.{1}(
    '''.format(hive_db, hive_table_name)
    column_block = ',\n    '.join(mysql_columns)
    ddl_tail = r''',
    dw_start_date STRING comment '记录生效日期开始时间',
    dw_end_date STRING comment '记录生效日期结束时间'
)
STORED AS {0} TBLPROPERTIES ('orc.compression'='SNAPPY');'''.format(stored_type)
    return ddl_head + column_block + ddl_tail

'''
增量数据sql，用于新增数据或者修改数据
columns 列名数组
hive_db 数据库名
mysql_table_name mysql表名
table_type_prefix 表类型前缀，比如ods,dwd,ads
data_original 数据来源，比如寰游 hy
suffix 表名后缀 比如增量表 inc,如果不知道，则填 ''
part_field 分区字段名
date_field 时间字段名
pri_key 主键名
'''
def generate_temp_to_target_hql(columns, hive_db, mysql_table_name, table_type_prefix, data_original, suffix, part_field, date_field, pri_key='id'):
    """Build the HQL that merges increment rows into the target table.

    Keeps, per primary key, the newest row (by es) among existing rows and
    the month's increment rows, writing back only INSERT/UPDATE records.
    Returns the HQL as a string with ${part_time} left as a shell variable.
    """
    base_name = '{0}_{1}_{2}'.format(table_type_prefix, data_original, mysql_table_name)
    if suffix != '':
        base_name = base_name + '_' + suffix
    # NOTE(review): temp_table_name is computed exactly like table_name, so
    # the inner FROM reads the target table itself — presumably it should
    # carry a '_temp' suffix like the other generators; confirm.
    temp_table_name = base_name
    table_name = base_name
    columns_csv = ','.join(columns)
    # e.g. 'part_time=${part_time}' — ${part_time} resolved by the shell.
    part_desc = part_field + '=${part_time}'
    hql = r'''
insert overwrite table {0}.{1} partition({2}) select {3} from ( select a.*,row_number() over(partition by id order by es desc) as rn from ( {3},0 as es,'UPDATE' as ddltype from {0}.{1} where {2} union all SELECT {3}, es , ddltype FROM ( SELECT *, row_number() over (partition BY {4} ORDER BY es DESC) num FROM {0}.{5} where date_format({6},'yyyyMM') = '${{part_time}}') t WHERE t.num=1) a) b where b.rn = 1 and ddltype IN('INSERT','UPDATE')'''.format(hive_db, table_name, part_desc, columns_csv, pri_key, temp_table_name, date_field)
    return hql

'''
生成增量表转换脚本
columns 列名数组
hive_db 数据库名
mysql_table_name mysql表名
table_type_prefix 表类型前缀，比如ods,dwd,ads
data_original 数据来源，比如寰游 hy
suffix 表名后缀 比如增量表 inc,如果不知道，则填 ''
output_path脚本生成的路径
'''
def generate_etl_incrment_table_script(mysql_columns, hive_db, mysql_table_name, table_type_prefix, data_original, suffix, output_path):
    """Write the shell ETL script that merges the canal temp table into the
    increment table, one month partition at a time.

    mysql_columns -- list of plain field names (not DDL lines)
    output_path   -- directory under which etl_scripts/ is created; '' means
                     the current directory
    Writes <output_path>/etl_scripts/mysql_to_hive_<table>.sh; no return.
    NOTE(review): the generated update_hql hard-codes one table's column
    list (id,car_id,factory_code,...) instead of the joined field names —
    confirm, the script must be hand-edited per table (see generated note).

    Fixes vs original: file handle now closed via `with`; directory
    creation uses makedirs(exist_ok=True) instead of bool(1 - exists).
    """
    if suffix == '':
        table_name = "{0}_{1}_{2}".format(table_type_prefix, data_original, mysql_table_name)
        temp_table_name = "{0}_{1}_{2}_{3}".format(table_type_prefix, data_original, mysql_table_name, "temp")
    else:
        table_name = "{0}_{1}_{2}_{3}".format(table_type_prefix, data_original, mysql_table_name, suffix)
        temp_table_name = "{0}_{1}_{2}_{3}_{4}".format(table_type_prefix, data_original, mysql_table_name, suffix, "temp")
    # str.join replaces the original index-tracking append loop.
    columns_csv_prefix = ','.join(mysql_columns)
    content = '''
#!/bin/sh
hive_db=$1
table_name=$2
hiveconfig=$(cat <<"EOF" ${{CUR_PATH}}/incrment_table/etl_scripts/hive.config
EOF
)
TABLE_CONFIG_PATH=${{CUR_PATH}}/incrment_table/etl_scripts/table_configs
source ${{TABLE_CONFIG_PATH}}/Etl_tbl.config
sql="${{hiveconfig}}"
此处需要注意date_str修改为响应的时间字段
hql="select m from ( select date_format(date_str,'yyyyMM') as m from {0}.{1}) temp group by m order by m asc"
data=`hive --database ${{hive_db}} --hiveconf "tez.queue.name=spark" -S -e "${{hql}}"`
for part_time in ${{data}} 
do
if [ ${{part_time}} = "NULL" ];then
       continue
fi
    echo "part_time===================="${{part_time}}
    update_hql="insert overwrite table {0}.{2} partition(part_time=${{part_time}}) select {3} from ( select a.*,row_number() over(partition by id order by es desc) as rn from ( select {3},0 as es,'UPDATE' as ddltype from {0}.{2} where part_time=${{part_time}} union all SELECT id,car_id,factory_code,desc,data_json,status,date_str,es ,ddltype FROM (SELECT *,row_number() over (partition BY id ORDER BY es DESC) num FROM {0}.{1} where date_format(date_str,'yyyyMM') = '${{part_time}}') t WHERE t.num=1) a ) b where b.rn = 1 and ddltype IN('INSERT','UPDATE')"
    echo ${{update_hql}}
    `hive --database ${{hive_db}} --hiveconf "tez.queue.name=spark" -e "${{update_hql}}"`
done
       '''.format(hive_db, temp_table_name, table_name, columns_csv_prefix)
    if output_path == '':
        output_path = '.'
    script_dir = output_path + '/etl_scripts/'
    os.makedirs(script_dir, exist_ok=True)
    with open(script_dir + 'mysql_to_hive_' + table_name + '.sh', 'w') as script_file:
        script_file.write(content)
'''
生成历史拉链表的转换脚本
columns 列名数组
hive_db 数据库名
mysql_table_name mysql表名
table_type_prefix 表类型前缀，比如ods,dwd,ads
data_original 数据来源，比如寰游 hy
suffix 表名后缀 比如增量表 inc,如果不知道，则填 ''
output_path脚本生成的路径
注：脚本生成后，需要修改时间字段
'''
def generate_etl_history_zipper_table_script(mysql_columns, hive_db, mysql_table_name, table_type_prefix, data_original, suffix, output_path):
    """Write the shell ETL script that maintains a history (zipper) table.

    The generated script: (1) loads the canal temp table into the increment
    table partitioned by day, (2) rebuilds a history temp table by closing
    changed rows (dw_end_date) and appending new versions, (3) overwrites
    the history table from the temp table.

    mysql_columns -- list of plain field names (not DDL lines)
    output_path   -- directory under which etl_scripts/ is created; '' means
                     the current directory
    Writes <output_path>/etl_scripts/mysql_to_hive_<table>.sh; no return.
    NOTE(review): the generated script contains 主键 (primary key)
    placeholders and a hard-coded modifiedtime field — it must be
    hand-edited before use (see the generated notes inside it).

    Fixes vs original: file handle now closed via `with`; directory
    creation uses makedirs(exist_ok=True) instead of bool(1 - exists).
    """
    if suffix == '':
        table_name = "{0}_{1}_{2}".format(table_type_prefix, data_original, mysql_table_name)
        temp_table_name = "{0}_{1}_{2}_{3}".format(table_type_prefix, data_original, mysql_table_name, "temp")
    else:
        table_name = "{0}_{1}_{2}_{3}".format(table_type_prefix, data_original, mysql_table_name, suffix)
        temp_table_name = "{0}_{1}_{2}_{3}_{4}".format(table_type_prefix, data_original, mysql_table_name, suffix, "temp")

    # Plain and 'a.'-prefixed comma-separated column lists (join idiom
    # replaces the two original index-tracking loops).
    columns_csv = ','.join(mysql_columns)
    columns_csv_prefix = ', '.join('a.' + column for column in mysql_columns)

    # History tables always live in the dwd layer with a 'his' suffix.
    history_table_name = '{0}_{1}_{2}_{3}'.format('dwd', data_original, mysql_table_name, 'his')
    history_temp_table_name = '{0}_{1}_{2}_{3}_{4}'.format('dwd', data_original, mysql_table_name, 'his', 'temp')
    content = '''
#!/bin/sh

hive_db=$1
table_name=$2
if [ -z ${{CUR_PATH}} ];then
   CUR_PATH=$(cd "$(dirname "$0")";cd ..;cd ..;pwd)
fi
hiveconfig=$(cat <<"EOF" ${{CUR_PATH}}/history_zipper_table/etl_scripts/hive.config
EOF
)
TABLE_CONFIG_PATH=${{CUR_PATH}}/history_zipper_table/etl_scripts/table_configs
source ${{TABLE_CONFIG_PATH}}/{1}.config
sql="${{hiveconfig}}"

exterminate_time=99991231
echo "======================"${{exec_day}}
if [ -z ${{exec_day}} ];then
   exec_day=`date +%Y%m%d`
   echo "-------------------"${{exec_day}}
fi
echo "******************"${{exec_day}}
part_time=`date -d "${{exec_day}} 1 days ago" +%Y%m%d`
yesterday_time=`date -d "${{exec_day}} 2 days ago" +%Y%m%d`
### 1.从orders表导入数据到增量临时表，增量临时表将数据格式化之后存储到增量表中
注意修改时间字段，需要加上时间函数来处理,date_format(createtime,'yyyyMMdd'),date_format(modifiedtime,'yyyyMMdd'), partition BY id,date_format(modifiedtime,'yyyyMMdd')
step1_hive_sql="INSERT overwrite TABLE {0}.{1} PARTITION (part_time) SELECT {3},date_format(modifiedtime,'yyyyMMdd') FROM ( SELECT *,row_number() over (partition BY id,date_format(modifiedtime,'yyyyMMdd') ORDER BY es DESC) num FROM {0}.{2}) t WHERE t.num=1 AND ddltype IN('INSERT', 'UPDATE')"

echo ${{step1_hive_sql}}
`hive --database ${{hive_db}} --hiveconf "tez.queue.name=spark" -e "${{step1_hive_sql}}"`
if [ $? -ne 0 ]; then
   return 1
fi
### 2.从增量表将数据导入到历史拉链临时表
step2_hive_sql=$(cat <<EOF 
DROP TABLE IF EXISTS {0}.{4};
CREATE TABLE {0}.{4} AS 
SELECT 
{3},dw_start_date,dw_end_date 
FROM (
    SELECT 
    {5},
    a.dw_start_date,
    CASE WHEN b.主键 IS NOT NULL AND a.dw_end_date > '${{part_time}}' THEN '${{part_time}}' ELSE a.dw_end_date END AS dw_end_date 
    FROM {0}.{6} a 
    left outer join (SELECT * FROM {1} WHERE part_time = '${{part_time}}') b 
    ON (a.主键 = b.主键) 
    UNION ALL 
    SELECT 
    {3},
    modifiedtime AS dw_start_date,
    '${{exterminate_time}}' AS dw_end_date 
    FROM {1} 
    WHERE part_time = '${{part_time}}' 
) x 
ORDER BY 主键,dw_start_date
EOF
)
echo ${{step2_hive_sql}}
`hive --database ${{hive_db}} --hiveconf "tez.queue.name=spark" -e "${{step2_hive_sql}}"`
if [ $? -ne 0 ]; then
   return 1
fi
### 3.从历史临时表将数据导入到历史拉链表
step3_hive_sql="INSERT overwrite TABLE {0}.{6} SELECT * FROM {0}.{4}"
echo ${{step3_hive_sql}}
`hive --database ${{hive_db}} --hiveconf "tez.queue.name=spark" -e "${{step3_hive_sql}}"`

    '''.format(hive_db, table_name, temp_table_name, columns_csv, history_temp_table_name, columns_csv_prefix, history_table_name)
    if output_path == '':
        output_path = '.'
    script_dir = output_path + '/etl_scripts/'
    os.makedirs(script_dir, exist_ok=True)
    with open(script_dir + 'mysql_to_hive_' + table_name + '.sh', 'w') as script_file:
        script_file.write(content)


if __name__ == '__main__':
    # Entry point: read the column metadata of one MySQL table, then emit
    # the history-zipper ETL shell script into ./etl_scripts/.
    # NOTE(review): credentials are hard-coded here (and currently also
    # hard-coded inside get_mysql_conn) — move them to configuration.
    mysql_columns, cols = fetch_mysql_table_columns('10.30.50.22','zhdbuser','zhdbuser123','product_manage_0724',3306, 'etl_orders2')
    # The commented-out calls below are usage examples for the other
    # generators in this module (temp/increment/history DDL and scripts).
    # table_temp = generate_canal_inc_temp_table(mysql_columns, 'qingqi', 'etl_orders2', 'ods', 'hy', '')
    # inc_table_temp = generate_canal_inc_temp_table(mysql_columns,'qingqi', 'etl_orders2', 'ods', 'hy', 'inc')
    # inc_table = generate_table(mysql_columns,'qingqi', 'etl_orders2', 'ods', 'hy', 'inc')
    # table=generate_table(mysql_columns,'qingqi', 'etl_orders2', 'ods', 'hy', '')
    # his_table = generate_history_table(mysql_columns,'qingqi', 'etl_orders2', 'dwd', 'hy', 'his')
    # print('临时表结构')
    # print(table_temp)
    # print("------------------------------------------------------------------------------------")
    # print('表结构')
    # print(table)
    # print("------------------------------------------------------------------------------------")
    # print('增量临时表结构')
    # print(inc_table_temp)
    # print("------------------------------------------------------------------------------------")
    # print('增量表结构')
    # print(inc_table)
    # print("------------------------------------------------------------------------------------")
    # print('拉链表历史表结构')
    # print(his_table)
    # hql = generate_temp_to_target_hql(cols, 'qingqi', 'etl_orders1', 'dwd', 'hy', '', 'part_time','date_str', 'id')
    # print(hql)
    # generate_etl_history_zipper_table_script(cols,'qingqi', 'etl_orders1', 'ods', 'hy', 'inc', '.')
    # generate_etl_incrment_table_script(cols,'qingqi', 'etl_orders1', 'ods', 'hy', '', '.')
    generate_etl_history_zipper_table_script(cols,'qingqi', 'etl_orders2', 'ods', 'hy', 'inc', '.')
