package com.ideal.hadoopadmin.crontab.entity;

import java.util.ArrayList;
import java.util.List;

/**
 * Created by 袁颖 on 2016/3/22.
 * 用来存放直接用sql导入数据表语句
 */
public class Sql {
    public  final static  String CLUSTER_PARAMETER_SQL="INSERT INTO `cluster_parameter` (`id`, `parameterKey`, `parameterVal`, `parentId`, `note`) VALUES\n" +
            "(1, 'WEBAPP_INIT_PATH', '/home/I-Hadoop/cc/test/shell/init', 22, '初始系统参数地址'),\n" +
            "\t(2, 'HIVE_HDFSPATH_PREFIX', 'hdfs://ns3', 20, ''),\n" +
            "\t(4, 'HDFS_SUPER_USER', 'hdfs', 21, ''),\n" +
            "\t(5, 'HDFS_CLUSH_DN_RANGE', 'ddp-tst-dn', 21, ''),\n" +
            "\t(6, 'HDFS_GROUP_SUFFIX', '_group', 21, ''),\n" +
            "\t(7, 'HDFS_HIVE_TEMP_DIR', '/tmp/hive-', 21, ''),\n" +
            "\t(8, 'HDFS_PATH_PREFIX', '/user/', 21, ''),\n" +
            "\t(9, 'HDFS_PUBLIC_PATH_SUFFIX', '/public', 21, ''),\n" +
            "\t(10, 'HDFS_TRASH_PATH_SUFFIX', '/.Trash', 21, ''),\n" +
            "\t(11, 'HDFS_PRIVATE_PATH_SUFFIX', '/private', 21, ''),\n" +
            "\t(12, 'HIVE_CLIENT_LOG_DIR', '/tmp/', 20, ''),\n" +
            "\t(13, 'HIVE_SENTRY_PRINCIPAL', 'principal=hive/ddp-tst-203@EXAMPLE.COM', 20, ''),\n" +
            "\t(14, 'HIVE_SENTRY_JDBC', 'jdbc:hive2://localhost:10000/', 20, ''),\n" +
            "\t(15, 'HIVE_SENTRY_ROLE_TEMPLET', 'role_@item@_@principle@_@path@', 20, ''),\n" +
            "\t(16, 'HIVE_SUPER_USER', 'hive', 20, ''),\n" +
            "\t(17, 'KDC_REALM', '@EXAMPLE.COM', 21, 'KDC认证'),\n" +
            "\t(18, 'Des_Ticket_Cache_Path', '/home/I-Hadoop/cc/test/shell/ticket_cache/', 22, 'kerberos ticket cache 存放地址'),\n" +
            "\t(19, 'Linux_Group_Path', '/etc/group', 22, 'Linux组信息_初始hdfsvisiter信息'),\n" +
            "\t(20, 'Hive', 'Hive parameters', 0, 'hive参数'),\n" +
            "\t(21, 'HADOOP', 'Hadoop parameters', 0, 'hadoop参数'),\n" +
            "\t(22, 'SYSTEM', 'System parameter', 0, '系统配置参数'),\n" +
            "\t(23, 'RM_SCHEDULE_XML_DES_PATH', '/tmp/fair-scheduler.xml', 21, ''),\n" +
            "\t(24, 'RM_SCHEDULE_XML_FILE_NAME', 'fair-scheduler.xml', 21, ''),\n" +
            "\t(25, 'RM_SCHEDULE_XML_SRC_PATH', 'c:\\\\', 21, ''),\n" +
            "\t(28, 'WEBAPP_SHELL_PATH', '/home/I-Hadoop/cc/test/shell/sh/', 0, ''),\n" +
            "\t(29, 'PublicPw', '12345', 22, '公共的验证密码'),\n" +
            "\t(30, 'Is_Use_BeeLine', '1', 22, '1:启动beeline;0:关闭beeline'),\n" +
            "\t(31, 'HIVE_ROLE_TAMPLE', 'role_@item@_@principle@_@path@', 20, 'hive role 模板'),\n" +
            "\t(32, 'HIVE_USERNAME', 'hadoop', 20, 'hive用户');";
    //1.cluster_user表数据导入,不需要放入sqlList里面
    public final static String CLUSTER_USER_SQL = "insert into cluster_user(userName, clientPW, systemPW, companyId, contactPerson,\n" +
            "phoneNumber, email, remark, userTypeId, status, clusterTypeId, createTime,changeTime)\n" +
            "select h.hp_user_name,\n" +
            "h.hp_user_pw,\n" +
            "h.hp_user_syspw,\n" +
            "case\n" +
            "when h.hp_company_id is null then 1 else h.hp_company_id end,\n" +
            "case\n" +
            "when h.hp_contacts is null then '理想' else h.hp_contacts end,\n" +
            "case\n" +
            "when h.hp_phone is null then '18516248888' else h.hp_phone end,\n" +
            "case\n" +
            "when h.hp_email is null then 'idea@123.com' else h.hp_email end,\n" +
            "case\n" +
            "when h.hp_remarks is null then '老用户迁移' else h.hp_remarks end,\n" +
            "case\n" +
            "when h.hp_user_type is null then 1 else h.hp_user_type end,\n" +
            "case\n" +
            "when h.hp_user_status=1 then 0\n" +
            "when h.hp_user_status=2 then 1\n" +
            "else 3 end,\n" +
            "case\n" +
            "when h.hp_culster is null then 3 else h.hp_culster end,\n" +
            "case\n" +
            "when h.hp_createtime is null then unix_timestamp() * 1000\n" +
            "when h.hp_createtime then unix_timestamp() * 1000 end,\n" +
            "case\n" +
            "when h.hp_createtime is null then unix_timestamp() * 1000\n" +
            "when h.hp_createtime then unix_timestamp() * 1000 end\n" +
            "FROM hadoopuser h";

    //2.cluster_machine表导入数据
    public final static String CLUSTER_MACHINE_SQL = "INSERT INTO cluster_machine (id,machineIp, loginUserName, loginPassWord, clusterTypeId,\n" +
            "machineTypeId, isAddUser, status, note)\n" +
            "select h.id,h.mc_ip,h.mc_user,h.mc_pw,h.cluster_type,h.mc_type,h.add_user,h.mc_status,h.mc_remarks\n" +
            "from machine_info h";
    //3.导入数据到cluster_user_hdfsquota表
        public final static String CLUSTER_USER_HDFSQUOTA = "Replace INTO cluster_user_hdfsquota (id, userId, hdfsSpace, hdfsFileCount, hdfsSpaceUnit, hdfsPath)\n" +
            "select hh.id,cu.id,hh.quota_space,hh.file_count,hh.unit,\n" +
            "case\n" +
            "when hh.path is null then '/user/test' else hh.path end\n" +
            "from hadoopuser_hdfs hh\n" +
            "join hadoopuser h\n" +
            "on hh.username=h.hp_user_id\n" +
            "join cluster_user cu\n" +
            "on h.hp_user_name=cu.userName";
//    4 导入数据到cluster_user_kbrauth表,machineIp只查找机器类型是client的ip
//    public final static String CLUSTER_USER_KBRAUTH = "insert into cluster_user_kbrauth(`id`, `userId`, `machineId`, `startTime`, `endTime`, `principal`, `ticketPath`, `status`, `createTime`)\n" +
//        "select ka.id,cu.id,cm.id,\n" +
//        "case when ka.ker_start_time ='' then 0 else\n" +
//        "UNIX_TIMESTAMP(str_to_date(ka.ker_start_time,'%m/%d/%Y %H:%i:%s'))*1000 end,\n" +
//        "case when ka.ker_end_time ='' then 0 else\n" +
//        "UNIX_TIMESTAMP(str_to_date(ka.ker_end_time,'%m/%d/%Y %H:%i:%s'))*1000 end,\n" +
//        "ka.ker_princ,\n" +
//        "ka.ticket_cahce_path,\n" +
//        "ka.ker_status,\n" +
//        "unix_timestamp(ka.create_time)*1000\n" +
//        "from kdc_auth ka\n" +
//        "join hadoopuser h\n" +
//        "on   h.hp_user_id=ka.hp_user_id\n" +
//        "join cluster_user cu\n" +
//        "on h.hp_user_name=cu.userName\n" +
//        "left join cluster_machine cm\n" +
//        "on ka.mc_ip= cm.machineIp\n" +
//        "where cm.machineTypeId=3;";
//    6 将cluster_user_queue表数据导入
    public static final String CLUSTER_USER_QUEUE = "Replace into cluster_user_queue(`id`, `userId`, `queueName`, `minResource`, `maxResource`, `maxApp`, `weight`, `acl`, `minCpu`, `maxCpu`, `modifyTime`)\n" +
        "select fsa.id,cu.id,fsa.queue,fsa.min_memory,fsa.max_memory,fsa.max_apps,fsa.weight,fsa.acl,fsa.min_cpu,fsa.max_cpu,\n" +
        "unix_timestamp(str_to_date(fsa.update_time,'%Y-%m-%d %H:%i:%s'))*1000\n" +
        "from fair_scheduler_allocation fsa\n" +
        "join hadoopuser h\n" +
        "on   h.hp_user_id= fsa.username\n" +
        "join cluster_user cu\n" +
        "on h.hp_user_name=cu.userName";
//    7 导入system_company数据
    public static final String SYSTEM_COMPANY ="insert into system_company(`id`, `companyName`, `note`, `contactName`, `contactMobile`, `delTag`)\n" +
        " select c.id,c.NAME,c.REMARK,c.CONTACT_NAME,c.CONTACT_MOBILE,c.DEL_FLAG\n" +
        " from t_customer c";
//    8 导入cluster_machine_type数据
    public static final String CLUSTER_MACHINE_TYPE = " INSERT INTO `cluster_machine_type` (`machineTypeId`, `machineTypeName`, `note`)\n" +
        "    select mt.id,mt.name,\n" +
        "            case\n" +
        "    when mt.note is null then mt.name else mt.note end\n" +
        "    from d_machin_type mt";
//    9 导入cluster_user_type数据 (直接设置对应的值)
    public static final String FIRM_USER = " REPLACE INTO `cluster_user_type` (`id`, `userTypeId`, `userTypeName`, `note`) VALUES (3, '3', '其他用户', ' ')";
    public static final String INNER_USER = "REPLACE INTO `cluster_user_type` (`id`, `userTypeId`, `userTypeName`, `note`) VALUES (2, '2', '内部用户', '');";
    public static final String OTHER_USER ="REPLACE INTO `cluster_user_type` (`id`, `userTypeId`, `userTypeName`, `note`) VALUES (1, '1', '厂商用户', '');";
//    10 导入 META_HDFS_INFO数据
    public final static String META_HDFS_INFO = "REPLACE INTO meta_hdfs_info (id, hdfsPath, clusterUserId, hdfsGroup, hdfsOwner, hdfsPerm, note, createTime)\n" +
        " SELECT hdfs_info.id,hdfs_info.hp_folder_name hdfsPath,cluster_user.id clusterUserId,hdfs_info.hp_folder_group hdfsGroup," +
        " hdfs_info.hp_folder_owner hdfsOwner,hdfs_info.hp_folder_auth hdfsPerm,ifnull(hdfs_info.hp_remarks,'') note,UNIX_TIMESTAMP"+
        " (hdfs_info.hp_createtime) createTime FROM hdfs_info JOIN hadoopuser ON hdfs_info.hp_user_id=hadoopuser.hp_user_id JOIN "+
        " cluster_user ON hadoopuser.hp_user_name=cluster_user.userName and hdfs_info.hp_folder_name like '%public%'";
//    11 导入 META_HIVE_ACCESS数据
    public final static String META_HIVE_ACCESS = "REPLACE INTO meta_hive_access (clusterUserId, hiveInfoId, createTime)\n\n" +
        "  SELECT ee.hiveInfoId,cluster_user.id,ee.createTime  FROM( SELECT hiveInfoId,hive_visit_user,createTime"+
        "  FROM ( SELECT aaa.clusterUserId,aaa.hive_visit_user,aaa.hp_tab_name,meta_hive_info.id hiveInfoId,"+
        "  aaa.createTime FROM (SELECT hive_visiters.hp_user_id, cluster_user.id clusterUserId,hive_visiters.hp_tab_name,"+
        "  hive_visiters.hive_visit_user,UNIX_TIMESTAMP(hive_visiters.modify_time) createTime FROM hive_visiters JOIN"+
        "  hadoopuser ON hive_visiters.hp_user_id=hadoopuser.hp_user_id JOIN cluster_user ON hadoopuser.hp_user_name"+
        "  =cluster_user.userName ) aaa JOIN meta_hive_info ON aaa.clusterUserId=meta_hive_info.clusterUserId AND aaa.hp_tab_name = "+
        "  meta_hive_info.tableName) dd ) ee JOIN hadoopuser ON ee.hive_visit_user=hadoopuser.hp_user_id JOIN"+
        "   cluster_user ON hadoopuser.hp_user_name=cluster_user.userName";
    //    12导入 META_HDFS_ACCESS数据
    public final static String META_HDFS_ACCESS = "REPLACE INTO meta_hdfs_access (id,clusterUserId,hdfsInfoId,createTime)\n" +
        " SELECT eee.id,cluster_user.id clusterUserId,metaHdfsInfoId,eee.createTime from  ("+
        " SELECT aaa.id,aaa.hdfsVisitUserId,meta_hdfs_info.id metaHdfsInfoId,aaa.createTime FROM (SELECT cc.id,"+
        " cluster_user.id clusterUserId,cc.hdfs_visit_user hdfsVisitUserId,ifnull(UNIX_TIMESTAMP(cc.modify_time),"+
        " UNIX_TIMESTAMP(now()+1)) createTime,cc.folder_name FROM ( SELECT * FROM ( SELECT hdfs_visiters.id,hdfs_visiters.folder_name,"+
        " hdfs_visiters.hp_user_id,hdfs_visiters.hdfs_visit_user,hdfs_visiters.modify_time ,hdfs_info.id infoId,hdfs_info.hp_folder_name"+
        " FROM hdfs_visiters JOIN hdfs_info ON hdfs_visiters.hp_user_id=hdfs_info.hp_user_id AND hdfs_info.hp_folder_name="+
        " hdfs_visiters.folder_name and hdfs_visiters.folder_name  LIKE '%public%') aa WHERE aa.infoId IS NOT NULL AND "+
        " aa.folder_name LIKE '%public%') cc JOIN hadoopuser ON cc.hp_user_id=hadoopuser.hp_user_id JOIN cluster_user ON "+
        " hadoopuser.hp_user_name=cluster_user.userName ) aaa JOIN meta_hdfs_info ON aaa.clusterUserId="+
        " meta_hdfs_info.clusterUserId AND aaa.folder_name=meta_hdfs_info.hdfsPath ) eee JOIN hadoopuser ON "+
        " eee.hdfsVisitUserId=hadoopuser.hp_user_id JOIN cluster_user ON hadoopuser.hp_user_name=cluster_user.userName";

    public static List<String> getSqlList(){
        List<String> sqlList = new ArrayList<String>();
        sqlList.add(CLUSTER_USER_HDFSQUOTA);
        sqlList.add(CLUSTER_USER_QUEUE);
        sqlList.add(SYSTEM_COMPANY);
//        sqlList.add(CLUSTER_MACHINE_TYPE);
        sqlList.add(FIRM_USER);
        sqlList.add(INNER_USER);
        sqlList.add(OTHER_USER);
        sqlList.add(META_HDFS_INFO);
        return sqlList;
    }
}
