package com.atguigu.dga.governance.assess.calc;


import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.dga.governance.assess.Assessor;
import com.atguigu.dga.governance.bean.AssessParam;
import com.atguigu.dga.governance.bean.GovernanceAssessDetail;
import com.atguigu.dga.meta.bean.TableMetaInfo;
import com.atguigu.dga.util.SqlParser;
import com.google.common.collect.Sets;
import lombok.Getter;
import lombok.Setter;
import org.antlr.runtime.tree.Tree;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.springframework.stereotype.Component;

import java.math.BigDecimal;
import java.util.*;
import java.util.stream.Collectors;

@Component("IS_SIMPLE_PROCESS")
public class IsSimpleProcessAssessor extends Assessor {

    /*
     * Rule: if the SQL contains no complex operation (join / group by / union /
     * function call / distinct) AND every column filtered in the WHERE clause is a
     * partition column of the (single) source table, the task counts as "simple
     * processing" and scores 0; otherwise the default score stands.
     *
     * Steps:
     *   1. parse the SQL into an AST (via SqlParser)
     *   2. walk the tree with a dispatcher that collects
     *        - tokens of complex operations
     *        - source tables (schema.table)
     *        - columns referenced under WHERE
     *   3. judge from the collected sets:
     *        3.1 any complex operation -> just record a comment
     *        3.2 otherwise compare the WHERE columns against the source table's
     *            partition columns; if all are partition columns -> score 0
     */

    /**
     * Assesses one task's SQL for "simple processing".
     *
     * @param governanceAssessDetail result holder; score/problem/comment are written here
     * @param assessParam            input bundle: task definition (SQL), target table meta,
     *                               and the map of all known table metadata keyed by "schema.table"
     * @throws Exception propagated from SQL parsing
     */
    @Override
    public void checkProblem(GovernanceAssessDetail governanceAssessDetail, AssessParam assessParam) throws Exception {
        // No scheduled task or no SQL attached -> nothing to assess.
        if (assessParam.getTDsTaskDefinition() == null) {
            return;
        }
        String sql = assessParam.getTDsTaskDefinition().getSql();
        if (StringUtils.isBlank(sql)) {
            return;
        }
        TableMetaInfo tableMetaInfo = assessParam.getTableMetaInfo();
        String schemaName = tableMetaInfo.getSchemaName();

        // Walk the AST once, collecting complex tokens, source tables and WHERE columns.
        CheckSimpleDispatcher checkSimpleDispatcher = new CheckSimpleDispatcher();
        checkSimpleDispatcher.setDefaultSchemaName(schemaName);
        SqlParser.parse(sql, checkSimpleDispatcher);

        Map<String, TableMetaInfo> allTableMetaInfoMap = assessParam.getAllTableMetaInfoMap();

        Set<String> complicationTokSet = checkSimpleDispatcher.getComplicationTokSet();
        Set<String> fromTableSet = checkSimpleDispatcher.getFromTableSet();
        Set<String> whereFieldSet = checkSimpleDispatcher.getWhereFieldSet();

        // 3.1 Any complex operation -> not simple; record which ones and keep default score.
        if (!complicationTokSet.isEmpty()) {
            governanceAssessDetail.setAssessComment("含复杂计算 ：" + StringUtils.join(complicationTokSet, ","));
            return;
        }

        // 3.2 Only the single-source-table case is considered.
        if (fromTableSet.size() != 1) {
            return;
        }

        List<String> nonPartitionColNameList = new ArrayList<>();
        for (String fromTableName : fromTableSet) {
            TableMetaInfo fromTableMetaInfo = allTableMetaInfoMap.get(fromTableName);
            if (fromTableMetaInfo == null) {
                // Metadata of the source table is unknown (e.g. table outside the
                // governed warehouse) — cannot judge, skip instead of throwing NPE.
                return;
            }
            // Partition columns, stored as a JSON array of {"name": ..., ...} objects;
            // may be null/blank for a non-partitioned table.
            String partitionColNameJson = fromTableMetaInfo.getPartitionColNameJson();
            Set<String> partitionColNameSet = new HashSet<>();
            if (StringUtils.isNotBlank(partitionColNameJson)) {
                List<JSONObject> partitionColJsonObjList = JSON.parseArray(partitionColNameJson, JSONObject.class);
                partitionColNameSet = partitionColJsonObjList.stream()
                        .map(jsonObject -> jsonObject.getString("name"))
                        .collect(Collectors.toSet());
            }
            // Every WHERE column must be a partition column; collect the offenders.
            for (String whereFieldName : whereFieldSet) {
                if (!partitionColNameSet.contains(whereFieldName)) {
                    nonPartitionColNameList.add(whereFieldName);
                }
            }
        }

        if (nonPartitionColNameList.isEmpty()) {
            // Simple processing (this also covers SQL with no WHERE clause at all).
            governanceAssessDetail.setAssessScore(BigDecimal.ZERO);
            governanceAssessDetail.setAssessProblem("属于简单处理，不含复杂处理元素且字段均为分区字段");
        } else {
            governanceAssessDetail.setAssessComment("不含复杂计算，但是包含非分区字段 ：" + StringUtils.join(nonPartitionColNameList, ","));
        }
    }

    /**
     * AST visitor that collects, in one pass over the parsed SQL:
     * <ul>
     *   <li>{@code complicationTokSet} — tokens of "complex" operations
     *       (joins, group by, union all, functions, distinct)</li>
     *   <li>{@code fromTableSet} — fully-qualified source tables ("schema.table";
     *       unqualified names get {@code defaultSchemaName})</li>
     *   <li>{@code whereFieldSet} — column names referenced under a WHERE clause</li>
     * </ul>
     */
    public class CheckSimpleDispatcher implements Dispatcher {

        // Schema prepended to unqualified table names.
        @Setter
        String defaultSchemaName = null;

        // 3.1 Collected complex-operation tokens (join / group by / union / functions).
        @Getter
        Set<String> complicationTokSet = new HashSet<>();

        // 3.2 Collected source tables, as "schema.table".
        @Getter
        Set<String> fromTableSet = new HashSet<>();

        // 3.3 Collected columns referenced under WHERE.
        @Getter
        Set<String> whereFieldSet = new HashSet<>();

        // Token types considered "complex computation".
        Set<Integer> complicationTypeSet = Sets.newHashSet(
                HiveParser.TOK_JOIN,            // inner join (incl. where-style joins)
                HiveParser.TOK_GROUPBY,         // group by
                HiveParser.TOK_LEFTOUTERJOIN,   // left join
                HiveParser.TOK_RIGHTOUTERJOIN,  // right join
                HiveParser.TOK_FULLOUTERJOIN,   // full join
                HiveParser.TOK_FUNCTION,        // e.g. count(1)
                HiveParser.TOK_FUNCTIONDI,      // e.g. count(distinct xx)
                HiveParser.TOK_FUNCTIONSTAR,    // e.g. count(*)
                HiveParser.TOK_SELECTDI,        // select distinct
                HiveParser.TOK_UNIONALL         // union
        );

        /**
         * Called for every AST node; routes the node into the three collections above.
         * Always returns {@code null} (no per-node output is propagated).
         */
        @Override
        public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
            ASTNode astNode = (ASTNode) nd;

            // 1. Collect complex operations in the SQL.
            if (complicationTypeSet.contains(astNode.getType())) {
                complicationTokSet.add(astNode.getText());
            }

            // 2. Collect source tables (TOK_TABREF -> child 0 is TOK_TABNAME).
            if (astNode.getType() == HiveParser.TOK_TABREF) {
                ASTNode tokTableNameNode = (ASTNode) astNode.getChild(0);
                if (tokTableNameNode.getChildCount() == 1) {
                    // Table without schema -> qualify with the default schema.
                    String tableName = tokTableNameNode.getChild(0).getText();
                    fromTableSet.add(defaultSchemaName + "." + tableName);
                } else {
                    // Schema-qualified table: child 0 = schema, child 1 = table.
                    String tableSchemaName = tokTableNameNode.getChild(0).getText();
                    String tableName = tokTableNameNode.getChild(1).getText();
                    fromTableSet.add(tableSchemaName + "." + tableName);
                }
            }

            // 3. Collect columns referenced under a WHERE clause.
            // BUGFIX: the node must HAVE a TOK_WHERE ancestor (original tested
            // ==null, which collected every column OUTSIDE the where clause —
            // select-list columns etc. — so the partition-column check never held).
            if (astNode.getType() == HiveParser.TOK_TABLE_OR_COL
                    && astNode.getAncestor(HiveParser.TOK_WHERE) != null) {
                if (astNode.getParent().getType() == HiveParser.DOT) {
                    // Parent is '.' -> this node is the table alias; the column
                    // identifier is the sibling child(1) of the DOT node.
                    whereFieldSet.add(astNode.getParent().getChild(1).getText());
                } else {
                    // Plain column reference: the identifier is child(0).
                    whereFieldSet.add(astNode.getChild(0).getText());
                }
            }

            return null;
        }
    }
}
