package com.atguigu.dga.assessor.calc;

import avro.shaded.com.google.common.collect.Sets;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.dga.assessor.Assessor;
import com.atguigu.dga.governance.bean.AssessParam;
import com.atguigu.dga.governance.bean.GovernanceAssessDetail;
import com.atguigu.dga.util.SqlUtil;
import lombok.Data;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.springframework.stereotype.Component;

import java.math.BigDecimal;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.Stack;

@Component("SIMPLE_PROCESS")
public class SimpleProcessAssessor extends Assessor {

    /**
     * Flags tasks whose SQL is a "simple pass-through": it contains no complex
     * operations (join / group by / function / distinct / union) and every
     * WHERE-clause filter column is a partition column of a referenced table.
     * Such a task adds no real computation and is scored zero.
     *
     * @param governanceAssessDetail result holder; score, problem and comment are written here
     * @param assessParam            assessment context (table metadata maps + task definition)
     * @throws Exception propagated from SQL parsing
     */
    @Override
    public void checkProblem(GovernanceAssessDetail governanceAssessDetail, AssessParam assessParam) throws Exception {
        // Skip ODS-layer tables: raw ingestion is expected to be a simple copy.
        // "ODS".equals(...) is null-safe; the original call order NPE'd on a null level.
        if ("ODS".equals(assessParam.getTableMetaInfo().getTableMetaInfoExtra().getDwLevel())) {
            return;
        }

        // The task SQL to analyse (the getter name keeps the upstream typo "TsakSQL").
        String taskSql = assessParam.getTDsTaskDefinition().getTsakSQL();

        // Walk the Hive AST, collecting complex-operation tokens, referenced
        // tables and WHERE-clause filter columns.
        SelectProcessDispatcher dispatcher = new SelectProcessDispatcher();
        SqlUtil.parseSQL(taskSql, dispatcher);

        // Any complex operation means this is not a simple process — nothing to report.
        if (!dispatcher.getHashSet().isEmpty()) {
            return;
        }

        // Tables referenced by the SQL and the columns filtered on in WHERE.
        HashSet<String> referencedTables = dispatcher.getSqlTableRef();
        HashSet<String> filterColumns = dispatcher.getSqlFilterCol();

        // Union of the partition columns of every referenced table.
        Set<String> partitionColumns = new HashSet<>();
        for (String table : referencedTables) {
            // A referenced table may be absent from the metadata map
            // (e.g. temp/external table) — skip it instead of NPE-ing.
            if (!assessParam.getTableMetaInfoMaps().containsKey(table)) {
                continue;
            }
            String partitionColNameJson =
                    assessParam.getTableMetaInfoMaps().get(table).getPartitionColNameJson();
            // Non-partitioned tables have no partition-column JSON.
            if (partitionColNameJson == null || partitionColNameJson.isEmpty()) {
                continue;
            }
            List<JSONObject> cols = JSONObject.parseArray(partitionColNameJson, JSONObject.class);
            for (JSONObject col : cols) {
                partitionColumns.add(col.getString("name"));
            }
        }

        // Simple process iff every filter column is a partition column.
        // (Vacuously true when there are no filter columns at all.)
        boolean allFiltersArePartitionCols = true;
        for (String filterCol : filterColumns) {
            if (!partitionColumns.contains(filterCol)) {
                allFiltersArePartitionCols = false;
                break; // one non-partition filter disproves it
            }
        }

        if (allFiltersArePartitionCols) {
            governanceAssessDetail.setAssessScore(BigDecimal.ZERO);
            governanceAssessDetail.setAssessProblem("所有的过滤字段都是分区字段");
        }
        governanceAssessDetail.setAssessComment("过滤字段为: " + filterColumns + " , 分区字段为: " + partitionColumns);
    }

    /**
     * Hive AST node visitor: records complex-operation tokens, fully-qualified
     * table references and WHERE-clause filter columns as the tree is walked.
     */
    @Data
    public static class SelectProcessDispatcher implements Dispatcher {

        // Token text of every complex operation found (empty => SQL is "simple").
        private HashSet<String> hashSet = new HashSet<>();

        // Fully-qualified (db.table) names of every table referenced by the SQL.
        private HashSet<String> sqlTableRef = new HashSet<>();

        // Column names used in WHERE-clause comparisons.
        private HashSet<String> sqlFilterCol = new HashSet<>();

        // Schema prepended to unqualified table names.
        // NOTE(review): "gmell" looks like a typo for "gmall" — confirm against project config.
        private String defaultSchemaName = "gmell";

        // Token types that mark a computation as "complex".
        HashSet<Integer> complicateTokSet = Sets.newHashSet(
                HiveParser.TOK_JOIN,            // join, including implicit where-joins
                HiveParser.TOK_GROUPBY,         // group by
                HiveParser.TOK_LEFTOUTERJOIN,   // left join
                HiveParser.TOK_RIGHTOUTERJOIN,  // right join
                HiveParser.TOK_FULLOUTERJOIN,   // full join
                HiveParser.TOK_FUNCTION,        // e.g. count(1)
                HiveParser.TOK_FUNCTIONDI,      // e.g. count(distinct xx)
                HiveParser.TOK_FUNCTIONSTAR,    // e.g. count(*)
                HiveParser.TOK_SELECTDI,        // select distinct
                HiveParser.TOK_UNIONALL         // union / union all
        );

        // Comparison operators that may appear in a WHERE clause.
        Set<Integer> operatorSet = Sets.newHashSet(
                HiveParser.EQUAL,
                HiveParser.GREATERTHAN,
                HiveParser.LESSTHAN,
                HiveParser.GREATERTHANOREQUALTO,
                HiveParser.LESSTHANOREQUALTO,
                HiveParser.NOTEQUAL,
                HiveParser.KW_LIKE
        );

        /**
         * Visits one AST node, updating the collected sets. Always returns null
         * so the tree walk continues.
         */
        @Override
        public Object dispatch(Node node, Stack<Node> stack, Object... objects) throws SemanticException {
            ASTNode astNode = (ASTNode) node;

            // Record complex operations. Set.contains replaces the original
            // manual scan, which relied on fragile Integer-vs-int "==" unboxing.
            if (complicateTokSet.contains(astNode.getType())) {
                hashSet.add(astNode.getToken().getText());
            }

            // Record referenced tables, qualifying bare names with the default schema.
            if (astNode.getType() == HiveParser.TOK_TABNAME) {
                if (astNode.getChildren().size() == 1) {
                    sqlTableRef.add(defaultSchemaName + "." + astNode.getChild(0).getText());
                } else {
                    sqlTableRef.add(astNode.getChild(0).getText() + "." + astNode.getChild(1).getText());
                }
            }

            // Record filter columns: a comparison operator with a TOK_WHERE
            // ancestor whose left child is either a bare column
            // (TOK_TABLE_OR_COL) or a qualified one (DOT -> alias.column).
            if (operatorSet.contains(astNode.getType()) && astNode.getAncestor(HiveParser.TOK_WHERE) != null) {
                ASTNode left = (ASTNode) astNode.getChild(0);
                if (left.getType() == HiveParser.TOK_TABLE_OR_COL) {
                    sqlFilterCol.add(left.getChild(0).getText());
                } else if (left.getType() == HiveParser.DOT) {
                    sqlFilterCol.add(left.getChild(1).getText());
                }
            }

            return null;
        }
    }
}
