package com.atguigu.dga.governance.assess.impl.cacl;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.dga.dolphinscheduler.bean.TDsTaskDefinition;
import com.atguigu.dga.governance.assess.Assessor;
import com.atguigu.dga.governance.bean.AssessParam;
import com.atguigu.dga.governance.bean.GovernanceAssessDetail;
import com.atguigu.dga.governance.util.SqlParser;
import com.atguigu.dga.meta.bean.TableMetaInfo;
import com.atguigu.dga.meta.constants.MetaConst;
import com.google.common.collect.Sets;
import groovy.sql.Sql;
import lombok.Getter;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.springframework.stereotype.Component;

import java.math.BigDecimal;
import java.text.ParseException;
import java.util.*;

@Component("SIMPLE_PROCESS")
public class SimpleProcessAssessor extends Assessor {

    /*
     * "Simple processing" check:
     * 1. Take the SQL of the scheduled task that produces the assessed table.
     * 2. Parse the SQL into an AST and collect:
     *    (a) complex operations (group by / join / left|right|full join / union all /
     *        function calls / distinct),
     *    (b) the column names used in filter predicates,
     *    (c) the names of the tables being read.
     * 3. The SQL counts as "simple processing" (score 0) when it contains no complex
     *    operation AND every filter column is a partition column of the source table.
     */
    @Override
    protected void assess(AssessParam assessParam, GovernanceAssessDetail governanceAssessDetail) throws ParseException {

        // Skip ODS-layer tables (they have no producing SQL) and tables whose task
        // was not scheduled today (no task definition available).
        if (assessParam.getTableMetaInfo().getTableMetaInfoExtra().getDwLevel().equals(MetaConst.DW_LEVEL_ODS)
                || assessParam.getTDsTaskDefinition() == null) {
            return;
        }

        TDsTaskDefinition tDsTaskDefinition = assessParam.getTDsTaskDefinition();
        String sql = tDsTaskDefinition.getSql();
        MyDispatcher dispatcher = new MyDispatcher();
        SqlParser.sqlParse(dispatcher, sql);

        // Schema of the assessed table; source tables are looked up in the same schema.
        String schemaName = assessParam.getTableMetaInfo().getSchemaName();

        // Record what the parse found, for the assessment comment.
        governanceAssessDetail.setAssessComment("涉及操作:" + dispatcher.getProcessSet()
                + " 过滤字段:" + dispatcher.getWhereFieldSet()
                + " 查询的表:" + dispatcher.getFromTables());

        // Any complex operation disqualifies the SQL from being "simple" — stop here.
        if (!dispatcher.getProcessSet().isEmpty()) {
            return;
        }

        // No complex operation: check whether the filters only touch partition columns.
        Set<String> whereFieldSet = dispatcher.getWhereFieldSet();
        for (String tableName : dispatcher.getFromTables()) {
            TableMetaInfo tableMetaInfo = assessParam.getTableMetaInfoMap().get(schemaName + "." + tableName);
            // Fix: the referenced table may be absent from the metadata map
            // (e.g. cross-schema or unknown table); the original code NPE'd here.
            if (tableMetaInfo == null) {
                continue;
            }
            if (isPartitionOnlyFilter(whereFieldSet, tableMetaInfo)) {
                governanceAssessDetail.setAssessScore(BigDecimal.ZERO);
                governanceAssessDetail.setAssessProblem("是简单处理");
            }
        }
    }

    /**
     * Returns true when every filter column is a partition column of the given table.
     *
     * Equivalent to the original counting check (number of filter/partition matches
     * == number of filter columns): partition column names are unique within a table,
     * so the match count equals the size of the intersection, and equality with the
     * filter-set size means the filter set is a subset of the partition columns.
     */
    private boolean isPartitionOnlyFilter(Set<String> whereFieldSet, TableMetaInfo tableMetaInfo) {
        String partitionColNameJson = tableMetaInfo.getPartitionColNameJson();
        // Fix: a non-partitioned table has no partition-column JSON;
        // JSON.parseArray(null, ...) returns null and the original loop NPE'd.
        List<JSONObject> partitionColList = JSON.parseArray(partitionColNameJson, JSONObject.class);
        if (partitionColList == null || partitionColList.isEmpty()) {
            // No partition columns: only an unfiltered query qualifies
            // (matches the original behavior when the JSON array was empty).
            return whereFieldSet.isEmpty();
        }
        Set<String> partitionFields = new HashSet<>();
        for (JSONObject partitionJsonObj : partitionColList) {
            partitionFields.add(partitionJsonObj.getString("name"));
        }
        return partitionFields.containsAll(whereFieldSet);
    }

    /**
     * AST visitor that collects, from a single parsed Hive SQL statement:
     * 1. complex operations (join / group by / union all / functions / distinct),
     * 2. the column names used in filter predicates,
     * 3. the names of the tables being read.
     */
    // static: the dispatcher never uses the enclosing instance.
    private static class MyDispatcher implements Dispatcher {

        // 1. Complex operations found in the SQL (AST token text).
        @Getter
        private final Set<String> processSet = new HashSet<>();
        // 2. Column names used in filter predicates.
        @Getter
        private final Set<String> whereFieldSet = new HashSet<>();
        // 3. Names of the tables read by the SQL.
        @Getter
        private final Set<String> fromTables = new HashSet<>();

        // 4. AST token types that disqualify the SQL from being "simple processing".
        private final Set<Integer> realProcessSet = Sets.newHashSet(
                HiveParser.TOK_JOIN,            // join (also covers joins written via where)
                HiveParser.TOK_GROUPBY,         // group by
                HiveParser.TOK_LEFTOUTERJOIN,   // left join
                HiveParser.TOK_RIGHTOUTERJOIN,  // right join
                HiveParser.TOK_FULLOUTERJOIN,   // full join
                HiveParser.TOK_FUNCTION,        // e.g. count(1)
                HiveParser.TOK_FUNCTIONDI,      // e.g. count(distinct xx)
                HiveParser.TOK_FUNCTIONSTAR,    // e.g. count(*)
                HiveParser.TOK_SELECTDI,        // distinct
                HiveParser.TOK_UNIONALL         // union / union all
        );

        // 5. Comparison operators whose operands are treated as filter columns.
        //    (in / not in appear as function calls and are caught by TOK_FUNCTION above.)
        private final Set<String> operators = Sets.newHashSet("=", ">", "<", ">=", "<=", "<>", "like");

        /**
         * Called once per AST node during the parse walk; classifies the node
         * into one of the three collected sets when it matches.
         */
        @Override
        public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
            ASTNode astNode = (ASTNode) nd;

            // 1. Complex operation?
            if (realProcessSet.contains(astNode.getType())) {
                processSet.add(astNode.getText());
            }

            // 2. Filter predicate: record the column name(s) around the operator.
            if (operators.contains(astNode.getText())) {
                ArrayList<Node> whereNodeList = astNode.getChildren();
                for (Node node : whereNodeList) {
                    ASTNode whereNode = (ASTNode) node;
                    if (whereNode.getType() == HiveParser.DOT) {
                        // Qualified column ("t.col"): the column name is the second child.
                        whereFieldSet.add(whereNode.getChild(1).getText());
                    } else if (whereNode.getType() == HiveParser.TOK_TABLE_OR_COL) {
                        // Bare column: the name is the first child of TOK_TABLE_OR_COL.
                        whereFieldSet.add(whereNode.getChild(0).getText());
                    }
                }
            }

            // 3. Table reference: record the (unqualified) table name.
            if (astNode.getType() == HiveParser.TOK_TABREF) {
                ASTNode tableRefNode = (ASTNode) astNode.getChild(0);
                String tableName;
                if (tableRefNode.getChildren().size() == 1) {
                    // "table" — no schema qualifier.
                    tableName = tableRefNode.getChild(0).getText();
                } else {
                    // "schema.table" — the table name is the second child.
                    tableName = tableRefNode.getChild(1).getText();
                }
                fromTables.add(tableName);
            }

            return null;
        }
    }

}
