package com.atguigu.dga.config;

import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.SemanticException;

import java.util.Collections;
import java.util.Stack;

/*
解析sql的功能是在hive-exec包下
    一个sql解析后的根节点是nil
        有两个子节点：
            TOK_QUERY:需要获取的
                    有什么操作，就对应什么节点:
                        1.例如有LEFT JOIN,就有TOK_LEFTOUTJOIN
                        2.有where过滤，就有TOK_WHERE
                                        TOK_WHERE
                                            /
                                           or
                                           / \
                                         and  >
                                         /\
                                        = =
                        总结：先找到TOK_WHERE,向下遍历。每个子节点，可能是逻辑运算符
                            逻辑运算符的子节点可能是 关系运算符
                            关系运算符的子节点才是引用的列
                            引用的列，都会有TOK_TAB_OR_COL
                        3.从某张表查询，就有TOK_TABREF(从那张表去查询)节点
                            表有别名：
                                TOK_TABREF
                                    /\
                          TOK_TABREF  别名
                                /
                              真实名字
                       真实名字分情况：
                            没有库名：
                                    TOK_TABREF
                                        /
                                     表名
                            有库名:
                                    TOK_TABREF
                                      /\
                                   库名 表名
                   EOF:结束的标识符
 */
public class SqlParser {

    /**
     * Parses a SQL string into a Hive abstract syntax tree, locates the
     * {@code TOK_QUERY} node, and traverses its subtree with the supplied
     * {@link Dispatcher}, which is invoked once per visited node.
     *
     * @param sql        the SQL text to parse (Hive dialect)
     * @param dispatcher callback invoked for every node during the walk
     * @throws RuntimeException wrapping the underlying parse/walk failure,
     *                          including when no TOK_QUERY node is present
     *                          (e.g. non-query statements)
     */
    public static void parseSql(String sql, Dispatcher dispatcher) {
        // Hive-provided parser (from hive-exec) that turns a SQL string into an AST.
        ParseDriver parseDriver = new ParseDriver();

        try {
            // Parse the SQL into a syntax tree. The root node is "nil" with two
            // children: TOK_QUERY (the query we care about) and EOF.
            ASTNode astNode = parseDriver.parse(sql);

            /*
             * astNode.getType() returns the node's token type as an int.
             * Descend through first children until the TOK_QUERY node is found.
             * The null guard prevents an NPE/endless descent when the input has
             * no TOK_QUERY node (e.g. DDL statements).
             */
            while (astNode != null && astNode.getType() != HiveParser.TOK_QUERY) {
                astNode = (ASTNode) astNode.getChild(0);
            }
            if (astNode == null) {
                throw new SemanticException("No TOK_QUERY node found in SQL: " + sql);
            }

            // Prints "TOK_QUERY" — confirms the starting node of the walk.
            System.out.println(astNode.getText());

            // Inject the node dispatcher into a graph walker, then traverse the
            // whole subtree rooted at TOK_QUERY with it.
            DefaultGraphWalker ogw = new DefaultGraphWalker(dispatcher);
            ogw.startWalking(Collections.singletonList(astNode), null);

        } catch (Exception e) {
            // Re-throw unchecked, keeping the original cause and the failing SQL
            // so callers can diagnose the problem.
            throw new RuntimeException("Failed to parse SQL: " + sql, e);
        }
    }

    /**
     * Minimal {@link Dispatcher} that logs each node as the walker visits it.
     * Useful for inspecting the shape of a parsed query's AST.
     */
    public static class MyDispatcher implements Dispatcher {
        /*
         * Called by the GraphWalker for every node it visits.
         *   nd    : the node currently being processed
         *   stack : the path of ancestor nodes down to nd
         */
        @Override
        public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
            ASTNode astNode = (ASTNode) nd;
            System.out.println("开始处理:" + astNode.getText());
            return null;
        }
    }

    /** Demo entry point: parses a sample query and prints each visited AST node. */
    public static void main(String[] args) {
        String sql = "  with tmp1 as (" +
                "  select " +
                "  t1.name,t1.age,t2.score " +
                "  from student t1 left join score t2 " +
                "  on t1.id = t2.id " +
                ") " +
                "insert overwrite table result " +
                "select " +
                "   name,sum(score) totalScore " +
                "from tmp1 " +
                "group by name " +
                "union all " +
                "select " +
                "   age,sum(score) totalScore " +
                "from tmp1 " +
                " where age > 10 " +
                "group by age " ;

        String sql2 = " with t1 as (select aa(a),b,c,dt as dd from tt1,  tt2 \n" +
                "             where tt1.a=tt2.b and dt='2023-05-11'  )\n" +
                "  insert overwrite table tt9  \n" +
                "  select a,b,c \n" +
                "  from t1 \n" +
                "  where    dt = date_add('${xxx}',-4 )    \n" +
                "  union \n" +
                "  select a,b,c \n" +
                "  from t2\n" +
                "   where    dt = date_add('${xxx} ',-7 )  ";

        String sql3 = "select * from gmall.dim_user_zip where (dt = xxx and a = b) or c > d";

        String sql4 = "select * from t1 where t1.name = 'jack' and t1.age = 30";

        // Walk the simple sample query, printing every AST node as it is visited.
        // (The original demo referenced a CheckSimleProceess class that does not
        // exist in this file; MyDispatcher above provides the same demonstration.)
        parseSql(sql4, new MyDispatcher());
    }
}
