#! /usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import json

from antlr4 import *
from antlr4.InputStream import InputStream
from elasticsearch_dsl import Q
from elasticsearch_dsl import connections, Search

from ESharkDSLLexer import ESharkDSLLexer
from ESharkDSLParser import ESharkDSLParser
from MyVisitor import MyVisitor

# Module-level test-mode flag; currently only referenced by the
# commented-out early-return inside test_display_filter.
g_is_test = False


def test_display_filter(expr,
                        include_fields=None, exclude_fields=None,
                        sort_asc=None, sort_desc=None,
                        offset=0, limit=10,
                        stat_pairs=None,
                        line_no=1):
    """Parse *expr* with the ESharkDSL grammar, build an Elasticsearch
    query from it, execute the search and print the results.

    Args:
        expr: display-filter expression to parse.
        include_fields: ``_source`` fields to include; defaults to ['tcp.*'].
        exclude_fields: ``_source`` fields to exclude.
        sort_asc: field names to sort ascending.
        sort_desc: field names to sort descending.
        offset: pagination offset (from).
        limit: number of hits to return (size).
        stat_pairs: aggregation specs, each a dict with 'name', 'type', 'body'.
        line_no: sequence number used only in log output.

    Exits the process with status 1 when the expression has syntax errors.
    """
    # Normalize None defaults here instead of using mutable default args.
    if stat_pairs is None:
        stat_pairs = []
    if sort_asc is None:
        sort_asc = []
    if sort_desc is None:
        sort_desc = []
    if exclude_fields is None:
        exclude_fields = []
    if include_fields is None:
        include_fields = ['tcp.*']

    print('%.2d. ---- 语法分析->开始 ----' % line_no)
    print('    输入：%s' % expr.strip())

    # Run the ANTLR lexer/parser pipeline over the expression.
    input_stream = InputStream(expr)
    lexer = ESharkDSLLexer(input_stream)
    token_stream = CommonTokenStream(lexer)
    parser = ESharkDSLParser(token_stream)
    tree = parser.prog()

    # getNumberOfSyntaxErrors() returns a count, not a boolean.
    syntax_error_count = parser.getNumberOfSyntaxErrors()

    if syntax_error_count > 0:
        print('测试结果：语法错误')
        print('%.2d. ---- 语法分析->结束 ----\n\n' % line_no)
        print('语法测试因出现错误而终止！')
        # sys.exit instead of the site-module exit() helper, which is
        # intended for interactive use and may be absent under -S.
        sys.exit(1)

    lisp_tree_str = tree.toStringTree(recog=parser)
    print('测试结果：成功')
    print('  语法树：\n\t', lisp_tree_str)

    # Walk the parse tree; the visitor collects Q objects in .querys.
    visitor = MyVisitor()
    visitor.visit(tree)

    print('%.2d. ---- 语法分析->结束 ----\n\n' % line_no)

    s = Search(index="packet")

    # Translate the visitor's collected queries into the Search object.
    # The keys '' and 'bool' are mutually exclusive, hence if/elif.
    for k, v in visitor.querys.items():
        if k == '' and len(v) > 0:
            # Plain (non-bool) queries: apply each one in turn.
            for q in v:
                s = s.query(q)
        elif k == 'bool':
            q = Q('bool',
                  must=v['must'],
                  must_not=v['must_not'],
                  should=v['should'],
                  )
            s = s.query(q)

    # Field projection (_source filtering).
    if exclude_fields:
        s = s.source(includes=include_fields, excludes=exclude_fields)
    else:
        s = s.source(fields=include_fields)

    # Sorting: ascending fields first, then descending ones.
    sort_fields = [{field: {'order': 'asc'}} for field in sort_asc]
    sort_fields += [{field: {'order': 'desc'}} for field in sort_desc]
    s = s.sort(*sort_fields)

    # Pagination: equivalent to {"from": offset, "size": limit}.
    s = s[offset: offset + limit]

    # Metric aggregations; s.aggs is mutated in place by design.
    for pair in stat_pairs:
        s.aggs.metric(pair['name'], pair['type'], field=pair['body'])

    print("DSL表达式：\n%s\n" % json.dumps(s.to_dict(), indent=2))

    response = s.execute()

    print("匹配总数：%s" % response.hits.total)

    print("\n匹配详情：")
    for hit in response:
        print(hit.meta, hit)

    print('\n聚合：')
    for k, v in response.aggregations.to_dict().items():
        print('名称：%s，结果：\n\t%s' % (k, v))


def test_display_filters():
    """Batch-run grammar tests from a DSL fixture file (placeholder).

    The disabled implementation read 'display_filter.dsl' line by line
    and fed each expression to test_display_filter(); it stays inert
    until the fixture file exists.
    """
    # NOTE(review): the disabled loop called
    # test_display_filter(expr_line, line_no) — positionally, line_no
    # would bind to include_fields; pass line_no=line_no when re-enabling.
    pass


def main():
    """Command-line entry point: parse arguments, then either run the
    diagnostic suite (-t) or execute a single query expression (-e)."""
    # Initiate the default (module-wide) connection to Elasticsearch.
    # NOTE(review): host is hard-coded; consider making it configurable.
    connections.create_connection(hosts=['192.168.2.8:9200'])

    parser = argparse.ArgumentParser(prog='eshark',
                                     description='数据包分析工具',
                                     usage='%(prog)s -e <表达式>',
                                     epilog="")

    parser.add_argument('-e',
                        '--expr',
                        metavar='expr',
                        help='查询表达式',
                        action='store',
                        dest='expr')

    group = parser.add_argument_group('输出控制')
    group.add_argument('-fi',
                       metavar='field',
                       help='包含的字段',
                       action='extend',
                       nargs='+',
                       dest='include_fields')
    group.add_argument('-fe',
                       metavar='field',
                       help='排除的字段',
                       action='extend',
                       nargs='+',
                       dest='exclude_fields')

    group = parser.add_argument_group('字段排序')
    group.add_argument('-sa',
                       metavar='field',
                       help='升序',
                       action='extend',
                       nargs='+',
                       dest='sort_asc')
    group.add_argument('-sd',
                       metavar='field',
                       help='降序',
                       action='extend',
                       nargs='+',
                       dest='sort_desc')

    group = parser.add_argument_group('分页')
    group.add_argument('--offset',
                       metavar='num',
                       help='偏移量/起点',
                       type=int,
                       default=0,
                       dest='offset')

    group.add_argument('--limit',
                       metavar='num',
                       help='输出记录数量',
                       type=int,
                       default=10,
                       dest='limit')

    def stat_func(a):
        """Post-process parsed args: turn the repeated -s/--stat triples
        [name, type, body] into a list of dicts on a.stat_pairs."""
        # BUG FIX: a.stat is None when -s/--stat is not supplied, and the
        # original `for pair in a.stat` raised TypeError on every plain
        # `-e <expr>` invocation. Also assign via the parameter `a`
        # rather than the enclosing `args` closure variable.
        stat_pairs = []
        for pair in (a.stat or []):
            stat_pairs.append({
                'name': pair[0],
                'type': pair[1],
                'body': pair[2],
            })
        a.stat_pairs = stat_pairs

    parser.add_argument('-s',
                        '--stat',
                        metavar='[name type body]',
                        help='聚合',
                        action='append',
                        nargs='+',
                        dest='stat')
    # stat_func is invoked unconditionally after parsing, via args.func.
    parser.set_defaults(func=stat_func)

    parser.add_argument('-t',
                        '--test',
                        help='运行诊断代码',
                        action='store_true',
                        dest='is_test')

    parser.add_argument('-V',
                        '--version',
                        help='显示版本信息',
                        action='version',
                        version='%(prog)s v1.0.0')

    args = parser.parse_args()
    args.func(args)
    print(args)

    if args.is_test:
        test_display_filters()
        print('\n语法测试完成！')
    elif args.expr:
        test_display_filter(expr=args.expr,
                            include_fields=args.include_fields, exclude_fields=args.exclude_fields,
                            sort_asc=args.sort_asc, sort_desc=args.sort_desc,
                            offset=args.offset, limit=args.limit,
                            stat_pairs=args.stat_pairs)
    else:
        print('请指定参数......')



if __name__ == '__main__':
    main()

# TODO
# IP address filtering (IP地址筛选): https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html
