from elasticsearch import Elasticsearch
import logging
import graypy

# Elasticsearch client pointing at the source cluster to export from.
es = Elasticsearch(["http://10.102.10.155:9200"])
# Graylog server address and GELF input port configuration.
graylog_host = '10.102.10.165'  # Graylog server address
graylog_port = 12201  # default port for the GELF TCP input

# Create a GELF handler that ships log records to Graylog over TCP.
gelf_handler = graypy.GELFTCPHandler(graylog_host, graylog_port)

# Create the logger used to forward every exported document.
logger = logging.getLogger('graylog')
logger.setLevel(logging.INFO)  # set the log level
logger.addHandler(gelf_handler)



def send_tcp_glef(data):
    """Forward one Elasticsearch hit to Graylog via the module-level GELF logger.

    Parameters:
        data: an Elasticsearch hit dict; its '_source' sub-dict holds the
              original log fields, including the 'message' text.
              (Assumed schema — TODO confirm against the indexed documents.)
    """
    # Shallow-copy so we do not mutate the caller's hit dict
    # (the original code deleted 'message' from it in place).
    source_data = dict(data.get('_source', {}))
    # pop() with a default avoids the KeyError the original raised
    # whenever a document had no 'message' field.
    message = source_data.pop('message', '')
    # All remaining fields travel as GELF "extra" fields.
    logger.info(message, extra=source_data)

# Initialize the scroll-based export for one index.
def scroll_query(i):
    """Stream every document of index 'graylog_<i>' to Graylog.

    Uses the Elasticsearch scroll API so arbitrarily large indices can be
    consumed in fixed-size batches without loading everything at once.

    Parameters:
        i: numeric suffix of the source index (index name 'graylog_<i>').
    """
    scroll_response = es.search(
        index='graylog_' + str(i),
        body={"query": {"match_all": {}}},
        scroll='360m',  # keep the search context alive between batches
        size=300  # documents returned per scroll request
    )
    logger.info('graylog_' + str(i))
    scroll_id = scroll_response['_scroll_id']

    processed = 0  # separate counter: the original shadowed parameter i
    try:
        # BUG FIX: the first batch arrives on the initial search() call;
        # the original loop discarded it and only consumed scroll() batches,
        # silently dropping up to `size` documents per index.
        hits = scroll_response['hits']['hits']
        while hits:
            for hit in hits:
                processed += 1
                send_tcp_glef(hit)
            print(processed)
            # Fetch the next batch with the same keep-alive as the
            # initial request (required by the scroll API).
            response = es.scroll(
                scroll_id=scroll_id,
                scroll='360m'
            )
            hits = response['hits']['hits']
    finally:
        # Always release the server-side scroll context, even if a batch
        # fails mid-stream (the original leaked it on error).
        es.clear_scroll(scroll_id=scroll_id)

# Run the export for every index graylog_783 .. graylog_823,
# recording progress in log.log as each index starts.
for index_suffix in range(783, 824):
    index_name = 'graylog_' + str(index_suffix)
    print(index_name)
    with open('log.log', 'a+') as progress_log:
        progress_log.write(index_name + '\n')
    all_documents = scroll_query(index_suffix)

print('end')
print('end')