# -*- coding: utf-8 -*-

import time
import logging
import threading
from elasticsearch import Elasticsearch
import redis
import configs

# Module-level setup: logging, the two Elasticsearch clients being compared,
# and the redis connection holding the queue of doc ids to verify.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Source and destination clusters; hosts come from the local configs module.
src_es = Elasticsearch(hosts=configs.es_source_host)
dest_es = Elasticsearch(hosts=configs.es_dest_host)

# Redis client for the shared doc-id work queue (filled by 1-get_source_ids.py
# per the message in processor()). decode_responses=True so list items come
# back as str, not bytes.
r = redis.Redis(
    host=configs.redis_host,
    port=configs.redis_port,
    db=configs.redis_db,
    password=configs.redis_pass,
    socket_connect_timeout=2,
    decode_responses=True,
)


def batch_lpop(key, n):
    """Atomically take up to ``n`` items off the left end of list ``key``.

    Uses a redis pipeline (transactional by default) so that the read and
    the trim execute as one unit, which keeps concurrent workers from
    popping the same ids.

    Returns the raw pipeline results: ``[popped_items, ltrim_status]`` —
    callers index ``[0]`` for the items.
    """
    # Pipeline commands return the pipeline itself, so the calls chain.
    return r.pipeline().lrange(key, 0, n - 1).ltrim(key, n, -1).execute()


# Wall-clock start; the elapsed time is logged at the bottom of the script.
start_ts = time.time()

# NOTE(review): scroll_time is never referenced in this file — presumably a
# leftover from a scroll-based variant (or used by a sibling script); confirm
# before removing.
scroll_time = "10m"


def processor():
    """Worker loop: pop batches of doc ids from redis and compare each doc
    between the source and destination Elasticsearch clusters.

    Runs until the redis queue is empty. A batch is first compared via mget;
    only when the batch differs is each id re-checked one by one. Ids that
    differ (or are missing on the destination) are printed and appended to
    diff.log.
    """
    while True:
        if r.llen(configs.redis_key) <= 0:
            logger.info("redis中队列为空，脚本自动退出。请先执行 1-get_source_ids.py 获取doc id清单")
            break

        # Pop a batch of doc _ids from the redis queue ([0] = popped items).
        doc_id_list = batch_lpop(configs.redis_key, configs.batch_size)[0]
        if not doc_id_list:
            # Another worker drained the queue between llen() and the pop.
            continue

        # Batch fetch from both sides. Docs missing on a side come back with
        # "found": false and NO "_source" key, hence .get() — indexing would
        # raise KeyError. (Fixes original bug where "_id" was assigned twice,
        # clobbering the id with the source, and where s1 was appended to
        # s2_list, making the batch compare meaningless.)
        src_res = src_es.mget(index=configs.es_source_index, body={"ids": doc_id_list})
        s1_list = [
            {"_id": doc["_id"], "_source": doc.get("_source")}
            for doc in src_res["docs"]
        ]

        dest_res = dest_es.mget(index=configs.es_dest_index, body={"ids": doc_id_list})
        s2_list = [
            {"_id": doc["_id"], "_source": doc.get("_source")}
            for doc in dest_res["docs"]
        ]

        if s1_list != s2_list:
            # Batch comparison failed; re-check each id individually.
            for doc_id in doc_id_list:
                # Reset per id — the original set this once per batch, so a
                # single missing dest doc flagged every later id as different.
                dest_miss_flag = False

                try:
                    src_res = src_es.get(index=configs.es_source_index, id=doc_id)
                    src_source = src_res["_source"]
                except Exception as e:
                    logger.error(f"源端找不这个doc id: {doc_id}, 错误信息: {str(e)}")
                    continue

                try:
                    dest_res = dest_es.get(index=configs.es_dest_index, id=doc_id)
                    dest_source = dest_res["_source"]
                except Exception as e:
                    logger.error(f"目标端找不到这个doc id: {doc_id}, 错误信息: {str(e)}")
                    dest_source = ""
                    dest_miss_flag = True

                if src_source != dest_source or dest_miss_flag:
                    print("疑似存在差异的doc id清单: ", doc_id)
                    # a+ append mode: multiple threads/batches accumulate ids.
                    with open("diff.log", "a+") as f:
                        f.writelines(str(doc_id) + "\n")



# Spawn 8 worker threads that concurrently drain the redis id queue.
threads = [threading.Thread(target=processor) for _ in range(8)]
for worker in threads:
    worker.start()

# Block until every worker has exited (queue empty).
for worker in threads:
    worker.join()

stop_ts = time.time()
logger.info(f"程序总体耗时(秒): {(stop_ts - start_ts)}")
