import pandas as pd
import json
import uuid
from confluent_kafka import Producer
from Queen.config.queen_config import queen_to_slave_topic, queen_to_test_topic, queen_to_svip_topic, queen_to_web_topic,\
    slave_main_group, slave_test_group, slave_web_group, queen_producer_config, queen_broker
from Queen.redis.redis_util import redis_connect_factory, redis_exit_key
from Queen.kafka.kafka_partition_key import get_mission_kafka_key_dict
from Queen.kafka.kafka_util import get_lag_info
from Queen.dao.mission_record import MissionRecord,insert_mission
from logger_config import logger_factory

# Module-wide singletons shared by the dispatch helpers below.
logger = logger_factory()
# Kafka producer; created lazily by producer_factory() on first use.
producer = None
# Redis connection used to store/check mission de-duplication keys.
redis_connect = redis_connect_factory()


def producer_factory() -> Producer:
    """Return the shared module-level Kafka producer, building it on first use."""
    global producer
    producer = producer if producer is not None else Producer(queen_producer_config)
    return producer


def delivery_report(err, msg):
    """Kafka delivery callback: log success with topic/partition, else the error."""
    if err is None:
        logger.info(f'message delivered to {msg.topic()} [{msg.partition()}]')
    else:
        logger.error(f'{err}')


def switch_data_to_dict(csv_data: pd.DataFrame) -> list:
    """Convert a DataFrame into a list of per-row dicts.

    Note: ``to_dict(orient='records')`` returns a list of dicts (one per
    row); the previous ``-> dict`` annotation was wrong.

    :param csv_data: mission rows as a DataFrame
    :return: list of dicts, one per DataFrame row
    """
    return csv_data.to_dict(orient='records')


def produce_to_slave(mission: str, csv_data: pd.DataFrame, queue_name: str):
    """
    Dispatch mission rows to Kafka, one message per row, balancing by
    consumer-group lag, then wait for delivery.

    :param mission: mission name (also selects the Redis-key builder)
    :param csv_data: mission rows; each row becomes one Kafka message
    :param queue_name: target queue; only 'main' is currently supported
    :raises ValueError: if queue_name is not a known queue
    :return: None
    """
    # Redis connection (module-level singleton)
    global redis_connect
    # Kafka producer (lazy module-level singleton)
    p = producer_factory()
    if queue_name == 'main':
        topic = queen_to_slave_topic
        group_id = slave_main_group
    else:
        # The original code fell through here with topic/group_id unbound,
        # crashing later with an UnboundLocalError; fail fast and clearly.
        raise ValueError(f'unknown queue_name: {queue_name}')
    # Flatten the DataFrame into a list of per-row dicts
    data_list = switch_data_to_dict(csv_data)
    partition_info = get_lag_info(queen_broker, group_id, topic)

    # Normalize lag values so sorting below never fails on non-int entries.
    for info in partition_info:
        if not isinstance(info['lag'], int):
            info['lag'] = 0
    count = 0
    for data in data_list:
        data['mission'] = mission
        # Redis de-duplication key for this mission row
        redis_key = get_mission_redis_key(mission, data)
        kafka_key = get_mission_kafka_key_dict(mission, data)
        if kafka_key is None:
            kafka_key = redis_key
        # Re-sort each round so we always target the least-backlogged partition.
        partition_info = sorted(partition_info, key=lambda x: x['lag'])
        try:
            # No key in Redis -> not dispatched recently; safe to produce.
            if not redis_exit_key(redis_key):
                partition = int(partition_info[0]['partition'])
                # mission_uuid = insert_mission_record(redis_key)
                data['REDIS_KEY'] = redis_key
                # data['UUID'] = mission_uuid
                # Emit the row to Kafka on the chosen partition
                p.produce(topic, partition=partition, value=json.dumps(data),
                          callback=delivery_report)
                # Record the key in Redis (best effort; failure is logged,
                # not fatal — the message has already been produced).
                try:
                    redis_connect.set(redis_key, redis_key)
                except Exception as e:
                    logger.error(f'redis set failed for {redis_key}: {e}')
                count += 1
                # Account for the message we just queued on this partition
                partition_info[0]['lag'] += 1
            else:
                logger.warning(f'Rework Same mission, {redis_key} is on doing, skip it')
                continue
        except Exception as e:
            # Was print(e) + a bare key log; keep one informative entry.
            logger.error(f'dispatch failed for {redis_key}: {e}')
            continue
    # Summary once per call (the original logged this inside the loop).
    logger.info(f'dispatch {count} mission {mission} in this round')
    # The original referenced p.flush without calling it, so queued messages
    # could be silently dropped; flush() blocks until delivery completes.
    p.flush()


# Registry of per-mission Redis-key builders: mission name -> key function.
redis_key_dict = {
    'test': lambda mission, data: f"{mission}:{data['LOTID']}:{data['EQPID']}",
}


def get_mission_redis_key(mission: str, data):
    """Build the Redis de-duplication key for *mission* from one row of data."""
    return redis_key_dict[mission](mission, data)


def insert_mission_record(key):
    """Persist a new mission record for *key* and return its generated UUID.

    Insert failures are logged and swallowed (best effort, matching the
    original semantics) so dispatching can continue; the UUID is returned
    either way.

    :param key: mission de-duplication key to store with the record
    :return: the newly generated mission UUID string
    """
    mission_uuid = str(uuid.uuid4())
    mission_record = MissionRecord(
        uuid=mission_uuid,
        mission_key=key,
    )
    try:
        insert_mission(mission_record)
    except Exception as e:
        # The original logged only the string 'fail', discarding both the
        # key and the exception — include them for debuggability.
        logger.error(f'insert_mission failed for {key}: {e}')
    return mission_uuid