import json
import pika
from torch.utils.data import DataLoader
from omegaconf import OmegaConf
from dataset.data_factory import OSSDataset
from models.inference import ClassifyResult, SegmentResult, ClassifySegmentResult, UnsupervisedSegmentResult
import socket
import torch
import functools
import threading
from export_onnx import Export

"""
RabbitMQ message body (example):
body = {
    'type': 'inference',
    'train_task_id': 1,
    'inference_items':[
        {'id': 1,'url':'https://msjava.oss-cn-qingdao.aliyuncs.com/temp/00b7fb703.jpg'},
        {'id': 2,'url':'https://msjava.oss-cn-qingdao.aliyuncs.com/temp/00bbcd9af.jpg'},
        {'id': 3,'url':'https://msjava.oss-cn-qingdao.aliyuncs.com/temp/1bbf4b4c0.jpg'},
    ],
    'yaml_path': 'http://msjava.oss-cn-qingdao.aliyuncs.com/inference_config.yaml'
}
NOTE: for 'inference' messages the consumer also reads `task_type`
('classify' | 'segment' | 'unsupervised'), which this example omits.
"""


def call_for_inference_classify(config):
    """Run a classification inference pass over the OSS-hosted items in *config*."""
    ds_cfg = config.dataset
    oss_data = OSSDataset(config.inference_items, ds_cfg.height, ds_cfg.width,
                          ds_cfg.mean, ds_cfg.std)
    loader = DataLoader(oss_data, batch_size=config.batch_size, shuffle=False)
    # NOTE(review): `device` is a module global assigned in the __main__ block —
    # confirm it is always defined before this callback runs.
    ClassifyResult(config, device, loader).do_task()


def call_for_inference_segment(config):
    """Run segmentation inference; use the combined classify+segment task when
    the config carries a `classify_model` entry, plain segmentation otherwise."""
    ds_cfg = config.dataset
    oss_data = OSSDataset(config.inference_items, ds_cfg.height, ds_cfg.width,
                          ds_cfg.mean, ds_cfg.std)
    loader = DataLoader(oss_data, batch_size=config.batch_size, shuffle=False)
    task_cls = ClassifySegmentResult if 'classify_model' in config else SegmentResult
    task_cls(config, device, dataloader=loader).do_task()


def call_for_inference_unsupervised(config):
    """Run unsupervised-segmentation inference over the OSS-hosted items in *config*."""
    ds_cfg = config.dataset
    oss_data = OSSDataset(config.inference_items, ds_cfg.height, ds_cfg.width,
                          ds_cfg.mean, ds_cfg.std)
    loader = DataLoader(oss_data, batch_size=config.batch_size, shuffle=False)
    UnsupervisedSegmentResult(config, device, loader).do_task()


def call_for_deploy(config):
    """Export the trained model plus its YAML config for deployment."""
    exporter = Export(config)
    # Export the executable model artifact.
    exporter.export_model()
    # Write the inference configuration as YAML.
    exporter.export_config2yaml()
    # TODO: package and upload the artifacts to OSS.
    # TODO: update the database.


def ack_message(channel, delivery_tag):
    """ACK *delivery_tag* on *channel*, but only while the channel is still open.

    A closed channel cannot ACK; in that case we silently drop the ACK (the
    broker will redeliver the message).
    """
    if not channel.is_open:
        return
    channel.basic_ack(delivery_tag)


def on_message(channel, method_frame, header_frame, body):
    """pika consumer callback: hand each delivery to a worker thread so the
    connection's I/O loop is never blocked by inference work."""
    print(f'on_message thread id: {threading.get_ident()}')
    worker = threading.Thread(
        target=do_work,
        args=(channel, method_frame.delivery_tag, body),
    )
    worker.start()


# Worker-thread callback.
def do_work(channel, delivery_tag, body):
    """Parse the message *body*, dispatch the requested task, then schedule the
    ACK back on the connection's I/O thread.

    On failure the message is intentionally NOT acked (same observable outcome
    as the previous uncaught exception), but the traceback is now printed
    instead of killing the worker thread silently.
    """
    import traceback

    config = OmegaConf.create(json.loads(body))
    try:
        if config.type == 'inference':
            # task_type selects the inference flavour; the branches are
            # mutually exclusive, so an elif chain is the right shape.
            if config.task_type == 'classify':
                call_for_inference_classify(config)
            elif config.task_type == 'segment':
                call_for_inference_segment(config)
            elif config.task_type == 'unsupervised':
                call_for_inference_unsupervised(config)
        elif config.type == 'deploy':
            call_for_deploy(config)
    except Exception:
        # Leave the message un-acked so the broker can redeliver it.
        traceback.print_exc()
        return
    # pika channels must only be used from the connection's own thread, so the
    # ACK is scheduled via add_callback_threadsafe rather than issued here.
    cb = functools.partial(ack_message, channel, delivery_tag)
    channel.connection.add_callback_threadsafe(cb)


if __name__ == '__main__':
    # NOTE(review): temp_path appears unused in this file — confirm before removing.
    temp_path = './temp'
    # Prefer the second GPU when CUDA is available; otherwise fall back to CPU.
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')

    config = OmegaConf.load("application.yaml")
    user_info = pika.PlainCredentials(config.rabbitmq.username, config.rabbitmq.password)
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(config.rabbitmq.host, config.rabbitmq.port,
                                  virtual_host='/', credentials=user_info))
    channel = connection.channel()
    channel.queue_declare(queue=config.rabbitmq.queue_name, durable=True)
    # Manual acks: messages are acked from worker threads via ack_message().
    channel.basic_consume(queue=config.rabbitmq.queue_name,
                          auto_ack=False,
                          on_message_callback=on_message)
    try:
        channel.start_consuming()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C: stop consuming and close the connection.
        channel.stop_consuming()
        connection.close()
