import os, sys
import logging
print(os.getcwd())
sys.path.append(os.getcwd())
from torch.optim import SGD, Adam
from torch.nn.functional import binary_cross_entropy_with_logits
from torch.optim.lr_scheduler import MultiStepLR, StepLR

from sklearn.metrics import accuracy_score
from skorch.core import *
from skorch.vision import *
from skorch import NeuralNet, NeuralNetClassifier
from skorch.helper import predefined_split
from skorch.callbacks import EpochScoring, LRScheduler, CyclicLR, Checkpoint
from skorch.loss import FocalLoss
from model import classify_cnn
from dataset import create_dr_classify_datasets, create_classify_augmenter, jsrt_stats, denormalize_image, create_floder_classify_datasets
torch.manual_seed(0)


def logger(loggername, logfile, level=logging.DEBUG):
    """Create a logger that writes all records to *logfile* and echoes
    INFO-and-above records to the console.

    :param loggername: name registered with the ``logging`` module
    :param logfile: log file path; an existing file is removed first
    :param level: overall logger level (default ``logging.DEBUG``)
    :return: configured ``logging.Logger``
    """
    log = logging.getLogger(loggername)
    log.setLevel(level=level)
    # getLogger returns a process-wide singleton per name: drop handlers
    # left over from any previous call, otherwise every record is emitted
    # once per call to this factory (duplicate lines in file and console).
    log.handlers.clear()
    if os.path.exists(logfile):
        os.remove(logfile)
    file_handler = logging.FileHandler(logfile)
    file_handler.setLevel(logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    log.addHandler(file_handler)
    log.addHandler(console)

    return log


def scoring_metric(ture_label: np.ndarray, predicted_probs: np.ndarray, sigmoid=False, threshold=0.5):
    """Accuracy of thresholded predictions against ground-truth labels.

    For multi-label arrays sklearn's ``accuracy_score`` computes subset
    (exact-match) accuracy per sample.

    :param ture_label: ground-truth label array (parameter name kept for
        backward compatibility; "ture" is a typo for "true")
    :param predicted_probs: model outputs — raw logits if ``sigmoid`` is
        True, otherwise probabilities already in [0, 1]
    :param sigmoid: apply a sigmoid to ``predicted_probs`` first
    :param threshold: cut-off for converting probabilities to hard 0/1 labels
    :return: accuracy score as a float
    """
    if sigmoid:
        predicted_probs = 1 / (1 + np.exp(-predicted_probs))
    # These are hard 0/1 predictions, not logits — renamed accordingly.
    hard_preds = (predicted_probs > threshold).astype('uint8')
    # accuracy_score's signature is (y_true, y_pred); the original passed
    # them swapped, which only worked because accuracy is symmetric.
    return accuracy_score(ture_label, hard_preds)


def multi_label_scoring(net, ds, y, thresh=0.5):
    """EpochScoring hook: accuracy of sigmoid-activated network outputs.

    :param net: fitted network exposing ``predict``
    :param ds: dataset to score
    :param y: ground-truth labels
    :param thresh: probability cut-off for hard predictions
    :return: accuracy score
    """
    raw_outputs = net.predict(ds)
    # Network outputs are raw logits here, so always squash through a sigmoid.
    return scoring_metric(y, raw_outputs, sigmoid=True, threshold=thresh)


def get_dataloader():
    """Build train/validation datasets from the two DR image roots."""
    roots = [
        '/home/cao/disk1/DR/DR/images',
        '/home/cao/disk1/DR/DR2/images',
    ]
    return create_dr_classify_datasets(
        roots, create_classify_augmenter, jsrt_stats, target_size=(512, 512))


def get_folder_dataloader():
    """Build train/validation datasets from the folder-organized binary set."""
    root = '/home/blake/data/dataset/datasets/DR/classifer/zhengcewei/train'
    return create_floder_classify_datasets(
        root, create_classify_augmenter, jsrt_stats, target_size=(512, 512))


def init_model_multi_label(n_classes=4, pretrianed=False, dirname='classify_binary', criterion=nn.BCELoss):
    """Build (and optionally restore) the multi-label classification net.

    :param n_classes: number of output labels
    :param pretrianed: if True, restore params/optimizer/history from
        ``dirname`` (name kept for backward compatibility; typo for
        "pretrained")
    :param dirname: checkpoint directory, with or without trailing slash
    :param criterion: loss class, e.g. ``nn.BCELoss`` or ``FocalLoss``
    :return: an uninitialized (or restored) ``NeuralNet``
    """
    target_size = (512, 512)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    is_parallel = True

    # Track validation accuracy each epoch via the multi-label scorer.
    acc_scoring = EpochScoring(multi_label_scoring, name='acc', lower_is_better=False)

    module = classify_cnn(n_classes=n_classes, imsize=target_size)

    # Decay the learning rate by 10x every 200 epochs.
    step_lr = LRScheduler(policy=StepLR, step_size=200, gamma=0.1)

    net = NeuralNet(
        module,
        criterion=criterion,
        batch_size=48,
        max_epochs=1000,
        optimizer=SGD,
        lr=0.01,
        optimizer__momentum=0.9,         # optimizer momentum
        iterator_train__shuffle=True,    # shuffle the training set
        iterator_train__num_workers=8,   # parallel data-loading workers
        iterator_valid__shuffle=False,
        iterator_valid__num_workers=8,
        callbacks=[
            step_lr,
            acc_scoring,
            Checkpoint(f_params='best_params.pt', dirname=dirname)],
        device=device,
        parallel=is_parallel             # multi-GPU data parallelism
    )

    if pretrianed:
        net.initialize()
        # os.path.join inserts the separator the original string
        # concatenation omitted, so dirname works both with and without a
        # trailing slash (the bare default previously produced e.g.
        # 'classify_binarybest_params.pt').
        net.load_params(f_params=os.path.join(dirname, 'best_params.pt'),
                        f_optimizer=os.path.join(dirname, 'optimizer.pt'),
                        f_history=os.path.join(dirname, 'history.json'))
    return net


def init_model_single_label(n_classes=4, pretrianed=False, dirname='classify_binary'):
    """Build (and optionally restore) the single-label (softmax) classifier.

    :param n_classes: number of mutually exclusive classes
    :param pretrianed: if True, restore params/optimizer/history from
        ``dirname`` (name kept for backward compatibility; typo for
        "pretrained")
    :param dirname: checkpoint directory
    :return: an uninitialized (or restored) ``NeuralNetClassifier``
    """
    target_size = (512, 512)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    is_parallel = True

    module = classify_cnn(n_classes=n_classes, softmax=True, imsize=target_size)

    # NOTE(review): the original defined an unused StepLR scheduler and a
    # commented-out callbacks list (checkpointing + train accuracy scoring);
    # both removed as dead code. Re-add a callbacks=[...] argument here if
    # scheduling/checkpointing during training is wanted again.

    net = NeuralNetClassifier(
        module,
        batch_size=48,
        max_epochs=1000,
        optimizer=SGD,
        lr=0.01,
        optimizer__momentum=0.9,
        iterator_train__shuffle=True,
        iterator_train__num_workers=8,
        iterator_valid__shuffle=False,
        iterator_valid__num_workers=8,
        device=device,
        parallel=is_parallel
    )

    if pretrianed:
        net.initialize()
        # os.path.join keeps this consistent with init_model_multi_label.
        net.load_params(f_params=os.path.join(dirname, 'best_params.pt'),
                        f_optimizer=os.path.join(dirname, 'optimizer.pt'),
                        f_history=os.path.join(dirname, 'history.json'))
    return net


def train():
    """Fit the multi-label classifier with focal loss, resuming from the
    'classify_fl/' checkpoint."""
    train_ds, _valida_ds = get_dataloader()
    net = init_model_multi_label(
        n_classes=4, pretrianed=True, dirname='classify_fl/', criterion=FocalLoss)
    net.fit(train_ds)


def train_floder_labels():
    """Fit the binary folder-labelled classifier, then print validation
    accuracy."""
    train_ds, valida_ds = get_folder_dataloader()
    net = init_model_single_label(n_classes=2, pretrianed=True, dirname='classify_binary')
    net.fit(train_ds)

    # Collect ground-truth labels by indexing the validation dataset.
    y_true = np.asarray([valida_ds[idx][1] for idx in range(len(valida_ds))])
    y_pred = net.predict(valida_ds)
    print(accuracy_score(y_true, y_pred))


def predict():
    """Evaluate the multi-label classifier on the validation split, logging
    each sample's DICOM name and label, then print overall accuracy."""
    _train_ds, valida_ds = get_dataloader()
    net = init_model_multi_label()
    net.initialize()
    net.load_params(f_params='classify_fl/best_params.pt')

    raw_scores = net.predict(valida_ds)
    probs = 1 / (1 + np.exp(-raw_scores))       # sigmoid over raw logits
    hard_preds = (probs > 0.5).astype('uint8')  # threshold at 0.5

    log = logger("log", './log.txt', level=logging.DEBUG)

    true_labels = []
    for idx in range(len(valida_ds)):
        _image, label = valida_ds[idx]
        # Map the dataset item back to its source DICOM file name.
        dcm_name = valida_ds.json_items[idx].with_suffix('.dcm')
        log.debug("name:{}, label:{}".format(str(dcm_name), label))
        true_labels.append(label)
    true_labels = np.asarray(true_labels)

    print(accuracy_score(hard_preds, true_labels))
    # (A matplotlib loop that displayed each validation image with its
    # true/predicted labels used to live here; removed as dead code.)


def predict_floder_labels():
    """Run the binary classifier on one hard-coded DICOM image and print the
    predicted class."""
    net = init_model_single_label(n_classes=2, pretrianed=False, dirname='classify_binary')
    net.initialize()
    net.load_params(f_params='classify_binary/best_params.pt')

    image_path = '/home/blake/data/dataset/datasets/DR/dicom/DR/images/200-0000000001-0001-10001-1.3.51.0.7.11073525975.49120.64333.42989.22486.28979.41145.dcm'

    # Load the DICOM, expand to 3 channels, and match the training size.
    img = load_image_obj(str(image_path), 'pydcm')
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    img = resize_image(img, (512, 512))

    _start_time = time.time()
    # Normalize to a tensor and add the batch dimension.
    tensor = image_to_tensor(img, jsrt_stats).unsqueeze_(0)

    prediction = net.predict(tensor)
    print(prediction)


def test_predict_image():
    """Run the multi-label classifier on one hard-coded DICOM image, timing
    inference and printing probabilities and thresholded labels."""
    net = init_model_multi_label()
    net.initialize()
    net.load_params(f_params='classify/best_params.pt')

    image_path = '/home/blake/data/dataset/datasets/DR/dicom/DR/images/200-0000000001-0001-10001-1.3.51.0.7.11073525975.49120.64333.42989.22486.28979.41145.dcm'

    # Load the DICOM, expand to 3 channels, and match the training size.
    img = load_image_obj(str(image_path), 'pydcm')
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    img = resize_image(img, (512, 512))

    start_time = time.time()
    # Normalize to a tensor and add the batch dimension.
    tensor = image_to_tensor(img, jsrt_stats).unsqueeze_(0)

    raw_scores = net.predict(tensor)

    print('time: ', time.time() - start_time)
    probs = 1 / (1 + np.exp(-raw_scores))  # sigmoid over raw logits
    print(probs)
    hard_preds = (probs > 0.5).astype('uint8')
    print(hard_preds)


if __name__ == '__main__':
    print('')
    # Pick the pipeline stage to run; exactly one should be active.
    predict()
    # train()
    # test_predict_image()
    # train_floder_labels()
    # predict_floder_labels()