import io
from argparse import ArgumentParser

import fitlog
import scipy.io as matio
import os
import os.path
import numpy as np
from PIL import Image
import time
import re
import torch
import torch.utils.data
import torch.nn.parallel as para
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.functional import normalize as normalize
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.nn import Parameter
from MF import FM as rec_model
import pickle
from torch.autograd import Variable
from tools import generate_samples, my_repeat, top_match, prepare_sample, get_len_of_nonzero
from random import choice
import random
import matplotlib.pyplot as plt

# --Path settings----------------------------------------------------------------------------------
root_path = ''
img_feature_path = 'readFile/image/'
text_data_path = 'readFile/text/'

# Input text files: .npy arrays of token ids for the train/test splits
# (fed to nn.Embedding below, so they must hold integer indices).
video_texts_filepath = text_data_path + 'video_text_data_train.npy'
img_texts_filepath = text_data_path + 'image_text_data_train.npy'
video_texts_test_filepath = text_data_path + 'video_text_data_test.npy'
image_texts_test_filepath = text_data_path + 'image_text_data_test.npy'
# --Create dataset----------------------------------------------------------------------------------
class ProductData(torch.utils.data.Dataset):
    """Training dataset pairing visual embeddings with their text sequences.

    All four arrays are loaded eagerly into memory.  Row ``i`` of every
    array describes the same product, so a single integer index yields an
    aligned (image, video, video-text, image-text) tuple.
    """

    def __init__(self):
        def _load(path):
            return torch.from_numpy(np.load(path))

        # visual embeddings
        self.image_embd = _load(img_feature_path + 'image_train_oushi_embd.npy')
        self.video_embd = _load(img_feature_path + 'video_train_oushi_embd.npy')
        # token-id sequences (video narration texts: 121 tokens; image captions: 22 tokens)
        self.video_texts = _load(video_texts_filepath)
        self.image_texts = _load(img_texts_filepath)

    def __getitem__(self, index):
        """Return (index, image emb, video emb, video text, image text) for one product."""
        return (index,
                self.image_embd[index],
                self.video_embd[index],
                self.video_texts[index],
                self.image_texts[index])

    def __len__(self):
        return len(self.image_embd)


# Fusion-option note: 0 = late fusion (weighted combination of the two score
# vectors); non-zero = early fusion.  The constant itself appears unused in
# this chunk — TODO confirm it is not consumed elsewhere.
comb_option_description = '''0 表示后融合，采用加权的方式融合向量; 不为零0表示先融合;'''

# --------------------- command-line arguments ----------------
parser = ArgumentParser(description="设定输入参数")
# hyper-parameters
parser.add_argument("-lr", "--learn_rate", default=0.001, type=float)
parser.add_argument("-dc", "--decay", default=3, type=int)
parser.add_argument("-o", "--optimizer", default='Adam', help="优化算法")
# training switches (0/1 ints converted to bools below)
parser.add_argument("-gru", "--if_train_gru", default=0, type=int)
parser.add_argument("-tbd", "--if_train_text_embd", default=0, type=int)
# misc
parser.add_argument("-n", "--name", default="test", help="本次实验特殊说明")
parser.add_argument("-g", "--if_gpu", default=0, help="默认不使用gpu", type=int)

args = parser.parse_args()

CUDA = bool(args.if_gpu)  # 1 for True; 0 for False
lr = args.learn_rate
dc = args.decay
if_train_gru = bool(args.if_train_gru)  # whether GRU weights are trainable
pre_name = args.name  # run-name prefix used in result paths
if_train_text_embd = bool(args.if_train_text_embd)  # whether word embeddings are fine-tuned
optimizer_name = args.optimizer

# --manual setting----------------------------------------------------------------------------------
SEED = 1
LOG_INTERVAL = 1
train_set_length = 35000
test_batch_size = 100
test_set_length = 15000
test_step = 10  # evaluating all 15000 test items is too slow; only test 15000/test_step of them
number_neg_sample_train = 1  # number of negatives; must stay 1 or the pos/neg AUC bookkeeping breaks
if_fitlog = 1  # whether to log metrics with fitlog

# ------- hyper-parameter settings -----------------------------------------------------
# fixed hyper-parameters
decay_freq = 2  # decay the learning rate every `decay_freq` epochs
BATCH_SIZE = 512
lr_decay_rate = 0.5  # multiplicative learning-rate decay factor
# tunable hyper-parameters
learning_rate = lr  # learning rate
decay = dc  # number of decay rounds
opt_sele = optimizer_name  # optimiser selection: SGD / Adam / Adagrad
EPOCHS = decay * 2 + 1  # total number of epochs
init_weight = 0.01  # initial value of the learned late-fusion mixing weight
if_save_pt = 0  # NOTE(review): appears unused in this chunk
if_same_rec = 0  # share one factorisation (MF) model between text and image scores?
if_same_gru = 0  # share one GRU between video and image text encoders?

res_name = pre_name + '_' + opt_sele + "_lr_" + str(lr) + "_dc_" + str(decay)
result_path = 'res/' + res_name + '/'
if if_fitlog:
    fitlog.set_log_dir('logs/')
    fitlog.add_hyper(args)

if not os.path.exists(result_path):
    os.makedirs(result_path)

# --------------------------------------------------------------------
Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor  # NOTE(review): appears unused in this chunk
mseLoss = torch.nn.MSELoss()  # NOTE(review): defined but not used in this chunk
if CUDA:
    mseLoss.cuda()

# test-split visual embeddings (plus the train image table used for negatives)
test_image_embs = torch.from_numpy(np.load(img_feature_path + 'image_test_oushi_embd.npy'))
test_videoEmbs = torch.from_numpy(np.load(img_feature_path + 'video_test_oushi_embd.npy'))
train_imageEmbs = torch.from_numpy(np.load(img_feature_path + 'image_train_oushi_embd.npy'))
# text data (token-id sequences)
video_texts_test = torch.from_numpy(np.load(video_texts_test_filepath))
image_texts_test = torch.from_numpy(np.load(image_texts_test_filepath))
video_texts = torch.from_numpy(np.load(video_texts_filepath))
image_texts = torch.from_numpy(np.load(img_texts_filepath))

if CUDA:
    test_videoEmbs = test_videoEmbs.cuda()
    test_image_embs = test_image_embs.cuda()
    train_imageEmbs = train_imageEmbs.cuda()
    video_texts_test = video_texts_test.cuda()
    image_texts_test = image_texts_test.cuda()
    video_texts = video_texts.cuda()
    image_texts = image_texts.cuda()

# per-epoch metric buffers, filled during training and plotted at the end
training_loss = np.zeros(EPOCHS)
train_auc = np.zeros(EPOCHS)
test_loss = np.zeros(EPOCHS)
test_auc = np.zeros(EPOCHS)
test_p1 = np.zeros(EPOCHS)
test_p10 = np.zeros(EPOCHS)
test_p50 = np.zeros(EPOCHS)

# --data loader----------------------------------------------------------------------------------
torch.manual_seed(SEED)
if CUDA:
    torch.cuda.manual_seed(SEED)

# DataLoader instances
kwargs = {'num_workers': 0, 'pin_memory': True} if CUDA else {}

# pretrained word-embedding table: one 100-d row per vocabulary token
textEmbeddings_length = 78230
textEmbeddings_width = 100
textEmbeddings = np.load(text_data_path + "textEmbeddings.npy")

train_loader = torch.utils.data.DataLoader(
    ProductData(),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs,
    drop_last=True)

BRT_MF_input_size = 100  # feature width fed into the MF recommenders

class Pre_Combined(nn.Module):
    """Empty stub, presumably for an early-fusion ("pre-combined") model
    variant — no layers or forward pass are defined here."""

    def __init__(self):
        super().__init__()


# --model setup----------------------------------------------------------------------------------
class Later_Combined(nn.Module):
    """Late-fusion matching model.

    Scores (video, image) index pairs by combining two signals:
      * a text score from GRU-encoded video/image token sequences, and
      * a visual score from the pre-extracted 2048-d features
        (linearly cast down to ``BRT_MF_input_size``),
    each scored by an MF recommender, then mixed with the learned scalar
    ``comb_weight``.
    """

    def __init__(self):
        super(Later_Combined, self).__init__()
        # image
        # cast the feature vector size from 2048 to 100
        image_ori_feature_width = 2048
        self.input_cast = nn.Linear(image_ori_feature_width, BRT_MF_input_size)

        # text: pretrained word embeddings (+1 row — presumably for padding, TODO confirm)
        self.data_embeddings = nn.Embedding(textEmbeddings_length + 1, textEmbeddings_width)
        self.data_embeddings.weight.data.copy_(torch.from_numpy(textEmbeddings))
        # one GRU per modality, each with its own learnable initial hidden state
        self.gru_video = nn.GRU(textEmbeddings_width, textEmbeddings_width, batch_first=True)
        self.h0_video = Parameter(torch.zeros((1, 1, textEmbeddings_width), requires_grad=True))
        self.gru_image = nn.GRU(textEmbeddings_width, textEmbeddings_width, batch_first=True)
        self.h0_image = Parameter(torch.zeros((1, 1, textEmbeddings_width), requires_grad=True))

        # learned late-fusion mixing weight between the text and image scores
        self.comb_weight = Parameter(torch.ones(1) * init_weight, requires_grad=True)
        # MF model (one per modality unless `if_same_rec` is set)
        self.text_rec = rec_model(BRT_MF_input_size, train_set_length, train_set_length)
        self.video_rec = rec_model(BRT_MF_input_size, train_set_length, train_set_length)

    def forward(self, pos_list, sample_list,  # index lists of positives and sampled candidates
                pos_video_embd, neg_image_embd,  # visual-feature inputs
                videoTexts, imageTexts  # token-id inputs
                ):
        """Return fused matching scores for the given (video, image) pairs."""
        # image: project 2048-d features down to the MF input width
        pos_video_embd = self.input_cast(pos_video_embd)
        neg_image_embd = self.input_cast(neg_image_embd)

        # text: embed token ids, run each sequence through its GRU
        # (h0 is broadcast across the batch via repeat)
        videoEmbs = self.data_embeddings(videoTexts)
        imageEmbs = self.data_embeddings(imageTexts)
        video_embs, _ = self.gru_video(videoEmbs, self.h0_video.repeat(1, pos_list.shape[0], 1))
        if not if_same_gru:
            image_embs, _ = self.gru_image(imageEmbs, self.h0_image.repeat(1, sample_list.shape[0], 1))
        else:
            image_embs, _ = self.gru_video(imageEmbs, self.h0_video.repeat(1, sample_list.shape[0], 1))
        # sum GRU outputs over the time dimension -> one vector per sequence
        video_fused_emb = torch.sum(video_embs, dim=-2)
        image_fused_emb = torch.sum(image_embs, dim=-2)

        # combine: score each modality with its MF model, then mix
        y_text = self.text_rec(pos_list, sample_list, video_fused_emb, image_fused_emb)
        if not if_same_rec:
            y_image = self.video_rec(pos_list, sample_list, pos_video_embd, neg_image_embd)
        else:
            y_image = self.text_rec(pos_list, sample_list, pos_video_embd, neg_image_embd)

        y = self.comb_weight * y_text + (1 - self.comb_weight) * y_image
        return y


# instantiate the late-fusion model (moved to the GPU when CUDA is enabled)
model = Later_Combined()
if CUDA:
    model = model.cuda()


def if_in(k, l):
    """Return True if string ``k`` starts with any prefix in ``l``.

    Used to match parameter names against the frozen-prefix list in
    ``get_optim``.  ``str.startswith`` accepts a tuple of prefixes, so the
    manual loop of the original is unnecessary; an empty ``l`` yields False,
    matching the original behaviour.
    """
    return k.startswith(tuple(l))


# -- optimizer -----------------------------------------------------------
def get_optim(model, epoch):
    """Build an optimizer over the trainable subset of ``model``'s parameters.

    Parameters whose names start with an entry of the skip list are frozen:
    'gru*' unless ``if_train_gru`` is set, and 'data_embeddings*' unless
    ``if_train_text_embd`` is set.  The optimizer class is selected by the
    module-level ``opt_sele``; any unrecognised value falls back to Adam,
    matching the original behaviour.  ``epoch`` is accepted for interface
    compatibility but not used.
    """
    not_train_list = ['not_train']
    if not if_train_gru:
        not_train_list.append('gru')
    if not if_train_text_embd:
        not_train_list.append('data_embeddings')

    # keep only parameters not matched by any frozen prefix
    model_optim = [v for k, v in model.named_parameters()
                   if not if_in(k, not_train_list)]
    params_model = [{'params': model_optim}]

    if opt_sele == 'Adagrad':
        return optim.Adagrad(params_model, lr=learning_rate, weight_decay=1e-5)
    if opt_sele == 'SGD':
        return optim.SGD(params_model, lr=learning_rate)
    # 'Adam' and the fallback branch of the original were byte-identical
    # Adam constructions; they are consolidated here.
    return optim.Adam(params_model, lr=learning_rate, betas=(0.9, 0.999),
                      eps=1e-08,
                      weight_decay=1e-5,
                      amsgrad=False)


def print_opt():
    """Append the run's hyper-parameter header lines to the two result files.

    Writes one summary line to ``model_batch_train_loss.txt`` (training log)
    and one to ``test_performance.txt`` (evaluation log) under ``result_path``.
    """
    with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
        file.write(
            'LR: {} | BATCH_SIZE: {} | decay: {} | lr_decay_rate:{} | Epoch: decay * 2 + 1 | '
            'number_neg_sample_train:{}\n'.format(
                learning_rate, BATCH_SIZE, decay, lr_decay_rate, number_neg_sample_train))
    with io.open(result_path + 'test_performance.txt', 'a', encoding='utf-8') as file:
        file.write(
            'LR: {} | BATCH_SIZE: {} | decay: {} | lr_decay_rate:{} | Epoch: decay * 2 + 1 | '
            'number_sample_eval:{}\n'.format(
                learning_rate, BATCH_SIZE, decay, lr_decay_rate, test_batch_size))


def loss_function(pos_scores, neg_scores):
    """BPR loss: mean of -log sigmoid(pos - neg) over the batch.

    ``my_repeat`` tiles the positive scores so they align one-to-one with
    the negatives when number_neg_sample_train > 1 (currently fixed at 1,
    so it is effectively a no-op — TODO confirm against tools.py).
    """
    pos_scores = my_repeat(pos_scores, number_neg_sample_train)
    # the subtraction must happen on the same device as neg_scores
    if CUDA:
        difference = pos_scores.float().cuda() - neg_scores
    else:
        difference = pos_scores.float() - neg_scores
    bpr_loss = - F.logsigmoid(difference)

    return torch.mean(bpr_loss)


# -- training ------------------------------------------------------------------
def train(epoch, decay):
    """Run one training epoch over ``train_loader``.

    Draws negative samples per batch, computes BPR loss and a pairwise AUC,
    logs per-batch progress to stdout and to text files under ``result_path``,
    and stores the epoch averages in the module-level ``training_loss`` /
    ``train_auc`` buffers.  ``decay`` is accepted but not used here.
    """
    print('Training starts..')
    # toggle model to train mode
    model.train()
    # initialize loss
    train_loss = 0
    auc_total = 0.0
    precision_total = 0  # NOTE(review): never accumulated — appears unused
    recall_total = 0  # NOTE(review): never accumulated — appears unused

    total_time = time.time()

    for batch_idx, [index, image_feature, video_feature, videoIDs, imageIDs] in enumerate(train_loader):
        start_time = time.time()
        # draw positive / negative sample index lists for this batch
        pos_list, sample_list, pos_samples_index, neg_samples_index = generate_samples(number_neg_sample_train, index,
                                                                                       BATCH_SIZE, 35000)
        # ============== image feature preprocessing ==============
        # NOTE(review): both positives and negatives are indexed from the
        # image-embedding table; presumably the positives should come from a
        # video-embedding table — confirm against the data pipeline.
        neg_image_embd = train_imageEmbs[sample_list]
        pos_video_embd = train_imageEmbs[pos_list]

        if CUDA:
            pos_video_embd = pos_video_embd.cuda()
            neg_image_embd = neg_image_embd.cuda()

        # ============== text preprocessing =================
        video_batch = video_texts[pos_list]
        image_batch = image_texts[sample_list]

        # ================ forward / backward ===================
        pos_list = torch.from_numpy(np.array(pos_list))
        sample_list = torch.from_numpy(np.array(sample_list))

        if CUDA:
            pos_list = pos_list.cuda()
            sample_list = sample_list.cuda()

        scores = model(
            pos_list=pos_list, sample_list=sample_list,
            # video
            pos_video_embd=pos_video_embd, neg_image_embd=neg_image_embd,
            # text
            videoTexts=video_batch, imageTexts=image_batch
        )

        pos_scores = scores[pos_samples_index]
        neg_scores = scores[neg_samples_index]
        # fraction of pairs where the positive outranks its negative
        auc = len(torch.nonzero((pos_scores - neg_scores) > 0)) / BATCH_SIZE

        auc_total += auc

        loss = loss_function(pos_scores, neg_scores)

        optimizer.zero_grad()
        loss.backward()

        train_loss += loss.item()
        optimizer.step()

        print(
            'Train Epoch: {} [{}/{} ({:.0f}%)] | Loss: {:.4f}  | auc: {:.4f} | Time:{} | Total_Time:{}\n'.format(
                epoch, (batch_idx + 1) * len(index), len(train_loader.dataset),
                       100. * (batch_idx + 1) / len(train_loader), loss, auc,
                       round((time.time() - start_time), 4) * LOG_INTERVAL,
                round((time.time() - total_time), 4)))

        # records current progress for tracking purpose
        with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
            file.write(
                'LR: {} | Train Epoch: {} [{}/{} ({:.0f}%)] | Loss: {:.4f} | auc: {:.4f}| Time:{} | Total_Time:{}\n'.format(
                    learning_rate, epoch, (batch_idx + 1) * len(index), len(train_loader.dataset),
                                          100. * (batch_idx + 1) / len(train_loader), loss, auc,
                                          round((time.time() - start_time), 4) * LOG_INTERVAL,
                    round((time.time() - total_time), 4)))

    print(
        '====> Epoch: {} | Average loss: {:.4f} | Time:{}'.format(
            epoch, train_loss / len(train_loader), round((time.time() - total_time), 4)))

    # per-epoch summary appended to both log files
    with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
        file.write('====> LR: {} | Epoch: {} | Average loss: {:.4f} | Average auc: {:.4f} | Time:{}'.format(
            learning_rate, epoch, train_loss / len(train_loader), auc_total / len(train_loader),
            round((time.time() - total_time), 4)))

    with io.open(result_path + 'train_loss.txt', 'a', encoding='utf-8') as file:
        file.write('====> LR: {} | Epoch: {} | Average loss: {:.4f} | avg auc: {:.4f} |Time:{}\n'.format(
            learning_rate, epoch, train_loss / len(train_loader), auc_total / len(train_loader),
            round((time.time() - total_time), 4)))

    # store per-epoch averages for plotting (epoch is 1-indexed)
    training_loss[epoch - 1] = train_loss / len(train_loader)
    train_auc[epoch - 1] = auc_total / len(train_loader)


# -- lr ------------------------------------------------------------------
def lr_scheduler(target_optimizer, init_lr, ep, lr_decay_iter, decay_rate=None):
    """Step-decay the learning rate every ``lr_decay_iter`` epochs.

    When ``ep`` is a multiple of ``lr_decay_iter``, multiply ``init_lr`` by
    ``decay_rate`` (defaults to the module-level ``lr_decay_rate``), write it
    into every param group of ``target_optimizer`` and return it; otherwise
    return ``init_lr`` unchanged.  (The original comment claimed a drop to
    0.1 * init_lr, but the actual factor is lr_decay_rate = 0.5.)
    """
    if ep % lr_decay_iter:
        # not a decay epoch — leave the learning rate untouched
        return init_lr
    if decay_rate is None:
        decay_rate = lr_decay_rate
    new_lr = init_lr * decay_rate
    # apply to all param groups (this script only ever creates one)
    for group in target_optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr


# -- test ------------------------------------------------------------------
def test():
    """Evaluate retrieval on a subsample of the test split.

    For every ``test_step``-th test video, scores a batch of candidate
    images guaranteed to contain the ground-truth match, records the rank
    of the true match, and returns (P@1, P@10, P@50) over all sampled items.
    """
    print('Start test...')
    # toggle to eval mode
    model.eval()
    all_index = np.arange(test_set_length, step=test_step)
    aim = np.copy(all_index)
    for test_index in all_index:
        # one evaluation batch: the positive index repeated, plus candidates
        # sampled without replacement
        pos_list = np.ones(test_batch_size, dtype=np.int64) * test_index
        example = np.random.choice(all_index, test_batch_size, replace=False)
        if test_index not in example:
            # guarantee the ground-truth item is among the candidates
            # (BUG FIX: the original hard-coded randint(0, 99) instead of
            # deriving the bound from test_batch_size)
            example[random.randint(0, test_batch_size - 1)] = test_index

        # visual features
        video_feature_batch = test_videoEmbs[np.repeat(test_index, test_batch_size)]
        image_feature_batch = test_image_embs[example]
        # text features
        video_texts_batch = video_texts_test[np.repeat(test_index, test_batch_size)]
        # BUG FIX: the original indexed video_texts_test here, so the image
        # candidates were scored with video texts; image_texts_test was loaded
        # (and moved to the GPU) but never used.
        image_texts_batch = image_texts_test[example]

        pos_list = torch.from_numpy(pos_list)
        example = torch.from_numpy(example)
        if CUDA:
            pos_list = pos_list.cuda()
            example = example.cuda()
            video_feature_batch = video_feature_batch.cuda()
            image_feature_batch = image_feature_batch.cuda()
            video_texts_batch = video_texts_batch.cuda()
            image_texts_batch = image_texts_batch.cuda()

        scores = model(
            pos_list=pos_list, sample_list=example,
            # video
            pos_video_embd=video_feature_batch, neg_image_embd=image_feature_batch,
            # text
            videoTexts=video_texts_batch, imageTexts=image_texts_batch
        )

        # rank of the true match within the candidate batch
        rank_list = top_match(scores, example.tolist(), test_batch_size)
        aim[int(test_index / test_step)] = rank_list.tolist().index(test_index)
    test_count = test_set_length / test_step
    return (len(aim[aim == 0]) / test_count,
            len(aim[aim < 10]) / test_count,
            len(aim[aim < 50]) / test_count)


# -- training process ----------------------------------------------------------------
best_loss = 0  # NOTE(review): never updated — appears unused
print_opt()

if_train = 1  # 1: train from scratch; 0: reload saved checkpoints and evaluate only
for epoch in range(1, EPOCHS + 1):
    # a fresh optimizer is rebuilt every epoch over the trainable parameters
    optimizer = get_optim(model, epoch)
    learning_rate = lr_scheduler(optimizer, learning_rate, epoch, decay_freq)
    print(learning_rate)
    # train
    if if_train:
        train(epoch, decay)
        torch.save(model.state_dict(), result_path + 'model-{}.pt'.format(epoch))
    else:
        # evaluation-only path: reload the checkpoint saved for this epoch
        model = Later_Combined()
        model.load_state_dict(torch.load((result_path + 'model-{}.pt'.format(epoch))))
        model.eval()
        if CUDA:
            model.cuda()

    # test (the epoch > 0 guard is always true since epoch starts at 1)
    start_time = time.time()
    if epoch > 0:
        p1, p10, p50 = test()
        test_p1[epoch - 1] = p1
        test_p10[epoch - 1] = p10
        test_p50[epoch - 1] = p50
        with io.open(result_path + 'test_performance.txt', 'a', encoding='utf-8') as file:
            file.write(
                'Epoach {}:  LR={} P1={:.5f}, P10={:.5f}, P50={:.5f},  '
                'Time={}\n'.format
                (epoch, learning_rate, p1, p10, p50,
                 round((time.time() - start_time), 4)))

    # push the epoch's metrics to fitlog (step is 0-indexed)
    if if_fitlog:
        step = epoch - 1
        fitlog.add_loss(training_loss[step], step=step, name="train_loss")
        fitlog.add_metric(train_auc[step], step=step, name="train_auc")
        fitlog.add_metric(test_p1[step], step=step, name="p1")
        fitlog.add_metric(test_p10[step], step=step, name="p10")
        fitlog.add_metric(test_p50[step], step=step, name="p50")
        fitlog.add_metric(model.comb_weight.data, step=step, name="comb_weight")


def clean_file(file_path):
    """Delete ``file_path`` if it exists as a regular file; otherwise do nothing."""
    if not os.path.isfile(file_path):
        return
    os.remove(file_path)


def plot_y_graphs(list1y, namelist1y, pic_path):
    """Plot one or more y-series (x axis = epoch) on a single figure and save
    it to ``pic_path + "<name1>_<name2>_..._.png"``, replacing any existing file.

    list1y     -- list of 1-D sequences to plot
    namelist1y -- matching list of series names (used for labels and file name)
    pic_path   -- directory/prefix for the output image
    """
    colors = ['b', 'g', 'r']  # cycled when more than three series are given
    fig, ax1 = plt.subplots()
    for h, series in enumerate(list1y):
        ax1.plot(series, colors[h % len(colors)], label=namelist1y[h])
    ax1.set_xlabel("Epoch")
    # BUG FIX: the original passed the whole name list to set_ylabel (rendered
    # as a Python-list repr) and called legend() twice, the second call
    # discarding the first's explicit labels; use a joined string and a single
    # legend call driven by the per-line label= kwargs.
    ax1.set_ylabel(", ".join(namelist1y))
    ax1.legend(loc=4)
    # file name matches the original scheme: "name1_name2_..._.png"
    pic_path = pic_path + "".join(name + "_" for name in namelist1y) + '.png'
    clean_file(pic_path)
    plt.savefig(pic_path)


# draw the training curves and the test precision curves into result_path
plot_y_graphs([training_loss, train_auc], ["train_loss", "train_auc"], result_path)
plot_y_graphs([test_p1, test_p10, test_p50], ["p1", "p10", "p50"], result_path)

# NOTE(review): called unconditionally even though fitlog.set_log_dir is only
# invoked when if_fitlog is set — confirm this is safe when if_fitlog == 0
fitlog.finish()
