import io
import scipy.io as matio
import os
import os.path
import numpy as np
from PIL import Image
import time
import re
import ipdb
import torch
import torch.utils.data
import torch.nn.parallel as para
from torch import nn, optim
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.nn import Parameter
from MF import FM as rec_model
import pickle
from tool_origin import get_len_of_nonzero, computer_train_auc, top_match, prepare_sample, my_repeat, compute_auc

# --Path settings----------------------------------------------------------------------------------
root_path = '/mnt/gru/'
result_path = root_path + 'result/chenran_1_lr1e-4/'
if not os.path.exists(result_path):
    os.makedirs(result_path)

# --Hyper-parameter settings------------------------------------------------
BATCH_SIZE = 8
learning_rate = 1e-3  # initial learning rate
decay = 3  # decay interval, in epochs
EPOCHS = decay * 2 + 1  # total number of training epochs
lr_decay_rate = 0.5  # learning-rate decay factor
opt_sele = 'Adam'  # optimizer choice: Adam or Adagrad

# --manual setting----------------------------------------------------------------------------------
CUDA = 1  # 1 for True; 0 for False
SEED = 1
LOG_INTERVAL = 4  # log every LOG_INTERVAL batches

# define some variables
video_num = 35000  # number of video items
image_num = video_num
video_width = 121  # NOTE(review): appears unused in this file
image_width = 22  # NOTE(review): appears unused in this file

# load user dict
# Token-ID sequences (one row of word indices per item) for both modalities.
video_texts = torch.from_numpy(np.load('/mnt/gru/readFile/video_text_data_train.npy'))
image_texts = torch.from_numpy(np.load("/mnt/gru/readFile/image_text_data_train.npy"))

video_texts_test = torch.from_numpy(np.load('/mnt/gru/readFile/video_text_data_test.npy'))
image_texts_test = torch.from_numpy(np.load("/mnt/gru/readFile/image_text_data_test.npy"))

textEmbeddings_length = 78230  # vocabulary size (row 0 of the table is reserved)
textEmbeddings_width = 100  # embedding dimensionality
k = 10  # top-k items for rec

number_neg_sample_train = 1  # negative samples per positive during training
number_sample_eval = 14999  # repeated candidates per query at evaluation time

# draw_data
# Per-epoch metric curves collected for later plotting.
training_loss = np.zeros(EPOCHS)
train_auc = np.zeros(EPOCHS)
test_loss = np.zeros(EPOCHS)
test_auc = np.zeros(EPOCHS)

torch.manual_seed(SEED)
if CUDA:
    torch.cuda.manual_seed(SEED)

# --load pre-trained embeddings for text-------------------------------------------------------------
# Builds the (vocab+1, dim) embedding matrix used to initialise nn.Embedding.
# Row 0 is kept all-zero (padding); rows 1..N hold the pre-trained vectors in
# sorted-key order of the pickled {index: vector} dict.
path = "/mnt/gru/readFile/all_index2vec_dict.pkl"
with open(path, 'rb') as fo:
    dict_data = pickle.load(fo, encoding='bytes')
textEmbeddings = np.zeros([textEmbeddings_length + 1, textEmbeddings_width])
index = sorted(dict_data.keys())
# Row-wise copy replaces the original element-by-element double loop, which
# performed ~7.8M Python-level scalar assignments for the same result.
for i, key in enumerate(index[:textEmbeddings_length], start=1):
    textEmbeddings[i] = dict_data[key][:textEmbeddings_width]

# --Create dataset----------------------------------------------------------------------------------
class ProductData(torch.utils.data.Dataset):
    """Training dataset pairing each video's token-ID row with its image's."""

    def __init__(self):
        # Token-ID matrices (one row of word indices per item), loaded once.
        self.videoTextIDs = torch.from_numpy(np.load("/mnt/gru/readFile/video_text_data_train.npy"))
        self.imageTextIDs = torch.from_numpy(np.load("/mnt/gru/readFile/image_text_data_train.npy"))

    def __getitem__(self, index):
        # The row index itself is returned too; the trainer uses it for
        # negative sampling.
        return self.videoTextIDs[index], self.imageTextIDs[index], index

    def __len__(self):
        return len(self.videoTextIDs)

    # DataLoader instance over ProductData is created below.


# Worker processes and pinned memory only pay off when feeding a GPU.
kwargs = {'num_workers': 4, 'pin_memory': True} if CUDA else {}

# drop_last keeps every batch exactly BATCH_SIZE items, which the training
# loop's per-batch AUC computation relies on.
train_loader = torch.utils.data.DataLoader(
    ProductData(),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs,
    drop_last=True)


# --model setup----------------------------------------------------------------------------------
class myModel(nn.Module):
    """Two GRU text encoders (video / image) feeding the FM recommender."""

    def __init__(self):
        super(myModel, self).__init__()
        # Shared word-embedding table, initialised from the pre-trained matrix.
        self.data_embeddings = nn.Embedding(textEmbeddings_length + 1, textEmbeddings_width)
        self.data_embeddings.weight.data.copy_(torch.from_numpy(textEmbeddings))
        # One GRU per modality, each with its own learnable initial state.
        self.gru_video = nn.GRU(textEmbeddings_width, textEmbeddings_width, batch_first=True)
        self.h0_video = Parameter(
            torch.zeros((1, 1, textEmbeddings_width), requires_grad=True))
        self.gru_image = nn.GRU(textEmbeddings_width, textEmbeddings_width, batch_first=True)
        self.h0_image = Parameter(
            torch.zeros((1, 1, textEmbeddings_width), requires_grad=True))
        self.rec_model = rec_model(textEmbeddings_width, video_num, image_num)

    def forward(self, videoIDs, imageIDs, videoTexts, imageTexts, video_texts_nonzero_len, image_texts_nonzero_len):
        # Embed tokens, encode each sequence with its GRU (initial state
        # broadcast over the batch), then collapse the per-step outputs into
        # one vector per item before scoring with the FM model.
        v_states, _ = self.gru_video(self.data_embeddings(videoTexts),
                                     self.h0_video.repeat(1, videoIDs.shape[0], 1))
        i_states, _ = self.gru_image(self.data_embeddings(imageTexts),
                                     self.h0_image.repeat(1, imageIDs.shape[0], 1))
        v_fused = self.emb_fusion(v_states, 1, video_texts_nonzero_len)
        i_fused = self.emb_fusion(i_states, 1, image_texts_nonzero_len)
        return self.rec_model(videoIDs, imageIDs, v_fused, i_fused)

    def emb_fusion(self, embs, flag, nonzero_len):
        """Collapse per-step GRU outputs into one vector per sequence.

        flag == 0: mean of the first nonzero_len[i] states (zeros if empty);
        flag == 1: the single state at index nonzero_len[i].
        NOTE(review): flag==1 indexes at nonzero_len[i] itself, not
        nonzero_len[i] - 1 — confirm get_len_of_nonzero's convention.
        """
        out = torch.zeros(len(nonzero_len), textEmbeddings_width)
        if CUDA:
            out = out.cuda()
        if flag == 0:
            for row, count in enumerate(nonzero_len):
                for step in range(count):
                    out[row] = out[row] + embs[row][step]
                if count != 0:
                    out[row] = out[row] / count
        elif flag == 1:
            for row, count in enumerate(nonzero_len):
                out[row] = embs[row][count]
        return out


# Instantiate the model and move it to the GPU when CUDA is enabled.
model = myModel()

if CUDA:
    model = model.cuda()


# -- optimizer -----------------------------------------------------------
def get_optim(model, epoch):
    """Build an optimizer over every parameter except the pre-trained
    word-embedding table (`data_embeddings`), which stays frozen.

    `epoch` is accepted for interface compatibility but is unused here; the
    learning-rate schedule is applied separately by lr_scheduler().
    Selects Adam or Adagrad via the module-level `opt_sele`; any other value
    falls back to Adam (same behaviour as before).
    """
    # (The original also built an unused list of parameter *names*; removed.)
    trainable = [v for k, v in model.named_parameters()
                 if not k.startswith('data_embeddings')]
    params_model = [{'params': trainable}]

    if opt_sele == 'Adagrad':
        return optim.Adagrad(params_model, lr=learning_rate, weight_decay=1e-5)
    # 'Adam' and unrecognised settings share the identical fallback branch.
    return optim.Adam(params_model, lr=learning_rate, betas=(0.9, 0.999),
                      eps=1e-08,
                      weight_decay=1e-5,
                      amsgrad=False)


# -- Loss ------------------------------------------------------------------
def loss_function(pos_scores, neg_scores):
    """BPR loss: mean of -log(sigmoid(pos - neg)) over all pos/neg pairs.

    Each positive score is repeated `number_neg_sample_train` times so it
    lines up element-wise with its negative samples.
    """
    pos_scores = my_repeat(pos_scores, number_neg_sample_train)
    # Fix: follow the device of neg_scores instead of the original
    # unconditional .cuda(), which crashed whenever CUDA == 0.
    difference = pos_scores.float().to(neg_scores.device) - neg_scores
    bpr_loss = -F.logsigmoid(difference)
    return torch.mean(bpr_loss)


def print_opt():
    """Append the current hyper-parameter configuration to both log files."""
    shared = 'LR: {} | BATCH_SIZE: {} | decay: {} | lr_decay_rate:{} | Epoch: decay * 2 + 1 | '.format(
        learning_rate, BATCH_SIZE, decay, lr_decay_rate)
    with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
        file.write(shared + 'number_neg_sample_train:{}\n'.format(number_neg_sample_train))
    with io.open(result_path + 'test_performance.txt', 'a', encoding='utf-8') as file:
        file.write(shared + 'number_sample_eval:{}\n'.format(number_sample_eval))


# -- training ------------------------------------------------------------------
def train(epoch, decay):
    """Run one epoch of BPR training over `train_loader`.

    Relies on module-level state: `model`, `optimizer`, `train_loader`,
    `video_texts` / `image_texts`, the hyper-parameter globals, and the
    `training_loss` / `train_auc` arrays it fills in at the end.
    The `decay` argument is accepted but not used inside this function.
    """
    print('Training starts..')
    model.train()

    train_loss = 0
    auc_total = 0.0
    precision_total = 0  # NOTE(review): never updated below
    recall_total = 0  # NOTE(review): never updated below

    total_time = time.time()

    for batch_idx, data in enumerate(train_loader):
        start_time = time.time()
        video_text = data[0]  # overwritten below once video_id is expanded
        image_text = data[1]  # NOTE(review): unused; sample_texts is used instead
        index = data[2]

        # Draw the positive + negative image samples for each video in batch.
        sample_ids, pos_samples_index, neg_samples_index = prepare_sample(number_neg_sample_train, index, BATCH_SIZE,
                                                                          image_texts)
        sample_texts = image_texts[sample_ids].squeeze(1)
        image_texts_nonzero_len = get_len_of_nonzero(sample_texts)

        # Repeat each video (and its length info) once per paired sample:
        # 1 positive + number_neg_sample_train negatives.
        video_id = torch.from_numpy(np.array(index))
        video_texts_nonzero_len = get_len_of_nonzero(video_texts[video_id])
        video_id = my_repeat(video_id, number_neg_sample_train + 1)
        video_texts_nonzero_len = my_repeat(video_texts_nonzero_len, number_neg_sample_train + 1)
        video_text = video_texts[video_id]

        if CUDA:
            video_text = video_text.cuda()
            sample_ids = sample_ids.cuda()
            video_id = video_id.cuda()
            sample_texts = sample_texts.cuda()

        scores = model(video_id, sample_ids, video_text, sample_texts, video_texts_nonzero_len, image_texts_nonzero_len)

        pos_scores = scores[pos_samples_index]
        neg_scores = scores[neg_samples_index]

        loss = loss_function(pos_scores, neg_scores)

        # In-batch AUC: each positive is ranked against its own negatives.
        auc_total_batch = 0.0
        for i in range(BATCH_SIZE):
            match_scores = torch.cat(
                [pos_scores[i].unsqueeze(0), neg_scores[i * number_neg_sample_train:(i + 1) * number_neg_sample_train]])
            auc_total_batch += computer_train_auc(match_scores)
        auc = auc_total_batch / BATCH_SIZE
        auc_total += auc_total_batch
        optimizer.zero_grad()
        loss.backward()
        train_loss += loss.item()
        optimizer.step()

        # First batch of the first epoch: sanity-check log line + file entry.
        if epoch == 1 and batch_idx == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)] | Loss: {:.4f} | Auc:{:.0f}% | Time:{} | Total_Time:{}\n'.format(
                    epoch, (batch_idx + 1) * len(data[2]), len(train_loader.dataset),
                           100. * (batch_idx + 1) / len(train_loader), loss, 100. * auc,
                    round((time.time() - start_time), 4),
                    round((time.time() - total_time), 4)))

            with io.open(result_path + 'train_loss.txt', 'a', encoding='utf-8') as file:
                # NOTE(review): "Epoach" typo is part of the emitted log text.
                file.write('Epoach {}: {}\n'.format(epoch, train_loss))

        elif (batch_idx + 1) % LOG_INTERVAL == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)] | Loss: {:.4f} | Auc:{:.0f}% | Time:{} | Total_Time:{}\n'.format(
                    epoch, (batch_idx + 1) * len(data[2]), len(train_loader.dataset),
                           100. * (batch_idx + 1) / len(train_loader), loss, 100. * auc,
                           round((time.time() - start_time), 4) * LOG_INTERVAL,
                    round((time.time() - total_time), 4)))

            # records current progress for tracking purpose
            # NOTE(review): this line uses len(train_loader) (batch count)
            # where the console print above uses len(train_loader.dataset).
            with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
                file.write(
                    'LR: {} | Train Epoch: {} [{}/{} ({:.0f}%)] | Loss: {:.4f} | Auc:{:.0f}% | Time:{} | Total_Time:{}\n'.format(
                        learning_rate, epoch, (batch_idx + 1) * len(data[2]), len(train_loader),
                                              100. * (batch_idx + 1) / len(train_loader), loss, 100. * auc,
                                              round((time.time() - start_time), 4) * LOG_INTERVAL,
                        round((time.time() - total_time), 4)))

    print(
        '====> Epoch: {} | Average loss: {:.4f} | Time:{}'.format(
            epoch, train_loss / len(train_loader), round((time.time() - total_time), 4)))

    with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
        # NOTE(review): the trailing "/ 10" divisor is unexplained (BATCH_SIZE
        # is 8) — confirm against computer_train_auc's scaling.
        file.write('====> LR: {} | Epoch: {} | Average loss: {:.4f} | Average auc: {:.4f} | Time:{}'.format(
            learning_rate, epoch, train_loss / len(train_loader), auc_total / len(train_loader) / 10,
            round((time.time() - total_time), 4)))

    with io.open(result_path + 'train_loss.txt', 'a', encoding='utf-8') as file:
        file.write('====> LR: {} | Epoch: {} | Average loss: {:.4f} | Time:{}\n'.format(
            learning_rate, epoch, train_loss / len(train_loader), round((time.time() - total_time), 4)))

    # Record the per-epoch curves for plotting.
    training_loss[epoch - 1] = train_loss / len(train_loader)
    train_auc[epoch - 1] = auc_total / len(train_loader) / 10


# -- lr ------------------------------------------------------------------
def lr_scheduler(optimizer, init_lr, epoch, lr_decay_iter, decay_rate=None):
    """Step-decay the learning rate.

    Every `lr_decay_iter` epochs (i.e. when epoch % lr_decay_iter == 0) the
    rate is multiplied by `decay_rate` and written into the optimizer's
    parameter groups; otherwise `init_lr` is returned unchanged.

    `decay_rate` defaults to the module-level `lr_decay_rate` so existing
    callers are unaffected. Returns the (possibly decayed) learning rate.
    """
    if epoch % lr_decay_iter:
        return init_lr

    if decay_rate is None:
        decay_rate = lr_decay_rate  # fall back to the global setting
    # (The old comment claimed a drop to 0.1*init_lr; the factor is decay_rate.)
    lr = init_lr * decay_rate
    # Update every group, not just group 0, so the schedule still holds if the
    # optimizer is ever built with multiple parameter groups.
    for group in optimizer.param_groups:
        group['lr'] = lr

    return lr


# update the model by the pre-trained parameters for myModel -------------------
def get_updateModel(model_path):
    """Load a checkpoint and copy into the global `model` every weight whose
    name exists in the current state dict, leaving all other weights as-is."""
    pretrained = torch.load(model_path, map_location='cpu')
    current = model.state_dict()
    current.update({name: tensor for name, tensor in pretrained.items() if name in current})
    model.load_state_dict(current)


# -- test ------------------------------------------------------------------
def test(video_list_test, image_list_test):
    """Evaluate retrieval quality on the test split.

    Scores `test_length` randomly chosen test videos against the candidate
    image pool and accumulates precision@{1,10,20,50,100,1000} plus AUC.
    Returns the seven metrics averaged over `test_length` queries.
    NOTE(review): runs without torch.no_grad(), so the forward passes keep
    autograd state — consider wrapping if memory becomes an issue.
    """
    print('Start test...')
    model.eval()

    p1_total = 0
    p10_total = 0
    p20_total = 0
    p50_total = 0
    p100_total = 0
    p1000_total = 0
    auc_total = 0

    count = 0
    test_length = 500  # number of query videos evaluated

    # Sample test_length distinct query indices (the comprehension shadows the
    # builtin `id`, harmlessly but worth cleaning up).
    test_indexes = [id for id in range(len(video_list_test))]
    test_index = np.random.choice(test_indexes, test_length, replace=False)

    # Candidate pool: one entry per test item, encoded fresh for every query.
    sample_ids = torch.tensor([id for id in range(len(video_texts_test))]).squeeze()
    sample_texts = image_texts_test[sample_ids].squeeze(1)
    image_texts_nonzero_len = get_len_of_nonzero(sample_texts)

    total_time = time.time()

    for index in range(len(test_index)):
        indexes = test_index[index * 1:(index + 1) * 1]  # one query per step
        count += 1
        # Repeat the query video so it pairs with every candidate:
        # 1 + number_sample_eval copies in total.
        video_id = torch.from_numpy(np.array(indexes))
        video_texts_nonzero_len = get_len_of_nonzero(video_texts_test[video_id])
        video_id = torch.cat([video_id,
                              my_repeat(video_id, number_sample_eval)])
        video_texts_nonzero_len = torch.cat([video_texts_nonzero_len,
                                             my_repeat(video_texts_nonzero_len, number_sample_eval)])
        video_text = video_texts_test[video_id]
        if CUDA:
            video_text = video_text.cuda()
            sample_ids = sample_ids.cuda()
            video_id = video_id.cuda()
            sample_texts = sample_texts.cuda()

        scores = model(video_id, sample_ids, video_text, sample_texts, video_texts_nonzero_len, image_texts_nonzero_len)
        # NOTE(review): only the first 10000 scores are ranked even though
        # number_sample_eval is 14999 — confirm this truncation is intended.
        scores = scores[:10000]
        for i in range(len(indexes)):
            auc = compute_auc(scores, indexes[i], number_sample_eval)
            p1, r, f, ndcg = top_match(scores, sample_ids, indexes[i], 1)  # r/f/ndcg unused
            p10, _, _, _ = top_match(scores, sample_ids, indexes[i], 10)
            p20, _, _, _ = top_match(scores, sample_ids, indexes[i], 20)
            p50, _, _, _ = top_match(scores, sample_ids, indexes[i], 50)
            p100, _, _, _ = top_match(scores, sample_ids, indexes[i], 100)
            p1000, _, _, _ = top_match(scores, sample_ids, indexes[i], 1000)
            p1_total += p1
            p10_total += p10
            p20_total += p20
            p50_total += p50
            p100_total += p100
            p1000_total += p1000
            auc_total += auc

        # Periodic progress checkpoint.
        # NOTE(review): opened with 'w', so each checkpoint overwrites the
        # previous one; use 'a' if a history is wanted.
        if (index + 1) % 100 == 0:
            with open(result_path + 'test_train_data_15000.txt', 'w') as file:
                file.write("0-10000 | index:{} | time:{} | p1_total:{} | p10_total:{} | p100_total:{} | auc:{}".format(
                    index, round((time.time() - total_time), 4), p1_total, p10_total, p100_total,
                    auc_total / (index + 1)
                ))

    return p1_total / test_length, p10_total / test_length, p20_total / test_length, \
           p50_total / test_length, p100_total / test_length, p1000_total / test_length, auc_total / test_length


# -- training process ----------------------------------------------------------------
best_loss = 0  # NOTE(review): never updated or read again below
print_opt()
for epoch in range(1, EPOCHS + 1):
    # NOTE(review): a brand-new optimizer is built every epoch, which resets
    # Adam's moment estimates each time; the step decay is then applied to the
    # module-level learning_rate that the next get_optim() call will read.
    optimizer = get_optim(model, epoch)
    learning_rate = lr_scheduler(optimizer, learning_rate, epoch, decay)
    print(learning_rate)
    # train
    train(epoch, decay)
    torch.save(model.state_dict(), result_path + 'model-{}.pt'.format(epoch))
    # test
    start_time = time.time()
    if epoch > 0:  # always true here (epoch starts at 1): evaluate every epoch
        p1, p10, p20, p50, p100, p1000, auc2 = test(video_texts_test, image_texts_test)
        test_auc[epoch - 1] = auc2
        # write the performance on val data
        with io.open(result_path + 'test_performance.txt', 'a', encoding='utf-8') as file:
            file.write(
                'Epoach {}: k={} number_sample_eval={} LR={} P1={}, P10={}, P20={} P50={}, P100={}, P1000={}, AUC={}, '
                'Time={}\n'.format
                (epoch, k, number_sample_eval + 1, learning_rate, p1, p10, p20, p50, p100, p1000, auc2,
                 round((time.time() - start_time), 4)))