import io
from argparse import ArgumentParser

import scipy.io as matio
import os
import os.path
import numpy as np
from PIL import Image
import time
import re
import torch
import torch.utils.data
import torch.nn.parallel as para
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.nn import Parameter
from MF import FM as rec_model
import pickle
import math
import sys
from tools import fasterrcnn_load_caculate, fasterrcnn_embd, randomImagePath, top_match, my_repeat
import random
from random import choice
import platform
# --Command-line arguments----------------------------------------------------
parser = ArgumentParser(description="设定输入参数")
parser.add_argument("-t", "--if_train", default=1, help="默认先训练", type=int)
parser.add_argument("-g", "--if_gpu", default=1, help="默认使用gpu, win默认cpu", type=int)
parser.add_argument("-lr", "--learn_rate", default=0.001, type=float)
parser.add_argument("-lrd", "--lrd", default=0.5, type=float)
args = parser.parse_args()
if_train = bool(args.if_train)
if_cuda = bool(args.if_gpu)  # 1 for True; 0 for False
lr = args.learn_rate
lrd = args.lrd

# -------Hyper-parameters-----------------------------
learning_rate = lr
decay = 8                       # lr decays every `decay` epochs
EPOCHS = decay * 2 + 1
lr_decay_rate = lrd
opt_sele = 'Adam'  # optimizer selection: Adam or Adagrad

# --manual setting----------------------------------------------------------------------------------------
CUDA = if_cuda  # 1 for True; 0 for False
# This Windows build does not support CUDA, so force CPU there.
if platform.system() == "Windows":
    # BUG FIX: was `CUDA == 0`, a no-op comparison — the flag was never
    # actually cleared on Windows. Must be an assignment.
    CUDA = 0

SEED = 1
LOG_INTERVAL = 4

# define some variables
video_num = 35000
image_num = 35000

embeddings_width = 4096
k = 10  # top-k items for rec
number_neg_sample_train = 1  # 5

BATCH_SIZE = 1
number_sample_eval = 9999

# --Path settings----------------------------------------------------------------------------------
root_path = '/mnt/fasterMF'
allids = os.listdir('/data/train/image')      # training ids (shared by images and videos)
test_allids = os.listdir('/data/test/image')  # test ids
# NOTE: a hard-coded result path used to be assigned here and then immediately
# overwritten; that dead store has been removed — only the parameterized path
# below ever took effect.
result_path = 'result_for_FM_fasterrcnn/lr_' + str(learning_rate) + '_bs_' + str(BATCH_SIZE) + '_lrdc_' + str(
    lr_decay_rate) + '/'
os.makedirs(result_path, exist_ok=True)

# test data: pre-computed faster-rcnn embeddings for the test split
# (presumably shape [num_test, 4096] to match embeddings_width — TODO confirm)
test_imageEmbs = torch.from_numpy(np.load('/mnt/fasterMF/res_model_save/image_test_fasterrcnn_embedding.npy'))
test_videoEmbs = torch.from_numpy(np.load('/mnt/fasterMF/res_model_save/video_test_fasterrcnn_embedding.npy'))
if CUDA:
    test_videoEmbs = test_videoEmbs.cuda()
    test_imageEmbs = test_imageEmbs.cuda()

# per-epoch curves for later plotting
training_loss = np.zeros(EPOCHS)
train_auc = np.zeros(EPOCHS)

# reproducibility
torch.manual_seed(SEED)
if CUDA:
    torch.cuda.manual_seed(SEED)


# --data loader------------------------------------------------------------------------------------------              
class ProductData(torch.utils.data.Dataset):
    """Training dataset yielding (positive index, image path, video-frame path).

    The image set and the video set share the same ids. Every video clip has
    frames sampled every 40 steps in [0, 360]; one frame is drawn at random
    on each access.
    """

    # candidate frame filenames available for every video clip
    _FRAMES = ['0.jpg', '40.jpg', '80.jpg', '120.jpg', '160.jpg',
               '200.jpg', '240.jpg', '280.jpg', '320.jpg', '360.jpg']

    def __init__(self):
        # video and image ids are identical; this lists the training split
        self.ids = os.listdir('/data/train/image')

    def __getitem__(self, index):
        sample_id = self.ids[index]
        image_path = randomImagePath(sample_id)
        # position of this id inside the global id list (the "positive" index)
        index_pos = allids.index(sample_id)
        video_path = '/data/train/trainVideoToPic/' + sample_id + '/' + choice(self._FRAMES)
        return index_pos, image_path, video_path

    def __len__(self):
        return len(self.ids)


# Build the training DataLoader; worker processes and pinned host memory are
# only worthwhile when feeding a GPU.
if CUDA:
    kwargs = {'num_workers': 4, 'pin_memory': True}
else:
    kwargs = {}

train_loader = torch.utils.data.DataLoader(
    ProductData(),
    batch_size=BATCH_SIZE,
    shuffle=True,
    drop_last=True,
    **kwargs,
)


# --model setup----------------------------------------------------------------------------------
def ElogSigm(x):
    """Squash a positive score via 1 / (1 + 1 / ln(log10(x))).

    Only defined for log10(x) > 1, i.e. x > 10 (otherwise the inner log is
    non-positive or zero-divides). NOTE(review): wrapping the result in
    ``torch.tensor`` produces a fresh leaf tensor, detaching it from any
    upstream autograd graph — confirm this is intended.
    """
    inner = math.log(x, 10)   # base-10 log of the raw score
    outer = math.log(inner)   # natural log of that
    return torch.tensor(1 / (1 + (1 / outer)))


class myModel(nn.Module):
    """End-to-end model: two pretrained faster-rcnn feature extractors feed a
    factorization-machine-style recommender (``rec_model``), whose score is
    squashed by ``ElogSigm``.
    """

    def __init__(self):
        super(myModel, self).__init__()
        # separate detector checkpoints for the video-frame and image domains
        self.video_fasterrcnn = fasterrcnn_load_caculate('/mnt/fasterMF/res_model_save/faster_rcnn_1_9_20011.pth')
        self.image_fasterrcnn = fasterrcnn_load_caculate('/mnt/fasterMF/res_model_save/faster_rcnn_1_6_10263.pth')
        self.rec_model = rec_model(embeddings_width, video_num, image_num)
        self.ElogSigm = ElogSigm

    def forward(self, index_pos, index_neg, video_path, image_path):
        # buffers for one (video, image) pair of 4096-d embeddings
        video_feat = torch.zeros(1, 4096)
        image_feat = torch.zeros(1, 4096)

        # run the pretrained detectors to embed each file as a feature vector
        video_feat[0] = fasterrcnn_embd(self.video_fasterrcnn, video_path, CUDA)
        image_feat[0] = fasterrcnn_embd(self.image_fasterrcnn, image_path, CUDA)

        if CUDA:
            video_feat = video_feat.cuda()
            image_feat = image_feat.cuda()

        score = self.rec_model(index_pos, index_neg, video_feat, image_feat)
        return self.ElogSigm(score)


# Instantiate the full model once; move it to the GPU when CUDA is enabled.
model = myModel()
if CUDA:
    model = model.cuda()


# -- optimizer -----------------------------------------------------------
# 设置优化器的参数
def get_optim(model, epoch):
    """Build an optimizer over the recommender's parameters only.

    The faster-rcnn feature extractors are excluded: only parameters whose
    name starts with ``rec_model`` are trained.

    Args:
        model: the full ``myModel`` instance.
        epoch: unused; kept for interface compatibility with the caller.

    Returns:
        A ``torch.optim`` optimizer chosen by the module-level ``opt_sele``:
        'Adagrad' selects Adagrad, anything else (including 'Adam') Adam.
    """
    rec_params = [v for k, v in model.named_parameters()
                  if k.startswith('rec_model')]  # len: 62
    params_model = [{'params': rec_params}]

    if opt_sele == 'Adagrad':
        return optim.Adagrad(params_model, lr=learning_rate, weight_decay=1e-5)
    # The original duplicated this exact Adam construction in both the 'Adam'
    # branch and the fallback else branch — collapsed into one default path.
    return optim.Adam(params_model, lr=learning_rate, betas=(0.9, 0.999),
                      eps=1e-08, weight_decay=1e-5, amsgrad=False)


# -- Loss ------------------------------------------------------------------
def loss_function(pos_scores):
    """Linear loss: drives the (squashed) positive score toward 1."""
    loss = 1 - pos_scores
    return loss


# -- training ------------------------------------------------------------------
def train(epoch, decay):
    """Run one training epoch and append loss logs under ``result_path``.

    Relies on module-level globals: ``model``, ``optimizer``, ``train_loader``,
    ``learning_rate``, ``result_path`` and ``CUDA``.

    Args:
        epoch: 1-based epoch number (used for logging only).
        decay: accepted but unused inside this function.
    """
    print('Training starts..')
    # toggle model to train mode
    model.train()

    # initialize loss
    train_loss = 0
    auc_total = 0.0        # NOTE(review): never accumulated below
    precision_total = 0    # NOTE(review): unused
    recall_total = 0       # NOTE(review): unused

    total_time = time.time()

    for batch_idx, [index_pos, image_path, video_path] in enumerate(train_loader):
        #         ipdb.set_trace()
        start_time = time.time()
        # NOTE(review): the "negative" index is just a copy of the positive
        # one, and number_neg_sample_train is never used — confirm intended.
        index_neg = index_pos.clone().detach()

        if CUDA:
            index_pos = index_pos.cuda()
            index_neg = index_neg.cuda()

        # BATCH_SIZE is 1, so [0] picks the single sample's paths
        scores = model(index_pos, index_neg, video_path[0], image_path[0])
        loss = loss_function(scores)
        # NOTE(review): ElogSigm wraps its result in torch.tensor(), which
        # detaches the autograd graph; this flag lets backward() run, but
        # gradients do not flow into rec_model through `scores` — verify that
        # training actually updates the recommender.
        loss.requires_grad_(True)
        optimizer.zero_grad()
        loss.backward()

        train_loss += loss.item()
        optimizer.step()

        # per-batch progress to stdout
        sys.stdout.write("\r [LR: %.6f] [Train Epoch: %d] [%d/%d] [Loss: %.4f] [Time: %.3f] [Total_Time: %.3f] \n" % (
        learning_rate, epoch, (batch_idx + 1), len(train_loader), loss, round((time.time() - start_time), 4),
        round((time.time() - total_time), 4)))

        # per-batch loss log (append mode)
        with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
            file.write(
                'LR: {} | Train Epoch: {} [{}/{} ] | Loss: {:.4f} | Time:{} | Total_Time:{}\n'.format(
                    learning_rate, epoch, (batch_idx + 1), len(train_loader), loss.item(),
                    round((time.time() - start_time), 4), round((time.time() - total_time), 4)))

    # epoch-average loss appended to the same per-batch log
    with io.open(result_path + 'model_batch_train_loss.txt', 'a', encoding='utf-8') as file:
        # print('write in-epoch loss at epoch {} | batch {}'.format(epoch,batch_idx))
        file.write('====> LR: {} | Epoch: {} | Average loss: {:.4f} | Time:{}'.format(
            learning_rate, epoch, train_loss / len(train_loader),
            round((time.time() - total_time), 4)))

    # epoch-level summary in a separate file
    with io.open(result_path + 'train_loss.txt', 'a', encoding='utf-8') as file:
        # print('write in-epoch loss at epoch {} | batch {}'.format(epoch,batch_idx))
        file.write('====> LR: {} | Epoch: {} | Average loss: {:.4f} | Time:{}\n'.format(
            learning_rate, epoch, train_loss / len(train_loader), round((time.time() - total_time), 4)))


# -- lr ------------------------------------------------------------------
def lr_scheduler(optimizer, init_lr, epoch, lr_decay_iter, decay_rate=None):
    """Step-decay the learning rate every ``lr_decay_iter`` epochs.

    Args:
        optimizer: optimizer whose ``param_groups`` learning rate is updated
            in place on decay epochs.
        init_lr: the current learning rate.
        epoch: 1-based epoch index; decay fires when divisible by
            ``lr_decay_iter``.
        lr_decay_iter: decay period in epochs.
        decay_rate: multiplicative decay factor; defaults to the module-level
            ``lr_decay_rate`` (new optional parameter, backward compatible).

    Returns:
        The (possibly decayed) learning rate.
    """
    if epoch % lr_decay_iter:
        # not a decay epoch — leave the lr untouched
        return init_lr

    if decay_rate is None:
        decay_rate = lr_decay_rate

    # multiply by the configured factor (the original comment claimed a fixed
    # 0.1 drop, but the configured rate has always been what's applied)
    lr = init_lr * decay_rate
    # update every param group, not just the first, for robustness
    for group in optimizer.param_groups:
        group['lr'] = lr

    return lr


# -----------test--------------------------------------------------------
def test(video_list_test, image_list_test, model_index):
    """Evaluate retrieval accuracy on the test split and append it to disk.

    For every test video, scores all candidate test images through
    ``model.rec_model``, keeps the top-100 matches, and counts how often the
    ground-truth image (same id as the video) lands in the top-1/10/100.

    Args:
        video_list_test: pre-computed video embeddings, indexable by test id
            (presumably shape [num_test, 4096] — TODO confirm).
        image_list_test: pre-computed image embeddings for all candidates.
        model_index: epoch number, used only in the log line.
    """
    candidate_image_index_list = {}
    print('Start test...')
    model.eval()
    test_id_1 = [id for id in range(len(test_allids))]
    test_id = test_id_1
    test_index = test_id
    # all candidate image ids, as a tensor
    sample_ids = torch.tensor(test_index).squeeze()

    if CUDA:
        sample_ids = sample_ids.cuda()

    started_time = time.time()

    for index in range(len(test_allids)):
        # create the user input
        video_id = torch.from_numpy(np.array(index)).unsqueeze(0)
        # repeat the single video id so it pairs with every candidate image
        video_id = my_repeat(video_id, len(test_allids))
        video_text = video_list_test[video_id]

        if CUDA:
            video_text = video_text.cuda()
            video_id = video_id.cuda()

        scores = model.rec_model(video_id, sample_ids, video_text, image_list_test)

        # NOTE(review): 15000 is hard-coded here and in the accuracy divisors
        # below — confirm it equals len(test_allids).
        scores = scores[:15000]
        candidate_image_index_list[index] = top_match(scores, test_index, 100)
    correct_res_top1 = 0
    correct_res_top10 = 0
    correct_res_top100 = 0
    #     ipdb.set_trace()
    # the correct image for video k is the one sharing id k
    for k, v in candidate_image_index_list.items():
        if int(k) in v[0]:
            correct_res_top1 += 1
            correct_res_top10 += 1
            correct_res_top100 += 1
            continue
        elif int(k) in v[:10]:
            correct_res_top10 += 1
            correct_res_top100 += 1
            continue
        elif int(k) in v:
            correct_res_top100 += 1
            continue
    #     print(correct_res_top1)
    #     print(correct_res_top10)
    #     print(correct_res_top100)
    acu1 = correct_res_top1 / 15000
    acu10 = correct_res_top10 / 15000
    acu100 = correct_res_top100 / 15000
    with io.open(result_path + 'model_acu.txt', 'a', encoding='utf-8') as file:
        file.write(
            '===>  EPOCH: {} | ACUtop1: {:.7f} | ACUtop10: {:.7f} | ACUtop100: {:.7f} \n'.format(model_index, acu1,
                                                                                                 acu10, acu100))


# -- training process ----------------------------------------------------------------
best_loss = 0  # NOTE(review): written once and never read or updated below
for epoch in range(1, EPOCHS + 1):
    # NOTE(review): a fresh optimizer is built every epoch, which resets
    # Adam's moment estimates each time — confirm this is intended.
    optimizer = get_optim(model, epoch)
    learning_rate = lr_scheduler(optimizer, learning_rate, epoch, decay)
    print(learning_rate)
    train(epoch, decay)
    # checkpoint after every epoch, then evaluate on the test embeddings
    torch.save(model.state_dict(), result_path + 'model-{}.pt'.format(epoch))
    start_time = time.time()
    test(test_videoEmbs, test_imageEmbs, epoch)
