import pickle

import torch
from torch import optim
from torch.utils.data import DataLoader
from tqdm import tqdm

from metapath2vec.src.data_loader.aminer_data_loader import DataReader, Metapath2vecDataset
from metapath2vec.src.data_loader.download import AminerDataset, CustomDataset
from metapath2vec.src.model.model import SkipGramModel
from metapath2vec.src.trainer.parser import get_parser_args


def train(args):
    """Train a skip-gram (metapath2vec) model over metapath random walks.

    Args:
        args: parsed CLI namespace. Fields read here: ``aminer``, ``path``,
            ``min_count``, ``care_type``, ``window_size``, ``batch_size``,
            ``num_workers``, ``output_file``, ``dim``, ``iterations``,
            ``initial_lr``.

    Side effects:
        Writes the learned node embeddings to ``args.output_file`` after
        every epoch (the file is overwritten each time).
    """
    if args.aminer:
        dataset = AminerDataset(args.path)
    else:
        # Custom corpus: args.path points at the metapath walk file
        # (e.g. the NetDBIS dataset in this project).
        dataset = CustomDataset(args.path)
    # Build the vocabulary and negative-sampling tables from the walks.
    data = DataReader(dataset, args.min_count, args.care_type)
    # Wrap into a Dataset yielding (center, context, negatives) triples.
    dataset = Metapath2vecDataset(data, args.window_size)
    dataloader = DataLoader(dataset, batch_size=args.batch_size,
                            shuffle=True, num_workers=args.num_workers,
                            collate_fn=dataset.collate)

    output_file_name = args.output_file
    # emb_size: number of distinct nodes N; emb_dimension: width (e.g. 128).
    emb_size = len(data.word2id)
    emb_dimension = args.dim
    iterations = args.iterations
    initial_lr = args.initial_lr
    skip_gram_model = SkipGramModel(emb_size, emb_dimension)

    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # .to(device) is a no-op on CPU, so no separate .cuda() branch is needed.
    skip_gram_model.to(device)

    # SparseAdam: SkipGramModel presumably uses sparse embedding gradients,
    # which dense Adam cannot handle — TODO confirm against the model code.
    optimizer = optim.SparseAdam(list(skip_gram_model.parameters()), lr=initial_lr)
    # Cosine-anneal the LR over one epoch's worth of batches.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(dataloader))

    skip_gram_model.train()
    for iteration in range(iterations):
        print("\n\n\nIteration: " + str(iteration + 1))

        running_loss = 0.0
        # enumerate(tqdm(...)) so tqdm sees the dataloader and can show a total.
        for i, sample_batched in enumerate(tqdm(dataloader)):

            # Skip degenerate batches with fewer than two positive pairs.
            if len(sample_batched[0]) > 1:
                pos_u = sample_batched[0].to(device)
                pos_v = sample_batched[1].to(device)
                neg_v = sample_batched[2].to(device)

                optimizer.zero_grad()
                # Call the module (not .forward) so hooks run; returns the
                # negative-sampling loss for this batch.
                loss = skip_gram_model(pos_u, pos_v, neg_v)
                loss.backward()
                optimizer.step()
                # Fix: since PyTorch 1.1 the scheduler must step AFTER the
                # optimizer, otherwise the first LR value is skipped.
                scheduler.step()

                # Exponential moving average of the loss for logging.
                running_loss = running_loss * 0.9 + loss.item() * 0.1
                if i > 0 and i % 500 == 0:
                    print(" Loss: " + str(running_loss))

        # Checkpoint the embeddings at the end of every epoch.
        skip_gram_model.save_embedding(data.id2word, output_file_name)


def main():
    """Entry point: parse CLI options, run training, report completion."""
    cli_args = get_parser_args()
    print('training start!')
    train(cli_args)
    print('training finished!')


# Run training only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
