from abc import ABC, abstractmethod
from typing import Optional
from torch.utils.data.dataset import TensorDataset
from tqdm import tqdm
import math
import config
import acquisition
import time
import numpy as np
import os
from io import open
import csv
import pandas as pd
import random
import torch
import Batch
from torch import optim, nn
from torch.utils.data import DataLoader, dataloader
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import torch.nn.functional as F

class MLPrefetchModel(ABC):
    '''
    Abstract base class for your models. For HW-based approaches such as the
    NextLineModel below, you can directly add your prediction code. For ML
    models, you may want to use it as a wrapper, but alternative approaches
    are fine so long as the behavior described below is respected.

    BUG FIX: the class previously inherited from `object`, which makes
    `@abstractmethod` purely decorative -- subclasses missing a method would
    still instantiate. Inheriting from `ABC` (already imported at the top of
    the file) makes the contract enforced.
    '''

    @abstractmethod
    def load(self, path):
        '''
        Loads your model from the filepath path
        '''

    @abstractmethod
    def save(self, path):
        '''
        Saves your model to the filepath path
        '''

    @abstractmethod
    def train(self, data):
        '''
        Train your model here. No return value. The data parameter is in the
        same format as the load traces. Namely,
        Unique Instr Id, Cycle Count, Load Address, Instruction Pointer of the Load, LLC hit/miss
        '''

    @abstractmethod
    def generate(self, data):
        '''
        Generate your prefetches here. Remember to limit yourself to 2 prefetches
        for each instruction ID and to not look into the future :).

        The return format for this will be a list of tuples containing the
        unique instruction ID and the prefetch. For example,
        [
            (A, A1),
            (A, A2),
            (C, C1),
            ...
        ]

        where A, B, and C are the unique instruction IDs and A1, A2 and C1 are
        the prefetch addresses.
        '''

class NextLineModel(MLPrefetchModel):
    """Hardware-style baseline: always prefetch the next two cache lines.

    The heuristic is stateless, so load/save/train only log that they were
    invoked.
    """

    def load(self, path):
        # Nothing to restore for a stateless heuristic.
        print('Loading ' + path + ' for NextLineModel')

    def save(self, path):
        # Nothing to persist for a stateless heuristic.
        print('Saving ' + path + ' for NextLineModel')

    def train(self, data):
        # No trainable parameters; the trace (same format as the load
        # traces: Unique Instr Id, Cycle Count, Load Address, IP of the
        # Load, LLC hit/miss) is ignored.
        print('Training NextLineModel')

    def generate(self, data):
        """Return [(instr_id, prefetch_addr), ...] — the two cache lines
        following each demand access.

        Addresses are truncated to 64-byte line granularity (>> 6 / << 6),
        which also respects the two-prefetches-per-instruction limit.
        """
        print('Generating for NextLineModel')
        out = []
        for instr_id, _cycles, load_addr, _ip, _hit in data:
            line = load_addr >> 6
            out.append((instr_id, (line + 1) << 6))
            out.append((instr_id, (line + 2) << 6))
        return out

# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
    r"""Add fixed sinusoidal position information to token embeddings.

    The encoding has the same dimension as the embeddings so the two can be
    summed:

    .. math::
        \text{PosEncoder}(pos, 2i)   = sin(pos/10000^{2i/d\_model})
        \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^{2i/d\_model})

    where ``pos`` is the position in the sequence and ``i`` the embedding
    index.

    Args:
        d_model: the embed dim (required).
        dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=5000).
    Examples:
        >>> pos_encoder = PositionalEncoding(d_model)
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        # Dropout is applied to the sum of embedding and position signal.
        self.dropout = nn.Dropout(p=dropout)
        # Column vector of positions 0..max_len-1.
        positions = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder over the even embedding indices.
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        # Table shaped (max_len, 1, d_model) so it broadcasts over the batch
        # dimension of a (seq, batch, dim) input.
        table = torch.zeros(max_len, 1, d_model)
        table[:, 0, 0::2] = torch.sin(positions * freqs)
        table[:, 0, 1::2] = torch.cos(positions * freqs)
        # Registered as a (persistent) buffer: moves with .to(device), saved
        # in state_dict, but not a learnable parameter.
        self.register_buffer('pe', table)

    def forward(self, x):
        r"""Add positional encodings to ``x`` and apply dropout.

        Shape:
            x: [sequence length, batch size, embed dim]
            output: [sequence length, batch size, embed dim]
        Examples:
            >>> output = pos_encoder(x)
        """
        return self.dropout(x + self.pe[:x.size(0), :])

class TransformerModel(nn.Module):
    """Encoder-only Transformer predicting the next address-delta token.

    Each input position carries two token ids: a program-counter (pc) token
    and a delta token. Both are embedded with ``ninp`` dimensions and the
    embeddings are concatenated, so the encoder operates on ``ninp * 2``
    features. The decoder projects onto the delta vocabulary and the forward
    pass returns log-probabilities.

    Args:
        ntoken_pc: size of the pc vocabulary.
        ntoken_delta: size of the delta vocabulary.
        ninp: embedding dimension of each of the two token streams.
        nhead: number of attention heads (must divide ``ninp * 2``).
        nhid: dimension of the encoder feed-forward layer.
        nlayers: number of stacked encoder layers.
        dropout: dropout probability (default 0.5).
    """

    def __init__(self, ntoken_pc, ntoken_delta, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        try:
            from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except ImportError:  # narrowed from a bare `except`
            raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
        self.model_type = 'Transformer'
        self.src_mask = None  # causal mask, built lazily in forward()
        self.pos_encoder = PositionalEncoding(ninp * 2, dropout)
        encoder_layers = TransformerEncoderLayer(ninp * 2, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder_pc = nn.Embedding(ntoken_pc, ninp)
        self.encoder_delta = nn.Embedding(ntoken_delta, ninp)
        self.ninp_2x = ninp * 2
        self.decoder = nn.Linear(ninp * 2, ntoken_delta)

        self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        # Causal mask: position i may attend only to positions <= i.
        # Allowed entries are 0.0, disallowed entries are -inf.
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def init_weights(self):
        initrange = 0.1
        nn.init.uniform_(self.encoder_pc.weight, -initrange, initrange)
        nn.init.uniform_(self.encoder_delta.weight, -initrange, initrange)
        # BUG FIX: the original zeroed decoder.weight and then immediately
        # overwrote it with the uniform init (dead statement). The intended
        # target of the zero init is the decoder *bias*, per the standard
        # PyTorch word-language-model initialization pattern.
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, src, has_mask=True):
        """Compute per-position log-probabilities over the delta vocabulary.

        Args:
            src: LongTensor of shape [seq_len, batch, 2]; slice [..., 0] holds
                pc token ids, slice [..., 1] holds delta token ids.
            has_mask: apply (and cache) the causal mask when True.

        Returns:
            Tensor of shape [seq_len, batch, ntoken_delta] with log-softmax
            scores.
        """
        if has_mask:
            device = src.device
            # Rebuild the cached mask only when the sequence length changes.
            if self.src_mask is None or self.src_mask.size(0) != len(src):
                mask = self._generate_square_subsequent_mask(len(src)).to(device)
                self.src_mask = mask
        else:
            self.src_mask = None
        # Concatenate pc and delta embeddings and scale, following the
        # "Attention Is All You Need" sqrt(d_model) embedding scaling.
        src = torch.cat((self.encoder_pc(src[:, :, 0]), self.encoder_delta(src[:, :, 1])), 2) * math.sqrt(self.ninp_2x)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
        output = self.decoder(output)
        return F.log_softmax(output, dim=-1)

class TransformerModelPrefetcher(MLPrefetchModel):
    """ML prefetcher that models the stream of (pc, delta) tokens with a
    Transformer encoder and prefetches ``load_addr + predicted_delta``.

    Hyperparameters come from the project-level ``config`` module; trace
    tokenization is delegated to ``acquisition`` and batching to ``Batch``.
    """

    def __init__(self):
        self.emsize = config.TRANSFORMER_ENCODER_EMSIZE
        self.nhead = config.TRANSFORMER_ENCODER_NHEAD
        self.nhid: int = config.TRANSFORMER_ENCODER_NHID
        self.nlayers = config.TRANSFORMER_ENCODER_NLAYERS
        super().__init__()
        # Vocabulary sizes are persisted to this file by train() so that
        # load() can rebuild the model without re-reading the trace.
        self.filename = 'ntokens_data.txt'
        self.device = config.device
        # NOTE(review): this criterion is never used (train1()/evaluate()
        # build their own NLLLoss); kept only to avoid changing the object's
        # public attributes.
        self.criterion = nn.MSELoss().to(self.device)

    def load(self, path):
        """Rebuild the model from the saved vocabulary sizes and load its
        weights from ``path``."""
        with open(self.filename) as f:
            self.ntokens_pc = int(f.readline())
            self.ntoken_delta = int(f.readline())
        self.model = TransformerModel(self.ntokens_pc, self.ntoken_delta, self.emsize,
                                      self.nhead, self.nhid, self.nlayers).double().to(self.device)
        self.model.load_state_dict(torch.load(path))

    def save(self, path):
        """Save the model weights to ``path`` (vocabulary sizes were already
        written to self.filename during train())."""
        torch.save(self.model.state_dict(), path)

    def log_loss(self, loss, loss_type):
        """Persist a loss history tensor under ./Graphs for later plotting."""
        # NOTE(review): self.current_trace is never assigned in this class
        # (the assignment in __init__ is commented out upstream), so calling
        # this raises AttributeError -- confirm the caller sets it first.
        loss_tensor = torch.tensor(loss)
        torch.save(loss_tensor, "./Graphs/{}_transformer_{}_losses.pt".format(self.current_trace, loss_type))

    def evaluate(self, data_source):
        """Return (mean NLL loss, accuracy) over ``data_source`` with dropout
        disabled and no gradient tracking."""
        self.model.eval()
        total_loss = 0.
        total_acc = 0.
        ntokens_delta = len(self.corpus.dictionary_delta)
        criterion = nn.NLLLoss()  # hoisted: was rebuilt on every batch
        with torch.no_grad():
            for i in range(0, data_source.size(0) - 1, config.BPTT):
                batch_data, targets = Batch.get_batch(data_source, i)
                # BUG FIX: removed the dead RNN branch that referenced the
                # undefined names `data`, `hidden` and `repackage_hidden`;
                # self.model is always a TransformerModel here.
                output = self.model(batch_data).view(-1, ntokens_delta)
                total_loss += batch_data.size(0) * criterion(output, targets).item()
                total_acc += (output.argmax(dim=1) == targets).sum().item()
        return (total_loss / (data_source.size(0) - 1),
                total_acc / ((data_source.size(0) - 1) * data_source.size(1)))

    def train1(self):
        """Run one training epoch over self.train_acq.train_data using a
        manual SGD step with gradient clipping; logs progress every
        config.LOGINTERVAL batches."""
        self.model.train()
        total_loss = 0.
        total_acc = 0.
        start_time = time.time()
        # evaluate() reads self.corpus, so publish it here.
        self.corpus = self.train_acq.corpus
        train_data = self.train_acq.train_data
        ntokens_delta = self.train_acq.TRANSFORMER_ENCODER_NTOKENS_DELTA
        criterion = nn.NLLLoss()  # hoisted: was rebuilt on every batch
        log_interval = config.LOGINTERVAL
        for batch, i in enumerate(range(0, train_data.size(0) - 1, config.BPTT)):
            data, targets = Batch.get_batch(train_data, i)
            self.model.zero_grad()
            output = self.model(data).view(-1, ntokens_delta)
            loss = criterion(output, targets)
            loss.backward()
            # Clip gradients to keep the manual SGD step stable
            # (helps prevent the exploding-gradient problem).
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), config.CLIP)
            # Manual SGD update; no torch.optim optimizer is used.
            for p in self.model.parameters():
                p.data.add_(p.grad, alpha=-self.lr)

            total_loss += loss.item()
            total_acc += (output.argmax(dim=-1) == targets).sum().item()
            if batch % log_interval == 0 and batch > 0:
                cur_loss = total_loss / log_interval
                cur_acc = total_acc / ((train_data.size(0) - 1) * train_data.size(1))
                elapsed = time.time() - start_time
                print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                        'loss {:5.2f} | ppl {:8.2f}| acc {:8.4f}'.format(
                    self.epoch, batch, train_data.size(0) // config.BPTT, self.lr,
                    elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss), cur_acc))
                total_loss = 0
                start_time = time.time()

    def train(self, loader, iterations=None):
        """Tokenize the trace, train for config.EPOCHS epochs, checkpoint the
        best model (by validation loss) to config.SAVE, and return the best
        validation accuracy seen."""
        self.train_acq = acquisition.train_acq(loader)
        self.ntokens_pc = self.train_acq.TRANSFORMER_ENCODER_NTOKENS_PC
        self.ntoken_delta = self.train_acq.TRANSFORMER_ENCODER_NTOKENS_DELTA
        # Persist the vocabulary sizes so load() can rebuild the model.
        with open(self.filename, 'w') as f:
            f.write(str(self.ntokens_pc) + "\n")
            f.write(str(self.ntoken_delta))
        device = config.device
        self.model = TransformerModel(self.ntokens_pc, self.ntoken_delta, self.emsize,
                                      self.nhead, self.nhid, self.nlayers).double().to(device)
        # BUG FIX: max_acc was (re)initialized inside the epoch loop, so the
        # function returned the *last* epoch's accuracy rather than the best,
        # and raised UnboundLocalError if training was interrupted during the
        # first epoch. Initialize it once, before the try block.
        max_acc = 0.
        try:
            best_val_loss = None
            self.lr = config.LR
            for self.epoch in range(1, config.EPOCHS + 1):
                epoch_start_time = time.time()
                self.train1()
                val_loss, val_acc = self.evaluate(self.train_acq.val_data)
                print('-' * 96)
                print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                        'valid ppl {:8.2f} | valid acc {:8.4f}'.format(self.epoch, (time.time() - epoch_start_time),
                                           val_loss, math.exp(val_loss), val_acc))
                print('-' * 96)
                # Checkpoint whenever the validation loss improves.
                if not best_val_loss or val_loss < best_val_loss:
                    with open(config.SAVE, 'wb') as f:
                        torch.save(self.model, f)
                    best_val_loss = val_loss
                else:
                    # No improvement: apply 1/t learning-rate decay,
                    # lr = LR / (1 + decay * epoch).
                    initial_lrate = config.LR
                    decay = 0.01
                    self.lr = initial_lrate * (1 / (1 + decay * self.epoch))
                if max_acc < val_acc:
                    max_acc = val_acc
        except KeyboardInterrupt:
            print('-' * 96)
            print('Exiting from training early')
        return max_acc

    def generate(self, data_s):
        """Return [(instr_id, prefetch_addr), ...] by running the trained
        model over ``data_s`` and adding each predicted delta to the
        corresponding load address."""
        self.generate_acq = acquisition.generate_acq(data_s)
        gen_data = self.generate_acq.generate_data
        torch.manual_seed(config.SEED)
        device = config.device
        if config.TEMPERATURE < 1e-3:
            # BUG FIX: the original called parser.error() but no `parser`
            # exists in this scope (NameError); raise a real exception with
            # the same message instead.
            raise ValueError("--temperature has to be greater or equal 1e-3")
        with open(config.SAVE, 'rb') as f:
            self.model = torch.load(f).to(device)
        self.model.eval()
        ntokens_delta = self.ntoken_delta
        deltas = self.generate_acq.corpus.glossary_delta.Deltas
        prefetches = []
        with torch.no_grad():
            m = 0
            print("data_s1.size(0)", gen_data.size(0))
            for i in range(0, gen_data.size(0) - 1, config.BPTT):
                data, targets = Batch.get_batch(gen_data, i)
                # (the original called self.model.zero_grad() here; it is a
                # no-op under torch.no_grad() and was dropped)
                output = self.model(data).view(-1, ntokens_delta)
                preds = output.argmax(dim=1)
                for j in range(0, preds.size(0) - 1):
                    (instr_id, cycle_count, load_addr, load_ip, llc_hit) = data_s[m]
                    (instr_id1, cycle_count1, load_addr1, load_ip1, llc_hit1) = data_s[m + 1]
                    m = m + 1
                    delta = deltas[preds[j].item()]
                    # `delta == delta` is False only for NaN (a delta token
                    # missing from the glossary); fall back to the raw load
                    # address in that case.
                    if delta == delta:
                        prefetches.append((instr_id1, int(load_addr + int(delta))))
                    else:
                        prefetches.append((instr_id1, int(load_addr + 0)))
        return prefetches

'''
# Example PyTorch Model
import torch
import torch.nn as nn

class PytorchMLModel(nn.Module):

    def __init__(self):
        super().__init__()
        # Initialize your neural network here
        # For example
        self.embedding = nn.Embedding(...)
        self.fc = nn.Linear(...)

    def forward(self, x):
        # Forward pass for your model here
        # For example
        return self.relu(self.fc(self.embedding(x)))

class TerribleMLModel(MLPrefetchModel):
    """
    This class effectively functions as a wrapper around the above custom
    pytorch nn.Module. You can approach this in another way so long as the
    load/save/train/generate functions behave as described above.

    Disclaimer: It's terrible since the below criterion assumes a gold Y label
    for the prefetches, which we don't really have. In any case, the below
    structure more or less shows how one would use a ML framework with this
    script. Happy coding / researching! :)
    """

    def __init__(self):
        self.model = PytorchMLModel()
    
    def load(self, path):
        self.model.load_state_dict(torch.load(path))

    def save(self, path):
        torch.save(self.model.state_dict(), path)

    def train(self, data):
        # Just standard run-time here
        self.model.train()
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.model.parameters())
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=0.1)
        for epoch in range(20):
            # Assuming batch(...) is a generator over the data
            for i, (x, y) in enumerate(batch(data)):
                y_pred = self.model(x)
                loss = criterion(y_pred, y)

                if i % 100 == 0:
                    print('Loss:', loss.item())

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            scheduler.step()

    def generate(self, data):
        self.model.eval()
        prefetches = []
        for i, (x, _) in enumerate(batch(data, random=False)):
            y_pred = self.model(x)
            
            for xi, yi in zip(x, y_pred):
                # Where instr_id is a function that extracts the unique instr_id
                prefetches.append((instr_id(xi), yi))

        return prefetches
'''

# Replace this if you create your own model
# `Model` is the class this module exports; presumably the external driver
# script instantiates `Model()` -- confirm against the caller. NextLineModel
# is the simple baseline alternative.
Model = TransformerModelPrefetcher
