import torch
import torch.nn as nn
from torch_geometric.nn.conv.rgcn_conv import RGCNConv

class PaG(nn.Module):
    """Past-aware graph module.

    For each conversation in the batch, builds a relation graph that connects
    the last utterance (index ``conv_len[i] - 1``) to itself and to every
    preceding utterance, labels each edge by windowed relative position
    (see ``rel_adj_create``), and aggregates utterance features with an RGCN.
    """

    def __init__(self, opt, window, utter_dim, num_base):
        """
        Args:
            opt: options namespace; only ``opt.device`` is read here.
            window: relative-position window size; past distances beyond it
                share a single clamped relation label.
            utter_dim: utterance feature dimension (RGCN in == out channels).
            num_base: number of bases for RGCNConv's basis decomposition.
        """
        super().__init__()
        self.opt = opt
        self.window = window
        self.utter_dim = utter_dim
        self.num_base = num_base
        # NOTE(review): rel_adj_create emits labels in {1, -1, ..., -(window+1)}
        # — window + 2 distinct values — while num_relations is only `window`;
        # negative edge types index the relation weights from the end. Confirm
        # this aliasing is intended.
        self.num_relations = self.window
        # Fix: `num_base` was accepted but never used; forward it so the
        # basis-decomposition regularisation actually takes effect.
        self.rgcn = RGCNConv(utter_dim, utter_dim,
                             num_relations=self.num_relations,
                             num_bases=self.num_base)

    def forward(self, x, conv_len):
        """Run the per-conversation relation graphs through the RGCN.

        Args:
            x: utterance features, shape (batch, utter_len, utter_dim)
               — TODO confirm layout against caller.
            conv_len: per-sample number of valid utterances (length batch).

        Returns:
            Tensor of shape (batch, utter_len, utter_dim).
        """
        batch_size, utter_len, _ = x.shape

        # Relative positions: rel_adj_pre[t][s] = s - t (overwritten below).
        src_pos = torch.arange(utter_len).unsqueeze(0)
        tgt_pos = torch.arange(utter_len).unsqueeze(1)
        rel_adj_pre = src_pos - tgt_pos

        rel_adj = rel_adj_create(rel_adj_pre, utter_len, self.window)
        # edge_index[:, i, :] holds all edges (i -> 0..utter_len-1).
        edge_index = index_create(utter_len).view(2, utter_len, -1)

        self.rgcn.to(self.opt.device)
        outputs = []
        for i in range(batch_size):
            last = conv_len[i] - 1
            # Edges from the last valid utterance to utterances 0..last,
            # with their windowed relative-position labels.
            edge_type = rel_adj[last][:last + 1].long().to(self.opt.device)
            cur_edge_index = edge_index[:, last, :last + 1].to(self.opt.device)
            outputs.append(self.rgcn(x[i], cur_edge_index, edge_type))
        return torch.stack(outputs, dim=0)


def rel_adj_create(rel_adj, slen, window):
    """Overwrite ``rel_adj`` in place with relative-position relation labels.

    Every entry is rewritten, so only the shape of the input matters:
      * future utterances (o > i) all share label 1 (Eq. 5 of the paper);
      * the diagonal (self edge) gets -1;
      * past utterances get -ceil((i - o) / 2) — each adjacent
        (other, self) pair of preceding utterances shares one label —
        clamped below at -(window + 1) once the distance leaves the window.

    Returns the same (mutated) ``rel_adj`` object.
    """
    for i in range(slen):
        for o in range(slen):
            if o > i:
                rel_adj[i][o] = 1
            elif o == i:
                rel_adj[i][o] = -1
            else:
                # -ceil((i - o) / 2), capped at the window boundary.
                rel_adj[i][o] = -min((i - o + 1) // 2, window + 1)
    return rel_adj


def index_create(slen):
    """Build the edge index of a fully connected graph over ``slen`` nodes.

    Returns a long tensor of shape (2, slen * slen): row 0 repeats each
    source node slen times ([0,0,...,0,1,1,...]), row 1 cycles through all
    target nodes ([0,1,...,slen-1,0,1,...]).
    """
    sources = [node for node in range(slen) for _ in range(slen)]
    targets = list(range(slen)) * slen
    return torch.tensor([sources, targets], dtype=torch.long)


if __name__ == '__main__':
    # Manual smoke test: build the relation matrix and edge index for a
    # 26-utterance conversation, then slice them the same way PaG.forward
    # does for two sample conversation lengths.
    seq_len = 26
    position_diff = torch.arange(seq_len).unsqueeze(0) - torch.arange(seq_len).unsqueeze(1)

    relations = rel_adj_create(position_diff, seq_len, 10)
    edges = index_create(seq_len).view(2, seq_len, -1)

    for length in [1, 5]:
        last = length - 1
        sliced_rel = relations[last][:last + 1]
        sliced_edges = edges[:, last, :last + 1]
        print(1)
    print(1)






