
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torch.optim import Adam
from tqdm import tqdm
import pickle
import itertools

import numpy as np
from GCN import GCNs, GCNSelfLoss
from utilities import seq_to_adjacency

# Base (nucleotide) k-mer vocabulary and initial features
class BaseInfo:
    """Vocabulary of all length-k DNA/RNA substrings (k-mers).

    Assigns every possible k-mer a stable integer index (in both directions:
    substring -> index and index -> substring) and provides one-hot initial
    features for the k-mer nodes of a sequence graph.
    """

    def __init__(self, k: int = 3) -> None:
        self.k = k
        # s2i: substring -> index; i2s: index -> substring.
        self.__DNA_s2i = {}
        self.__DNA_i2s = {}
        self.__RNA_s2i = {}
        self.__RNA_i2s = {}
        # Enumerate every possible DNA k-mer; enumeration order fixes the index.
        for i, j in enumerate(itertools.product('TCGA', repeat=k)):
            sub_str = ''.join(j)
            self.__DNA_s2i[sub_str] = i
            self.__DNA_i2s[i] = sub_str
        # Same for RNA (U in place of T); indices line up with the DNA ones.
        for i, j in enumerate(itertools.product('UCGA', repeat=k)):
            sub_str = ''.join(j)
            self.__RNA_s2i[sub_str] = i
            self.__RNA_i2s[i] = sub_str
        # Number of possible k-mers (4 ** k), identical for DNA and RNA.
        self.num = len(self.__DNA_i2s)
        # Initial k-mer features are one-hot: because the loops above fixed a
        # total order on the k-mers, row i of the identity matrix is k-mer i.
        self.__DNAf = torch.eye(self.num, dtype=torch.float32)
        self.__RNAf = torch.eye(self.num, dtype=torch.float32)

    def __getitem__(self, key, _key=None):
        """Look up a k-mer by index, or an index by k-mer.

        Usage: ``info['dna', 'TTT']`` or ``info['rna', 0]`` — subscription
        always passes the pair as one tuple.  The legacy direct call
        ``info.__getitem__('dna', 'TTT')`` keeps working via ``_key``.
        (The original two-positional-parameter signature could never be
        reached through ``[]`` syntax, which passes a single key object.)
        Returns None for an unknown nucleic-acid type, mirroring the
        original behavior.
        """
        if _key is not None:
            kind, value = key, _key
        elif isinstance(key, tuple) and len(key) == 2:
            kind, value = key
        else:
            raise TypeError("expected a ('dna'|'rna', key) pair")
        if kind in ('dna', 'DNA'):
            return self.dna(value)
        if kind in ('rna', 'RNA'):
            return self.rna(value)
        return None

    def __len__(self):
        # Number of k-mers in the vocabulary (== self.num).
        return len(self.__DNA_i2s)

    def dna(self, key):
        """int -> k-mer string; str -> index; None for other key types."""
        if isinstance(key, int):
            return self.__DNA_i2s[key]
        if isinstance(key, str):
            return self.__DNA_s2i[key]

    def rna(self, key):
        """int -> k-mer string; str -> index; None for other key types."""
        if isinstance(key, int):
            return self.__RNA_i2s[key]
        if isinstance(key, str):
            return self.__RNA_s2i[key]

    def get_dna_s2i(self):
        return self.__DNA_s2i

    def get_dna_i2s(self):
        return self.__DNA_i2s

    def get_rna_s2i(self):
        return self.__RNA_s2i

    def get_rna_i2s(self):
        return self.__RNA_i2s

    def get_DNAf(self):
        """One-hot feature matrix for DNA k-mers, shape (4**k, 4**k)."""
        return self.__DNAf

    def get_RNAf(self):
        """One-hot feature matrix for RNA k-mers, shape (4**k, 4**k)."""
        return self.__RNAf

    def save(self, file_name: str):
        """Pickle this object to `file_name`."""
        with open(file_name, 'wb') as f:
            pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def load(file_name: str):
        """Unpickle a BaseInfo from `file_name`. NOTE: only load trusted files."""
        with open(file_name, 'rb') as f:
            return pickle.load(f)

def pretrain_gcns_unsupervised_with_base_seq(
        model:GCNs, base_seqS:list, feature:torch.Tensor,
        sub_index: dict, sub_len:int, epoches:int=1, batch_size:int=1, lr=1e-2) -> GCNs:
    '''
    Pre-train a GCN on raw base sequences, unsupervised.
    DNA and RNA should probably each get their own model.

    model: the GCN to train (trained in place and returned)
    base_seqS: list of base-sequence strings
    feature: initial node-feature matrix shared by every graph
    sub_index: k-mer -> node-index mapping
    sub_len: k-mer length
    batch_size: gradients are accumulated over this many sequences per step
    '''
    model.train()
    loss_fn = GCNSelfLoss()
    optimizer = Adam(model.parameters(), lr)
    optimizer.zero_grad()
    for epoch in range(epoches):
        bar = tqdm(base_seqS)
        losses = []
        pending = False  # True while accumulated gradients await a step
        for index, seq in enumerate(bar):
            # Each sequence becomes one k-mer graph; loss is self-supervised
            # reconstruction of its adjacency.
            adjacency = seq_to_adjacency(seq, sub_index, sub_len)
            output = model(feature, GCNs.get_graph(adjacency))
            loss = loss_fn(output, adjacency)
            loss.backward()
            pending = True
            losses.append(loss.item())
            bar.set_postfix_str(f'epoch[{epoch}:{epoches}]|loss[{np.mean(losses)}]|')
            # Gradient accumulation: step once every `batch_size` sequences.
            if (index+1)%batch_size == 0:
                optimizer.step()
                optimizer.zero_grad()
                pending = False
        # BUG FIX: the original silently dropped the trailing partial batch —
        # its gradients were never applied and leaked into the next epoch.
        if pending:
            optimizer.step()
            optimizer.zero_grad()
    return model

class BaseSeq2Vec(nn.Module):
    def __init__(self, input_dim:int, output_dim:int, sub_index:dict, sub_f:torch.Tensor, sub_len:int=3) -> None:
        '''
        Embeds a base sequence into a fixed-size vector via a GCN over its
        k-mer graph.

        input_dim: dimension of the input node features
        output_dim: dimension of the output features
        sub_index: k-mer -> node-index mapping
        sub_f: initial features of all possible k-mers
        sub_len: k-mer length
        '''
        super().__init__()
        self.gcn = GCNs(input_dim, output_dim)
        self.sub_len = sub_len
        self.sub_index = sub_index
        self.sub_f = sub_f
        self.trained = False  # set by pretrain()

    def _embed_one(self, seq: str, gap) -> torch.Tensor:
        # Build the k-mer graph of one sequence, run the GCN, and mean-pool
        # the node features into a single vector.
        adjacency = seq_to_adjacency(seq, self.sub_index, self.sub_len, gap)
        nodes_f = self.gcn(self.sub_f, GCNs.get_graph(adjacency))
        return torch.mean(nodes_f, dim=0)

    def forward(self, seq, gap) -> torch.Tensor:
        '''
        seq: a single sequence (str) -> 1-D embedding, or a list of sequence
        strings -> 2-D tensor of stacked embeddings (one row per sequence).
        '''
        if isinstance(seq, str):
            return self._embed_one(seq, gap)
        if isinstance(seq, list) and isinstance(seq[0], str):
            ans = [self._embed_one(i, gap).unsqueeze(0) for i in tqdm(seq)]
            return torch.concat(ans, dim=0)
        # BUG FIX: the original `raise ''` raised a bare string, producing the
        # opaque "exceptions must derive from BaseException" TypeError; raise
        # an explicit TypeError (same exception type reaches callers).
        raise TypeError(f'seq must be str or list[str], got {type(seq)!r}')

    def save(self, file_name:str):
        """Pickle this module to `file_name`."""
        with open(file_name,'wb') as f:
            pickle.dump(self,f,pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def load(file_name:str):
        """Unpickle a BaseSeq2Vec from `file_name`. NOTE: only load trusted files."""
        with open(file_name,'rb') as f:
            return pickle.load(f)

    def pretrain(self, seq, epoches:int = 1, batch_size:int = 1, lr:float = 0.01):
        """Unsupervised pre-training of the internal GCN on sequences `seq`."""
        self.gcn = pretrain_gcns_unsupervised_with_base_seq(
            self.gcn, seq, self.sub_f, self.sub_index, self.sub_len, epoches, batch_size, lr)
        self.trained = True

class TFDataSet(Dataset):
    """Pairs encoder/decoder input tensors (plus optional labels) as a torch Dataset.

    With a label tensor each item is (encoder_row, decoder_row, label_row);
    without one it is (encoder_row, decoder_row), so the same class serves
    both training and inference.
    """

    def __init__(self, encoder_in: torch.Tensor, dencoder_in: torch.Tensor, label: torch.Tensor = None) -> None:
        super().__init__()
        self.encoder_in = encoder_in
        self.dencoder_in = dencoder_in
        self.label = label

    def __len__(self):
        # One sample per leading-dimension row of the encoder input.
        return self.encoder_in.shape[0]

    def __getitem__(self, index):
        sample = (self.encoder_in[index], self.dencoder_in[index])
        if self.label is None:
            return sample
        return sample + (self.label[index],)

class RelationShipModel(nn.Module):
    """Transformer-based binary scorer over (encoder, decoder) sequence pairs.

    input_dim: feature dimension of both inputs (must be divisible by nhead=4)
    seq: sequence length of the decoder input (fixed by `zip_seq`)
    """

    def __init__(self, input_dim:int, seq:int) -> None:
        super().__init__()
        self.tf = nn.Transformer(
            input_dim, nhead=4,
            num_encoder_layers=2,
            num_decoder_layers=2,
            dim_feedforward=32,
            batch_first = True)
        self.zip_feature = nn.Linear(input_dim, 1)  # collapse feature dim
        self.zip_seq = nn.Linear(seq, 1)            # collapse sequence dim
        self.classifier = nn.Sigmoid()

    def forward(self, encoder_in, dencoder_in) -> torch.Tensor:
        """(batch, seq, input_dim) x2 -> (batch,) scores in (0, 1)."""
        output = self.tf(encoder_in, dencoder_in)
        # BUG FIX: use squeeze(-1), not squeeze() — a bare squeeze() also
        # removed a size-1 batch dimension, breaking zip_seq for batch size 1.
        output = self.zip_feature(output).squeeze(-1)
        output = self.zip_seq(output).squeeze(-1)
        output = self.classifier(output)
        return output

    def fit(self, encoder_in:torch.Tensor, dencoder_in:torch.Tensor, label:torch.Tensor,
            epoches=1, bach_size=150, lr=1e-2):
        """Train in place with Adam + L1 loss; returns self."""
        self.train()
        opt = Adam(self.parameters(), lr)
        loss_fn = nn.L1Loss()
        for epoch in range(epoches):
            losses = []
            # shuffle=True is correct here: ordering is irrelevant for training.
            loader = DataLoader(TFDataSet(encoder_in, dencoder_in, label),bach_size,True)
            bar = tqdm(loader)
            for encoder, dencoder, target in bar:
                output = self.forward(encoder, dencoder)
                loss = loss_fn(output, target)
                opt.zero_grad()
                loss.backward()
                opt.step()
                losses.append(loss.item())
                bar.set_postfix_str(f'epoch[{epoch}:{epoches}]|loss[{np.mean(losses)}]|')
        return self

    def predict(self,encoder_in:torch.Tensor, dencoder_in:torch.Tensor, bach_size=150):
        """Score each (encoder, decoder) row pair; returns a (batch,) tensor
        in the SAME order as the inputs."""
        self.eval()
        ans = []
        with torch.no_grad():
            # BUG FIX: the original passed shuffle=True, so the returned
            # predictions were in random order and did not line up with the
            # input rows. Prediction must preserve order.
            loader = DataLoader(TFDataSet(encoder_in, dencoder_in),bach_size,False)
            bar = tqdm(loader)
            for encoder, dencoder in bar:
                output = self.forward(encoder, dencoder)
                ans.append(output)
        return torch.concat(ans, dim=0)
