from torch_geometric.nn import GATConv, SAGPooling, GCNConv
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_geometric.nn import MessagePassing
from torch_geometric.nn.conv.gcn_conv import GCNConv
import torch.nn as nn
import torch
import torch.nn.functional as F
from typing import List, Tuple

from models.model_metrics import cal_metrics
from dataset_adapter import VarmisuseTask
from models.origin.tensor_gcn import Tensor_GCN
from src.tasks import VarmisuseForRepairLayer
from tasks.config import Py150kvarmisuseTaskConfig
from src.tasks import VarmisuseLinearOutputLayer

# Local project imports (models, tasks, dataset adapters)


class VarmisuseModel(nn.Module):
    """Variable-misuse model: a Tensor_GCN graph encoder followed by a
    task-specific output head (bug localization and/or repair).

    The output head is selected from ``config.task_type``:
    ``VarmisuseTask.RepairBug`` uses ``VarmisuseForRepairLayer``;
    any other task type uses ``VarmisuseLinearOutputLayer``.
    """

    def __init__(self, config: Py150kvarmisuseTaskConfig):
        super(VarmisuseModel, self).__init__()
        self.config = config
        # Graph encoder producing one embedding vector per graph node.
        self.embeding_model = Tensor_GCN(
            num_edge_types=config.num_edge_types,
            in_features=config.graph_node_max_num_chars,
            out_features=config.h_features,
            embedding_num_classes=70,  # character-vocabulary size — TODO confirm against Tensor_GCN
            embedding_out_features=config.embedding_out_features,
            dropout=config.dropout_rate,
            device=config.device,
            max_node_per_graph=config.max_node_per_graph,
        )

        # Task-specific output head.
        if config.task_type == VarmisuseTask.RepairBug:
            self.varmiuse_layer = VarmisuseForRepairLayer(
                out_features=config.out_features,
                task_type=config.task_type,
                max_node_per_graph=config.max_node_per_graph,
                device=config.device,
            )
        else:
            self.varmiuse_layer = VarmisuseLinearOutputLayer(
                out_features=config.out_features,
                task_type=config.task_type,
                max_node_per_graph=config.max_node_per_graph,
                device=config.device,
            )

        self.loss_compute = torch.nn.CrossEntropyLoss()

    def get_loss_and_metrics(self, bug_location_predict, repair_predict,
                             bug_location_label, repair_label):
        """Compute the combined loss and summary metrics.

        Args:
            bug_location_predict: per-sample logits over candidate bug
                locations, or ``None`` when localization is not performed.
            repair_predict: per-sample logits over repair candidates
                (reshaped against ``config.max_node_per_graph``), or ``None``.
            bug_location_label: integer location labels; 0 appears to mean
                "no bug" (any positive index marks a buggy slot) — inferred
                from the clamp-based counting below.
            repair_label: one/multi-hot repair-candidate labels, flattened.

        Returns:
            ``(loss, metrics)`` where ``metrics`` is a 4-element list:
            [no-bug accuracy, bug-localization accuracy, repair accuracy,
            mean loss per sample].
        """
        loss = torch.tensor(0.).to(self.config.device)
        metrics = [0, 0, 0, 0]

        batch_size = (bug_location_predict.size(0)
                      if bug_location_predict is not None
                      else repair_predict.size(0))

        if bug_location_predict is not None:
            # Cross-entropy over candidate bug locations.
            loss += self.loss_compute(bug_location_predict, bug_location_label)

            # clamp(label, 0, 1) is 1 for buggy samples and 0 for "no bug".
            bug_number = torch.sum(torch.clamp(bug_location_label, 0, 1)).item()
            no_bug_number = batch_size - bug_number

            bug_location_predict = torch.argmax(bug_location_predict, 1)

            total_success = torch.sum(
                torch.where(bug_location_predict == bug_location_label, 1, 0)).item()
            # Counts samples where both label and prediction are 0 ("no bug"):
            # the product of the two complement indicators is 1 only then.
            no_bug_success = torch.sum(torch.einsum(
                "i,i->",
                1. - torch.clamp(bug_location_label, 0, 1),
                1. - torch.clamp(bug_location_predict, 0, 1))).item()

            bug_success = total_success - no_bug_success

            # Degenerate classes (no positives/negatives) score a perfect 1.
            metrics[0] = no_bug_success / no_bug_number if no_bug_number > 0 else 1
            metrics[1] = bug_success / bug_number if bug_number > 0 else 1

        if repair_predict is not None:
            repair_predict = F.softmax(repair_predict, dim=-1)

            repair_label = repair_label.view(-1, self.config.max_node_per_graph)
            # Probability mass assigned to the correct repair candidate(s).
            repair_predict = torch.einsum("td,td->t",
                                          repair_predict, repair_label.float())
            # Negative log-likelihood; epsilon guards log(0).
            loss += torch.sum(-torch.log(repair_predict + 1e-9))

            # A prediction counts as correct when at least half the
            # probability mass lands on the labelled candidate(s).
            metrics[2] = (torch.sum(torch.where(repair_predict >= 0.5, 1, 0)).item()
                          / batch_size)

        metrics[3] = loss.item() / batch_size

        return loss, metrics

    def forward(self,
                x,
                edge_list: List[torch.Tensor],
                slot_id,
                candidate_ids,
                candidate_masks,
                label=None,
                **kwargs):
        """Encode the graph and run the varmisuse head.

        Returns:
            ``(bug_location_predict, repair_predict, loss, metrics)``.
            ``loss`` and ``metrics`` are ``None`` when ``label`` is ``None``
            (pure inference).
        """
        x = x.to(self.config.device)
        edge_list = [e.to(self.config.device) for e in edge_list]
        slot_id = slot_id.to(self.config.device)
        candidate_ids = candidate_ids.to(self.config.device)
        candidate_masks = candidate_masks.to(self.config.device)
        # BUG FIX: previously label.to(...) was called unconditionally,
        # crashing whenever the documented default label=None was used.
        if label is not None:
            label = label.to(self.config.device)

        embeding_vector = self.embeding_model(x, edge_list)
        bug_location_predict, repair_predict = self.varmiuse_layer(
            embeding_vector, slot_id, candidate_ids, candidate_masks, label)

        # Loss/metrics require a label; skip them at pure inference time.
        if label is not None:
            loss, precison = self.get_loss_and_metrics(
                bug_location_predict, repair_predict, label, slot_id)
        else:
            loss, precison = None, None

        return bug_location_predict, repair_predict, loss, precison