import itertools
import torch
import torch.nn.functional as F
from sklearn.cluster import KMeans
from .main_net import GATEncoder, AttForEmb, seed_torch, compute_q, compute_p
from .load_data import *
import numpy as np
from sklearn.metrics import normalized_mutual_info_score as NMI
from pathlib import Path
from Clustering_metrics import cal_metrics as CM
import os

def cosine_loss(x, y, alpha=1):
    """Mean powered cosine distance between rows of `x` and `y`.

    Both inputs are L2-normalized along the last dimension, then the loss
    is ``mean((1 - cos_sim) ** alpha)``.

    Args:
        x: tensor of shape (..., d).
        y: tensor broadcastable against `x` with the same last dimension.
        alpha: exponent sharpening the per-row distance (default 1).

    Returns:
        Scalar tensor (differentiable).
    """
    x = F.normalize(x, p=2, dim=-1)
    y = F.normalize(y, p=2, dim=-1)
    # Out-of-place `pow` instead of the original in-place `pow_`: pow's
    # backward needs its (saved) input, and mutating it in place can trip
    # autograd's version-counter check when the loss is backpropagated.
    return (1 - (x * y).sum(dim=-1)).pow(alpha).mean()

def train_(cfg, args):
    """Train the multi-view GAT autoencoder, then refine it with DEC-style
    self-supervised clustering.

    Stage 1 pre-trains one GATEncoder per view plus the attention fusion
    module (mdAtt) on cosine reconstruction losses, with patience-based
    early stopping on the total loss.  Stage 2 initialises cluster centers
    from KMeans on the fused embedding Z and fine-tunes with an added KL
    self-training loss and a cluster-dispersion regulariser, keeping the
    prediction with the best validation metric seen.

    Args:
        cfg:  dict of hyper-parameters ('dim1', 'dim2', 'heads', 'drop',
              'lr', 'lr_dec', 'Epoch', 'Epoch_dec', 'alpha', 'theta',
              'lamb', 'gamma', 'beta', 'T', 'sched', 'tolerance').
        args: namespace with at least `seed` and `data_name`.

    Returns:
        (best_pre, label): predicted cluster ids (np.ndarray) and the
        ground-truth labels.

    Raises:
        ValueError: if `args.data_name` is not a known dataset.
    """
    seed_torch(args.seed)

    # Dataset dispatch; fail fast on an unknown name instead of hitting a
    # confusing NameError on the unpacked variables further down.
    if args.data_name == 'ACM':
        n, cluster, label, views, v, x = load_ACM()
    elif args.data_name == 'DBLP':
        n, cluster, label, views, v, x = load_DBLP()
    elif args.data_name == 'IMDB':
        n, cluster, label, views, v, x = load_IMDB()
    elif args.data_name == 'Freebase':
        n, cluster, label, views, v, x = load_Freebase()
    else:
        raise ValueError(f"Unknown data_name: {args.data_name!r}")

    # One GAT autoencoder per graph view.
    model_list = [GATEncoder([int(x.shape[1]), int(cfg['dim1']), int(cfg['dim2']), x.shape[1]], cfg['heads'], cfg['drop'], 'elu').cuda() for _ in range(views.v)]

    # Attention module that fuses the per-view embeddings into Z.
    mdAtt = AttForEmb([int(cfg['dim2']*cfg['heads']), x.shape[1]], 'elu', views.v).cuda()

    optimizer = torch.optim.Adam([
        {'params': mdAtt.parameters()},
        {'params': itertools.chain.from_iterable(model.parameters() for model in model_list)}
    ], lr=cfg['lr'], weight_decay=1e-6)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg['Epoch'])

    # ---- Stage 1: reconstruction pre-training with early stopping ----
    mdAtt.train()
    for i in range(views.v):
        model_list[i].train()
    min_loss, waiter = torch.inf, 0
    for epoch in range(cfg['Epoch']):
        z, z_recon, _, _ = zip(*[model_list[i](x, views.A[i], views.A_2[i], views.A_T[i]) for i in range(views.v)])
        # Average per-view reconstruction loss (plain scalar division —
        # no need to wrap the view count in a tensor).
        ls_A = sum(cosine_loss(x, z_recon[i], cfg['alpha']) for i in range(views.v)) / views.v

        Z, alpha, Z_recon = mdAtt(z)
        ls_T = cosine_loss(x, Z_recon, cfg['alpha'])

        ls = cfg['theta'] * ls_A + cfg['lamb'] * ls_T

        optimizer.zero_grad()
        ls.backward()
        optimizer.step()

        if cfg['sched']:
            scheduler.step()

        # Patience-based early stopping on the total loss.
        if ls.item() < min_loss:
            min_loss = ls.item()
            waiter = 0
        else:
            waiter += 1

        if waiter > cfg['tolerance']:
            break

    mdAtt.eval()
    for i in range(views.v):
        model_list[i].eval()

    # Initialise cluster centers from KMeans on the normalized fused embedding.
    with torch.no_grad():
        z, z_recon, _, _ = zip(*[model_list[i](x, views.A[i], views.A_2[i], views.A_T[i]) for i in range(views.v)])
        Z, alpha, Z_recon = mdAtt(z)
        KM = KMeans(n_clusters=cluster, n_init=10).fit(F.normalize(Z).detach().cpu())

    mdAtt.Init_center(KM.cluster_centers_)

    # Fallback prediction: guarantees `best_pre` is defined at `return` even
    # if the fine-tuning loop runs zero epochs or never improves on q_max.
    best_pre = KM.labels_

    optimizer = torch.optim.Adam([{'params': mdAtt.parameters()},
                                  {'params': itertools.chain.from_iterable(model.parameters() for model in model_list)}],
                                 lr=cfg['lr_dec'], weight_decay=1e-6)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg['Epoch_dec'])

    # ---- Stage 2: DEC-style fine-tuning ----
    q_list, q_max = [], 0
    for epoch in range(cfg['Epoch_dec']):
        mdAtt.train()
        for i in range(views.v):
            model_list[i].train()

        z, z_recon, _, _ = zip(*[model_list[i](x, views.A[i], views.A_2[i], views.A_T[i]) for i in range(views.v)])
        ls_A = sum(cosine_loss(x, z_recon[i], cfg['alpha']) for i in range(views.v)) / views.v
        Z, alpha, Z_recon = mdAtt(z)
        ls_T = cosine_loss(x, Z_recon, cfg['alpha'])

        # Soft assignment q; the sharpened target distribution p is only
        # refreshed every cfg['T'] epochs.  Epoch 0 always refreshes, so p
        # is defined before its first use below.
        q = compute_q(Z, mdAtt.centers)

        if epoch % cfg['T'] == 0:
            p = compute_p(q).detach()

        ls_kl = F.kl_div(torch.log(q), p, reduction='batchmean')

        # Dispersion regulariser in [0, 1]: pushes the assignment away from
        # collapsing every node into a single cluster.
        ls_dis = (torch.sqrt(torch.tensor(cluster)) - torch.norm(q, p=2, dim=0).sum() / torch.sqrt(torch.tensor(n))) / (torch.sqrt(torch.tensor(cluster)) - 1)

        ls = cfg['theta'] * ls_A + cfg['lamb'] * ls_T + cfg['gamma'] * ls_kl + cfg['beta'] * ls_dis

        optimizer.zero_grad()
        ls.backward()
        optimizer.step()

        if cfg['sched']:
            scheduler.step()

        # Evaluate the current hard assignment argmax(q).
        mdAtt.eval()
        for i in range(views.v):
            model_list[i].eval()

        with torch.no_grad():
            z, z_recon, x_list, alpha_list = zip(*[model_list[i](x, views.A[i], views.A_2[i], views.A_T[i]) for i in range(views.v)])
            Z, alpha, Z_recon = mdAtt(z)
            q = compute_q(Z, mdAtt.centers)

        four_metric = CM(label, np.argmax(q.detach().cpu().numpy(), axis=1))
        # four_metric[1] is the model-selection score — presumably NMI;
        # confirm against Clustering_metrics.cal_metrics.
        q_list.append(four_metric[1])

        # Keep the best prediction (and its per-view embeddings) seen so far.
        if q_max < q_list[-1]:
            best_pre = np.argmax(q.detach().cpu().numpy(), axis=1)
            q_max = q_list[-1]
            emb_list = []
            for v_ in x_list:
                emb_list.append([emb.detach().cpu().numpy() for emb in v_])
            # NOTE(review): emb_list is computed but never returned or saved
            # here — verify whether a caller relies on a side effect.
            emb_list = np.array(emb_list)

    return best_pre, label