#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2022/3/20 1:58 下午
# @Author  : WangZhixing
from sklearn.cluster import KMeans
from ..Module.Discriminator import Discriminator1
from ..Module.Encoder import Encoder3, VEncoder1
from ..Module.ARGA import ARGA
import torch


def argva_train(model,data,encoder_optimizer):
    """Run one optimization step of the adversarially regularized VGAE.

    Encodes the graph, sums the adversarial regularizer, the link
    reconstruction loss, the KL term (scaled by 1/num_nodes) and the
    discriminator loss, then backpropagates through the encoder optimizer.

    Returns the scalar total loss tensor for this step.
    """
    model.train()
    encoder_optimizer.zero_grad()
    latent = model.encode(data.x, data.train_pos_edge_index)
    # NOTE(review): only encoder_optimizer steps here, so the discriminator
    # parameters are never updated on their own — the commented-out
    # discriminator_optimizer.step() in the original suggests a separate
    # discriminator update may have been intended; confirm.
    total = (
        model.reg_loss(latent)
        + model.recon_loss(latent, data.train_pos_edge_index)
        + (1 / data.num_nodes) * model.kl_loss()
        + model.discriminator_loss(latent)
    )
    total.backward()
    encoder_optimizer.step()
    return total

def ARGA_trian(data,**kwarg):
    """Train an adversarially regularized graph autoencoder and cluster
    the learned node embeddings with k-means.

    Parameters
    ----------
    data :
        PyG-style data object exposing ``x``, ``train_pos_edge_index``,
        ``num_features`` and ``num_nodes``.
    **kwarg :
        Must contain ``cluster`` — the number of k-means clusters.

    Returns
    -------
    tuple
        ``(kmeans_input, preds)``: the node embeddings as a numpy array
        and the k-means cluster label assigned to each node.
    """
    encoder = VEncoder1(data.num_features, out_channels=32)
    # Discriminator: sample "real" points from a Gaussian prior and train
    # an MLP to tell them apart from the encoder's latent codes, so the
    # divergence between the latent distribution and the Gaussian is
    # replaced by the discriminator's real-vs-fake decision.
    discriminator = Discriminator1(in_channels=32, hidden_channels=64, out_channels=1)
    model = ARGA(encoder, discriminator)
    encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=0.005)
    for epoch in range(1, 201):
        argva_train(model, data, encoder_optimizer)
    # Fix: argva_train leaves the model in train mode; switch to eval mode
    # before extracting the final embeddings so any dropout/batch-norm
    # layers behave deterministically at inference time.
    model.eval()
    with torch.no_grad():
        z = model.encode(data.x, data.train_pos_edge_index)
    kmeans_input = z.cpu().numpy()
    # fit_predict fits and labels in one pass instead of fit() + predict()
    # on the same data.
    preds = KMeans(n_clusters=kwarg['cluster'], random_state=0).fit_predict(kmeans_input)
    return kmeans_input, preds