# NOTE: this import and the call below intentionally precede all other
# project imports — change_cwd_to_main() switches the working directory
# (presumably to the project root — TODO confirm) so that the `tools`
# package and any relative data/output paths resolve correctly.
from tools.change_cwd_to_main import change_cwd_to_main


change_cwd_to_main()


from tools import data_generator

# Standard library / third-party imports.
import math
import os
import torch.nn as nn
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
# Project-local helpers: data generation, cleanup, timing, MoE layers, training loop.
from tools.data_generator import generate_logic_data
from tools.remove_temp_file import remove_path
from bdtime import tt
from tools.moe import MyMoELayer, MyMoELayerClassification
from tools.moe import MoELayer
from tools.moe import MyMoEClassifier
from tools.logistic import train, test

def main():
    """Train a MyMoEClassifier on synthetically generated logic-gate data.

    Builds a train/test split from generate_logic_data, constructs a
    mixture-of-experts classifier, and runs the shared train() loop with
    BCE loss and plain SGD.
    """
    # Set to e.g. (0, 1) to add noise to the generated data.
    # NOTE(review): exact noise semantics live in generate_logic_data — confirm there.
    noise_config = None
    x_data, y_data = data_generator.generate_logic_data(size=10000, noise_config=noise_config)
    x_train, x_test, y_train, y_test = data_generator.split_train_and_test(
        x_data, y_data, test_size=0.2)

    # --- MyMoEClassifier hyper-parameters
    experiment_name = "MyMoEClassifier"
    input_size = 2        # two inputs per logic-gate sample
    output_size = 1       # single probability output
    num_experts = 10
    top_k = 3             # experts routed to per sample
    lr = 0.001 * top_k    # scale the base lr with the number of active experts
    emb_size = 5
    w_importance = 0.01   # weight of the auxiliary importance (load-balancing) loss

    # Local import: Linear is only used here, as the expert class.
    from tools.moe import Linear
    model = MyMoEClassifier(input_size=input_size, experts=num_experts, top=top_k,
                            emb_size=emb_size, output_size=output_size,
                            w_importance=w_importance, expert_cls=Linear)

    # BCELoss expects the model to emit probabilities in [0, 1]
    # (i.e. a sigmoid applied inside the model) — TODO confirm.
    criterion = nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    # Prefer CUDA, then Apple MPS, then CPU.
    device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

    total_epoch = 10000
    log_interval = 0.1  # NOTE(review): fractional value — see train() for how it is interpreted
    train((x_train, x_test, y_train, y_test), model, optimizer, criterion,
          total_epoch=total_epoch, log_interval=log_interval,
          experiment_name=experiment_name, save=True, device=device, is_classification=True)


# Script entry point: run the training experiment when executed directly.
if __name__ == '__main__':
    main()