# coding=utf-8
# Copyright (C) idata team - All Rights Reserved
#
# @Version:   3.10.9
# @Software:  PyCharm
# @FileName:  architecture.py
# @CTime:     2023/6/1 10:20   
# @Author:    yhy
# @Email:     yhy@cyber.com
# @UTime:     2023/6/1 10:20
#
# @Description:
#     
#     xxx
#
import logging
from typing import NewType, Any, Optional
import pytorch_lightning as pl
import torch
from torch import optim
from metrics import MyAccuracy

logger = logging.getLogger(__name__)


class Architecture(pl.LightningModule):
    """Lightning module skeleton: wires up metrics, logging hooks and the optimizer.

    Accepts arbitrary keyword hyperparameters, which are persisted via
    ``save_hyperparameters`` so checkpoints can rebuild the module.
    """

    def __init__(self, **cfg):
        super().__init__()
        # Persist every keyword argument as a hyperparameter (checkpoint + logger).
        self.save_hyperparameters()

        # Freeze specific sub-modules here if needed, e.g.:
        # self.bert.freeze()

        # Register a non-trainable tensor that follows the module's device/dtype.
        self.register_buffer('sigma', torch.eye(3))

        # Initialize the metric (stateful; updated every training step).
        self.accuracy = MyAccuracy()

        # Example input Lightning uses to trace/log the computation graph.
        self.example_input_array = torch.Tensor(32, 1, 28, 28)

    def forward(self, *args: Any, **kwargs: Any) -> Any:
        """Forward pass — intentionally left for the concrete implementation.

        NOTE(review): currently returns None; training_step/test_step below
        assume this produces class logits — implement before training.
        """
        pass

    def any_lightning_module_function_or_hook(self):
        """Log the model graph to TensorBoard using a prototype input tensor."""
        tensorboard_logger = self.logger.experiment
        prototype_array = torch.Tensor(32, 1, 28, 27)
        tensorboard_logger.log_graph(model=self, input_array=prototype_array)

    def on_train_start(self) -> None:
        # Log the computation graph once, right before training begins.
        self.any_lightning_module_function_or_hook()

    def training_step(self, batch, batch_idx):
        """One optimization step: forward, loss, metric update and logging."""
        x, y = batch

        # 1. forward pass + loss (assumes forward() returns class logits).
        preds = self(x)
        loss = torch.nn.functional.cross_entropy(preds, y)

        # 2. update the (stateful) accuracy metric
        acc = self.accuracy(preds, y)

        # Bug fix: ``self.log`` takes (name, value); a dict of values must be
        # logged through ``self.log_dict``.
        values = {"loss": loss, "acc": acc, "train_acc_step": self.accuracy}  # add more items if needed
        self.log_dict(values, prog_bar=True, on_epoch=True, on_step=True)

        # Bug fix: hyperparameter/metric logging belongs to the Lightning
        # logger (self.logger), not the module-level stdlib ``logging`` logger.
        self.logger.log_hyperparams({"batch_size": 16, "learning_rate": 0.001})
        self.logger.log_metrics({"train/loss": 0.001, "val/loss": 0.002})

        # Raw TensorBoard access — fill in real arguments before enabling
        # (the original called these with no/placeholder args, which raises):
        # tensorboard = self.logger.experiment
        # tensorboard.add_image(...)
        # tensorboard.add_histogram(...)
        # tensorboard.add_figure(...)

        return loss

    def validation_step(self, batch, batch_idx):
        value = batch_idx + 1
        # sync_dist=True syncs logging across all GPU workers (may have
        # performance impact).
        self.log("average_value", value, on_step=True, on_epoch=True, sync_dist=True)

    def test_step(self, batch, batch_idx):
        """Evaluate one test batch and log its loss."""
        x, y = batch
        # NOTE(review): requires forward() to be implemented (see above).
        preds = self(x)
        test_loss = torch.nn.functional.cross_entropy(preds, y)
        self.log("test_loss", test_loss)

    def configure_optimizers(self) -> Any:
        # Bug fix: the optimizer must be returned — otherwise Lightning has
        # nothing to step and training silently does not optimize.
        optimizer = optim.Adam(self.parameters(), lr=1e-3)
        return optimizer


if __name__ == '__main__':
    cfg = {
        'name': 'test_model'
    }
    # Bug fix: __init__ accepts only keyword arguments (**cfg), so the dict
    # must be unpacked — ``Architecture(cfg)`` would raise a TypeError.
    model = Architecture(**cfg)
