#!/usr/bin/python
# -*- coding:utf-8 -*-
# @FileName : DL5_test1_1.py
# Author    : myh

import torch
from torch import nn
from torch.nn import functional as F

# A three-layer MLP built with the Sequential container:
# 20 -> 256 -> 256 -> 10, with ReLU between the affine layers.
net = nn.Sequential(
    nn.Linear(20, 256),
    nn.ReLU(),
    nn.Linear(256, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)
#
# print(net)
# for i in range(5):
#     print(i, '=', net[i])

class MLP(nn.Module):
    """A 20 -> 256 -> 256 -> 10 multilayer perceptron.

    Fix: the original forward applied ``F.relu`` *before* each Linear layer,
    including directly to the raw input (which zeroes every negative input
    feature before the first affine transform) and before the output layer.
    The conventional ordering is affine -> activation on the hidden layers,
    with no activation on the output layer; that is what is used here.
    """

    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(20, 256)   # input -> hidden 1
        self.lin2 = nn.Linear(256, 256)  # hidden 1 -> hidden 2
        self.out = nn.Linear(256, 10)    # hidden 2 -> logits (no activation)

    def forward(self, X):
        H1 = F.relu(self.lin1(X))
        H2 = F.relu(self.lin2(H1))
        return self.out(H2)

class MySequential(nn.Module):
    """A minimal re-implementation of ``nn.Sequential``.

    Each constructor argument is registered in ``nn.Module``'s ``_modules``
    OrderedDict under its positional index, so PyTorch sees the children for
    parameter collection, ``state_dict``, printing, etc.
    """

    def __init__(self, *args):
        super().__init__()
        # Key each child by its position; ``_modules`` is an OrderedDict
        # maintained by the nn.Module base class.
        for position, child in enumerate(args):
            self._modules[str(position)] = child

    def forward(self, X):
        # The OrderedDict preserves insertion order, so children run in
        # exactly the order they were passed to the constructor.
        for child in self._modules.values():
            X = child(X)
        return X


class FixedHiddenMLP(nn.Module):
    """Module mixing a trainable layer with a fixed random weight matrix
    and Python control flow in ``forward``.
    """

    def __init__(self):
        super().__init__()
        # A constant 20x20 weight: requires_grad=False, so it never updates
        # during training.
        self.rand_weight = torch.rand((20, 20), requires_grad=False)
        self.linear = nn.Linear(20, 20)

    def forward(self, X):
        hidden = self.linear(X)
        # Multiply by the fixed weight, shift, and activate.
        hidden = F.relu(hidden.mm(self.rand_weight) + 1)
        # Pass through the same Linear layer again: the two applications
        # share one set of parameters.
        hidden = self.linear(hidden)
        # Data-dependent control flow: halve until the L1 norm is at most 1.
        while hidden.abs().sum() > 1:
            hidden = hidden / 2
        return hidden.sum()

class NestMLP(nn.Module):
    """A module that nests an ``nn.Sequential`` (20 -> 64 -> 32 with ReLUs)
    followed by a final 32 -> 16 Linear layer.
    """

    def __init__(self):
        super().__init__()
        # Construction order matters for RNG reproducibility: the inner
        # Sequential's layers are initialized before the final Linear.
        hidden = [nn.Linear(20, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU()]
        self.net = nn.Sequential(*hidden)
        self.linear = nn.Linear(32, 16)

    def forward(self, X):
        features = self.net(X)
        return self.linear(features)



class test5_1_2(nn.Module):
    """Parallel container: feeds the same input to two sub-networks and
    returns both outputs as a tuple (exercise 5.1-2).
    """

    def __init__(self, net1, net2):
        super().__init__()
        self.net1 = net1
        self.net2 = net2

    def forward(self, X):
        # Run each branch on the unmodified input; neither branch sees the
        # other's output.
        branch_a = self.net1(X)
        branch_b = self.net2(X)
        return branch_a, branch_b


class test5_1_3(nn.Module):
    """Chain a block a configurable number of times (exercise 5.1-3).

    Behavior by ``block_num``:
      * > 2  — Linear(20, 20) -> ``block`` repeated (block_num - 2) times
               -> Linear(20, 10); ReLU is applied before every stage.
               NOTE: ``self.mid`` is a single shared module, so every
               repetition reuses the same parameters.
      * == 2 — ``block`` applied twice (start and end are the SAME module,
               i.e. the two stages share parameters).
      * == 1 — ``block`` applied once, with no activation.
      * <= 0 — forward returns the int ``0`` (kept for compatibility;
               note the return type differs from the tensor branches).

    Fix: removed the leftover debug ``print("mid", i)`` from the hot path
    of ``forward``.
    """

    def __init__(self, block_num, block):
        super().__init__()
        self.bl_num = block_num
        if self.bl_num > 2:
            self.start = nn.Linear(20, 20)
            # One shared middle block, reused (bl_num - 2) times in forward.
            self.mid = block
            self.end = nn.Linear(20, 10)
        elif self.bl_num == 1:
            self.start = block
        elif self.bl_num == 2:
            # start and end alias the same object -> shared parameters.
            self.start = block
            self.end = block

    def forward(self, X):
        if self.bl_num > 2:
            X = self.start(F.relu(X))
            for _ in range(self.bl_num - 2):
                X = self.mid(F.relu(X))
            X = self.end(F.relu(X))
            return X
        elif self.bl_num == 2:
            X = self.start(F.relu(X))
            return self.end(F.relu(X))
        elif self.bl_num == 1:
            return self.start(X)
        elif self.bl_num <= 0:
            return 0


# A 2x20 batch of random inputs shared by all the demos below.
X = torch.rand(2, 20)

# Mix a custom module with built-ins inside Sequential:
# NestMLP maps 20 -> 16, then the final Linear maps 16 -> 20.
chimera = nn.Sequential(NestMLP(),
                        nn.Linear(16, 20))
# print(chimera(X))
# print(chimera)

# Two independent networks for the parallel-container exercise.
net1 = nn.Sequential(nn.Linear(20, 256),
                     nn.ReLU(),
                     nn.Linear(256, 256),
                     nn.ReLU(),
                     nn.Linear(256, 10))

net2 = nn.Sequential(nn.Linear(20, 256),
                     nn.ReLU(),
                     nn.Linear(256, 20))

# Exercise 5.1-2 demo:
# test = test5_1_2(net1, net2)
# print(test(X))
# print(test)

# Exercise 5.1-3 demos: repeat the shared `chimera` block
# (block_num - 2) times between the fixed start/end layers.
test = test5_1_3(6, chimera)
print(test)
print(test(X))

test1 = test5_1_3(4, chimera)
print(test1)
print(test1(X))