import numpy as np


class SimpleANFIS:
    """Toy stand-in for an ANFIS: a hard-coded rule table looked up by
    exact match on the truncated integer inputs. Unmatched inputs yield 0.
    """

    def __init__(self):
        # Rule antecedents and crisp consequents (rule weights and outputs).
        self.rules = [
            {'input': [0, 0], 'output': 10},  # rule 1
            {'input': [1, 1], 'output': 50},  # rule 2
            {'input': [2, 2], 'output': 90},  # rule 3
        ]

    def evaluate(self, x1, x2):
        """Return the output of the rule whose antecedent equals
        [int(x1), int(x2)], or 0 when no rule matches.
        """
        # Simplified fuzzy inference: firing strength is 1 for an exact
        # match on the truncated inputs, 0 otherwise.
        key = [int(x1), int(x2)]
        result = 0
        for rule in self.rules:
            if rule['input'] == key:
                result = rule['output']
        return result


# Convolution-like operation that uses an ANFIS model as the "kernel":
# each 3x3 window is summarized by its mean and standard deviation and
# fed to the model.
def convolve_with_anfis(image, anfis_model, stride=1, padding=0):
    """Slide a 3x3 window over `image` and evaluate `anfis_model` on each.

    Parameters
    ----------
    image : 2-D ndarray
        Input feature map.
    anfis_model : object
        Must expose evaluate(mean, std) -> scalar.
    stride : int
        Step between consecutive windows.
    padding : int
        Zero padding added on every side before sliding.

    Returns
    -------
    2-D ndarray with one model output per full 3x3 window.
    """
    kernel = 3
    padded_image = np.pad(image, padding, mode='constant')
    # Bug fix: the original computed (dim - 2) // stride + 1 and let the
    # loop indices run one step too far, so the windows touching the
    # bottom/right edges were truncated to 2 rows/columns.  A 3x3 kernel
    # has (dim - 3) // stride + 1 valid positions per axis.
    output_shape = ((padded_image.shape[0] - kernel) // stride + 1,
                    (padded_image.shape[1] - kernel) // stride + 1)
    output_image = np.zeros(output_shape)

    for i in range(0, padded_image.shape[0] - kernel + 1, stride):
        for j in range(0, padded_image.shape[1] - kernel + 1, stride):
            region = padded_image[i:i + kernel, j:j + kernel]
            # Summarize the window by (mean, std) before fuzzy inference.
            output_image[i // stride, j // stride] = anfis_model.evaluate(
                np.mean(region), np.std(region))

    return output_image


# Fully connected (dense) layer: affine transform of the input vector.
def fully_connected_layer(input_data, weights, bias):
    """Return input_data · weights + bias."""
    return input_data @ weights + bias


# Loss function: mean squared error.
def mse_loss(predictions, targets):
    """Return the mean of the squared element-wise differences."""
    diff = predictions - targets
    return np.mean(np.square(diff))


# Training routine: two stacked ANFIS-convolution layers feeding a linear
# layer, optimized with plain gradient descent on an MSE loss.
def train_network(num_epochs=100, learning_rate=0.01):
    """Train the toy ANFIS-CNN on random data.

    Parameters
    ----------
    num_epochs : int
        Number of gradient-descent steps (one random sample per step).
    learning_rate : float
        Step size for the weight/bias updates.

    Returns
    -------
    (weights, bias) : the learned fully-connected parameters.
    """
    anfis_model = SimpleANFIS()

    # Bug fix: the original hard-coded a (9, 3) weight matrix, but the
    # flattened convolution output has a different length, so np.dot
    # raised a shape mismatch.  Initialize lazily on the first epoch so
    # the weight shape always matches the actual flattened size.
    weights = None
    bias = np.random.rand(3)

    for epoch in range(num_epochs):
        # Random training sample; inputs in [0, 2) to line up with the
        # SimpleANFIS rule antecedents.
        input_image = np.random.rand(5, 5) * 2
        target_output = np.random.rand(3) * 100  # random regression target

        # Two stacked ANFIS "convolution" layers.
        conv1_output = convolve_with_anfis(input_image, anfis_model, stride=1, padding=1)
        conv2_output = convolve_with_anfis(conv1_output, anfis_model, stride=1, padding=1)

        # Flatten so the fully connected layer sees a vector.
        flattened_output = conv2_output.flatten()
        if weights is None:
            weights = np.random.rand(flattened_output.size, 3)

        fc_output = fully_connected_layer(flattened_output, weights, bias)

        loss = mse_loss(fc_output, target_output)

        # Gradient of the MSE loss: dL/dW = 2 * x ⊗ (pred - target) / n.
        # Bug fix: the original negated the gradient and then *subtracted*
        # it, i.e. performed gradient ascent, so the loss diverged.
        error = fc_output - target_output
        weights_gradient = 2 * np.outer(flattened_output, error) / target_output.size
        bias_gradient = 2 * error / target_output.size

        weights -= learning_rate * weights_gradient
        bias -= learning_rate * bias_gradient

        if epoch % 10 == 0:
            print(f'Epoch {epoch}, Loss: {loss:.4f}')

    return weights, bias


# Script entry point: train the network and report the learned parameters.
if __name__ == '__main__':
    learned_weights, learned_bias = train_network(num_epochs=100)
    print("训练完成！最终权重和偏置：")
    print("Weights:\n", learned_weights)
    print("Bias:\n", learned_bias)
