import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision import datasets
from sklearn.preprocessing import OneHotEncoder
import numpy as np
from skfuzzy import control as ctrl
from skfuzzy import membership

# Define the ANFIS structure (fuzzy inference front-end)
class ANFIS:
    """Mamdani-style fuzzy inference front-end built with scikit-fuzzy.

    Maps two crisp inputs on the 8-bit pixel range [0, 255] to a single
    defuzzified output on [0, 10).
    """

    def __init__(self):
        # Input universes span the 8-bit pixel range; the output universe
        # spans the 10 digit classes.
        self.input1 = ctrl.Antecedent(np.arange(0, 256, 1), 'input1')
        self.input2 = ctrl.Antecedent(np.arange(0, 256, 1), 'input2')
        self.output = ctrl.Consequent(np.arange(0, 10, 1), 'output')

        # Three auto-generated membership functions per input, labelled
        # 'poor', 'average', 'good' by skfuzzy's automf(3).
        self.input1.automf(3)
        self.input2.automf(3)
        self.output['low'] = membership.trimf(self.output.universe, [0, 0, 5])
        self.output['medium'] = membership.trimf(self.output.universe, [0, 5, 10])
        self.output['high'] = membership.trimf(self.output.universe, [5, 10, 10])

        # Rule base covering ALL 3x3 label combinations. The original code
        # defined only the three diagonal rules (poor&poor, average&average,
        # good&good); for any mixed input pair no rule fired and
        # ControlSystemSimulation.compute() raised because no crisp output
        # could be calculated. Mixed pairs are graded symmetrically, roughly
        # by the average of the two labels; the original diagonal rules are
        # preserved unchanged.
        grade = {
            ('poor', 'poor'): 'low',
            ('poor', 'average'): 'low',
            ('average', 'poor'): 'low',
            ('poor', 'good'): 'medium',
            ('good', 'poor'): 'medium',
            ('average', 'average'): 'medium',
            ('average', 'good'): 'high',
            ('good', 'average'): 'high',
            ('good', 'good'): 'high',
        }
        rules = [
            ctrl.Rule(self.input1[a] & self.input2[b], self.output[out])
            for (a, b), out in grade.items()
        ]

        self.control_system = ctrl.ControlSystem(rules)
        self.simulator = ctrl.ControlSystemSimulation(self.control_system)

    def evaluate(self, x1, x2):
        """Return the defuzzified output for crisp scalar inputs x1, x2.

        Both inputs are expected to lie on the [0, 255] universe defined
        in __init__.
        """
        self.simulator.input['input1'] = x1
        self.simulator.input['input2'] = x2
        self.simulator.compute()
        return self.simulator.output['output']


# Define the MLP model used for training
class MLPModel(nn.Module):
    """MLP head intended to classify the output of an ANFIS front-end.

    NOTE(review): as written, forward() is inconsistent with
    ANFIS.evaluate(), which returns one scalar per call — see the inline
    notes below. Confirm the intended ANFIS output shape before relying
    on this model.
    """

    def __init__(self, anfis_model):
        # anfis_model: any object exposing evaluate(x1, x2).
        super(MLPModel, self).__init__()
        self.anfis_model = anfis_model
        self.flatten = nn.Flatten()
        self.dense1 = nn.Linear(10, 128)  # assumes the ANFIS stage yields 10 features — NOTE(review): ANFIS.evaluate returns a single scalar; confirm
        self.dense2 = nn.Linear(128, 64)
        self.output_layer = nn.Linear(64, 10)

    def forward(self, x):
        # Run the fuzzy front-end on the two stacked input channels.
        x1, x2 = x[:, 0], x[:, 1]  # split the two stacked inputs; each is (batch, d)
        # NOTE(review): skfuzzy's ControlSystemSimulation takes scalar crisp
        # inputs, so passing batched tensors here is unlikely to work —
        # confirm whether per-sample scalar calls were intended.
        anfis_output = self.anfis_model.evaluate(x1, x2)
        # NOTE(review): torch.tensor() builds a new leaf tensor, so no
        # gradient can flow back through the ANFIS stage; view(-1, 10) also
        # requires 10 values per sample from the ANFIS output.
        x = torch.tensor(anfis_output, dtype=torch.float32).view(-1, 10)  # convert to tensor and reshape
        x = self.flatten(x)
        x = torch.relu(self.dense1(x))
        x = torch.relu(self.dense2(x))
        return self.output_layer(x)


# Prepare the MNIST pipeline: scale pixels to [-1, 1], batch by 32.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

train_dataset = datasets.MNIST(
    root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(
    root='./data', train=False, download=True, transform=transform)

train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=32, shuffle=False)

# Fuzzy front-end plus the MLP head that consumes its output.
anfis_model = ANFIS()
mlp_model = MLPModel(anfis_model)

# Standard 10-class classification setup.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(mlp_model.parameters(), lr=0.001)

# Train the model for 10 epochs.
for epoch in range(10):
    mlp_model.train()
    running_loss = 0.0
    num_batches = 0
    for images, labels in train_loader:
        images = images.view(images.size(0), -1)  # flatten to (batch, 784)
        # Split the 784 pixels into two equal 392-wide halves. The original
        # :256 / 256: split produced widths 256 vs 528, and torch.stack
        # requires tensors of identical shape, so it raised a RuntimeError.
        # NOTE(review): the pixels are normalized to [-1, 1] while the ANFIS
        # universes are [0, 255] — confirm the intended scaling upstream.
        x1, x2 = images.chunk(2, dim=1)
        outputs = mlp_model(torch.stack([x1, x2], dim=1))
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        num_batches += 1

    # Report the epoch-average loss; the original printed only the last
    # batch's loss, which is a noisy and misleading progress metric.
    print(f'Epoch [{epoch+1}/10], Loss: {running_loss / num_batches:.4f}')

# Evaluate classification accuracy on the held-out test set.
mlp_model.eval()
correct = 0
total = 0

with torch.no_grad():
    for images, labels in test_loader:
        images = images.view(images.size(0), -1)  # flatten to (batch, 784)
        # Same even split as in training: the original :256 / 256: split
        # yielded widths 256 vs 528, which torch.stack rejects (it requires
        # tensors of identical shape).
        x1, x2 = images.chunk(2, dim=1)
        outputs = mlp_model(torch.stack([x1, x2], dim=1))
        # argmax over the class dimension; the legacy `.data` accessor is
        # unnecessary inside torch.no_grad().
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Test accuracy: {100 * correct / total:.2f}%')
