# Model-merging demo: illustrates the principle behind merging models (e.g. a base model with a LoRA model) in weight space.
import torch
from torch import nn


#定义一个简单的神经网络
class SimpleModel(nn.Module):
    """A minimal two-layer MLP: Linear(10 -> 50) -> ReLU -> Linear(50 -> 1)."""

    def __init__(self):
        super().__init__()
        # NOTE: attribute names (fc1/relu/fc2) are part of the state_dict
        # keys, so they must stay stable for checkpoints and merging.
        self.fc1 = nn.Linear(10, 50)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(50, 1)

    def forward(self, x):
        """Map a (batch, 10) input tensor to a (batch, 1) output tensor."""
        return self.fc2(self.relu(self.fc1(x)))


# Create two model instances (randomly initialized; they stand in for two
# separately trained models in this demo).
model1 = SimpleModel()
model2 = SimpleModel()


# Assume both models are already trained; we merge them directly below.

#合并两个模型的参数
def merge_models(model1, model2, alpha=0.5):
    #alpha是模型1的权重，1-alpha是模型2的权重
    with torch.no_grad():
        for param1, param2 in zip(model1.parameters(), model2.parameters()):
            param1.data = alpha * param1.data + (1 - alpha) * param2.data
    return model1


# Merge the models: model1 is modified in place and returned, so both
# prints below show the same (merged) module.
merged_model = merge_models(model1, model2, alpha=0.5)
print(model1)
print(merged_model)
