#===================cell  1========================
import abc
class AbstractFactory(metaclass=abc.ABCMeta):  #The abstract factory
    """Abstract product factory: constructing an instance immediately builds a product.

    BUG FIX: the original used the Python-2 idiom ``__metaclass__ = abc.ABCMeta``,
    which Python 3 silently ignores, so ``@abc.abstractmethod`` was never
    enforced.  Declaring the metaclass in the class header restores the
    abstract-class behavior (direct instantiation now raises TypeError).
    """

    def __init__(self):
        # NOTE(review): the product built here is discarded; every call site in
        # this file calls product_function() again itself, so the product is
        # actually created twice per factory use.
        self.product_function()

    @abc.abstractmethod
    def product_function(self):
        """Build and return the concrete product (implemented by subclasses)."""

class CGA_Factory1(AbstractFactory):   #CGAfactory
    """Concrete CGA factory; its product comes from sin1_function (defined elsewhere)."""
    def product_function(self):
        product = sin1_function()
        return product

class CGA_Factory3(AbstractFactory):   #CGAfactory
    """Concrete CGA factory; its product comes from ben_function (defined elsewhere)."""
    def product_function(self):
        product = ben_function()
        return product


class CGA_Factory2(AbstractFactory):   #The second factory has new value put in
    """Concrete CGA factory; its product comes from transfer_function (defined elsewhere)."""
    def product_function(self):
        product = transfer_function()
        return product

class model_Factory1(AbstractFactory):   # Source domain model
    """Factory for the source-domain surrogate `model` (training fires on print/repr)."""
    def product_function(self):
        source_model = model()
        return source_model
    
class model_Factory2(AbstractFactory):   # Target domain model
    """Factory for the target-domain transfer model `model2` (training fires on print/repr)."""
    def product_function(self):
        target_model = model2()
        return target_model


#===================cell  2========================
def translateDNA(pop):
    """Decode 0/1 DNA genes (MSB first) into real values on the search interval.

    With the file's X_BOUND = [-1, 2] the result lies in [-1, 2].
    Relies on module globals DNA_SIZE and X_BOUND.
    """
    place_values = 2 ** np.arange(DNA_SIZE)[::-1]   # MSB-first powers of two
    as_integer = pop.dot(place_values)              # binary genes -> integer
    scale = X_BOUND[1] + 1                          # width of the mapped range
    return as_integer / float(2 ** DNA_SIZE - 1) * scale - 1

def select(pop, fitness):
    """Roulette-wheel selection: resample POP_SIZE individuals with replacement,
    each drawn with probability proportional to its fitness."""
    probabilities = fitness / fitness.sum()
    picked = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,
                              p=probabilities)
    return pop[picked]

def crossover(parent, pop):     # mating process (genes crossover)
    """With probability CROSS_RATE, splice genes from a random population member
    into *parent* at a random bit mask.

    Mutates and returns *parent* (a 1-D 0/1 gene array of length DNA_SIZE).
    Relies on module globals CROSS_RATE, POP_SIZE, DNA_SIZE.
    """
    if np.random.rand() < CROSS_RATE:
        i_ = np.random.randint(0, POP_SIZE, size=1)     # index of the random mate
        # BUG FIX: np.bool (deprecated alias) was removed in NumPy 1.24 and now
        # raises AttributeError; the builtin bool is the correct dtype here.
        cross_points = np.random.randint(0, 2, size=DNA_SIZE).astype(bool)
        parent[cross_points] = pop[i_, cross_points]    # copy mate genes at masked loci
    return parent

def mutate(child):
    """Flip each gene of *child* independently with probability MUTATION_RATE.

    Mutates *child* in place and returns it.
    """
    for locus in range(DNA_SIZE):
        if np.random.rand() < MUTATION_RATE:
            child[locus] = 0 if child[locus] else 1   # toggle the bit
    return child

def best(pop, fitness):
    """Elitism step: copy the fittest (lowest-fitness) individual over one
    randomly chosen slot of *pop*.

    Mutates *pop* in place; returns None.
    """
    elite = pop[np.argmin(fitness)]                  # fitness is minimized here
    slot = np.random.randint(0, POP_SIZE, size=1)    # random slot to overwrite
    pop[slot] = elite
    
def decodeDNA(x):
    """Inverse of translateDNA: map a real x back onto the integer gene scale
    (normalize to a decimal integer)."""
    fraction = (x + 1) / (X_BOUND[1] + 1)
    return fraction * float(2 ** DNA_SIZE - 1)
#===================cell  3========================
# GA hyper-parameters, shared as module globals by the helpers in cell 2.
import numpy as np
import matplotlib.pyplot as plt
import random
%matplotlib inline
# NOTE(review): the line above is IPython magic — this file only runs inside
# a Jupyter/IPython session, not as a plain .py script.
import time
import pandas as pd
from itertools import chain
Dim=1                    # problem dimensionality (not referenced in this chunk)
DNA_SIZE = 20            # DNA length
POP_SIZE = 100           # population size
CROSS_RATE = 0.8         # mating probability (DNA crossover)
MUTATION_RATE = 0.003    # mutation probability
X_BOUND = [-1, 2]         # x upper and lower bounds
#===================cell  4========================
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import numpy as np
import pandas as pd
import torch
import time
from itertools import chain
import matplotlib.pyplot as plt
%matplotlib inline
# Load the order-k training data (x, f(x), z columns) written by the EA run.
k=1
x=pd.read_table('MIGA%sorder_x.txt'%(k),header = None) 
yy=pd.read_table('MIGA%sorder_fx.txt'%(k),header = None) 
z=pd.read_table('MIGA%sorder_z.txt'%(k),header = None) 
y=None
y=np.hstack((yy,z))  # Merge y and z; hstack of the two DataFrames yields an ndarray
p=len(x)
X= x.astype(float)
Y= y.astype(float)
X=torch.as_tensor(X.values.reshape(p,1))
Y=torch.as_tensor(Y.reshape(p,2))
# NOTE(review): torch.tensor(existing_tensor) copies and emits a UserWarning;
# X.float() / X.clone().detach() would be the quiet spelling of the same thing.
X=torch.tensor(X,dtype=torch.float)
Y=torch.tensor(Y,dtype=torch.float)
print(X.size())
print(Y.size())
# Use batch training: X/Y are copied again into the dataset below.
dataset=TensorDataset(torch.tensor(X,dtype=torch.float),torch.tensor(Y,dtype=torch.float))
dataloader=DataLoader(dataset,batch_size=100,shuffle=True)
class Net(nn.Module):
    """Small MLP surrogate: 1 input feature -> 2 output values."""

    def __init__(self):
        super(Net, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(in_features=1, out_features=10), nn.ReLU(),
            nn.Linear(10, 100), nn.ReLU(),
            nn.Linear(100, 10), nn.ReLU(),
            nn.Linear(10, 2),
        )

    def forward(self, input: torch.FloatTensor):
        """Run one forward pass and return the (batch, 2) prediction.

        BUG FIX: the original ran THREE separate forward passes and did
        ``self.net(input)[:, 1] = X[argmax]`` — an in-place write into a
        temporary tensor that was immediately discarded, so the returned value
        was always just a plain forward pass (it also silently depended on the
        module-level global X).  One pass reproduces the observable behavior.
        """
        return self.net(input)
#===================cell  5========================
#Model training
class model(object):
    """Source-domain surrogate: training runs as a side effect of repr()/print().

    NOTE(review): doing a 14000-epoch training loop inside __repr__ is a very
    heavy side effect for a dunder, but the factory cells rely on
    print(instance) to trigger training, so the design is kept.  Depends on
    module-level globals: Net, dataloader, X, x, y, k, torch, nn, np, plt.
    """
    def __repr__(self):

        net=Net()
        # Adam over the network parameters; ``Net.parameters(net)`` was the
        # unidiomatic spelling of the same call.
        optim=torch.optim.Adam(net.parameters(),lr=0.001)
        Loss=nn.MSELoss()

        print("Begin---")
        for epoch in range(14000):
            loss=None 
            for batch_x,batch_y in dataloader: 
                y_predict=net(batch_x)  #Two values
                loss=Loss(y_predict,batch_y)  
                optim.zero_grad()
                loss.backward()  
                optim.step()

            if (epoch+1)%200==0:
                print("step: {0} , loss: {1}".format(epoch+1,loss.item()))

        predict=net(torch.tensor(X,dtype=torch.float))
        torch.save(net.state_dict(), "MIGA model parameters.pth")
        plt.plot(x,y[:,0],'b+',label="labeled data from EA")
        predict=predict.detach().numpy()
        plt.plot(x,predict[:,0],'r.',label="predicted data from model")
        plt.xlabel("x")
        plt.ylabel("f(x,1)")
        plt.legend()
        #plt.savefig(fname="Training %s_order models.png"%(k),figsize=[10,10])
        plt.show()
        idx=np.argmin(predict[:,0])  
        print(idx)
        print("x：",X[idx])
        print("fx：",predict[idx],"---The second data is ignored")
        # BUG FIX: __repr__ must return a str; the original bare ``return``
        # returned None, so print(instance) raised
        # "TypeError: __repr__ returned non-string" AFTER training finished.
        return "model(trained source-domain net; best idx=%d)" % idx
#===================cell  6========================
# Training source model.
# NOTE(review): model_Factory1.__init__ (via AbstractFactory) already builds
# one `model` instance, then product_function() builds a second; the actual
# 14000-epoch training only runs when print() invokes model.__repr__ below.
k=1
m1=model_Factory1().product_function()
print(m1)
#===================cell  7========================
# Imports/config for the transfer-learning (target-domain) cell.
from torch.nn import init
import matplotlib
%matplotlib inline
import torch
import time
import numpy as np
import pandas as pd
matplotlib.rcParams['axes.unicode_minus']=False  # render minus signs correctly in plots
import matplotlib.pyplot as plt

class model2(object):
    """Target-domain model: transfer-learns from the saved source-domain weights
    as a side effect of repr()/print().

    NOTE(review): like `model`, all the work happens inside __repr__ so that
    print(instance) triggers training.  Depends on module-level globals:
    Net, nn, init, torch, np, pd, k, TensorDataset, DataLoader.
    """
    def __repr__(self):
        net=Net()
        # Re-initialise every Linear layer with N(0, 0.5) weights ...
        for m in net.net.modules(): 
             if isinstance(m, nn.Linear):
                param_shape = m.weight.shape
                m.weight.data = torch.from_numpy(np.random.normal(0, 0.5, size=param_shape))  
                m.weight.data=m.weight.data.float()
        print(net.net[6].bias)
        net.net[6].bias.data.zero_()        # reset the output layer
        init.xavier_uniform_(net.net[6].weight,1) 
        # ... then overwrite everything with the pretrained source parameters
        # whose names match this architecture.
        pre_dict=torch.load('MIGA model parameters.pth')
        model_dict=net.state_dict() 
        pre_dict =  {name: value for name, value in pre_dict.items() if name in model_dict} 
        model_dict.update(pre_dict) 
        net.load_state_dict(model_dict)
        # ``Net.parameters(net)`` was the unidiomatic spelling of this call.
        optim=torch.optim.Adam(net.parameters(),lr=0.001)
        Loss=nn.MSELoss()
        # Reload the order-k data (k is rebound by the calling cell).
        x=pd.read_table('MIGA%sorder_x.txt'%k,header = None) 
        yy=pd.read_table('MIGA%sorder_fx.txt'%k,header = None) 
        z=pd.read_table('MIGA%sorder_z.txt'%k,header = None) 
        y=np.hstack((yy,z))  
        p=len(x)
        # NOTE(review): the original converted x/y to tensors twice; the first
        # pass was dead code, fully overwritten by this one, and was removed.
        X= x.astype(float)
        Y= y.astype(float)
        X=torch.as_tensor(X.values.reshape(p,1))
        Y=torch.as_tensor(Y.reshape(p,2))

        dataset=TensorDataset(torch.tensor(X,dtype=torch.float),torch.tensor(Y,dtype=torch.float))
        dataloader=DataLoader(dataset,batch_size=100,shuffle=True)

        print("Begin!---")
        for epoch in range(20):
            loss=None
            for batch_x,batch_y in dataloader:
                y_predict=net(batch_x)
                loss=Loss(y_predict,batch_y) 
                optim.zero_grad()
                loss.backward()
                optim.step()
            if (epoch+1)%1==0:
                print("step: {0} , loss: {1}".format(epoch+1,loss.item()))

        predict=net(torch.tensor(X,dtype=torch.float))
        predict=predict.detach().numpy()
        idx=np.argmin(predict[:,0]) 
        print(idx)
        print("x：",X[idx])
        print("fx：",predict[idx],"---The second data is ignored")
        np.savetxt("best_x_we want.txt",X[idx])   
        # BUG FIX: __repr__ must return a str; falling off the end returned
        # None, so print(instance) raised TypeError after training finished.
        return "model2(transfer-trained target-domain net; best idx=%d)" % idx

#===================cell  8========================
#Generate sin model of order 265.
# NOTE(review): rebinding the global k changes which MIGA<k>order_*.txt files
# model2.__repr__ reads; transfer training runs only when print() calls it.
k=265
m2=model_Factory2().product_function()
print(m2)