#cell 1
# Step parameter used to build the MIGA data filenames loaded later
# ('MIGA_x%s.txt' % g) — presumably a sampling granularity; TODO confirm.
g=0.05
#cell ===================2===================
import abc
class AbstractFactory(abc.ABC):  # The abstract factory
    """Abstract product factory.

    BUG FIX: the original assigned ``__metaclass__ = abc.ABCMeta``, which is
    Python-2 syntax and a silent no-op in Python 3 — ``@abc.abstractmethod``
    was never enforced.  Inheriting from ``abc.ABC`` restores enforcement:
    instantiating a subclass that does not implement ``product_function``
    (or this class itself) now raises TypeError.
    """

    def __init__(self):
        # Original behavior preserved: eagerly invoke the product hook once
        # on construction (the result is discarded; callers re-invoke it).
        self.product_function()

    @abc.abstractmethod
    def product_function(self):
        """Build and return one product instance."""

class CGA_Factory1(AbstractFactory):
    """Concrete CGA factory for the sin1 objective."""

    def product_function(self):
        # Construction is delegated to the module-level sin1_function.
        product = sin1_function()
        return product

class CGA_Factory3(AbstractFactory):
    """Concrete CGA factory for the benchmark objective."""

    def product_function(self):
        # Construction is delegated to the module-level ben_function.
        product = ben_function()
        return product


class CGA_Factory2(AbstractFactory):
    """Concrete CGA factory whose product carries the new (transferred) values."""

    def product_function(self):
        # Construction is delegated to the module-level transfer_function.
        product = transfer_function()
        return product

class model_Factory1(AbstractFactory):
    """Factory for the source-domain model (see class ``model``)."""

    def product_function(self):
        # Returns a fresh source-domain model instance; note that training
        # is deferred until the instance is printed (model.__repr__).
        instance = model()
        return instance
    
class model_Factory2(AbstractFactory):
    """Factory for the target-domain model (see class ``model2``)."""

    def product_function(self):
        # Returns a fresh target-domain model instance; training happens
        # when the instance is printed (model2.__repr__).
        instance = model2()
        return instance

#cell ===================3===================
def translateDNA(pop):
    """Decode a binary population into real (x, y) coordinates.

    Each row of ``pop`` holds 2*DNA_SIZE bits: the first DNA_SIZE bits
    encode x, the next DNA_SIZE bits encode y.  Each half is read as an
    unsigned integer, normalized to [0, 1], then scaled into X_BOUND /
    Y_BOUND.  Relies on module globals DNA_SIZE, X_BOUND, Y_BOUND.
    """
    weights = 2 ** np.arange(DNA_SIZE)[::-1]      # MSB-first bit weights
    full_scale = float(2 ** DNA_SIZE - 1)
    x_bits = pop[:, :DNA_SIZE]
    y_bits = pop[:, DNA_SIZE:DNA_SIZE * 2]
    x = x_bits.dot(weights) / full_scale * (X_BOUND[1] - X_BOUND[0]) + X_BOUND[0]
    y = y_bits.dot(weights) / full_scale * (Y_BOUND[1] - Y_BOUND[0]) + Y_BOUND[0]
    return x, y
   

def trans(pop):
    """Decode a univariate bit string to a real value in (-1, X_BOUND[1]].

    The bits are read MSB-first as an integer, normalized to [0, 1],
    stretched by (X_BOUND[1] + 1) and shifted down by 1.  Relies on module
    globals DNA_SIZE and X_BOUND.
    """
    weights = 2 ** np.arange(DNA_SIZE)[::-1]
    normalized = pop.dot(weights) / float(2 ** DNA_SIZE - 1)
    return normalized * (X_BOUND[1] + 1) - 1

    
def select(pop, fitness):
    """Roulette-wheel selection: resample POP_SIZE rows of ``pop`` with
    replacement, with probability proportional to |fitness / sum(fitness)|.

    Also prints the total absolute fitness as a progress trace.
    NOTE(review): if fitness mixes signs, the probabilities may not sum
    to 1 and np.random.choice will raise — assumes one-signed fitness.
    """
    print(np.abs(fitness).sum())
    probs = np.abs(fitness / fitness.sum())
    chosen = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE,
                              replace=True, p=probs)
    return pop[chosen]

def crossover(parent, pop):     # mating process (genes crossover)
    """With probability CROSS_RATE, splice a random mate's tail onto parent.

    A mate row is drawn uniformly from ``pop``; a single crossover point is
    drawn in [0, 2*DNA_SIZE) and the parent's genes from that point onward
    are overwritten by the mate's.  ``parent`` is modified in place and
    returned.  Relies on module globals CROSS_RATE, POP_SIZE, DNA_SIZE.

    BUG FIX: removed the unused local ``i_`` (a dead np.random.randint
    draw whose result was never read).
    """
    if np.random.rand() < CROSS_RATE:
        mother = pop[np.random.randint(POP_SIZE)]
        cross_point = np.random.randint(low=0, high=DNA_SIZE * 2)
        # The child keeps the mother's genes behind the crossover point.
        parent[cross_point:] = mother[cross_point:]
    return parent

def mutate(child, MUTATION_RATE=0.003):
    """Bit-flip mutation: DNA_SIZE independent trials, each flipping one
    uniformly-chosen bit of ``child`` with probability MUTATION_RATE.

    ``child`` is modified in place and returned.  Relies on the module
    global DNA_SIZE; note the flip position ranges over the full
    2*DNA_SIZE chromosome even though only DNA_SIZE trials are made.

    BUG FIX: removed the unreachable ``return ((x+1)/...)`` statement that
    followed ``return child`` — dead leftover code referencing an undefined
    name ``x`` (apparently from an inverse of trans()).
    """
    for _ in range(DNA_SIZE):
        if np.random.rand() < MUTATION_RATE:
            # Uniformly pick which gene to invert.
            mutate_point = np.random.randint(0, DNA_SIZE * 2)
            child[mutate_point] = child[mutate_point] ^ 1
    return child

def get_fitness(pop):
    """Decode the binary population and evaluate the objective F on it.

    Returns the raw objective values as fitness.  (A shifted variant,
    ``(pred - min) + 1e-3``, was considered in the original but left off.)
    """
    xs, ys = translateDNA(pop)
    return F(xs, ys)

#cell=================== 4===================

import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import time
import random
from itertools import chain
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import math

# GA hyper-parameters.  NOTE(review): the commented-out assignments below
# (Dim, DNA_SIZE, POP_SIZE, bounds, ...) are referenced by the GA functions
# in cell 3 but are not active here — presumably defined in another cell;
# confirm before running the GA path.
#Dim=30  
#DNA_SIZE = 20            # DNA length
#POP_SIZE = 100           # population size
CROSS_RATE = 0.75         # mating probability (DNA crossover)
MUTATION_RATE = 0.03    # mutation probability
#best=list()
#x_fact=list()
#LOW=100
#X_BOUND = [-LOW, LOW]
#Y_BOUND = [-LOW, LOW]
#U=(2*LOW)/Dim
#pk=0.9
#multilist = [[0 for col in range(3)] for row in range(100)]

#===================cell 5=====================
# Define the neural network
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import numpy as np
import pandas as pd
import time
from itertools import chain
import matplotlib.pyplot as plt
%matplotlib inline
import pdb

# Load the MIGA samples (30-D inputs and scalar objective values) and wrap
# them in a shuffled DataLoader for batch training.
x=pd.read_table('MIGA_x%s.txt'%g,header = None)
fx=pd.read_table('MIGA_fx%s.txt'%g,header = None)
con=None
#con=np.hstack((x,y))  # Merge x and y
p=len(x)
# BUG FIX: the original converted each frame to a tensor, then re-wrapped it
# with torch.tensor(tensor, ...) (which copies and emits a UserWarning), and
# wrapped it a third time when building the TensorDataset.  One
# torch.as_tensor call with an explicit dtype produces the same float32
# tensors without the redundant copies.
CON=torch.as_tensor(x.astype(float).values.reshape(p,30), dtype=torch.float)
FFX=torch.as_tensor(fx.astype(float).values.reshape(p,1), dtype=torch.float)
print('ok')
# Use batch training
dataset=TensorDataset(CON,FFX)

dataloader=DataLoader(dataset,batch_size=100,shuffle=True)

class Net(nn.Module):
    """Fully-connected regressor: 30 -> 10 -> 100 -> 10 -> 1, ReLU between
    the hidden layers.  The stack is exposed as ``self.net`` (indexed
    elsewhere, e.g. ``net.net[6]`` is the final Linear layer)."""

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(in_features=30, out_features=10), nn.ReLU(),
            nn.Linear(10, 100), nn.ReLU(),
            nn.Linear(100, 10), nn.ReLU(),
            nn.Linear(10, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, input: torch.FloatTensor):
        """Run the MLP on a (batch, 30) float tensor; returns (batch, 1)."""
        return self.net(input)



#===================cell 5=====================
    #Model training
class model(object):
    """Source-domain surrogate model.

    NOTE(review): training is deliberately triggered from __repr__, so that
    ``print(model_instance)`` runs the whole fit — existing call sites
    (``print(m1)``) depend on this, so the hook is kept, although a
    dedicated ``train()`` method would be the cleaner design.
    Relies on module globals: Net, dataloader, CON, name.
    """

    def __repr__(self):
        net = Net()
        # BUG FIX: call parameters() on the instance (the original
        # ``Net.parameters(net)`` worked but was unidiomatic); also dropped
        # the unused local ``list1``.
        optim = torch.optim.Adam(net.parameters(), lr=0.001)
        loss_fn = nn.MSELoss()
        print("Begin---")
        loss = None
        for epoch in range(20000):
            for batch_x, batch_fx in dataloader:
                y_predict = net(batch_x)
                loss = loss_fn(y_predict, batch_fx)
                optim.zero_grad()
                loss.backward()
                optim.step()
            if (epoch+1) % 200 == 0:
                print("step: {0} , loss: {1}".format(epoch+1, loss.item()))

        # CON is already a float tensor; no re-wrap needed for inference.
        predict = net(CON).detach().numpy()
        # Persist the trained weights for transfer to the target domain.
        torch.save(net.state_dict(), "model para_%s.pth" % name)
        print('predict', predict.size)
        idx = np.argmin(predict)
        print('idx', idx)
        print("x：", CON[idx])
        print("fx：", predict[idx])
        # BUG FIX: __repr__ must return a str — the original returned None,
        # which makes print(model()) raise TypeError after training.
        return "model(trained source model, best predicted fx=%s)" % predict[idx]

#===================cell 6=====================
# Training source model

# Select the data-file suffix (g) and benchmark name for the source model.
g=0.05
# Switch
name='Sphere'   # 'Rosenbrock'; 'Step';  'Schwefel' ;  'Rastrigin' ; 'Ackley'  ;  'Griewank'

# Build the source-domain model; printing it triggers the training run,
# because model.__repr__ performs the fit (see the model class in cell 5).
m1=model_Factory1().product_function()
print(m1)



#===================cell 7=====================

from torch.nn import init
import matplotlib
%matplotlib inline
import torch
import time
import numpy as np
import pandas as pd
matplotlib.rcParams['axes.unicode_minus']=False
import matplotlib.pyplot as plt

class model2(object):
    """Target-domain model: transfer-learns from the saved source weights.

    NOTE(review): as with ``model``, training runs from __repr__ so that
    ``print(m2)`` triggers the fit; kept for call-site compatibility.
    Relies on module globals: Net, name, g, and the torch/pandas imports.
    """

    def __repr__(self):
        net = Net()
        # Randomize every Linear layer's weights first (N(0, 0.5)) ...
        for m in net.net.modules():
            if isinstance(m, nn.Linear):
                param_shape = m.weight.shape
                m.weight.data = torch.from_numpy(
                    np.random.normal(0, 0.5, size=param_shape)).float()
        print(net.net[6].bias)
        # ... re-initialize the output layer explicitly ...
        net.net[6].bias.data.zero_()
        init.xavier_uniform_(net.net[6].weight, 1)
        # ... then overwrite every matching layer with the source weights.
        # NOTE(review): the source net has the same architecture, so the
        # filter keeps all keys and the random init above is fully replaced;
        # confirm this is intended.
        pre_dict = torch.load('model para_%s.pth' % name)
        model_dict = net.state_dict()
        # BUG FIX (clarity): loop variables renamed so the comprehension no
        # longer shadows the module-level ``name`` used two lines above.
        pre_dict = {k: v for k, v in pre_dict.items() if k in model_dict}
        model_dict.update(pre_dict)
        net.load_state_dict(model_dict)
        # BUG FIX: parameters() called on the instance, not via the class.
        optim = torch.optim.Adam(net.parameters(), lr=0.001)
        loss_fn = nn.MSELoss()

        # Load the target-domain data (single as_tensor conversion avoids
        # the redundant tensor-of-tensor copies of the original).
        x = pd.read_table('MIGA_x%s.txt' % g, header=None)
        fx = pd.read_table('MIGA_fx%s.txt' % g, header=None)
        p = len(x)
        CON = torch.as_tensor(x.astype(float).values.reshape(p, 30), dtype=torch.float)
        FFX = torch.as_tensor(fx.astype(float).values.reshape(p, 1), dtype=torch.float)
        print('ok')
        # Use batch training
        dataset = TensorDataset(CON, FFX)
        dataloader = DataLoader(dataset, batch_size=100, shuffle=True)

        print("Begin!---")
        loss = None
        for epoch in range(5):
            for batch_x, batch_fx in dataloader:
                y_predict = net(batch_x)
                loss = loss_fn(y_predict, batch_fx)
                optim.zero_grad()
                loss.backward()
                optim.step()
            # NOTE(review): with only 5 epochs this trace never fires
            # (it needs epoch+1 to reach 200) — confirm intended.
            if (epoch + 1) % 200 == 0:
                print("step: {0} , loss: {1}".format(epoch + 1, loss.item()))

        predict = net(CON).detach().numpy()
        print('predict', predict.size)
        idx = np.argmin(predict)
        print('idx', idx)
        print("x：", CON[idx])
        print("fx：", predict[idx])
        np.savetxt("minimum_x_we want.txt", CON[idx])
        # BUG FIX: __repr__ must return a str — the original fell off the
        # end (None), which makes print(model2()) raise TypeError.
        return "model2(transfer-trained, best predicted fx=%s)" % predict[idx]



#===================cell 8=====================
# F1：
def F(x, y):
    """Benchmark objective: sum over components of (x_i^2)**g.

    ``y`` is accepted for signature compatibility with the 2-D GA helpers
    but is not used.  The exponent ``g`` is read from the module globals
    at call time.
    """
    powered = (x ** 2) ** g
    return sum(powered)
# NOTE(review): g is reused both as the exponent in F above and as the data
# file suffix — with g=1, model2 will read 'MIGA_x1.txt'; confirm intended.
g=1
name='Sphere'   # 'Rosenbrock'; 'Step';  'Schwefel' ;  'Rastrigin' ; 'Ackley'  ;  'Griewank'

N_GENERATIONS = 200 
# Build the target-domain model; printing it triggers the transfer-learning
# run, because model2.__repr__ performs the fit (see cell 7).
m2=model_Factory2().product_function()
print(m2)

