import os.path
import numpy as np
import chainer
from chainer import Chain,report
from chainer import Parameter,Variable,serializers,optimizers,training
from chainer import links as L
from chainer import functions as F
from chainer.training import extensions
from chainer.dataset import concat_examples
from chainer.datasets import TupleDataset
from chainer.cuda import to_cpu
from sklearn.model_selection import train_test_split
from fm import *

N_BATCH = 10000       # NOTE(review): appears unused in this file — confirm before removing
INPUT_SIZE = 100      # dimensionality of each input vector
HIDDEN_SIZE = 100     # width of the hidden linear layer
N_FACTORS_FM = 5      # latent-factor count for the FM (NOTE: MyNetwork currently hardcodes 5 instead)
BATCH_SIZE = 16       # mini-batch size for the train/test iterators

# Define the network
class MyNetwork(Chain):
    """Regression network: linear layer -> dropout -> factorization machine.

    Maps a float vector of size ``in_size`` to the FM's output
    (a scalar prediction per example, judging by the MSE loss in
    ``Classifier`` — the exact shape depends on ``FactorizationMachine``).
    """

    def __init__(self, in_size=INPUT_SIZE, hidden_size=HIDDEN_SIZE,
                 n_factors=N_FACTORS_FM):
        """Build the layers.

        Args:
            in_size: dimensionality of the input vectors.
            hidden_size: width of the hidden linear layer.
            n_factors: number of latent FM factors. Defaults to
                N_FACTORS_FM (= 5), replacing the magic number that was
                previously hardcoded here.
        """
        super(MyNetwork, self).__init__()
        with self.init_scope():
            self.linear = L.Linear(in_size, hidden_size)
            self.fm = FactorizationMachine(hidden_size, n_factors)

    def __call__(self, x):
        """Run the forward pass on a batch ``x`` (any float/int dtype)."""
        # Chainer links expect float32; cast defensively before wrapping.
        x = Variable(x.astype('float32'))
        x = self.linear(x)
        # Dropout is only active when chainer.config.train is True.
        x = F.dropout(x, ratio=.5)
        x = self.fm(x)
        return x

# Define the classifier wrapper (optional — Chainer's built-in L.Classifier could be used instead)
class Classifier(Chain):
    """Loss wrapper: runs ``predictor`` and reports mean-squared error.

    Despite the name, this is a regression objective (MSE), not a
    classification one.
    """

    def __init__(self, predictor):
        super(Classifier, self).__init__()
        with self.init_scope():
            self.predictor = predictor

    def __call__(self, x, t):
        """Return the MSE between ``predictor(x)`` and target ``t``."""
        prediction = self.predictor(x)
        # Align the target's shape with the prediction before the loss.
        target = t.reshape(prediction.shape)
        loss = F.mean_squared_error(prediction, target)
        # Make the loss visible to LogReport/PrintReport extensions.
        report({'loss': loss}, self)
        return loss

# prepare dataset
def true_function(input):
    """Dummy target to learn: the row-wise sum of the input matrix."""
    return input.sum(axis=1)

# Synthesize a random regression dataset: 1600 examples of INPUT_SIZE floats.
cur_x = np.random.random(size=(BATCH_SIZE*100, INPUT_SIZE)).astype(np.float32)
cur_y = true_function(cur_x)
# NOTE: removed a redundant re-assignment of INPUT_SIZE/HIDDEN_SIZE to the
# same values here — it was dead code and, if ever edited, would silently
# desynchronize these constants from the data already generated above.

# Train/test split: prepare training and validation data
tloc, vloc, ty, vy = train_test_split(cur_x, cur_y, random_state=42)
train_data = TupleDataset(tloc, ty)
test_data = TupleDataset(vloc, vy)
# Network and model: wrap the predictor in the MSE loss reporter.
model = Classifier(MyNetwork())

# Optimizer: Adam with default hyper-parameters.
optimizer = optimizers.Adam()
optimizer.setup(model)

# Iterators: shuffled repeating pass over train data; a single ordered
# pass over test data for evaluation.
train_iter = chainer.iterators.SerialIterator(train_data, BATCH_SIZE)
test_iter = chainer.iterators.SerialIterator(
    test_data, BATCH_SIZE, repeat=False, shuffle=False)

# Trainer: 20 epochs, results written to 'result'.
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (20, 'epoch'), out='result')

# Extensions: evaluate on the test set, log, print, and show progress.
trainer.extend(extensions.Evaluator(test_iter, model))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
    ['epoch', 'main/loss', 'validation/main/loss']))
trainer.extend(extensions.ProgressBar())

trainer.run()