#include "NNLayer.h"

#include <cassert>
#include <cstdio>

#include "Log.h"

// Construct a layer with the given label, optionally wired to the layer
// that feeds it. Logs the wiring and then runs one-time initialization.
NNLayer::NNLayer(std::string label, NNLayer * pPreLayer)
:m_Label(label), m_pPreLayer(pPreLayer)
{
    if(m_pPreLayer == NULL)
        Log::OutputLog("Layer:%s", getLabelName());
    else
        Log::OutputLog("Layer:%s---->Layer:%s", getLabelName(), m_pPreLayer->getLabelName());
    Init();
}

// Releases every owned NNNeuron/NNWeight via UnInit() -> Reuse().
NNLayer::~NNLayer()
{
    UnInit();
}

// Returns this layer's label as a C string. The pointer is owned by
// m_Label and is invalidated if the label string is later modified.
const char *  NNLayer::getLabelName()
{
    return m_Label.c_str();
}

// Logs the dimensions of the convolution kernel being assigned.
// Fix: the previous log computed sizeof(pKernel)/sizeof(int) — the size of
// a *pointer*, not of the kernel array — and passed three arguments to a
// format string with only two %d specifiers, so kernelHeight was dropped.
// NOTE(review): pKernel itself is not stored or read here — presumably the
// kernel data is consumed elsewhere; confirm against callers.
void NNLayer::setKernel(int * pKernel, int kernelWidth, int kernelHeight)
{
    (void)pKernel;  // silence unused-parameter warnings
    Log::OutputLog("kernel size:%dx%d", kernelWidth, kernelHeight);
}

// Populates this layer with cNeurons neurons, each labelled
// "<LayerLabel>_Neuron<space-padded index>". Must only be called on an
// empty layer (asserted).
void NNLayer::AddNeuron(int cNeurons)
{
    assert(m_Neurons.begin() == m_Neurons.end());
    char buff[256];
    std::string label;
    for(int i = 0; i < cNeurons; ++i)
    {
        // snprintf instead of sprintf: a sufficiently long layer label
        // would otherwise overflow the fixed 256-byte buffer.
        snprintf(buff, sizeof(buff), "%s_Neuron%4d", getLabelName(), i);
        label = buff;
        Log::OutputLog("Neuron:%s", label.c_str());
        m_Neurons.push_back(new NNNeuron(label));
    }
}

// Populates this layer with cWeights weights, each labelled
// "<LayerLabel>_Weight<space-padded index>". Must only be called on an
// empty weight list (asserted).
void NNLayer::AddWeight(int cWeights)
{
    assert(m_Weights.begin() == m_Weights.end());
    char buff[256];
    std::string label;
    for(int i = 0; i < cWeights; ++i )
    {
        // snprintf instead of sprintf: a sufficiently long layer label
        // would otherwise overflow the fixed 256-byte buffer.
        snprintf(buff, sizeof(buff), "%s_Weight%4d", getLabelName(), i);
        label = buff;
        Log::OutputLog("Weight:%s", label.c_str());
        m_Weights.push_back(new NNWeight(label));
    }
}


// Called from the constructor. Reuse() on a freshly constructed layer is a
// no-op (both vectors are empty) but guarantees a clean starting state.
void NNLayer::Init()
{
    Reuse();
}

// Tear-down path (called from the destructor): frees all owned neurons and
// weights via Reuse().
void NNLayer::UnInit()
{
    Reuse();
}

// Frees every owned neuron and weight and leaves both containers empty so
// the layer can be repopulated.
// Fix: the vectors previously kept their (now dangling) pointers after
// deletion, so a second Reuse()/UnInit() would double-delete and a
// subsequent AddNeuron()/AddWeight() would trip the emptiness assert.
void NNLayer::Reuse()
{
    VectorNeurons::iterator nit;
    VectorWeights::iterator wit;
    for(nit = m_Neurons.begin(); nit != m_Neurons.end(); ++nit)
    {
        delete *nit;   // delete on a null pointer is a no-op, no check needed
    }
    m_Neurons.clear();
    for(wit = m_Weights.begin(); wit != m_Weights.end(); ++wit)
    {
        delete *wit;
    }
    m_Weights.clear();
}

