//
//  NaiveBayesClassifier.cpp
//
//  p = 1/(number of POS tags), n = number of occurrences of a given POS tag
//  in words, n_c = number of occurrences of the given POS tag within n,
//  m = m-estimate equivalent sample size (number of unique words; acts as a
//  Laplace-style smoothing constant). m-estimate: p(a|v) = (n_c + m*p) / (n + m)
//
//  Created by Rune Laugesen on 22/01/12.
//  Copyright 2012 Copenhagen University. All rights reserved.
//

#include <iostream>
#include "NaiveBayesClassifier.h"

using namespace std;

// Starts the classifier empty: zero tokens counted, zero features declared,
// and both lookup tables cleared.
NaiveBayesClassifier::NaiveBayesClassifier()
{
    _trainingdata.featurecount = 0;
    _trainingdata.n = 0;
    _trainingdata.instances = InstanceMap();
    _featuredata.clear();
}

// Incrementally accumulates training counts from a column-oriented corpus.
// trainingdata[0] is the word column; other rows are feature columns,
// addressed through swps.features[j].index with a per-feature offset.
// An empty string in the word column marks a sentence boundary.
// Per word, instance.weight is the occurrence count and features[j-1] maps
// each observed value of feature j to its occurrence count.
// NOTE(review): i + swps.features[j].offset is only range-checked against
// the start of a sentence (via newsentancecount); a positive offset near the
// end of a column would index past it — confirm callers pad the data or use
// non-positive offsets only.
void NaiveBayesClassifier::TrainClassifier(vector< vector<string> > trainingdata, SWPS swps)
{
    
    // A classifier already trained with a different feature count cannot
    // absorb this batch; bail out rather than corrupt the counts.
    if (_trainingdata.featurecount != 0 && _trainingdata.featurecount != swps.featurecount)
    {
        cout << "Training data inconsistant feature count doesn't match";
        return;
    }
    
    //map<string, Instance> instancemap;
    //Indicates first word in sentance, if offset exceeds the limit value is ""
    // 1-based position of the current word within its sentence; reset at each
    // boundary so negative offsets can detect "before sentence start".
    int newsentancecount = 1;
    
    for (int i = 0; i < trainingdata[0].size(); i++)
    {
        if (trainingdata[0][i] == "")
        {
            newsentancecount = 1;
        }
        else
        {
            map<string, Instance>::iterator it = _trainingdata.instances.find(trainingdata[0][i]);
            
            //New word found
            if (it == _trainingdata.instances.end())
            {
                Instance instance;
                // NOTE(review): sized featurecount, but only slots
                // 0..featurecount-2 are filled below (j starts at 1, stores at
                // j-1); the last slot remains an empty map — confirm intended.
                vector< map<string, double> > features(swps.featurecount);
                instance.weight = 1.0;
                // j starts at 1: feature 0 is the word column itself.
                for (int j = 1; j < swps.featurecount; j++)
                {
                    //If no predecessor with that offset in sentance then add empty string
                    if ((newsentancecount + swps.features[j].offset) < 1)
                    {
                        features[j-1].insert(pair<string, double>("", 1.0));
                    }
                    else
                    {
                        features[j-1].insert(pair<string, double>(trainingdata[swps.features[j].index][i + swps.features[j].offset], 1.0));
                    }
                }
                instance.features = features;
                _trainingdata.instances.insert(pair<string, Instance>(trainingdata[0][i], instance));
            }
            //Update excisting word
            else
            {
                // Word already known: bump its occurrence count and the count
                // of each observed feature value.
                (*it).second.weight++;
                for (int j = 1; j < swps.featurecount; j++)
                {
                    //If no predecessor with that offset in sentance then add empty string
                    if ((newsentancecount + swps.features[j].offset) < 1)
                    {
                        // map::insert is a no-op when "" is already present,
                        // so the boundary marker's count stays at 1.0.
                        (*it).second.features[j-1].insert(pair<string, double>("", 1.0));
                    }
                    else
                    {
                        map<string, double>::iterator fit = (*it).second.features[j-1].find(trainingdata[swps.features[j].index][i + swps.features[j].offset]);
                        if (fit == (*it).second.features[j-1].end())
                        {
                            (*it).second.features[j-1].insert(pair<string, double>(trainingdata[swps.features[j].index][i + swps.features[j].offset], 1.0));
                        }
                        else
                        {
                            (*fit).second++;
                        }
                    }
                }
            }
            newsentancecount++;
        }
    }
    
    //_trainingdata.instances = instancemap;
    // NOTE(review): this also counts the empty sentence-boundary tokens into
    // n — confirm whether n should reflect words only.
    _trainingdata.n += trainingdata[0].size();
    _trainingdata.featurecount = swps.featurecount;
}

// Builds _featuredata: for every word in featuredata[0], records the single
// most frequent value of each feature column (argmax over counts collected
// in the first pass). Input is column-oriented like TrainClassifier's, with
// "" in the word column marking a sentence boundary, but no offsets are
// applied here — feature values are read at the word's own position i.
void NaiveBayesClassifier::BuildFeatureData(vector< vector<string> > featuredata, SWPS swps)
{
    // Local scratch accumulator; unlike TrainClassifier this method does not
    // modify _trainingdata.
    // NOTE(review): trainingdata.n is read-modified near the end without an
    // explicit initialisation here — confirm Instances zero-initialises n.
    Instances trainingdata;
    
    //map<string, Instance> instancemap;
    //Indicates first word in sentance, if offset exceeds the limit value is ""
    // NOTE(review): newsentancecount is maintained but never read in this
    // method (no offsets are used) — looks vestigial.
    int newsentancecount = 1;
    
    for (int i = 0; i < featuredata[0].size(); i++)
    {
        if (featuredata[0][i] == "")
        {
            newsentancecount = 1;
        }
        else
        {
            map<string, Instance>::iterator it = trainingdata.instances.find(featuredata[0][i]);
            
            //New word found
            if (it == trainingdata.instances.end())
            {
                Instance instance;
                vector< map<string, double> > features(swps.featurecount);
                instance.weight = 1.0;
                // j starts at 1: feature 0 is the word column itself; counts
                // for feature j live in slot j-1.
                for (int j = 1; j < swps.featurecount; j++)
                {
                    
                    features[j-1].insert(pair<string, double>(featuredata[swps.features[j].index][i], 1.0));
                    
                }
                instance.features = features;
                trainingdata.instances.insert(pair<string, Instance>(featuredata[0][i], instance));
            }
            //Update excisting word
            else
            {
                (*it).second.weight++;
                for (int j = 1; j < swps.featurecount; j++)
                {
                    
                    map<string, double>::iterator fit = (*it).second.features[j-1].find(featuredata[swps.features[j].index][i]);
                    if (fit == (*it).second.features[j-1].end())
                    {
                        (*it).second.features[j-1].insert(pair<string, double>(featuredata[swps.features[j].index][i], 1.0));
                    }
                    else
                    {
                        (*fit).second++;
                    }
                    
                }
            }
            newsentancecount++;
        }
    }
    
    //_trainingdata.instances = instancemap;
    trainingdata.n += featuredata[0].size();
    trainingdata.featurecount = swps.featurecount;
    
    // Second pass: reduce each word's per-feature count maps to the single
    // highest-count value and publish the result into _featuredata.
    map<string, Instance>::iterator it;
    for (it = trainingdata.instances.begin(); it != trainingdata.instances.end(); it++)
    {
        string word = (*it).first;
        Instance instance = (*it).second;
        vector<string> features(swps.featurecount-1);
        
        for (int i = 0; i < swps.featurecount-1; i++)
        {
            map<string, double>::iterator fit;
            map<string, double> feature = instance.features[i];
            string featurevalue = "";
            double weight = 0.0;
            // Argmax by count. On ties the lexicographically smallest value
            // wins: map iteration is ordered and only a strictly larger
            // weight replaces the current choice.
            for (fit = feature.begin(); fit != feature.end(); fit++)
            {
                if (weight < (*fit).second)
                {
                    featurevalue = (*fit).first;
                    weight = (*fit).second;
                }
            }
            features[i] = featurevalue;
        }
        // map::insert keeps any existing entry, so repeated builds do not
        // overwrite previously published feature data for a word.
        _featuredata.insert(pair<string, vector<string> >(word, features));
    }
}

//p = 1/(number of unique words), n = occurrence count of the word
//n_c = count of the given feature value for the given word
//m = m-estimate equivalent sample size (number of unique words)
//m-estimate: p(a|v) = (n_c + m*p) / (n + m)
//words = subset of words from dictionary/trainingdata
//features = data that probability should be calculated from
// Scores each candidate word against the trained counts using the
// m-estimate p(a|v) = (n_c + m*p) / (n + m).
//  words    — candidate words to score
//  features — observed feature values, matched positionally against the
//             word's trained feature slots
// Returns a probability -> word map. NOTE(review): keying on probability
// means ties are silently dropped by map::insert — confirm callers accept
// that.
map<double, string> NaiveBayesClassifier::Classify(vector<string> words, vector<string> features)
{
    map<double, string> classifieds;
    
    // m-estimate equivalent sample size = number of unique trained words.
    int m = _trainingdata.instances.size();
    
    for (int i = 0; i < words.size(); i++)
    {
        map<string, Instance>::iterator wordit = _trainingdata.instances.find(words[i]);
        double probability = 0.0;
        if (wordit == _trainingdata.instances.end())
        {
            // Unknown word: uniform fallback. Fixed from "1/m", which was
            // integer division (0 whenever m > 1) and divided by zero for an
            // untrained classifier.
            probability = (m > 0) ? 1.0 / m : 0.0;
        }
        else
        {
            Instance word = (*wordit).second;
            double n = word.weight;
            // Prior of the word: its occurrence count over total tokens seen.
            double p = word.weight / _trainingdata.n;
            probability = p;
            if (!word.features.empty())
            {
                // Bound by both vectors so a caller passing more features
                // than were trained cannot index out of range.
                for (int j = 0; j < (int)features.size() && j < (int)word.features.size(); j++)
                {
                    map<string, double>::iterator featureit = word.features[j].find(features[j]);
                    if (featureit != word.features[j].end())
                    {
                        // double (was int): avoids truncating the count and
                        // matches the other Classify overloads.
                        double n_c = (*featureit).second;
                        probability = probability * (n_c + m * p) / (n + m);
                    }
                    else
                    {
                        // Unseen feature value: back off by 1/m (m > 0 here,
                        // since a trained word was found).
                        probability = probability / m;
                    }
                }
            }
        }
        classifieds.insert(pair<double, string>(probability, words[i]));
    }
    
    return classifieds;
}

// Scores each candidate word using the feature values previously published
// by BuildFeatureData (looked up in _featuredata per feature enum) and the
// m-estimate p(a|v) = (n_c + m*p) / (n + m), with each feature's count
// scaled by its SWPS weight.
// Returns a probability -> word map (ties on probability are dropped by
// map::insert).
map<double, string> NaiveBayesClassifier::Classify(vector<string> words, SWPS swps)
{
    map<double, string> classifieds;
    
    // m-estimate equivalent sample size = number of unique trained words.
    int m = _trainingdata.instances.size();
    
    for (int i = 0; i < words.size(); i++)
    {
        map<string, Instance>::iterator wordit = _trainingdata.instances.find(words[i]);
        double probability = 0.0;
        if (wordit == _trainingdata.instances.end())
        {
            // Unknown word: uniform fallback. Fixed from "1/m", which was
            // integer division (0 whenever m > 1) and divided by zero for an
            // untrained classifier.
            probability = (m > 0) ? 1.0 / m : 0.0;
        }
        else
        {
            Instance word = (*wordit).second;
            double n = word.weight;
            double p = word.weight / _trainingdata.n;
            probability = p;
            if (!word.features.empty() && swps.featurecount > 1)
            {
                // Extra bound on word.features guards against a mismatch
                // between swps.featurecount and the trained feature slots.
                for (int j = 0; j < swps.featurecount && j < (int)word.features.size(); j++)
                {
                    string featurevalue = "";
                    double featureweight = 1.0;
                    // All recognised feature kinds share one lookup: the enum
                    // value is both the index into the trained feature vector
                    // and into the SWPS weight table. (Replaces seven
                    // byte-identical switch cases.)
                    switch (swps.features[j].index)
                    {
                        case WORD: case POS: case LEMMA: case NER:
                        case CLUSTER100: case CLUSTER1000: case TOPIC:
                        {
                            int index = swps.features[j].index;
                            map<string, vector<string> >::iterator datait = _featuredata.find(words[i]);
                            // Guard added: the original dereferenced end()
                            // when the word had no _featuredata entry (UB),
                            // and never bounds-checked the enum index.
                            if (datait != _featuredata.end() && index < (int)(*datait).second.size())
                            {
                                featurevalue = (*datait).second[index];
                                featureweight = swps.features[index].weight;
                            }
                            break;
                        }
                        default:
                            featureweight = 1;
                            break;
                    }
                    
                    // NOTE(review): training stores feature j's counts at slot
                    // j-1, while this lookup reads slot j — confirm the
                    // intended alignment between training and classification.
                    map<string, double>::iterator featureit = word.features[j].find(featurevalue);
                    if (featureit != word.features[j].end())
                    {
                        double n_c = (*featureit).second * featureweight;
                        probability = probability * (n_c + m * p) / (n + m);
                    }
                    else
                    {
                        // Unseen feature value: back off by 1/m (m > 0 here,
                        // since a trained word was found).
                        probability = probability / m;
                    }
                }
            }
        }
        classifieds.insert(pair<double, string>(probability, words[i]));
    }
    
    return classifieds;
}

// Weighted variant of Classify: each candidate carries a prior weight
// (words[i].second) that scales its trained occurrence count before the
// m-estimate p(a|v) = (n_c + m*p) / (n + m) is applied.
// Returns a probability -> word map (ties on probability are dropped by
// map::insert).
map<double, string> NaiveBayesClassifier::Classify(vector< pair<string, double> > words, SWPS swps)
{
    map<double, string> classifieds;
    
    // m-estimate equivalent sample size = number of unique trained words.
    int m = _trainingdata.instances.size();
    
    for (int i = 0; i < words.size(); i++)
    {
        map<string, Instance>::iterator wordit = _trainingdata.instances.find(words[i].first);
        double probability = 0.0;
        if (wordit == _trainingdata.instances.end())
        {
            // Unknown word: uniform fallback. Fixed from "1/m", which was
            // integer division (0 whenever m > 1) and divided by zero for an
            // untrained classifier.
            probability = (m > 0) ? 1.0 / m : 0.0;
        }
        else
        {
            Instance word = (*wordit).second;
            // Occurrence count scaled by the caller-supplied weight.
            double n = word.weight * words[i].second;
            double p = n / _trainingdata.n;
            probability = p;
            if (!word.features.empty() && swps.featurecount > 1)
            {
                // Extra bound on word.features guards against a mismatch
                // between swps.featurecount and the trained feature slots.
                for (int j = 0; j < swps.featurecount && j < (int)word.features.size(); j++)
                {
                    string featurevalue = "";
                    double featureweight = 1.0;
                    // One shared lookup for all recognised feature kinds.
                    // This also fixes a copy-paste bug: the POS..TOPIC cases
                    // all read swps.features[WORD].weight instead of their
                    // own feature's weight (the unweighted overload reads the
                    // matching enum's weight).
                    switch (swps.features[j].index)
                    {
                        case WORD: case POS: case LEMMA: case NER:
                        case CLUSTER100: case CLUSTER1000: case TOPIC:
                        {
                            int index = swps.features[j].index;
                            map<string, vector<string> >::iterator datait = _featuredata.find(words[i].first);
                            // Guard added: the original dereferenced end()
                            // when the word had no _featuredata entry (UB),
                            // and never bounds-checked the enum index.
                            if (datait != _featuredata.end() && index < (int)(*datait).second.size())
                            {
                                featurevalue = (*datait).second[index];
                                featureweight = swps.features[index].weight;
                            }
                            break;
                        }
                        default:
                            featureweight = 1;
                            break;
                    }
                    
                    // NOTE(review): training stores feature j's counts at slot
                    // j-1, while this lookup reads slot j — confirm the
                    // intended alignment between training and classification.
                    map<string, double>::iterator featureit = word.features[j].find(featurevalue);
                    if (featureit != word.features[j].end())
                    {
                        double n_c = (*featureit).second * featureweight;
                        probability = probability * (n_c + m * p) / (n + m);
                    }
                    else
                    {
                        // Unseen feature value: back off by 1/m (m > 0 here,
                        // since a trained word was found).
                        probability = probability / m;
                    }
                }
            }
        }
        classifieds.insert(pair<double, string>(probability, words[i].first));
    }
    
    return classifieds;
}

// Replaces the classifier's accumulated training counts wholesale.
void NaiveBayesClassifier::SetTrainingData(Instances trainingdata)
{
    this->_trainingdata = trainingdata;
}


// Hands back a copy of the accumulated training counts.
Instances NaiveBayesClassifier::GetTrainingData() const
{
    return this->_trainingdata;
}

// Replaces the word -> feature-values table used during classification.
void NaiveBayesClassifier::SetFeatureData(map<string, vector<string> > featuredata)
{
    this->_featuredata = featuredata;
}

// Hands back a copy of the word -> feature-values table.
map<string, vector<string> > NaiveBayesClassifier::GetFeatureData() const
{
    return this->_featuredata;
}

