# coding=utf-8
'''
Naive Bayes text classifier (set-of-words and bag-of-words models).

Naive Bayes assumes conditional independence among the features (words).

Bayesian classification rule for the two-class case:
If P(C1|x, y) > P(C2|x, y), predict class C1.
If P(C1|x, y) < P(C2|x, y), predict class C2.
'''

# -*- coding: utf-8 -*-
import sys
import numpy as np
import re

# Python 2 only: force UTF-8 as the default string encoding.
# reload() and sys.setdefaultencoding() do not exist on Python 3,
# so guard them to keep the module importable there.
if sys.version_info[0] == 2:
    reload(sys)  # noqa: F821 -- Python 2 builtin
    sys.setdefaultencoding('utf8')


# sys.path.append('util/word/')
# from PluralNoun import GetSingular


# class bayes:

def loadDataSet():
    """Return a toy corpus of tokenized forum posts and their labels.

    :return: (postingList, classVec) where classVec[i] is 1 for an
        abusive post and 0 for a benign one.
    """
    postingList = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive, 0 = benign
    return postingList, classVec


def createVocabList(dataSet):
    """Build the list of unique words appearing across all documents.

    :param dataSet: iterable of documents, each a list of words.
    :return: list of distinct words (order unspecified, as with sets).
    """
    vocab = set()
    for doc in dataSet:
        vocab.update(doc)  # accumulate the union of all documents
    return list(vocab)


def setOfWords2Vec(vocabList, inputSet):
    """Convert a document into a binary (set-of-words) vector.

    :param vocabList: ordered vocabulary (list of words).
    :param inputSet: iterable of words in the document.
    :return: list of 0/1 flags, one per vocabulary word.
    """
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            # Parenthesized print: identical on Python 2 (single expression)
            # and valid on Python 3, unlike the original print statement.
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec


def trainNB0(trainMatrix, trainCategory):
    """Estimate naive-Bayes parameters from training vectors.

    Uses Laplace smoothing (word counts start at 1, denominators at 2)
    so that an unseen word cannot zero out a product of probabilities,
    and returns log-probabilities so later products become sums and do
    not underflow.

    :param trainMatrix: 2-D array, one word-count/flag vector per document.
    :param trainCategory: 1-D array of 0/1 labels, parallel to the rows.
    :return: (p0Vect, p1Vect, pAbusive) where pXVect[i] = log P(w_i|C_x)
        and pAbusive = P(C1).
    """
    numDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numDocs)
    # Smoothed per-class word counts and totals, keyed by class label.
    counts = {0: np.ones(numWords), 1: np.ones(numWords)}
    totals = {0: 2.0, 1: 2.0}
    for row, label in zip(trainMatrix, trainCategory):
        counts[label] += row
        totals[label] += sum(row)
    # log(P(w_i | C)) for each class.
    p1Vect = np.log(counts[1] / totals[1])
    p0Vect = np.log(counts[0] / totals[0])
    return p0Vect, p1Vect, pAbusive


def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Classify a word vector into class 1 or class 0.

    Compares log-posteriors: log P(C|w) is proportional to
    sum_i w_i * log P(w_i|C) + log P(C); the shared evidence term
    log P(w) cancels and is omitted.

    :return: 1 if class 1 is more likely, else 0.
    """
    logPosterior1 = np.log(pClass1) + sum(vec2Classify * p1Vec)
    logPosterior0 = np.log(1.0 - pClass1) + sum(vec2Classify * p0Vec)
    return 1 if logPosterior1 > logPosterior0 else 0


def bagOfWords2VecMN(vocabList, inputSet):
    """Convert a document into a bag-of-words count vector.

    Unlike setOfWords2Vec, repeated words increment their count.
    Words missing from the vocabulary are silently skipped.
    """
    counts = [0] * len(vocabList)
    # Map each word to its first position, mirroring list.index semantics.
    position = {}
    for i, w in enumerate(vocabList):
        position.setdefault(w, i)
    for word in inputSet:
        if word in position:
            counts[position[word]] += 1
    return counts


def testingNB():
    """Smoke-test the classifier on the toy abusive-posts corpus.

    Trains on all six posts, then prints the predicted class for two
    hand-picked test documents.
    """
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = [setOfWords2Vec(myVocabList, post) for post in listOPosts]
    p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))
    for testEntry in (['love', 'my', 'dalmation'], ['stupid', 'garbage']):
        thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
        # Parenthesized print with a single formatted string works
        # identically on Python 2 and Python 3.
        print('%s classified as: %d' % (testEntry, classifyNB(thisDoc, p0V, p1V, pAb)))




def spamTest():
    """Train/test the spam classifier on 25 spam + 25 ham emails.

    Reads data/email/spam/1..25.txt and data/email/ham/1..25.txt,
    holds out 10 randomly chosen messages as a test set, and prints
    the error rate on them.
    """
    docList = []
    classList = []
    fullText = []
    for i in range(1, 26):
        # `with` closes each file promptly instead of leaking the handle.
        with open('data/email/spam/%d.txt' % i) as fh:
            wordList = textParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        with open('data/email/ham/%d.txt' % i) as fh:
            wordList = textParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)  # create vocabulary
    # list(...) is required on Python 3, where range() is a lazy object
    # that does not support `del`; on Python 2 it is a no-op copy.
    trainingSet = list(range(50))
    testSet = []  # indices held out for testing
    for i in range(10):
        randIndex = int(np.random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probs) trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error %s" % docList[docIndex])
    print('the error rate is: %f' % (float(errorCount) / len(testSet)))







# Ordered suffix rules for singularizing a plural noun, as triples of
# (match pattern, suffix to strip, replacement).  First match wins.
getSingularPattern = (
    ('(qu|[^aeiou])ies$', 'ies$', 'y'),
    ('[^aeioudgkprt]hes$', 'es$', ''),
    ('[sxz]es$', 'es$', ''),
    ('s$', 's$', ''),
)

def GetMatchAndApplyFuncs(strPattern, strSearch, strReplace):
    """Build a (matcher, rewriter) closure pair for one suffix rule.

    :return: (MatchRule, ApplyRule) where MatchRule(word) is truthy when
        the rule applies and ApplyRule(word) performs the substitution.
    """
    def MatchRule(strWord):
        # Truthy (a match object) when the word fits this rule's pattern.
        return re.search(strPattern, strWord)

    def ApplyRule(strWord):
        # Rewrite the matched suffix.
        return re.sub(strSearch, strReplace, strWord)

    return MatchRule, ApplyRule



# Pre-built (match, apply) closure pairs, tried in declaration order.
SingualRules = [GetMatchAndApplyFuncs(*rule) for rule in getSingularPattern]

# NOTE(review): exact duplicate of GetMatchAndApplyFuncs defined above.
# This re-definition shadows the earlier identical one (after SingualRules
# has already been built from it) and can safely be deleted.
def GetMatchAndApplyFuncs(strPattern, strSearch, strReplace):
    def MatchRule(strWord):
        # Match object (truthy) when strWord fits strPattern, else None.
        return re.search(strPattern, strWord);
    def ApplyRule(strWord):
        # Replace strSearch with strReplace in strWord.
        return re.sub(strSearch, strReplace, strWord);
    return (MatchRule, ApplyRule);


def GetSingular(strWord):
    """Return the singular form of strWord using the first matching rule.

    Empty or None input yields "".  Words matching no rule are returned
    unchanged.
    """
    if not strWord:
        return ""
    for matches, singularize in SingualRules:
        if matches(strWord):
            return singularize(strWord)
    return strWord



def textParse(bigString):
    """Tokenize a raw string into lowercased, singularized words.

    Splits on runs of non-word characters; tokens shorter than 3
    characters are discarded.
    """
    # r'\W+' (not the original r'\W*'): a pattern that can match the
    # empty string splits between every character on Python 3.7+
    # (and only warned before), producing single-letter junk tokens.
    listOfTokens = re.split(r'\W+', bigString)
    return [GetSingular(tok.lower()) for tok in listOfTokens if len(tok) > 2]




def myTest():
    """Train a naive-Bayes model on an Amazon search-term report.

    NOTE(review): the original body indexed the undefined names
    ``docList``/``classList`` (copied from spamTest) and raised
    NameError.  Here each parsed search term is used directly, and the
    label is derived from whether the term produced any orders —
    confirm this matches the intended labelling.

    :return: (p0V, p1V, pSpam) as produced by trainNB0.
    """
    import pandas as pds
    origDataFrame = pds.read_csv('data/my/search-term-report-2015-11-19 to 2016-01-17.txt', encoding='utf-8', sep='\t', index_col=None, header=0)
    selectedCols = pds.DataFrame()
    selectedCols['CustomerSearchTerm'] = origDataFrame['Customer Search Term']
    selectedCols['Keyword'] = origDataFrame['Keyword']
    selectedCols['Impressions'] = origDataFrame['Impressions']
    selectedCols['Clicks'] = origDataFrame['Clicks']
    selectedCols['TotalSpend'] = origDataFrame['Total Spend']
    selectedCols['Orders'] = origDataFrame['Orders placed within 1-week of a click']
    selectedCols['Sales'] = origDataFrame['Product Sales within 1-week of a click']

    # Tokenize each customer search term into a word-list document.
    custTermList = [textParse(iCustTerm) for iCustTerm in selectedCols['CustomerSearchTerm']]
    vocabList = createVocabList(custTermList)
    ordList = selectedCols['Orders'].tolist()

    trainMat = []
    trainClasses = []
    for doc, orders in zip(custTermList, ordList):
        trainMat.append(bagOfWords2VecMN(vocabList, doc))
        # Positive class = the search term converted to at least one order.
        trainClasses.append(1 if orders > 0 else 0)
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    return p0V, p1V, pSpam

# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    # testingNB()
    # spamTest()
    myTest()
    print("ok")
