import numpy as np
from sklearn.naive_bayes import MultinomialNB as TN
import pandas as pd
import collections
import traindatawithmysql
from traindata import SklearnTypeList

# Global class list passed to MultinomialNB.partial_fit(classes=...);
# provided by the project's traindata module — presumably the fixed set
# of label values across all groups (TODO confirm against traindata).
gresult=SklearnTypeList()

def splitx_y(datadf, dictdf):
    """Build feature vectors and labels from one group's training rows.

    Args:
        datadf: DataFrame with positional columns
            0 = template id, 1 = dictionary key, 2 = label, 3 = weight.
        dictdf: DataFrame whose positional column 0 holds the feature
            vocabulary; row order defines each key's feature index.

    Returns:
        Tuple ``(x, y, dic)``: list of int feature vectors (one per
        distinct template id), the parallel list of labels, and the
        key -> feature-index OrderedDict.
    """
    # Map each dictionary key to its position in the feature vector.
    dic = collections.OrderedDict()
    for index, row in dictdf.iterrows():
        dic[row[0]] = index

    x, y = [], []
    template_ids = datadf[0].drop_duplicates().reset_index(drop=True)
    for template_id in template_ids:
        resultdf = datadf[datadf[0] == template_id]
        # Zero-initialized weight vector (the original used
        # np.random.randint(1, ...), which also always yields zeros).
        _x = np.zeros(len(dic), dtype=int)
        _y = 100  # fallback label, overwritten by the rows below
        for index, row in resultdf.iterrows():
            _y = row[2]
            val = row[1]
            if val not in dic:
                continue
            lw = row[3]
            try:
                # Falsy weights (None, "", 0) normalize to 0; anything
                # non-numeric (including NaN) falls through to except.
                lw = int(lw) if lw else 0
            except (TypeError, ValueError) as e:
                print(str(e))
                lw = 0
            _x[dic[val]] = lw
        x.append(_x)
        y.append(_y)
    return x, y, dic

def train():
    """Run a full training pass and return the per-type model dict."""
    return _train()

def _train():
    """Fetch training data from MySQL and fit one classifier per group.

    Returns:
        dict mapping group type -> ``[fitted MultinomialNB, key->index
        dict, feature vectors, labels]``. Groups missing either their
        data frame or their dictionary frame are skipped.
    """
    # Reading a module-level name needs no `global` declaration.
    datadf = traindatawithmysql.data()
    dictdf = traindatawithmysql.dict()

    result = {}
    datadict = filtertraindata(datadf, dictdf)
    for group_type, pair in datadict.items():  # avoid shadowing builtin `type`
        if len(pair) != 2 or pair[0] is None or pair[1] is None:
            continue
        x, y, dic = splitx_y(pair[0], pair[1])
        features = np.array(x)
        labels = np.array(y)
        clf = TN()
        # partial_fit requires the complete class list up front;
        # gresult supplies it (shared module-level list).
        clf.partial_fit(features, labels, classes=gresult)
        result[group_type] = [clf, dic, x, y]
    return result

def filtertraindata(datadf, dictdf):
    """Group training rows and dictionary rows by their type column.

    Args:
        datadf: DataFrame whose positional column 4 is the group type.
        dictdf: DataFrame whose positional column 1 is the group type.

    Returns:
        dict mapping type -> list: ``[data rows]`` when the type only
        appears in datadf, ``[None, dict rows]`` when only in dictdf,
        ``[data rows, dict rows]`` when in both.
    """
    result = {}
    # `group` instead of `type`: don't shadow the builtin.
    for group in datadf[4].drop_duplicates().reset_index(drop=True):
        result[group] = [datadf[datadf[4] == group]]
    for group in dictdf[1].drop_duplicates().reset_index(drop=True):
        ret = dictdf[dictdf[1] == group].reset_index(drop=True)
        if group in result:
            result[group].append(ret)
        else:
            # Type present only in the dictionary: no training rows.
            result[group] = [None, ret]
    return result

if __name__ == "__main__":
    # Script entry point: run one full training pass (requires MySQL
    # access via traindatawithmysql).
    train()

# def find(ta, type, result):
#     global gresult
#     if type not in result:
#         type = list(result.keys())[0]
#     clf = result[type][0]
#     dic = result[type][1]
#     x = np.random.randint(1, size=len(dic))
#     for t in ta:
#         for key, val in t.items():
#             if len(val) == 0:
#                 continue
#             for v in val:
#                 tk = "%s_%s" % (key, v)
#                 if tk not in dic:
#                     continue
#                 tv = dic[tk]
#                 x[tv] = 1
#     p = clf.predict([x])[0]
#     xs = clf.predict_proba([x])
#     return p, xs[0][gresult.index(p)], xs [0]
#
# def finalfind():
#     result = train()
#     def _find(k,type):
#         nonlocal result
#         return find(k,type,result)
#     return _find

# if __name__=="__main__":
#    Init()


# x=np.array([
#     [1,1,6,2],
#     [6,1,6,2],
#     [6,6,1,1]
# ])
# y=np.array([3,4,3])
#
# clf = TN(alpha=1,class_prior=[0.2,0.4])
# clf.fit(x,y)
# p=[[1,1,6,2]]
# print(clf.predict(p))
# print(clf.predict_proba(p))

# import numpy as np
# # X = np.random.randint(5, size=(6, 2))
# X = np.array([[2 ,4],
#  [2 ,3],
#  [4 ,1],
#  [0 ,3],
#  [1 ,1],
#  [2 ,2]])
# y = np.array([1, 2, 3, 4, 5, 5])
# from sklearn.naive_bayes import MultinomialNB
# clf = MultinomialNB(alpha=1)
# clf.fit(X, y)
# #MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
# print(X)
# print(clf.predict(X[3:4]))