# -*- coding: utf-8 -*-
# Created by 'Zhou Bingbing'  on 2019/7/27

# from collections import defaultdict
# import math
#
# class MaxEnt(object):
#     def __init__(self):
#         self.feats = defaultdict(int)
#         self.trainset = []  #训练集
#         self.labels = set() #标签集
#
#     def load_data(self,file):
#         for line in open(file,encoding='utf-8'):
#             fields = line.strip().split()
#             if len(fields) < 2: continue    #特征数要大于等于2列
#             label = fields[0]   #默认第一列为标签
#             self.labels.add(label)#集合的使用
#             for f in set(fields[1:]):
#                 self.feats[(label,f)] += 1 #(label,f)元组为特征 实现我们的特征 与标签的数据
#             self.trainset.append(fields)
#
#
#     def __initparams(self):
#         self.size = len(self.trainset)
#         self.M = max([len(record)-1 for record in self.trainset])   #GIS训练算法的M参数  句子的最长
#         self.ep_ = [0.0] * len(self.feats)
#         for i,f in enumerate(self.feats):
#             self.ep_[i] = float(self.feats[f])/float(self.size)     #计算经验分布的特征期望  每个特征的概率 用频次来表示
#             self.feats[f] = i   #为每个特征函数分配id  之前是依靠我们的频次排的 ，频次在计算完经验分布后不再用
#         self.w = [0.0]*len(self.feats)  #初始化权重
#         self.lastw = self.w
#     '''
#     self.ep_=[0.3333333333333333, 0.3333333333333333, 0.4, 0.26666666666666666, 0.13333333333333333, 0.13333333333333333, 0.13333333333333333, 0.26666666666666666, 0.26666666666666666, 0.26666666666666666, 0.13333333333333333, 0.26666666666666666]
#     self.ep=[0.16666666666666666, 0.2333333333333333, 0.3333333333333333, 0.26666666666666666, 0.13333333333333333, 0.2333333333333333, 0.19999999999999998, 0.19999999999999998, 0.3333333333333333, 0.26666666666666666, 0.13333333333333333, 0.13333333333333333]
#
#     '''
#
#     def probwgt(self, features, label):     #计算每个特征权重的指数
#         wgt = 0.0
#
#         for f in features:
#             if (label,f) in self.feats:
#                 wgt += self.w[self.feats[(label,f)]]
#
#         return math.exp(wgt)
#
#     """calculate feature expectation on model distribution
#     """
#     def calprob(self, features):    #计算条件概率
#         wgts = [(self.probwgt(features,l),l) for l in self.labels] #labels 为 indoor outdoor
#         Z = sum([w for w,l in wgts])    #归一化参数
#         prob = [(w/Z,l) for w,l in wgts]    #概率向量
#         return prob
#
#     def  Ep(self):   #特征函数
#         ep = [0.0] * len(self.feats) #长度为15
#         for record in self.trainset:    #从训练集中迭代输出特征
#             features = record[1:]
#             prob = self.calprob(features)   #计算条件概率p(y|x)
#             for f in features:
#                 for w,l in prob:
#                     if (l,f) in self.feats: #来自训练数据的特征
#                         idx = self.feats[(l,f)] #获取特征id
#                         ep[idx] += w * (1.0/self.size)   # sum(1/N * f(y,x) * p(y|x)), p(x)=1/N  w概率
#         return ep
#
#     def __convergence(self,lastw,w):    #收敛条件
#         for w1,w2 in zip(lastw,w):
#             if abs(w1-w2) >= 0.01: return False
#         return True
#
#     def train(self,maxiter=20000):   #训练主函数，默认迭代次数1000
#         self.__initparams() #初始化参数
#         for i in range(maxiter):
#             print ("iter %d ..." % (i+1))
#             self.ep = self.Ep()     #计算模型分布的特征期望
#             self.lastw = self.w[:]
#
#             for i,win in enumerate(self.w):
#                 delta = 1.0/self.M * math.log(self.ep_[i]/self.ep[i])
#                 self.w[i] += delta  #更新w
#             # print(self.w, self.feats)
#             if self.__convergence(self.lastw,self.w):    #判断算法是否收敛
#                 break
#
#     def predict(self,input):
#         features = input.strip().split()
#         prob = self.calprob(features)
#         prob.sort(reverse=True)
#         return prob
#
# if __name__ == "__main__":
#     model = MaxEnt()
#     model.load_data('inputdata.txt')
#     model.train()
#     print ("===================================")
#     print (model.predict("Sunny"))
from collections import defaultdict
import numpy as np


class maxEntropy(object):
    """Maximum-entropy classifier trained with GIS (Generalized Iterative
    Scaling).

    Features are (label, token) pairs extracted from a whitespace-separated
    training file whose first column is the label and the remaining columns
    are the tokens of that record.
    """

    def __init__(self):
        self.trainset = []                # training records: [label, f1, f2, ...]
        self.features = defaultdict(int)  # (label, token) -> count; repurposed to -> id in initP
        self.labels = set()               # all labels seen in training data
        self.w = []                       # one weight per (label, token) feature
        self.lastw = []                   # weights from the previous GIS iteration

    def loadData(self, fName):
        """Load training data: one record per line, first column is the label.

        Lines with fewer than two columns (label only) are skipped.
        """
        # use a context manager and explicit encoding; the original leaked the
        # file handle and relied on the platform default encoding
        with open(fName, encoding='utf-8') as fh:
            for line in fh:
                fields = line.strip().split()
                if len(fields) < 2:
                    continue  # a label with no features carries no information
                label = fields[0]
                self.labels.add(label)
                for f in set(fields[1:]):  # de-duplicate tokens within a record
                    self.features[(label, f)] += 1
                self.trainset.append(fields)
        # initialize weights once after all features are known (the original
        # re-created these lists inside the loop for every record)
        self.w = [0.0] * len(self.features)
        self.lastw = self.w

    # For this problem M is a constant, so the GIS delta has a closed form.
    def train(self, max_iter=1000):
        """Run GIS weight updates until convergence or max_iter iterations."""
        self.initP()  # computes M and the empirical feature expectations
        for _ in range(max_iter):  # '_' avoids shadowing by the inner index
            self.ep = self.EP()    # model expectation of each feature
            self.lastw = self.w[:]
            for i in range(len(self.w)):
                # GIS update: delta_i = (1/M) * log(E~[f_i] / E[f_i])
                self.w[i] += (1.0 / self.M) * np.log(self.Ep_[i] / self.ep[i])
            if self.convergence():
                break

    def initP(self):
        """Compute the GIS constant M and empirical expectations Ep_."""
        # M = maximum number of active features in any record (GIS slack)
        self.M = max(len(record) - 1 for record in self.trainset)
        self.size = len(self.trainset)
        self.Ep_ = [0.0] * len(self.features)
        for i, feat in enumerate(self.features):
            # empirical expectation: feature count / number of records
            self.Ep_[i] = self.features[feat] / (1.0 * self.size)
            # counts are no longer needed: repurpose the map to feature -> id
            self.features[feat] = i
        self.w = [0.0] * len(self.features)
        self.lastw = self.w

    def EP(self):
        """Expectation of each feature under the current model p(y|x)."""
        ep = [0.0] * len(self.features)
        for record in self.trainset:
            features = record[1:]
            prob = self.calPyx(features)  # p(y|x) for every label
            for f in features:
                for pyx, label in prob:
                    if (label, f) in self.features:  # only features seen in training
                        idx = self.features[(label, f)]  # renamed: don't shadow builtin `id`
                        # sum over records of p(x)=1/N times p(y|x)
                        ep[idx] += (1.0 / self.size) * pyx
        return ep

    def calPyx(self, features):
        """Conditional distribution p(y|x) for one record's feature list.

        Returns a list of (probability, label) pairs.
        """
        wlpair = [(self.calSumP(features, label), label) for label in self.labels]
        Z = sum(w for w, l in wlpair)  # normalizing constant
        return [(w / Z, l) for w, l in wlpair]

    def calSumP(self, features, label):
        """exp of the summed weights of the (label, feature) pairs present.

        Features absent from the training vocabulary contribute nothing
        (their indicator feature function is 0).
        """
        sumP = 0.0
        for showedF in features:
            if (label, showedF) in self.features:
                sumP += self.w[self.features[(label, showedF)]]
        return np.exp(sumP)

    def convergence(self):
        """True when every weight moved less than 1e-3 since last iteration."""
        return all(abs(cur - prev) < 0.001 for cur, prev in zip(self.w, self.lastw))

    def predict(self, input):
        """Classify a whitespace-separated feature string.

        Returns (probability, label) pairs sorted by probability, descending.
        """
        features = input.strip().split()
        prob = self.calPyx(features)
        prob.sort(reverse=True)
        return prob


if __name__ == '__main__':
    # Script entry point: train on the sample data file, then classify one token.
    model = maxEntropy()
    model.loadData('inputdata.txt')
    model.train()
    print(model.predict('Sunny'))
