{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import numpy\n",
    "from collections import defaultdict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MaxEnt:\n",
    "    def __init__(self, epsilon :float = 1e-3, maxstep :int =100):\n",
    "        self.epsilon :float = epsilon\n",
    "        \"\"\" \n",
    "            人工设定迭代次数\n",
    "        \"\"\"\n",
    "        self.maxstep :int = maxstep\n",
    "        \"\"\"\n",
    "            w 权重数据由fit方法进行训练得到\n",
    "        \"\"\"\n",
    "        self.w = None       # 特征函数的权重\n",
    "        self.labels = None  # 标签类型,所有类型的标签种类\n",
    "        self.fea_list = []  # 特征函数\n",
    "        self.px = defaultdict(lambda: 0)        # 经验边缘分布概率\n",
    "        self.pxy = defaultdict(lambda: 0)       # 经验联合分布概率,特征函数为取值为0，1的二值函数，所以等同于特征的经验期望值\n",
    "        self.exp_fea = defaultdict(lambda: 0)   # 每个特征在数据集上的期望\n",
    "        self.data_list = []     # 样本集，元素为tuple((X),y)\n",
    "        self.N = None           # 样本总量\n",
    "        \"\"\" \n",
    "            某个训练样本包含特征的总数，这里假设每个样本的M值相同，即M为常数。\n",
    "            其倒数类似于学习率\n",
    "        \"\"\"\n",
    "        self.M = None           \n",
    "        self.n_fea :int = None       # 特征函数的总数\n",
    "\n",
    "    def init_param(self, X_data :numpy.ndarray, y_data :numpy.ndarray):\n",
    "        \"\"\"根据传入的数据集(数组)初始化模型参数\n",
    "\n",
    "        Args:\n",
    "            X_data (numpy.ndarray): 数据\n",
    "            y_data (numpy.ndarray): 标签\n",
    "        \"\"\"        \n",
    "        self.N :int = X_data.shape[0]\n",
    "        self.labels : list= numpy.unique(y_data)\n",
    "        # 构造特征函数\n",
    "        self.fea_func(X_data, y_data)\n",
    "        self.n_fea = len(self.fea_list)\n",
    "        self.w = numpy.zeros(self.n_fea)\n",
    "        # 计算特征函数的经验期望值\n",
    "        self._exp_fea(X_data, y_data)\n",
    "\n",
    "    def fea_func(self, X_data :numpy.ndarray, y_data :numpy.ndarray, rules :bool = None):\n",
    "        \"\"\"特征函数构造\n",
    "        初始化 self.fea_list : list Shape(1)\n",
    "\n",
    "        Args:\n",
    "            X_data (numpy.ndarray): _description_\n",
    "            y_data (numpy.ndarray): _description_\n",
    "            rules (bool, optional): _description_. Defaults to None.\n",
    "        \"\"\"        \n",
    "        if rules is None:  \n",
    "            \"\"\"\n",
    "                若没有特征提取规则，则直接构造特征，此时为经验特征函数\n",
    "                此时每个样本没有缺失值的情况下的特征个数相同，等于维度\n",
    "            \"\"\"\n",
    "            for X, y in zip(X_data, y_data):\n",
    "                X = tuple(X)\n",
    "                self.px[X] += 1.0 / self.N          # X的经验边缘分布\n",
    "                self.pxy[(X, y)] += 1.0 / self.N    # X,y的经验联合分布\n",
    "                for dimension, val in enumerate(X):\n",
    "                    key = (dimension, val, y)\n",
    "                    if not key in self.fea_list:\n",
    "                        \"\"\" \n",
    "                            特征函数,由 维度+维度下的值+标签 构成的元组\n",
    "                        \"\"\"\n",
    "                        self.fea_list.append(key) \n",
    "            self.M = X_data.shape[1]\n",
    "        else:\n",
    "            self.M = defaultdict(int)  # 字典存储每个样本的特征总数\n",
    "            for i in range(self.N):\n",
    "                self.M[i] = X_data.shape[1]\n",
    "            pass  # 根据具体规则构建\n",
    "\n",
    "    def _exp_fea(self, X_data :numpy.ndarray, y_data :numpy.ndarray):\n",
    "        \"\"\"计算特征的经验期望值\n",
    "        这里的X每个item都具有4个dimension\n",
    "        特征存在取值为1,否则为0\n",
    "        这里的特征fea含义为一个样本中一个特征单独和一个标签的元组\n",
    "\n",
    "        Args:\n",
    "            X_data (numpy.ndarray): _description_\n",
    "            y_data (numpy.ndarray): _description_\n",
    "        \"\"\"        \n",
    "        for X, y in zip(X_data, y_data):\n",
    "            for dimension, val in enumerate(X):\n",
    "                fea = (dimension, val, y)\n",
    "                self.exp_fea[fea] += self.pxy[(tuple(X), y)]\n",
    "\n",
    "    def _py_X(self, X :numpy.ndarray) -> dict:\n",
    "        \"\"\"获取预测模型\n",
    "        获取当前特征函数权重字典w下的条件分布概率,输出在X下y的条件概率字典.\n",
    "        该算法的思想是针对每个分类y,统计其所有样本中出现该特征函数则经验概率增加\n",
    "\n",
    "        Args:\n",
    "            X (numpy.ndarray): 训练集数据\n",
    "\n",
    "        Returns:\n",
    "            dict: _description_\n",
    "        \"\"\"        \n",
    "        py_X = defaultdict(float)\n",
    "\n",
    "        # 遍历并更新所有类型标签\n",
    "        for y in self.labels:\n",
    "            s = 0 # sum of w * feature_function\n",
    "            for dimension, val in enumerate(X):\n",
    "                tmp_fea = (dimension, val, y)\n",
    "                if tmp_fea in self.fea_list:  # X 具有的特征在特征函数之中\n",
    "                    s += self.w[self.fea_list.index(tmp_fea)]\n",
    "                    \"\"\" \n",
    "                        self.fea_list.index(tmp_fea) 确定目标特征的特征函数下标\n",
    "                        self.w[feature_index] 特征函数权重\n",
    "                    \"\"\"\n",
    "            # 更新发生X下的y的经验概率\n",
    "            py_X[y] = math.exp(s) \n",
    "\n",
    "        # 标准化经验期望值数据\n",
    "        normalizer = sum(py_X.values())\n",
    "        for key, val in py_X.items():\n",
    "            py_X[key] = val / normalizer\n",
    "        return py_X\n",
    "\n",
    "    def _est_fea(self, X_data :numpy.ndarray, y_data :numpy.ndarray) -> dict:\n",
    "        \"\"\"基于当前模型，获取每个特征估计期望\n",
    "\n",
    "        Args:\n",
    "            X_data (numpy.ndarray): _description_\n",
    "            y_data (numpy.ndarray): _description_\n",
    "\n",
    "        Returns:\n",
    "            dict: 更新特征估计期望的Data容器\n",
    "        \"\"\"        \n",
    "        est_fea = defaultdict(float)\n",
    "        for X, y in zip(X_data, y_data):\n",
    "            py_x = self._py_X(X)[y]\n",
    "            for dimension, val in enumerate(X):\n",
    "                est_fea[(dimension, val, y)] += self.px[tuple(X)] * py_x\n",
    "        return est_fea\n",
    "\n",
    "    def GIS(self) -> list:\n",
    "        \"\"\"GIS算法更新delta\n",
    "        delta[j] 提供了关于如何调整模型参数以使其更接近实际数据的信息。\n",
    "        如果经验概率高于模型的估计，那么delta[j]会是正值，意味着我们需要增加该特征的权重；\n",
    "        反之，如果经验概率低于估计，那么delta[j]会是负值，意味着我们需要减少该特征的权重。\n",
    "\n",
    "        Returns:\n",
    "            list: 每个特征函数的权重变化量\n",
    "        \"\"\"       \n",
    "        est_fea = self._est_fea(X_data, y_data)\n",
    "        delta : numpy.ndarray = numpy.zeros(self.n_fea)\n",
    "        for j in range(self.n_fea):\n",
    "            try:\n",
    "                experience_prob_feat = self.exp_fea[self.fea_list[j]]\n",
    "                estimates_feat = est_fea[self.fea_list[j]]\n",
    "                delta[j] = 1 / self.M * math.log( experience_prob_feat / estimates_feat )\n",
    "            except:\n",
    "                continue\n",
    "        delta = delta / delta.sum()  # 归一化，防止某一个特征权重过大导致，后续计算超过范围\n",
    "        # print(\"the type of delta\",type(delta))\n",
    "        return delta\n",
    "\n",
    "    def IIS(self, delta :numpy.ndarray, X_data :numpy.ndarray, y_data :numpy.ndarray) -> numpy.ndarray:\n",
    "        \"\"\"最大熵模型: 采用IIS最优化算法(M为常数时等同于GIS算法)\n",
    "        通过迭代优化来找到一组权重，使得模型分布与经验分布之间的差异最小化。\n",
    "        这通常涉及到对数似然函数的最大化。\n",
    "        IIS算法更新delta 此处的delta的Shape期望是一个(n)\n",
    "        我们遍历每个数据,试图对每个数据进行特征函数的匹配处理.\n",
    "\n",
    "        Args:\n",
    "            delta (numpy.ndarray): _description_\n",
    "            X_data (numpy.ndarray): _description_\n",
    "            y_data (numpy.ndarray): _description_\n",
    "\n",
    "        Returns:\n",
    "            _type_: _description_\n",
    "        \"\"\"        \n",
    "        g = numpy.zeros(self.n_fea)\n",
    "        g_diff = numpy.zeros(self.n_fea)\n",
    "        \n",
    "        for j in range(self.n_fea):\n",
    "            for k in range(self.N):\n",
    "                experience_prob_x = self.px[tuple(X_data[k])]\n",
    "                prob_y_given_x = self._py_X(X_data[k])[y_data[k]]\n",
    "                learning_rate_feature_dimension = math.exp(delta[j] * self.M[k])\n",
    "                g[j] +=  experience_prob_x * prob_y_given_x * learning_rate_feature_dimension\n",
    "                g_diff[j] += g[j] * self.M[k]\n",
    "            g[j] -= self.exp_fea[j]\n",
    "            delta[j] -= g[j] / g_diff[j]\n",
    "        return delta\n",
    "\n",
    "    def fit(self, X_data :numpy.ndarray, y_data :numpy.ndarray):\n",
    "        # 训练，迭代更新wi\n",
    "        self.init_param(X_data, y_data)\n",
    "        # 数据规整 GIS Algorithm\n",
    "        if isinstance(self.M, int):\n",
    "            i = 0\n",
    "            while i < self.maxstep:\n",
    "                i += 1\n",
    "                delta = self.GIS()\n",
    "                # ! Unfinished\n",
    "                \"\"\" \n",
    "                    if max(abs(delta)) < self.epsilon:  # 所有的delta都小于阈值时，停止迭代\n",
    "                        break\n",
    "                \"\"\"\n",
    "                # 更新特征函数权重\n",
    "                self.w += delta\n",
    "        else:\n",
    "            i = 0\n",
    "            delta = numpy.random.rand(self.n_fea)\n",
    "            while i < self.maxstep:\n",
    "                i += 1\n",
    "                delta = self.IIS(delta, X_data, y_data)\n",
    "                # if max(abs(delta)) < self.epsilon:\n",
    "                #     break\n",
    "                self.w += delta\n",
    "        return\n",
    "\n",
    "    def predict(self, X):\n",
    "        # 输入x(数组)，返回条件概率最大的标签\n",
    "        py_x = self._py_X(X)\n",
    "        best_label = max(py_x, key=py_x.get)\n",
    "        return best_label"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Load Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.datasets import load_iris, load_digits\n",
    "from sklearn.model_selection import train_test_split  \n",
    "\n",
    "# Load the iris dataset: feature matrix under 'data', labels under 'target'\n",
    "data = load_iris()\n",
    "X_data = data['data']\n",
    "y_data = data['target']\n",
    "# Hold out 20% of the samples for testing; fixed seed for reproducibility\n",
    "X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.2, random_state=42)  "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train the maximum-entropy model on the training split (10 iterations)\n",
    "me = MaxEnt(maxstep=10)\n",
    "me.fit(X_train, y_train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy: 0.8666666666666667\n",
      "F1 Score: 0.861111111111111\n"
     ]
    }
   ],
   "source": [
    "from sklearn.metrics import accuracy_score, f1_score\n",
    "\n",
    "def evaluate_model(model, X_test, y_test):\n",
    "    \"\"\"Return (accuracy, macro-F1) of `model` on the given test set.\"\"\"\n",
    "    predictions = [model.predict(sample) for sample in X_test]\n",
    "    return (\n",
    "        accuracy_score(y_test, predictions),\n",
    "        f1_score(y_test, predictions, average='macro'),  # or 'micro', 'weighted', ...\n",
    "    )\n",
    "\n",
    "accuracy, f1 = evaluate_model(me, X_test, y_test)\n",
    "print(f\"Accuracy: {accuracy}\")\n",
    "print(f\"F1 Score: {f1}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy: 0.8666666666666667\n",
      "F1 Score: 0.861111111111111\n",
      "Precision: 0.8754578754578755\n",
      "Recall: 0.8585858585858586\n"
     ]
    }
   ],
   "source": [
    "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "def evaluate_model(model, X_test, y_test):\n",
    "    \"\"\"Return (accuracy, macro-F1, macro-precision, macro-recall) on the test set.\n",
    "\n",
    "    model.predict handles one sample at a time, so prediction is sample-wise.\n",
    "    \"\"\"\n",
    "    y_pred = np.array([model.predict(sample) for sample in X_test])\n",
    "    return (\n",
    "        accuracy_score(y_test, y_pred),\n",
    "        f1_score(y_test, y_pred, average='macro'),\n",
    "        precision_score(y_test, y_pred, average='macro'),\n",
    "        recall_score(y_test, y_pred, average='macro'),\n",
    "    )\n",
    "\n",
    "accuracy, f1, precision, recall = evaluate_model(me, X_test, y_test)\n",
    "print(f\"Accuracy: {accuracy}\")\n",
    "print(f\"F1 Score: {f1}\")\n",
    "print(f\"Precision: {precision}\")\n",
    "print(f\"Recall: {recall}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Predict "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8666666666666667\n"
     ]
    }
   ],
   "source": [
    "# Manual accuracy check: fraction of test samples predicted correctly.\n",
    "# Fixed: the original referenced `ME`, but the trained instance is `me`,\n",
    "# so this cell raised NameError on a fresh Restart-&-Run-All.\n",
    "score = 0\n",
    "for X, y in zip(X_test, y_test):\n",
    "    if me.predict(X) == y:\n",
    "        score += 1\n",
    "print(score / len(y_test))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 经验概率 (Empirical probabilities — Bayes example)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 经验概率   \n",
    "p_spam = 0.2                # 垃圾邮件的先验概率    \n",
    "p_free_in_spam = 0.5        # 在垃圾邮件中，“免费”这个词出现的概率    \n",
    "p_promo_in_spam = 0.4       # 在垃圾邮件中，“促销”这个词出现的概率    \n",
    "p_free_in_non_spam = 0.05   # 在非垃圾邮件中，“免费”这个词出现的概率    \n",
    "p_promo_in_non_spam = 0.04  # 在非垃圾邮件中，“促销”这个词出现的概率    \n",
    "    \n",
    "# 假设邮件同时包含“免费”和“促销”    \n",
    "message_contains_free = True    \n",
    "message_contains_promo = True    \n",
    "    \n",
    "# 计算邮件是垃圾邮件的后验概率    \n",
    "def calculate_posterior_probability(p_spam, p_free_in_spam, p_promo_in_spam, p_free_in_non_spam, p_promo_in_non_spam, message_contains_free, message_contains_promo):    \n",
    "    # 计算分子：P(B|A) * P(A)    \n",
    "    numerator = 1.0    \n",
    "    if message_contains_free:    \n",
    "        numerator *= p_free_in_spam    \n",
    "    if message_contains_promo:    \n",
    "        numerator *= p_promo_in_spam    \n",
    "    numerator *= p_spam    \n",
    "        \n",
    "    # 计算分母：P(B)    \n",
    "    denominator = p_spam * (p_free_in_spam * p_promo_in_spam) + (1 - p_spam) * (p_free_in_non_spam * p_promo_in_non_spam)    \n",
    "        \n",
    "    # 注意：这里的计算方式是为了简化问题，实际中需要更复杂的处理，比如考虑词之间的独立性等问题    \n",
    "    # 正确的计算方式应该是使用联合概率，但这里为了直观展示贝叶斯思想，采用了简化的方式    \n",
    "    # 实际上，这里的分母计算并不完全准确，因为没有考虑“免费”和“促销”同时不出现的概率    \n",
    "    # 正确的分母应该是所有可能事件的概率之和，这里只是为了说明问题而简化    \n",
    "        \n",
    "    # 修正分母的计算，应该考虑所有可能的事件（包含“免费”，“促销”的各种组合）    \n",
    "    # 但为了保持示例的简洁性，我们假设“免费”和“促销”同时出现是独立事件（实际上在垃圾邮件中可能不是）    \n",
    "    # 因此，下面的计算仍然使用简化的分母，请注意这一点    \n",
    "        \n",
    "    # 计算后验概率 P(A|B)    \n",
    "    posterior_probability = numerator / denominator    \n",
    "        \n",
    "    return posterior_probability    \n",
    "    \n",
    "# 计算并打印结果    \n",
    "posterior_probability = calculate_posterior_probability(p_spam, p_free_in_spam, p_promo_in_spam, p_free_in_non_spam, p_promo_in_non_spam, message_contains_free, message_contains_promo)    \n",
    "print(f\"根据贝叶斯定理计算，该邮件是垃圾邮件的概率为: {posterior_probability:.2f}\")    \n",
    "    \n",
    "# 注意：上面的代码是为了展示贝叶斯定理的应用，实际上分母的计算需要更复杂的处理。    \n",
    "# 在实际应用中，我们需要考虑词之间的独立性、数据集的大小、以及如何处理未知词汇等问题。\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "science39",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
