{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-06-12T02:41:31.783839Z",
     "start_time": "2025-06-12T02:41:27.701785Z"
    }
   },
   "source": [
     "import numpy as np\n",
     "import time\n",
     "from collections import Counter\n",
     "# Logistic regression (from scratch, NumPy only)\n",
     "# 1. Load data: one sample per line in the form 'label<TAB>text'\n",
     "def load_data(filename):\n",
     "    # Returns two parallel lists (texts, labels); lines without a TAB are skipped.\n",
     "    texts = []\n",
     "    labels = []\n",
     "    with open(filename, 'r', encoding='utf-8') as f:\n",
     "        for line in f:\n",
     "            if '\\t' not in line:\n",
     "                continue\n",
     "            label, text = line.strip().split('\\t', 1)\n",
     "            texts.append(text)\n",
     "            labels.append(label)\n",
     "    return texts, labels\n",
     "\n",
     "# 2. Text preprocessing (tokenize, build vocabulary, vectorize)\n",
     "def tokenize(text):\n",
     "    # Simple character-level tokenization (could be replaced by a real segmenter).\n",
     "    return list(text)\n",
     "\n",
     "def build_vocab(texts, vocab_size=5000):\n",
     "    # Keep the (vocab_size - 1) most frequent tokens; index 0 is reserved for <UNK>.\n",
     "    all_words = []\n",
     "    for text in texts:\n",
     "        all_words.extend(tokenize(text))\n",
     "    counter = Counter(all_words)\n",
     "    most_common = counter.most_common(vocab_size-1)\n",
     "    word2idx = {word: idx+1 for idx, (word, _) in enumerate(most_common)}\n",
     "    word2idx['<UNK>'] = 0\n",
     "    return word2idx\n",
     "\n",
     "def vectorize(texts, word2idx):\n",
     "    # Bag-of-words count vectors, shape (n_texts, len(word2idx));\n",
     "    # out-of-vocabulary tokens all add to slot 0 (<UNK>).\n",
     "    vectors = []\n",
     "    for text in texts:\n",
     "        vec = np.zeros(len(word2idx))\n",
     "        for word in tokenize(text):\n",
     "            idx = word2idx.get(word, 0)\n",
     "            vec[idx] += 1\n",
     "        vectors.append(vec)\n",
     "    return np.array(vectors)\n",
     "\n",
     "def encode_labels(labels):\n",
     "    # Map label strings to dense integer ids (sorted for a deterministic mapping).\n",
     "    label_set = list(sorted(set(labels)))\n",
     "    label2idx = {label: idx for idx, label in enumerate(label_set)}\n",
     "    idx2label = {idx: label for label, idx in label2idx.items()}\n",
     "    y = np.array([label2idx[label] for label in labels])\n",
     "    return y, label2idx, idx2label\n",
    "\n",
    "# 3. 逻辑回归模型（多分类，softmax）\n",
    "class LogisticRegression:\n",
    "    def __init__(self, input_dim, num_classes, lr=0.1):\n",
    "        self.W = np.zeros((input_dim, num_classes))\n",
    "        self.b = np.zeros(num_classes)\n",
    "        self.lr = lr\n",
    "\n",
    "    def softmax(self, z):\n",
    "        z = z - np.max(z, axis=1, keepdims=True)  # 防止溢出\n",
    "        exp_z = np.exp(z)\n",
    "        return exp_z / np.sum(exp_z, axis=1, keepdims=True)\n",
    "\n",
    "    def predict_proba(self, X):\n",
    "        logits = np.dot(X, self.W) + self.b\n",
    "        return self.softmax(logits)\n",
    "\n",
    "    def predict(self, X):\n",
    "        proba = self.predict_proba(X)\n",
    "        return np.argmax(proba, axis=1)\n",
    "\n",
    "    def fit(self, X, y, epochs=10, batch_size=32):\n",
    "        n_samples = X.shape[0]\n",
    "        num_classes = self.W.shape[1]\n",
    "        for epoch in range(epochs):\n",
    "            # 打乱\n",
    "            idx = np.random.permutation(n_samples)\n",
    "            X, y = X[idx], y[idx]\n",
    "            for start in range(0, n_samples, batch_size):\n",
    "                end = start + batch_size\n",
    "                X_batch = X[start:end]\n",
    "                y_batch = y[start:end]\n",
    "                # 前向\n",
    "                logits = np.dot(X_batch, self.W) + self.b\n",
    "                probs = self.softmax(logits)\n",
    "                # one-hot\n",
    "                y_onehot = np.zeros_like(probs)\n",
    "                y_onehot[np.arange(len(y_batch)), y_batch] = 1\n",
    "                # 反向\n",
    "                grad_logits = (probs - y_onehot) / len(y_batch)\n",
    "                grad_W = np.dot(X_batch.T, grad_logits)\n",
    "                grad_b = np.sum(grad_logits, axis=0)\n",
    "                # 更新\n",
    "                self.W -= self.lr * grad_W\n",
    "                self.b -= self.lr * grad_b\n",
    "\n",
     "# 4. Evaluation metrics (macro-averaged over classes)\n",
     "def accuracy(y_true, y_pred):\n",
     "    return np.mean(y_true == y_pred)\n",
     "\n",
     "def precision(y_true, y_pred, num_classes):\n",
     "    # Macro precision; the 1e-10 epsilon guards 0/0 when a class is never predicted.\n",
     "    prec = []\n",
     "    for c in range(num_classes):\n",
     "        tp = np.sum((y_pred == c) & (y_true == c))\n",
     "        fp = np.sum((y_pred == c) & (y_true != c))\n",
     "        prec.append(tp / (tp + fp + 1e-10))\n",
     "    return np.mean(prec)\n",
     "\n",
     "def recall(y_true, y_pred, num_classes):\n",
     "    # Macro recall over all classes.\n",
     "    rec = []\n",
     "    for c in range(num_classes):\n",
     "        tp = np.sum((y_pred == c) & (y_true == c))\n",
     "        fn = np.sum((y_pred != c) & (y_true == c))\n",
     "        rec.append(tp / (tp + fn + 1e-10))\n",
     "    return np.mean(rec)\n",
     "\n",
     "def f1_score(y_true, y_pred, num_classes):\n",
     "    # Harmonic mean of macro precision and macro recall\n",
     "    # (note: not the same as averaging per-class F1 scores).\n",
     "    p = precision(y_true, y_pred, num_classes)\n",
     "    r = recall(y_true, y_pred, num_classes)\n",
     "    return 2 * p * r / (p + r + 1e-10)\n",
     "\n",
     "def mean_squared_error(y_true, y_pred):\n",
     "    # MSE over integer class ids; its value depends on the arbitrary label\n",
     "    # ordering, so treat it only as a rough error indicator for classification.\n",
     "    return np.mean((y_true - y_pred) ** 2)\n",
    "\n",
     "# 5. Main pipeline\n",
     "if __name__ == '__main__':\n",
     "    # load data\n",
     "    texts, labels = load_data(r'../Data/cnews.txt')\n",
     "    # 80/20 train/test split with a fixed seed for reproducibility\n",
     "    np.random.seed(42)\n",
     "    idx = np.random.permutation(len(texts))\n",
     "    split = int(0.8 * len(texts))\n",
     "    train_idx, test_idx = idx[:split], idx[split:]\n",
     "    texts_train = [texts[i] for i in train_idx]\n",
     "    labels_train = [labels[i] for i in train_idx]\n",
     "    texts_test = [texts[i] for i in test_idx]\n",
     "    labels_test = [labels[i] for i in test_idx]\n",
     "\n",
     "    # build the vocabulary from the training split only (avoids test leakage)\n",
     "    word2idx = build_vocab(texts_train, vocab_size=3000)\n",
     "    # bag-of-words vectorization\n",
     "    X_train = vectorize(texts_train, word2idx)\n",
     "    X_test = vectorize(texts_test, word2idx)\n",
     "    # label encoding\n",
     "    y_train, label2idx, idx2label = encode_labels(labels_train)\n",
     "    # NOTE(review): this raises KeyError if a test label never occurs in the\n",
     "    # training split -- confirm every class appears in train.\n",
     "    y_test = np.array([label2idx[label] for label in labels_test])\n",
     "    num_classes = len(label2idx)\n",
     "\n",
     "    # train\n",
     "    model = LogisticRegression(input_dim=X_train.shape[1], num_classes=num_classes, lr=0.1)\n",
     "    start_time = time.time()\n",
     "    model.fit(X_train, y_train, epochs=10, batch_size=64)\n",
     "    train_time = time.time() - start_time\n",
     "\n",
     "    # predict\n",
     "    y_pred = model.predict(X_test)\n",
     "\n",
     "    # evaluate\n",
     "    acc = accuracy(y_test, y_pred)\n",
     "    prec = precision(y_test, y_pred, num_classes)\n",
     "    rec = recall(y_test, y_pred, num_classes)\n",
     "    f1 = f1_score(y_test, y_pred, num_classes)\n",
     "    mse = mean_squared_error(y_test, y_pred)\n",
     "\n",
     "    print(f'准确率: {acc:.4f}')\n",
     "    print(f'精确率: {prec:.4f}')\n",
     "    print(f'召回率: {rec:.4f}')\n",
     "    print(f'F1分数: {f1:.4f}')\n",
     "    print(f'均方误差: {mse:.4f}')\n",
     "    print(f'训练时间: {train_time:.2f}秒')"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确率: 0.9460\n",
      "精确率: 0.9479\n",
      "召回率: 0.9462\n",
      "F1分数: 0.9470\n",
      "均方误差: 0.7910\n",
      "训练时间: 1.33秒\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-12T02:44:37.060483Z",
     "start_time": "2025-06-12T02:44:09.718025Z"
    }
   },
   "cell_type": "code",
   "source": [
     "import numpy as np\n",
     "import time\n",
     "from collections import Counter\n",
     "# Decision tree (this cell re-defines the shared preprocessing helpers so it runs standalone)\n",
     "def load_data(filename):\n",
     "    # One sample per line: 'label<TAB>text'; lines without a TAB are skipped.\n",
     "    texts = []\n",
     "    labels = []\n",
     "    with open(filename, 'r', encoding='utf-8') as f:\n",
     "        for line in f:\n",
     "            if '\\t' not in line:\n",
     "                continue\n",
     "            label, text = line.strip().split('\\t', 1)\n",
     "            texts.append(text)\n",
     "            labels.append(label)\n",
     "    return texts, labels\n",
     "\n",
     "def tokenize(text):\n",
     "    # Character-level tokenization.\n",
     "    return list(text)\n",
     "\n",
     "def build_vocab(texts, vocab_size=500):\n",
     "    # Same as the logistic-regression cell but with a much smaller default\n",
     "    # vocabulary (500 vs 5000); index 0 is reserved for <UNK>.\n",
     "    all_words = []\n",
     "    for text in texts:\n",
     "        all_words.extend(tokenize(text))\n",
     "    counter = Counter(all_words)\n",
     "    most_common = counter.most_common(vocab_size-1)\n",
     "    word2idx = {word: idx+1 for idx, (word, _) in enumerate(most_common)}\n",
     "    word2idx['<UNK>'] = 0\n",
     "    return word2idx\n",
     "\n",
     "def vectorize(texts, word2idx):\n",
     "    # Bag-of-words count vectors; OOV tokens add to slot 0.\n",
     "    vectors = []\n",
     "    for text in texts:\n",
     "        vec = np.zeros(len(word2idx))\n",
     "        for word in tokenize(text):\n",
     "            idx = word2idx.get(word, 0)\n",
     "            vec[idx] += 1\n",
     "        vectors.append(vec)\n",
     "    return np.array(vectors)\n",
     "\n",
     "def encode_labels(labels):\n",
     "    # Map label strings to dense integer ids (sorted for determinism).\n",
     "    label_set = list(sorted(set(labels)))\n",
     "    label2idx = {label: idx for idx, label in enumerate(label_set)}\n",
     "    idx2label = {idx: label for label, idx in label2idx.items()}\n",
     "    y = np.array([label2idx[label] for label in labels])\n",
     "    return y, label2idx, idx2label\n",
    "\n",
     "# Decision tree node: internal nodes hold (feature, threshold); leaves hold value.\n",
     "class TreeNode:\n",
     "    def __init__(self, feature=None, threshold=None, left=None, right=None, value=None):\n",
     "        self.feature = feature\n",
     "        self.threshold = threshold\n",
     "        self.left = left\n",
     "        self.right = right\n",
     "        self.value = value  # majority class; only read when predicting at a leaf\n",
     "\n",
     "class DecisionTree:\n",
     "    # Binary classification tree grown greedily by information gain on numeric thresholds.\n",
     "    def __init__(self, max_depth=10, min_samples_split=5):\n",
     "        self.max_depth = max_depth\n",
     "        self.min_samples_split = min_samples_split\n",
     "        self.root = None\n",
     "\n",
     "    def fit(self, X, y):\n",
     "        # y must be non-negative integer class ids (np.bincount is used below).\n",
     "        self.n_classes = len(set(y))\n",
     "        self.root = self._grow_tree(X, y, depth=0)\n",
     "\n",
     "    def _entropy(self, y):\n",
     "        # Shannon entropy (bits) of the label distribution.\n",
     "        hist = np.bincount(y)\n",
     "        ps = hist / len(y)\n",
     "        return -np.sum([p*np.log2(p+1e-10) for p in ps if p > 0])\n",
     "\n",
     "    def _best_split(self, X, y):\n",
     "        # Exhaustive search: every feature x every unique value as a threshold.\n",
     "        # O(n_features * n_thresholds * n_samples) -- the dominant training cost.\n",
     "        m, n = X.shape\n",
     "        if m <= 1:\n",
     "            return None, None\n",
     "        parent_entropy = self._entropy(y)\n",
     "        best_gain = 0\n",
     "        best_feat, best_thr = None, None\n",
     "        for feat in range(n):\n",
     "            thresholds = np.unique(X[:, feat])\n",
     "            for thr in thresholds:\n",
     "                left_idx = X[:, feat] <= thr\n",
     "                right_idx = X[:, feat] > thr\n",
     "                if np.sum(left_idx) == 0 or np.sum(right_idx) == 0:\n",
     "                    continue\n",
     "                left_entropy = self._entropy(y[left_idx])\n",
     "                right_entropy = self._entropy(y[right_idx])\n",
     "                child_entropy = (np.sum(left_idx) * left_entropy + np.sum(right_idx) * right_entropy) / m\n",
     "                info_gain = parent_entropy - child_entropy\n",
     "                if info_gain > best_gain:\n",
     "                    best_gain = info_gain\n",
     "                    best_feat = feat\n",
     "                    best_thr = thr\n",
     "        return best_feat, best_thr\n",
     "\n",
     "    def _grow_tree(self, X, y, depth):\n",
     "        # Default to a leaf predicting the majority class; replace with an internal\n",
     "        # split node only while depth/min-sample limits allow and the node is impure.\n",
     "        num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes)]\n",
     "        predicted_class = np.argmax(num_samples_per_class)\n",
     "        node = TreeNode(value=predicted_class)\n",
     "        if depth < self.max_depth and len(y) >= self.min_samples_split and len(set(y)) > 1:\n",
     "            feat, thr = self._best_split(X, y)\n",
     "            if feat is not None:\n",
     "                idx_left = X[:, feat] <= thr\n",
     "                idx_right = X[:, feat] > thr\n",
     "                node = TreeNode(feature=feat, threshold=thr)\n",
     "                node.left = self._grow_tree(X[idx_left], y[idx_left], depth+1)\n",
     "                node.right = self._grow_tree(X[idx_right], y[idx_right], depth+1)\n",
     "        return node\n",
     "\n",
     "    def predict(self, X):\n",
     "        return np.array([self._predict(inputs) for inputs in X])\n",
     "\n",
     "    def _predict(self, inputs):\n",
     "        # Walk from the root to a leaf; internal nodes always have both children.\n",
     "        node = self.root\n",
     "        while node.left is not None and node.right is not None:\n",
     "            if inputs[node.feature] <= node.threshold:\n",
     "                node = node.left\n",
     "            else:\n",
     "                node = node.right\n",
     "        return node.value\n",
    "\n",
     "def accuracy(y_true, y_pred):\n",
     "    return np.mean(y_true == y_pred)\n",
     "\n",
     "def precision(y_true, y_pred, num_classes):\n",
     "    # Macro precision; epsilon avoids division by zero.\n",
     "    prec = []\n",
     "    for c in range(num_classes):\n",
     "        tp = np.sum((y_pred == c) & (y_true == c))\n",
     "        fp = np.sum((y_pred == c) & (y_true != c))\n",
     "        prec.append(tp / (tp + fp + 1e-10))\n",
     "    return np.mean(prec)\n",
     "\n",
     "def recall(y_true, y_pred, num_classes):\n",
     "    # Macro recall.\n",
     "    rec = []\n",
     "    for c in range(num_classes):\n",
     "        tp = np.sum((y_pred == c) & (y_true == c))\n",
     "        fn = np.sum((y_pred != c) & (y_true == c))\n",
     "        rec.append(tp / (tp + fn + 1e-10))\n",
     "    return np.mean(rec)\n",
     "\n",
     "def f1_score(y_true, y_pred, num_classes):\n",
     "    # Harmonic mean of macro precision and macro recall.\n",
     "    p = precision(y_true, y_pred, num_classes)\n",
     "    r = recall(y_true, y_pred, num_classes)\n",
     "    return 2 * p * r / (p + r + 1e-10)\n",
     "\n",
     "def mean_squared_error(y_true, y_pred):\n",
     "    # MSE over integer class ids; depends on label ordering (rough indicator only).\n",
     "    return np.mean((y_true - y_pred) ** 2)\n",
    "\n",
     "if __name__ == '__main__':\n",
     "    texts, labels = load_data(\"../Data/cnews.txt\")\n",
     "    # fixed seed so the split matches the other cells\n",
     "    np.random.seed(42)\n",
     "    idx = np.random.permutation(len(texts))\n",
     "    split = int(0.8 * len(texts))\n",
     "    train_idx, test_idx = idx[:split], idx[split:]\n",
     "    texts_train = [texts[i] for i in train_idx]\n",
     "    labels_train = [labels[i] for i in train_idx]\n",
     "    texts_test = [texts[i] for i in test_idx]\n",
     "    labels_test = [labels[i] for i in test_idx]\n",
     "\n",
     "    # small vocabulary (500) keeps tree induction tractable\n",
     "    word2idx = build_vocab(texts_train, vocab_size=500)\n",
     "    X_train = vectorize(texts_train, word2idx)\n",
     "    X_test = vectorize(texts_test, word2idx)\n",
     "    y_train, label2idx, idx2label = encode_labels(labels_train)\n",
     "    # NOTE(review): KeyError if a test label never occurs in train -- confirm coverage\n",
     "    y_test = np.array([label2idx[label] for label in labels_test])\n",
     "    num_classes = len(label2idx)\n",
     "\n",
     "    model = DecisionTree(max_depth=8, min_samples_split=10)\n",
     "    start_time = time.time()\n",
     "    model.fit(X_train, y_train)\n",
     "    train_time = time.time() - start_time\n",
     "\n",
     "    y_pred = model.predict(X_test)\n",
     "\n",
     "    acc = accuracy(y_test, y_pred)\n",
     "    prec = precision(y_test, y_pred, num_classes)\n",
     "    rec = recall(y_test, y_pred, num_classes)\n",
     "    f1 = f1_score(y_test, y_pred, num_classes)\n",
     "    mse = mean_squared_error(y_test, y_pred)\n",
     "\n",
     "    print(f'准确率: {acc:.4f}')\n",
     "    print(f'精确率: {prec:.4f}')\n",
     "    print(f'召回率: {rec:.4f}')\n",
     "    print(f'F1分数: {f1:.4f}')\n",
     "    print(f'均方误差: {mse:.4f}')\n",
     "    print(f'训练时间: {train_time:.2f}秒')"
   ],
   "id": "96bc974c5bb4eee9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确率: 0.8340\n",
      "精确率: 0.8388\n",
      "召回率: 0.8306\n",
      "F1分数: 0.8347\n",
      "均方误差: 3.3870\n",
      "训练时间: 25.23秒\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-12T02:47:20.607703Z",
     "start_time": "2025-06-12T02:46:03.881352Z"
    }
   },
   "cell_type": "code",
   "source": [
     "import numpy as np\n",
     "import time\n",
     "from collections import Counter\n",
     "\n",
     "# NOTE: unlike the other cells, this one reuses load_data, tokenize, build_vocab,\n",
     "# vectorize, encode_labels and the metric functions defined in the earlier cells --\n",
     "# those cells must be executed first.\n",
     "# Random forest (bagged decision trees with per-split feature subsampling)\n",
     "class TreeNode:\n",
     "    def __init__(self, feature=None, threshold=None, left=None, right=None, value=None):\n",
     "        self.feature = feature\n",
     "        self.threshold = threshold\n",
     "        self.left = left\n",
     "        self.right = right\n",
     "        self.value = value  # majority class; only read at leaves\n",
    "\n",
     "class DecisionTree:\n",
     "    # Same greedy information-gain tree as the previous cell, extended with an\n",
     "    # optional random feature subsample per split (used by RandomForest).\n",
     "    def __init__(self, max_depth=10, min_samples_split=5, feature_subsample=None):\n",
     "        self.max_depth = max_depth\n",
     "        self.min_samples_split = min_samples_split\n",
     "        self.root = None\n",
     "        self.feature_subsample = feature_subsample  # number of features tried per split (random forest)\n",
     "\n",
     "    def fit(self, X, y):\n",
     "        # y must be non-negative integer class ids (np.bincount is used below).\n",
     "        self.n_classes = len(set(y))\n",
     "        self.n_features = X.shape[1]  # stored for reference; not read elsewhere in this class\n",
     "        self.root = self._grow_tree(X, y, depth=0)\n",
     "\n",
     "    def _entropy(self, y):\n",
     "        # Shannon entropy (bits) of the label distribution.\n",
     "        hist = np.bincount(y)\n",
     "        ps = hist / len(y)\n",
     "        return -np.sum([p*np.log2(p+1e-10) for p in ps if p > 0])\n",
     "\n",
     "    def _best_split(self, X, y):\n",
     "        m, n = X.shape\n",
     "        if m <= 1:\n",
     "            return None, None\n",
     "        parent_entropy = self._entropy(y)\n",
     "        best_gain = 0\n",
     "        best_feat, best_thr = None, None\n",
     "        # Randomly restrict candidate features when feature_subsample is set;\n",
     "        # this is what de-correlates the trees in the forest.\n",
     "        features = np.arange(n)\n",
     "        if self.feature_subsample is not None:\n",
     "            features = np.random.choice(n, self.feature_subsample, replace=False)\n",
     "        for feat in features:\n",
     "            thresholds = np.unique(X[:, feat])\n",
     "            for thr in thresholds:\n",
     "                left_idx = X[:, feat] <= thr\n",
     "                right_idx = X[:, feat] > thr\n",
     "                if np.sum(left_idx) == 0 or np.sum(right_idx) == 0:\n",
     "                    continue\n",
     "                left_entropy = self._entropy(y[left_idx])\n",
     "                right_entropy = self._entropy(y[right_idx])\n",
     "                child_entropy = (np.sum(left_idx) * left_entropy + np.sum(right_idx) * right_entropy) / m\n",
     "                info_gain = parent_entropy - child_entropy\n",
     "                if info_gain > best_gain:\n",
     "                    best_gain = info_gain\n",
     "                    best_feat = feat\n",
     "                    best_thr = thr\n",
     "        return best_feat, best_thr\n",
     "\n",
     "    def _grow_tree(self, X, y, depth):\n",
     "        # Leaf predicting the majority class unless a useful split exists within limits.\n",
     "        num_samples_per_class = [np.sum(y == i) for i in range(self.n_classes)]\n",
     "        predicted_class = np.argmax(num_samples_per_class)\n",
     "        node = TreeNode(value=predicted_class)\n",
     "        if depth < self.max_depth and len(y) >= self.min_samples_split and len(set(y)) > 1:\n",
     "            feat, thr = self._best_split(X, y)\n",
     "            if feat is not None:\n",
     "                idx_left = X[:, feat] <= thr\n",
     "                idx_right = X[:, feat] > thr\n",
     "                node = TreeNode(feature=feat, threshold=thr)\n",
     "                node.left = self._grow_tree(X[idx_left], y[idx_left], depth+1)\n",
     "                node.right = self._grow_tree(X[idx_right], y[idx_right], depth+1)\n",
     "        return node\n",
     "\n",
     "    def predict(self, X):\n",
     "        return np.array([self._predict(inputs) for inputs in X])\n",
     "\n",
     "    def _predict(self, inputs):\n",
     "        # Descend to a leaf; internal nodes always have both children.\n",
     "        node = self.root\n",
     "        while node.left is not None and node.right is not None:\n",
     "            if inputs[node.feature] <= node.threshold:\n",
     "                node = node.left\n",
     "            else:\n",
     "                node = node.right\n",
     "        return node.value\n",
    "\n",
     "class RandomForest:\n",
     "    # Bagged ensemble: each tree trains on a bootstrap sample of the rows and\n",
     "    # tries a random subset of features at every split; prediction is a majority vote.\n",
     "    def __init__(self, n_estimators=5, max_depth=8, min_samples_split=10, feature_subsample_ratio=0.7):\n",
     "        self.n_estimators = n_estimators\n",
     "        self.max_depth = max_depth\n",
     "        self.min_samples_split = min_samples_split\n",
     "        self.feature_subsample_ratio = feature_subsample_ratio\n",
     "        self.trees = []\n",
     "\n",
     "    def fit(self, X, y):\n",
     "        n_samples, n_features = X.shape\n",
     "        feature_subsample = max(1, int(n_features * self.feature_subsample_ratio))\n",
     "        self.trees = []\n",
     "        for _ in range(self.n_estimators):\n",
     "            # bootstrap sampling: n_samples rows drawn with replacement\n",
     "            idxs = np.random.choice(n_samples, n_samples, replace=True)\n",
     "            X_sample = X[idxs]\n",
     "            y_sample = y[idxs]\n",
     "            tree = DecisionTree(max_depth=self.max_depth, min_samples_split=self.min_samples_split, feature_subsample=feature_subsample)\n",
     "            tree.fit(X_sample, y_sample)\n",
     "            self.trees.append(tree)\n",
     "\n",
     "    def predict(self, X):\n",
     "        # Majority vote across trees; argmax on bincount breaks ties toward the lower class id.\n",
     "        preds = np.array([tree.predict(X) for tree in self.trees])\n",
     "        preds = preds.T  # shape: [n_samples, n_estimators]\n",
     "        y_pred = []\n",
     "        for row in preds:\n",
     "            counts = np.bincount(row)\n",
     "            y_pred.append(np.argmax(counts))\n",
     "        return np.array(y_pred)\n",
    "\n",
     "# --- metric functions: reused from the previous cells ---\n",
     "\n",
     "if __name__ == '__main__':\n",
     "    texts, labels = load_data(\"../Data/cnews.txt\")\n",
     "    # fixed seed so the split matches the other cells\n",
     "    np.random.seed(42)\n",
     "    idx = np.random.permutation(len(texts))\n",
     "    split = int(0.8 * len(texts))\n",
     "    train_idx, test_idx = idx[:split], idx[split:]\n",
     "    texts_train = [texts[i] for i in train_idx]\n",
     "    labels_train = [labels[i] for i in train_idx]\n",
     "    texts_test = [texts[i] for i in test_idx]\n",
     "    labels_test = [labels[i] for i in test_idx]\n",
     "\n",
     "    word2idx = build_vocab(texts_train, vocab_size=500)\n",
     "    X_train = vectorize(texts_train, word2idx)\n",
     "    X_test = vectorize(texts_test, word2idx)\n",
     "    y_train, label2idx, idx2label = encode_labels(labels_train)\n",
     "    # NOTE(review): KeyError if a test label never occurs in train -- confirm coverage\n",
     "    y_test = np.array([label2idx[label] for label in labels_test])\n",
     "    num_classes = len(label2idx)\n",
     "\n",
     "    model = RandomForest(n_estimators=5, max_depth=8, min_samples_split=10, feature_subsample_ratio=0.7)\n",
     "    start_time = time.time()\n",
     "    model.fit(X_train, y_train)\n",
     "    train_time = time.time() - start_time\n",
     "\n",
     "    y_pred = model.predict(X_test)\n",
     "\n",
     "    acc = accuracy(y_test, y_pred)\n",
     "    prec = precision(y_test, y_pred, num_classes)\n",
     "    rec = recall(y_test, y_pred, num_classes)\n",
     "    f1 = f1_score(y_test, y_pred, num_classes)\n",
     "    mse = mean_squared_error(y_test, y_pred)\n",
     "\n",
     "    print(f'准确率: {acc:.4f}')\n",
     "    print(f'精确率: {prec:.4f}')\n",
     "    print(f'召回率: {rec:.4f}')\n",
     "    print(f'F1分数: {f1:.4f}')\n",
     "    print(f'均方误差: {mse:.4f}')\n",
     "    print(f'训练时间: {train_time:.2f}秒')"
   ],
   "id": "7d3430f414df4420",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确率: 0.8660\n",
      "精确率: 0.8731\n",
      "召回率: 0.8649\n",
      "F1分数: 0.8690\n",
      "均方误差: 2.2520\n",
      "训练时间: 74.00秒\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-12T02:49:06.363707Z",
     "start_time": "2025-06-12T02:48:50.820163Z"
    }
   },
   "cell_type": "code",
   "source": [
     "import numpy as np\n",
     "import time\n",
     "from collections import Counter\n",
     "# k-nearest neighbours (this cell re-defines the shared helpers so it runs standalone)\n",
     "def load_data(filename):\n",
     "    # One sample per line: 'label<TAB>text'; lines without a TAB are skipped.\n",
     "    texts = []\n",
     "    labels = []\n",
     "    with open(filename, 'r', encoding='utf-8') as f:\n",
     "        for line in f:\n",
     "            if '\\t' not in line:\n",
     "                continue\n",
     "            label, text = line.strip().split('\\t', 1)\n",
     "            texts.append(text)\n",
     "            labels.append(label)\n",
     "    return texts, labels\n",
     "\n",
     "def tokenize(text):\n",
     "    # Character-level tokenization.\n",
     "    return list(text)\n",
     "\n",
     "def build_vocab(texts, vocab_size=500):\n",
     "    # Keep the (vocab_size - 1) most frequent tokens; index 0 is reserved for <UNK>.\n",
     "    all_words = []\n",
     "    for text in texts:\n",
     "        all_words.extend(tokenize(text))\n",
     "    counter = Counter(all_words)\n",
     "    most_common = counter.most_common(vocab_size-1)\n",
     "    word2idx = {word: idx+1 for idx, (word, _) in enumerate(most_common)}\n",
     "    word2idx['<UNK>'] = 0\n",
     "    return word2idx\n",
     "\n",
     "def vectorize(texts, word2idx):\n",
     "    # Bag-of-words count vectors; OOV tokens add to slot 0.\n",
     "    vectors = []\n",
     "    for text in texts:\n",
     "        vec = np.zeros(len(word2idx))\n",
     "        for word in tokenize(text):\n",
     "            idx = word2idx.get(word, 0)\n",
     "            vec[idx] += 1\n",
     "        vectors.append(vec)\n",
     "    return np.array(vectors)\n",
     "\n",
     "def encode_labels(labels):\n",
     "    # Map label strings to dense integer ids (sorted for determinism).\n",
     "    label_set = list(sorted(set(labels)))\n",
     "    label2idx = {label: idx for idx, label in enumerate(label_set)}\n",
     "    idx2label = {idx: label for label, idx in label2idx.items()}\n",
     "    y = np.array([label2idx[label] for label in labels])\n",
     "    return y, label2idx, idx2label\n",
    "\n",
     "class KNNClassifier:\n",
     "    # k-nearest neighbours with Euclidean distance and majority vote.\n",
     "    def __init__(self, k=3):\n",
     "        self.k = k\n",
     "\n",
     "    def fit(self, X, y):\n",
     "        # Lazy learner: training only stores the data; all work happens in predict.\n",
     "        self.X_train = X\n",
     "        self.y_train = y\n",
     "\n",
     "    def predict(self, X):\n",
     "        y_pred = []\n",
     "        for x in X:\n",
     "            # Euclidean distance to every training sample (O(n_train * dim) per query)\n",
     "            distances = np.linalg.norm(self.X_train - x, axis=1)\n",
     "            nn_idx = np.argsort(distances)[:self.k]\n",
     "            nn_labels = self.y_train[nn_idx]\n",
     "            # majority vote; Counter.most_common breaks ties by first-seen neighbour order\n",
     "            votes = Counter(nn_labels)\n",
     "            y_pred.append(votes.most_common(1)[0][0])\n",
     "        return np.array(y_pred)\n",
    "\n",
     "def accuracy(y_true, y_pred):\n",
     "    return np.mean(y_true == y_pred)\n",
     "\n",
     "def precision(y_true, y_pred, num_classes):\n",
     "    # Macro precision; epsilon avoids division by zero.\n",
     "    prec = []\n",
     "    for c in range(num_classes):\n",
     "        tp = np.sum((y_pred == c) & (y_true == c))\n",
     "        fp = np.sum((y_pred == c) & (y_true != c))\n",
     "        prec.append(tp / (tp + fp + 1e-10))\n",
     "    return np.mean(prec)\n",
     "\n",
     "def recall(y_true, y_pred, num_classes):\n",
     "    # Macro recall.\n",
     "    rec = []\n",
     "    for c in range(num_classes):\n",
     "        tp = np.sum((y_pred == c) & (y_true == c))\n",
     "        fn = np.sum((y_pred != c) & (y_true == c))\n",
     "        rec.append(tp / (tp + fn + 1e-10))\n",
     "    return np.mean(rec)\n",
     "\n",
     "def f1_score(y_true, y_pred, num_classes):\n",
     "    # Harmonic mean of macro precision and macro recall.\n",
     "    p = precision(y_true, y_pred, num_classes)\n",
     "    r = recall(y_true, y_pred, num_classes)\n",
     "    return 2 * p * r / (p + r + 1e-10)\n",
     "\n",
     "def mean_squared_error(y_true, y_pred):\n",
     "    # MSE over integer class ids; depends on label ordering (rough indicator only).\n",
     "    return np.mean((y_true - y_pred) ** 2)\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    texts, labels = load_data(\"../Data/cnews.txt\")\n",
    "    np.random.seed(42)\n",
    "    idx = np.random.permutation(len(texts))\n",
    "    split = int(0.8 * len(texts))\n",
    "    train_idx, test_idx = idx[:split], idx[split:]\n",
    "    texts_train = [texts[i] for i in train_idx]\n",
    "    labels_train = [labels[i] for i in train_idx]\n",
    "    texts_test = [texts[i] for i in test_idx]\n",
    "    labels_test = [labels[i] for i in test_idx]\n",
    "\n",
    "    word2idx = build_vocab(texts_train, vocab_size=500)\n",
    "    X_train = vectorize(texts_train, word2idx)\n",
    "    X_test = vectorize(texts_test, word2idx)\n",
    "    y_train, label2idx, idx2label = encode_labels(labels_train)\n",
    "    y_test = np.array([label2idx[label] for label in labels_test])\n",
    "    num_classes = len(label2idx)\n",
    "\n",
    "    model = KNNClassifier(k=3)\n",
    "    start_time = time.time()\n",
    "    model.fit(X_train, y_train)\n",
    "    y_pred = model.predict(X_test)\n",
    "    train_time = time.time() - start_time\n",
    "\n",
    "    acc = accuracy(y_test, y_pred)\n",
    "    prec = precision(y_test, y_pred, num_classes)\n",
    "    rec = recall(y_test, y_pred, num_classes)\n",
    "    f1 = f1_score(y_test, y_pred, num_classes)\n",
    "    mse = mean_squared_error(y_test, y_pred)\n",
    "\n",
    "    print(f'准确率: {acc:.4f}')\n",
    "    print(f'精确率: {prec:.4f}')\n",
    "    print(f'召回率: {rec:.4f}')\n",
    "    print(f'F1分数: {f1:.4f}')\n",
    "    print(f'均方误差: {mse:.4f}')\n",
    "    print(f'训练时间: {train_time:.2f}秒')"
   ],
   "id": "7188e7f7fbeba28f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确率: 0.8220\n",
      "精确率: 0.8421\n",
      "召回率: 0.8193\n",
      "F1分数: 0.8306\n",
      "均方误差: 2.4270\n",
      "训练时间: 12.85秒\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-12T02:50:09.396479Z",
     "start_time": "2025-06-12T02:50:06.583631Z"
    }
   },
   "cell_type": "code",
   "source": [
     "import numpy as np\n",
     "import time\n",
     "from collections import Counter\n",
     "# Naive Bayes (this cell re-defines the shared helpers so it runs standalone)\n",
     "def load_data(filename):\n",
     "    # One sample per line: 'label<TAB>text'; lines without a TAB are skipped.\n",
     "    texts = []\n",
     "    labels = []\n",
     "    with open(filename, 'r', encoding='utf-8') as f:\n",
     "        for line in f:\n",
     "            if '\\t' not in line:\n",
     "                continue\n",
     "            label, text = line.strip().split('\\t', 1)\n",
     "            texts.append(text)\n",
     "            labels.append(label)\n",
     "    return texts, labels\n",
     "\n",
     "def tokenize(text):\n",
     "    # Character-level tokenization.\n",
     "    return list(text)\n",
     "\n",
     "def build_vocab(texts, vocab_size=500):\n",
     "    # Keep the (vocab_size - 1) most frequent tokens; index 0 is reserved for <UNK>.\n",
     "    all_words = []\n",
     "    for text in texts:\n",
     "        all_words.extend(tokenize(text))\n",
     "    counter = Counter(all_words)\n",
     "    most_common = counter.most_common(vocab_size-1)\n",
     "    word2idx = {word: idx+1 for idx, (word, _) in enumerate(most_common)}\n",
     "    word2idx['<UNK>'] = 0\n",
     "    return word2idx\n",
     "\n",
     "def vectorize(texts, word2idx):\n",
     "    # Bag-of-words count vectors; OOV tokens add to slot 0.\n",
     "    vectors = []\n",
     "    for text in texts:\n",
     "        vec = np.zeros(len(word2idx))\n",
     "        for word in tokenize(text):\n",
     "            idx = word2idx.get(word, 0)\n",
     "            vec[idx] += 1\n",
     "        vectors.append(vec)\n",
     "    return np.array(vectors)\n",
     "\n",
     "def encode_labels(labels):\n",
     "    # Map label strings to dense integer ids (sorted for determinism).\n",
     "    label_set = list(sorted(set(labels)))\n",
     "    label2idx = {label: idx for idx, label in enumerate(label_set)}\n",
     "    idx2label = {idx: label for label, idx in label2idx.items()}\n",
     "    y = np.array([label2idx[label] for label in labels])\n",
     "    return y, label2idx, idx2label\n",
    "\n",
    "class NaiveBayesClassifier:\n",
    "    \"\"\"Multinomial naive Bayes over bag-of-words count vectors.\"\"\"\n",
    "\n",
    "    def fit(self, X, y):\n",
    "        \"\"\"Estimate class priors and smoothed per-class feature\n",
    "        probabilities from counts X (n_samples, n_features) and\n",
    "        integer labels y in 0..max(y).\"\"\"\n",
    "        self.num_classes = np.max(y) + 1\n",
    "        self.num_features = X.shape[1]\n",
    "        self.class_count = np.zeros(self.num_classes)\n",
    "        self.feature_count = np.zeros((self.num_classes, self.num_features))\n",
    "        for c in range(self.num_classes):\n",
    "            X_c = X[y == c]\n",
    "            self.class_count[c] = X_c.shape[0]\n",
    "            self.feature_count[c] = np.sum(X_c, axis=0)\n",
    "        self.class_prior = self.class_count / np.sum(self.class_count)\n",
    "        # Laplace (add-one) smoothing so unseen features keep nonzero probability\n",
    "        self.feature_prob = (self.feature_count + 1) / (self.feature_count.sum(axis=1, keepdims=True) + self.num_features)\n",
    "\n",
    "    def predict(self, X):\n",
    "        \"\"\"Return the argmax class id for each row of X (log-space scoring).\"\"\"\n",
    "        # log P(c) + X @ log P(w|c); the 1e-10 guards against log(0)\n",
    "        log_prob = np.log(self.class_prior + 1e-10) + X @ np.log(self.feature_prob.T + 1e-10)\n",
    "        return np.argmax(log_prob, axis=1)\n",
    "\n",
    "def accuracy(y_true, y_pred):\n",
    "    \"\"\"Fraction of predictions equal to the true labels.\"\"\"\n",
    "    return np.mean(y_true == y_pred)\n",
    "\n",
    "def precision(y_true, y_pred, num_classes):\n",
    "    \"\"\"Macro-averaged precision; 1e-10 avoids 0/0 for classes\n",
    "    that were never predicted.\"\"\"\n",
    "    prec = []\n",
    "    for c in range(num_classes):\n",
    "        tp = np.sum((y_pred == c) & (y_true == c))\n",
    "        fp = np.sum((y_pred == c) & (y_true != c))\n",
    "        prec.append(tp / (tp + fp + 1e-10))\n",
    "    return np.mean(prec)\n",
    "\n",
    "def recall(y_true, y_pred, num_classes):\n",
    "    \"\"\"Macro-averaged recall; 1e-10 avoids 0/0 for classes\n",
    "    absent from y_true.\"\"\"\n",
    "    rec = []\n",
    "    for c in range(num_classes):\n",
    "        tp = np.sum((y_pred == c) & (y_true == c))\n",
    "        fn = np.sum((y_pred != c) & (y_true == c))\n",
    "        rec.append(tp / (tp + fn + 1e-10))\n",
    "    return np.mean(rec)\n",
    "\n",
    "def f1_score(y_true, y_pred, num_classes):\n",
    "    \"\"\"Harmonic mean of macro precision and macro recall.\n",
    "    NOTE: this differs slightly from averaging per-class F1.\"\"\"\n",
    "    p = precision(y_true, y_pred, num_classes)\n",
    "    r = recall(y_true, y_pred, num_classes)\n",
    "    return 2 * p * r / (p + r + 1e-10)\n",
    "\n",
    "def mean_squared_error(y_true, y_pred):\n",
    "    \"\"\"Mean squared error over integer class ids.\n",
    "    NOTE(review): ids are nominal categories, so this is only indicative.\"\"\"\n",
    "    return np.mean((y_true - y_pred) ** 2)\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    texts, labels = load_data(\"../Data/cnews.txt\")\n",
    "    # Reproducible 80/20 train/test split\n",
    "    np.random.seed(42)\n",
    "    idx = np.random.permutation(len(texts))\n",
    "    split = int(0.8 * len(texts))\n",
    "    train_idx, test_idx = idx[:split], idx[split:]\n",
    "    texts_train = [texts[i] for i in train_idx]\n",
    "    labels_train = [labels[i] for i in train_idx]\n",
    "    texts_test = [texts[i] for i in test_idx]\n",
    "    labels_test = [labels[i] for i in test_idx]\n",
    "\n",
    "    # Vocabulary is built from the training split only (no test leakage)\n",
    "    word2idx = build_vocab(texts_train, vocab_size=500)\n",
    "    X_train = vectorize(texts_train, word2idx)\n",
    "    X_test = vectorize(texts_test, word2idx)\n",
    "    y_train, label2idx, idx2label = encode_labels(labels_train)\n",
    "    # NOTE(review): assumes every test label also appears in the training split\n",
    "    y_test = np.array([label2idx[label] for label in labels_test])\n",
    "    num_classes = len(label2idx)\n",
    "\n",
    "    # Train and time the classifier\n",
    "    model = NaiveBayesClassifier()\n",
    "    start_time = time.time()\n",
    "    model.fit(X_train, y_train)\n",
    "    train_time = time.time() - start_time\n",
    "\n",
    "    y_pred = model.predict(X_test)\n",
    "\n",
    "    # Evaluation metrics on the held-out test split\n",
    "    acc = accuracy(y_test, y_pred)\n",
    "    prec = precision(y_test, y_pred, num_classes)\n",
    "    rec = recall(y_test, y_pred, num_classes)\n",
    "    f1 = f1_score(y_test, y_pred, num_classes)\n",
    "    mse = mean_squared_error(y_test, y_pred)\n",
    "\n",
    "    print(f'准确率: {acc:.4f}')\n",
    "    print(f'精确率: {prec:.4f}')\n",
    "    print(f'召回率: {rec:.4f}')\n",
    "    print(f'F1分数: {f1:.4f}')\n",
    "    print(f'均方误差: {mse:.4f}')\n",
    "    print(f'训练时间: {train_time:.2f}秒')"
   ],
   "id": "c2a84315c811593f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确率: 0.9060\n",
      "精确率: 0.9112\n",
      "召回率: 0.9036\n",
      "F1分数: 0.9074\n",
      "均方误差: 1.0480\n",
      "训练时间: 0.00秒\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-12T10:27:50.365882Z",
     "start_time": "2025-06-12T10:27:49.083384Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import numpy as np\n",
    "import time\n",
    "from collections import Counter\n",
    "\n",
    "def load_data(filename):\n",
    "    \"\"\"Read `label<TAB>text` lines from filename (UTF-8).\n",
    "\n",
    "    Returns two parallel lists: (texts, labels).\n",
    "    \"\"\"\n",
    "    texts = []\n",
    "    labels = []\n",
    "    with open(filename, 'r', encoding='utf-8') as f:\n",
    "        for line in f:\n",
    "            # Skip lines without a tab separator (malformed records)\n",
    "            if '\\t' not in line:\n",
    "                continue\n",
    "            label, text = line.strip().split('\\t', 1)\n",
    "            texts.append(text)\n",
    "            labels.append(label)\n",
    "    return texts, labels\n",
    "\n",
    "def tokenize(text):\n",
    "    \"\"\"Character-level tokenization: each character is one token.\"\"\"\n",
    "    return list(text)\n",
    "\n",
    "def build_vocab(texts, vocab_size=500):\n",
    "    \"\"\"Build a word->index map from the (vocab_size - 1) most frequent\n",
    "    tokens; index 0 is reserved for the '<UNK>' (unknown) token.\"\"\"\n",
    "    all_words = []\n",
    "    for text in texts:\n",
    "        all_words.extend(tokenize(text))\n",
    "    counter = Counter(all_words)\n",
    "    # vocab_size - 1 because slot 0 is reserved for <UNK>\n",
    "    most_common = counter.most_common(vocab_size-1)\n",
    "    word2idx = {word: idx+1 for idx, (word, _) in enumerate(most_common)}\n",
    "    word2idx['<UNK>'] = 0\n",
    "    return word2idx\n",
    "\n",
    "def vectorize(texts, word2idx):\n",
    "    \"\"\"Turn each text into a bag-of-words count vector of length\n",
    "    len(word2idx); out-of-vocabulary tokens count toward index 0.\"\"\"\n",
    "    vectors = []\n",
    "    for text in texts:\n",
    "        vec = np.zeros(len(word2idx))\n",
    "        for word in tokenize(text):\n",
    "            idx = word2idx.get(word, 0)\n",
    "            vec[idx] += 1\n",
    "        vectors.append(vec)\n",
    "    return np.array(vectors)\n",
    "\n",
    "def encode_labels(labels):\n",
    "    \"\"\"Map label strings to integer ids (sorted label order).\n",
    "\n",
    "    Returns (y, label2idx, idx2label) where y is an int array.\n",
    "    \"\"\"\n",
    "    label_set = list(sorted(set(labels)))\n",
    "    label2idx = {label: idx for idx, label in enumerate(label_set)}\n",
    "    idx2label = {idx: label for label, idx in label2idx.items()}\n",
    "    y = np.array([label2idx[label] for label in labels])\n",
    "    return y, label2idx, idx2label\n",
    "\n",
    "class NaiveBayesClassifier:\n",
    "    \"\"\"Multinomial naive Bayes over bag-of-words count vectors.\"\"\"\n",
    "\n",
    "    def fit(self, X, y):\n",
    "        \"\"\"Estimate class priors and smoothed per-class feature\n",
    "        probabilities from counts X (n_samples, n_features) and\n",
    "        integer labels y in 0..max(y).\"\"\"\n",
    "        self.num_classes = np.max(y) + 1\n",
    "        self.num_features = X.shape[1]\n",
    "        self.class_count = np.zeros(self.num_classes)\n",
    "        self.feature_count = np.zeros((self.num_classes, self.num_features))\n",
    "        for c in range(self.num_classes):\n",
    "            X_c = X[y == c]\n",
    "            self.class_count[c] = X_c.shape[0]\n",
    "            self.feature_count[c] = np.sum(X_c, axis=0)\n",
    "        self.class_prior = self.class_count / np.sum(self.class_count)\n",
    "        # Laplace (add-one) smoothing so unseen features keep nonzero probability\n",
    "        self.feature_prob = (self.feature_count + 1) / (self.feature_count.sum(axis=1, keepdims=True) + self.num_features)\n",
    "\n",
    "    def predict(self, X):\n",
    "        \"\"\"Return the argmax class id for each row of X (log-space scoring).\"\"\"\n",
    "        # log P(c) + X @ log P(w|c); the 1e-10 guards against log(0)\n",
    "        log_prob = np.log(self.class_prior + 1e-10) + X @ np.log(self.feature_prob.T + 1e-10)\n",
    "        return np.argmax(log_prob, axis=1)\n",
    "\n",
    "def accuracy(y_true, y_pred):\n",
    "    \"\"\"Fraction of predictions equal to the true labels.\"\"\"\n",
    "    return np.mean(y_true == y_pred)\n",
    "\n",
    "def precision_per_class(y_true, y_pred, label2idx):\n",
    "    \"\"\"Per-class precision, keyed by label string; 1e-10 avoids 0/0.\"\"\"\n",
    "    prec_dict = {}\n",
    "    for label, c in label2idx.items():\n",
    "        tp = np.sum((y_pred == c) & (y_true == c))\n",
    "        fp = np.sum((y_pred == c) & (y_true != c))\n",
    "        prec_dict[label] = tp / (tp + fp + 1e-10)\n",
    "    return prec_dict\n",
    "\n",
    "def recall_per_class(y_true, y_pred, label2idx):\n",
    "    \"\"\"Per-class recall, keyed by label string; 1e-10 avoids 0/0.\"\"\"\n",
    "    rec_dict = {}\n",
    "    for label, c in label2idx.items():\n",
    "        tp = np.sum((y_pred == c) & (y_true == c))\n",
    "        fn = np.sum((y_pred != c) & (y_true == c))\n",
    "        rec_dict[label] = tp / (tp + fn + 1e-10)\n",
    "    return rec_dict\n",
    "\n",
    "def f1_per_class(y_true, y_pred, label2idx):\n",
    "    \"\"\"Per-class F1 built from the per-class precision/recall dicts.\"\"\"\n",
    "    f1_dict = {}\n",
    "    prec_dict = precision_per_class(y_true, y_pred, label2idx)\n",
    "    rec_dict = recall_per_class(y_true, y_pred, label2idx)\n",
    "    for label in label2idx:\n",
    "        p = prec_dict[label]\n",
    "        r = rec_dict[label]\n",
    "        f1_dict[label] = 2 * p * r / (p + r + 1e-10)\n",
    "    return f1_dict\n",
    "\n",
    "def precision(y_true, y_pred, num_classes):\n",
    "    \"\"\"Macro-averaged precision; 1e-10 avoids 0/0 for classes\n",
    "    that were never predicted.\"\"\"\n",
    "    prec = []\n",
    "    for c in range(num_classes):\n",
    "        tp = np.sum((y_pred == c) & (y_true == c))\n",
    "        fp = np.sum((y_pred == c) & (y_true != c))\n",
    "        prec.append(tp / (tp + fp + 1e-10))\n",
    "    return np.mean(prec)\n",
    "\n",
    "def recall(y_true, y_pred, num_classes):\n",
    "    \"\"\"Macro-averaged recall; 1e-10 avoids 0/0 for classes\n",
    "    absent from y_true.\"\"\"\n",
    "    rec = []\n",
    "    for c in range(num_classes):\n",
    "        tp = np.sum((y_pred == c) & (y_true == c))\n",
    "        fn = np.sum((y_pred != c) & (y_true == c))\n",
    "        rec.append(tp / (tp + fn + 1e-10))\n",
    "    return np.mean(rec)\n",
    "\n",
    "def f1_score(y_true, y_pred, num_classes):\n",
    "    \"\"\"Harmonic mean of macro precision and macro recall.\n",
    "    NOTE: this differs slightly from averaging per-class F1.\"\"\"\n",
    "    p = precision(y_true, y_pred, num_classes)\n",
    "    r = recall(y_true, y_pred, num_classes)\n",
    "    return 2 * p * r / (p + r + 1e-10)\n",
    "\n",
    "def mean_squared_error(y_true, y_pred):\n",
    "    \"\"\"Mean squared error over integer class ids.\n",
    "    NOTE(review): ids are nominal categories, so this is only indicative.\"\"\"\n",
    "    return np.mean((y_true - y_pred) ** 2)\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    texts, labels = load_data(\"../Data/cnews.txt\")\n",
    "    # Reproducible 80/20 train/test split\n",
    "    np.random.seed(42)\n",
    "    idx = np.random.permutation(len(texts))\n",
    "    split = int(0.8 * len(texts))\n",
    "    train_idx, test_idx = idx[:split], idx[split:]\n",
    "    texts_train = [texts[i] for i in train_idx]\n",
    "    labels_train = [labels[i] for i in train_idx]\n",
    "    texts_test = [texts[i] for i in test_idx]\n",
    "    labels_test = [labels[i] for i in test_idx]\n",
    "\n",
    "    # Vocabulary is built from the training split only (no test leakage)\n",
    "    word2idx = build_vocab(texts_train, vocab_size=500)\n",
    "    X_train = vectorize(texts_train, word2idx)\n",
    "    X_test = vectorize(texts_test, word2idx)\n",
    "    y_train, label2idx, idx2label = encode_labels(labels_train)\n",
    "    # NOTE(review): assumes every test label also appears in the training split\n",
    "    y_test = np.array([label2idx[label] for label in labels_test])\n",
    "    num_classes = len(label2idx)\n",
    "\n",
    "    model = NaiveBayesClassifier()\n",
    "    start_time = time.time()\n",
    "    model.fit(X_train, y_train)\n",
    "    train_time = time.time() - start_time\n",
    "\n",
    "    y_pred = model.predict(X_test)\n",
    "\n",
    "    # Overall (macro-averaged) metrics\n",
    "    acc = accuracy(y_test, y_pred)\n",
    "    prec = precision(y_test, y_pred, num_classes)\n",
    "    rec = recall(y_test, y_pred, num_classes)\n",
    "    f1 = f1_score(y_test, y_pred, num_classes)\n",
    "    mse = mean_squared_error(y_test, y_pred)\n",
    "\n",
    "    # Per-class metrics, keyed by label string\n",
    "    prec_dict = precision_per_class(y_test, y_pred, label2idx)\n",
    "    rec_dict = recall_per_class(y_test, y_pred, label2idx)\n",
    "    f1_dict = f1_per_class(y_test, y_pred, label2idx)\n",
    "\n",
    "    # Print overall metrics\n",
    "    print(f'准确率: {acc:.4f}')\n",
    "    print(f'精确率: {prec:.4f}')\n",
    "    print(f'召回率: {rec:.4f}')\n",
    "    print(f'F1分数: {f1:.4f}')\n",
    "    print(f'均方误差: {mse:.4f}')\n",
    "    print(f'训练时间: {train_time:.2f}秒')\n",
    "    \n",
    "    # Print per-class metrics\n",
    "    print(\"\\n每个类别的详细指标:\")\n",
    "    print(\"类别\\t\\t精确率\\t\\t召回率\\t\\tF1分数\")\n",
    "    for label in label2idx:\n",
    "        print(f\"{label}\\t\\t{prec_dict[label]:.4f}\\t\\t{rec_dict[label]:.4f}\\t\\t{f1_dict[label]:.4f}\")\n"
   ],
   "id": "632febb1d7a6750d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "准确率: 0.9060\n",
      "精确率: 0.9112\n",
      "召回率: 0.9036\n",
      "F1分数: 0.9074\n",
      "均方误差: 1.0480\n",
      "训练时间: 0.01秒\n",
      "\n",
      "每个类别的详细指标:\n",
      "类别\t\t精确率\t\t召回率\t\tF1分数\n",
      "体育\t\t0.9540\t\t0.9765\t\t0.9651\n",
      "娱乐\t\t0.9364\t\t0.9904\t\t0.9626\n",
      "家居\t\t0.7474\t\t0.8161\t\t0.7802\n",
      "房产\t\t0.9192\t\t0.8750\t\t0.8966\n",
      "教育\t\t0.9661\t\t0.6000\t\t0.7403\n",
      "时尚\t\t0.9492\t\t0.9492\t\t0.9492\n",
      "时政\t\t0.7941\t\t0.9310\t\t0.8571\n",
      "游戏\t\t0.8947\t\t0.9659\t\t0.9290\n",
      "科技\t\t0.9709\t\t0.9524\t\t0.9615\n",
      "财经\t\t0.9796\t\t0.9796\t\t0.9796\n"
     ]
    }
   ],
   "execution_count": 1
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
