{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-06-11T03:06:27.271011Z",
     "start_time": "2025-06-11T03:06:24.185400Z"
    }
   },
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from tqdm import tqdm\n",
    "import matplotlib.pyplot as plt\n",
    "from datetime import datetime\n",
    "\n",
    "\n",
    "# ------------------------------ 数据处理函数 ------------------------------\n",
    "def load_class_labels(class_path):\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                label_map[idx] = line.strip()\n",
    "    return label_map or {idx: f\"类别{idx}\" for idx in sorted(set(label_map.keys()))}\n",
    "\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"Read a tab-separated text/label file into parallel lists.\n",
    "\n",
    "    Each kept line splits (from the right) into a text and an integer\n",
    "    label; blank lines and lines with no tab separator are skipped.\n",
    "    \"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for raw in tqdm(f, desc=f\"加载 {file_path}\"):\n",
    "            stripped = raw.strip()\n",
    "            if not stripped:\n",
    "                continue\n",
    "            pieces = stripped.rsplit('\\t', 1)\n",
    "            if len(pieces) != 2:\n",
    "                continue\n",
    "            text, label = pieces\n",
    "            texts.append(text)\n",
    "            labels.append(int(label))\n",
    "    return texts, labels\n",
    "\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"Tokenize each text with jieba, dropping stopwords and 1-char tokens.\n",
    "\n",
    "    Returns a list of space-joined token strings, the input format\n",
    "    expected by TfidfVectorizer.\n",
    "    \"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        tokens = jieba.cut(text, HMM=True)\n",
    "        kept = [tok for tok in tokens if len(tok) >= 2 and tok not in stopwords]\n",
    "        processed.append(' '.join(kept))\n",
    "    return processed\n",
    "\n",
    "\n",
    "def load_imdb_data(data_path):\n",
    "    train_texts, train_labels = [], []\n",
    "    test_texts, test_labels = [], []\n",
    "\n",
    "    # 检查根目录是否存在\n",
    "    if not os.path.exists(data_path):\n",
    "        raise FileNotFoundError(f\"数据集根目录不存在: {data_path}\")\n",
    "\n",
    "    for label in ['pos', 'neg']:\n",
    "        for split in ['train', 'test']:\n",
    "            path = os.path.join(data_path, split, label)\n",
    "            # 检查子目录是否存在\n",
    "            if not os.path.exists(path):\n",
    "                print(f\"警告: 路径不存在 - {path}\")\n",
    "                continue\n",
    "\n",
    "            for file_name in os.listdir(path):\n",
    "                if file_name.endswith('.txt'):\n",
    "                    with open(os.path.join(path, file_name), 'r', encoding='utf-8') as file:\n",
    "                        text = file.read()\n",
    "                        if split == 'train':\n",
    "                            train_texts.append(text)\n",
    "                            train_labels.append(1 if label == 'pos' else 0)\n",
    "                        else:\n",
    "                            test_texts.append(text)\n",
    "                            test_labels.append(1 if label == 'pos' else 0)\n",
    "    return train_texts, train_labels, test_texts, test_labels\n",
    "\n",
    "\n",
    "# ------------------------------ 模型训练函数 ------------------------------\n",
    "def run_knn(data, stopwords, label_map):\n",
    "    \"\"\"Train and evaluate a K-nearest-neighbors classifier on TF-IDF features.\n",
    "\n",
    "    Args:\n",
    "        data: (train_texts, train_labels, test_texts, test_labels) tuple.\n",
    "        stopwords: tokens removed during preprocessing.\n",
    "        label_map: class-index -> class-name mapping used in the report.\n",
    "\n",
    "    Returns:\n",
    "        Summary dict: model name, a short dataset tag, test accuracy, and\n",
    "        training time in seconds.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels, test_texts, test_labels = data\n",
    "    class_names = list(label_map.values())\n",
    "\n",
    "    # TF-IDF features over unigrams and bigrams.\n",
    "    vectorizer = TfidfVectorizer(max_features=10000, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(preprocess_texts(train_texts, stopwords))\n",
    "    X_test = vectorizer.transform(preprocess_texts(test_texts, stopwords))\n",
    "\n",
    "    model = KNeighborsClassifier(n_neighbors=5)  # n_neighbors is tunable\n",
    "\n",
    "    # Time only the fit step.\n",
    "    fit_start = datetime.now()\n",
    "    model.fit(X_train, train_labels)\n",
    "    elapsed = (datetime.now() - fit_start).total_seconds()\n",
    "\n",
    "    # Evaluate on both splits.\n",
    "    acc_train = accuracy_score(train_labels, model.predict(X_train))\n",
    "    test_pred = model.predict(X_test)\n",
    "    acc_test = accuracy_score(test_labels, test_pred)\n",
    "\n",
    "    # Report metrics to four decimal places.\n",
    "    print(\"\\n---------------- K近邻模型 ----------------\")\n",
    "    print(f\"训练集准确率: {acc_train:.4f}\")\n",
    "    print(f\"测试集准确率: {acc_test:.4f}\")\n",
    "    print(f\"训练时间: {elapsed:.2f} 秒\")\n",
    "    print(\"分类报告:\\n\", classification_report(test_labels, test_pred, target_names=class_names, digits=4))\n",
    "\n",
    "    return {\n",
    "        \"model\": \"K近邻\",\n",
    "        \"dataset\": data[0][0][:5],  # first 5 chars of first train text, tags the dataset only\n",
    "        \"test_accuracy\": acc_test,\n",
    "        \"train_time\": elapsed\n",
    "    }\n",
    "\n",
    "\n",
    "def run_logistic_regression(data, stopwords, label_map):\n",
    "    \"\"\"Train and evaluate a logistic-regression classifier on TF-IDF features.\n",
    "\n",
    "    Args:\n",
    "        data: (train_texts, train_labels, test_texts, test_labels) tuple.\n",
    "        stopwords: tokens removed during preprocessing.\n",
    "        label_map: class-index -> class-name mapping used in the report.\n",
    "\n",
    "    Returns:\n",
    "        Summary dict: model name, a short dataset tag, test accuracy, and\n",
    "        training time in seconds.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels, test_texts, test_labels = data\n",
    "    class_names = list(label_map.values())\n",
    "\n",
    "    # TF-IDF features over unigrams and bigrams.\n",
    "    vectorizer = TfidfVectorizer(max_features=10000, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(preprocess_texts(train_texts, stopwords))\n",
    "    X_test = vectorizer.transform(preprocess_texts(test_texts, stopwords))\n",
    "\n",
    "    # solver and max_iter are tunable; fixed random_state for repeatability.\n",
    "    model = LogisticRegression(solver='lbfgs', max_iter=1000, random_state=42)\n",
    "\n",
    "    # Time only the fit step.\n",
    "    fit_start = datetime.now()\n",
    "    model.fit(X_train, train_labels)\n",
    "    elapsed = (datetime.now() - fit_start).total_seconds()\n",
    "\n",
    "    # Evaluate on both splits.\n",
    "    acc_train = accuracy_score(train_labels, model.predict(X_train))\n",
    "    test_pred = model.predict(X_test)\n",
    "    acc_test = accuracy_score(test_labels, test_pred)\n",
    "\n",
    "    # Report metrics to four decimal places.\n",
    "    print(\"\\n---------------- 逻辑回归模型 ----------------\")\n",
    "    print(f\"训练集准确率: {acc_train:.4f}\")\n",
    "    print(f\"测试集准确率: {acc_test:.4f}\")\n",
    "    print(f\"训练时间: {elapsed:.2f} 秒\")\n",
    "    print(\"分类报告:\\n\", classification_report(test_labels, test_pred, target_names=class_names, digits=4))\n",
    "\n",
    "    return {\n",
    "        \"model\": \"逻辑回归\",\n",
    "        \"dataset\": data[0][0][:5],  # first 5 chars of first train text, tags the dataset only\n",
    "        \"test_accuracy\": acc_test,\n",
    "        \"train_time\": elapsed\n",
    "    }\n",
    "\n",
    "\n",
    "# ------------------------------ 主函数 ------------------------------\n",
    "def main():\n",
    "    \"\"\"Run LR and KNN text classifiers on two datasets and plot a comparison.\"\"\"\n",
    "    print(f\"[{datetime.now()}] 文本分类实验开始...\")\n",
    "\n",
    "    # Fonts with CJK glyph coverage; keep minus signs renderable with them.\n",
    "    plt.rcParams[\"font.family\"] = [\"SimHei\", \"WenQuanYi Micro Hei\", \"Heiti TC\"]\n",
    "    plt.rcParams[\"axes.unicode_minus\"] = False\n",
    "\n",
    "    # News-title (THUCNews) dataset configuration.\n",
    "    THUCNEWS_DATA_DIR = r\"D:\\机器学习\\THUCNews-txt\"\n",
    "    THUCNEWS_CLASS_PATH = os.path.join(THUCNEWS_DATA_DIR, \"class.txt\")\n",
    "    stopwords = {\n",
    "        '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',\n",
    "        '个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有'\n",
    "    }\n",
    "\n",
    "    # Results and their plot labels are collected side by side so the bar\n",
    "    # labels always line up 1:1 with the bar heights (see FIX note below).\n",
    "    results = []\n",
    "    bar_labels = []\n",
    "\n",
    "    # News-title dataset: run both models when the data is available.\n",
    "    if os.path.exists(THUCNEWS_DATA_DIR):\n",
    "        thucnews_train_texts, thucnews_train_labels = load_text_dataset(os.path.join(THUCNEWS_DATA_DIR, \"train.txt\"))\n",
    "        thucnews_test_texts, thucnews_test_labels = load_text_dataset(os.path.join(THUCNEWS_DATA_DIR, \"test.txt\"))\n",
    "        thucnews_label_map = load_class_labels(THUCNEWS_CLASS_PATH)\n",
    "        thucnews_data = (thucnews_train_texts, thucnews_train_labels, thucnews_test_texts, thucnews_test_labels)\n",
    "\n",
    "        print(f\"新闻标题训练集样本数：{len(thucnews_train_texts)}\")\n",
    "        print(f\"新闻标题测试集样本数：{len(thucnews_test_texts)}\")\n",
    "\n",
    "        for run_model in (run_logistic_regression, run_knn):\n",
    "            result = run_model(thucnews_data, stopwords, thucnews_label_map)\n",
    "            results.append(result)\n",
    "            bar_labels.append(f\"{result['model']}\\n新闻\")\n",
    "    else:\n",
    "        print(f\"警告: 新闻标题数据集路径不存在 - {THUCNEWS_DATA_DIR}\")\n",
    "\n",
    "    # Movie-review (IMDB) dataset. NOTE(review): the Chinese stopword set is\n",
    "    # reused for the English reviews, matching the original experiment setup.\n",
    "    IMDB_DATA_DIR = r\"D:\\机器学习\\aclImdb_v1\\aclImdb\"\n",
    "    if os.path.exists(IMDB_DATA_DIR):\n",
    "        imdb_train_texts, imdb_train_labels, imdb_test_texts, imdb_test_labels = load_imdb_data(IMDB_DATA_DIR)\n",
    "        imdb_label_map = {0: 'neg', 1: 'pos'}\n",
    "        imdb_data = (imdb_train_texts, imdb_train_labels, imdb_test_texts, imdb_test_labels)\n",
    "\n",
    "        print(f\"电影评论训练集样本数：{len(imdb_train_texts)}\")\n",
    "        print(f\"电影评论测试集样本数：{len(imdb_test_texts)}\")\n",
    "\n",
    "        for run_model in (run_logistic_regression, run_knn):\n",
    "            result = run_model(imdb_data, stopwords, imdb_label_map)\n",
    "            results.append(result)\n",
    "            bar_labels.append(f\"{result['model']}\\n电影\")\n",
    "    else:\n",
    "        print(f\"警告: 电影评论数据集路径不存在 - {IMDB_DATA_DIR}\")\n",
    "\n",
    "    if not results:\n",
    "        print(\"没有可用的数据集和结果进行可视化。\")\n",
    "        return\n",
    "\n",
    "    # ------------------------------ Visualization ------------------------------\n",
    "    # FIX: bar labels were previously rebuilt here by filtering on\n",
    "    # res['dataset'] (a raw 5-char text snippet), which generally matched\n",
    "    # neither '新闻' nor '电影' and handed plt.bar an empty x against\n",
    "    # len(results) heights. Labels are now collected alongside results above.\n",
    "    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728'][:len(results)]\n",
    "    plt.figure(figsize=(12, 8))\n",
    "\n",
    "    # Accuracy comparison.\n",
    "    plt.subplot(2, 1, 1)\n",
    "    bars = plt.bar(bar_labels, [res['test_accuracy'] for res in results],\n",
    "                   width=0.4, color=colors)\n",
    "    plt.title('模型准确率对比')\n",
    "    plt.ylabel('准确率')\n",
    "    plt.ylim(0.0, 1.0)  # full range so value annotations stay visible\n",
    "    for bar in bars:\n",
    "        height = bar.get_height()\n",
    "        plt.text(bar.get_x() + bar.get_width()/2., height,\n",
    "                f'{height:.4f}',\n",
    "                ha='center', va='bottom')\n",
    "\n",
    "    # Training-time comparison.\n",
    "    plt.subplot(2, 1, 2)\n",
    "    bars = plt.bar(bar_labels, [res['train_time'] for res in results],\n",
    "                   width=0.4, color=colors)\n",
    "    plt.title('模型训练时间对比')\n",
    "    plt.xlabel('数据集/模型')\n",
    "    plt.ylabel('训练时间（秒）')\n",
    "    for bar in bars:\n",
    "        height = bar.get_height()\n",
    "        plt.text(bar.get_x() + bar.get_width()/2., height,\n",
    "                f'{height:.2f}s',\n",
    "                ha='center', va='bottom')\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-06-11 11:06:24.601767] 文本分类实验开始...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\机器学习\\THUCNews-txt\\train.txt: 180000it [00:00, 1145982.96it/s]\n",
      "加载 D:\\机器学习\\THUCNews-txt\\test.txt: 10000it [00:00, ?it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "新闻标题训练集样本数：180000\n",
      "新闻标题测试集样本数：10000\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理:   0%|          | 0/180000 [00:00<?, ?it/s]Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\hanji\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.613 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "文本预处理:   8%|▊         | 14272/180000 [00:01<00:17, 9576.67it/s] \n",
      "D:\\python\\Lib\\site-packages\\jieba\\__init__.py:44: SyntaxWarning: invalid escape sequence '\\.'\n",
      "  re_han_default = re.compile(\"([\\u4E00-\\u9FD5a-zA-Z0-9+#&\\._%\\-]+)\", re.U)\n",
      "D:\\python\\Lib\\site-packages\\jieba\\__init__.py:46: SyntaxWarning: invalid escape sequence '\\s'\n",
      "  re_skip_default = re.compile(\"(\\r\\n|\\s)\", re.U)\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[1], line 268\u001B[0m\n\u001B[0;32m    264\u001B[0m     plt\u001B[38;5;241m.\u001B[39mshow()\n\u001B[0;32m    267\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;18m__name__\u001B[39m \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m__main__\u001B[39m\u001B[38;5;124m\"\u001B[39m:\n\u001B[1;32m--> 268\u001B[0m     main()\n",
      "Cell \u001B[1;32mIn[1], line 179\u001B[0m, in \u001B[0;36mmain\u001B[1;34m()\u001B[0m\n\u001B[0;32m    176\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m新闻标题测试集样本数：\u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mlen\u001B[39m(thucnews_test_texts)\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m    178\u001B[0m \u001B[38;5;66;03m# 运行逻辑回归模型 - 新闻标题数据集\u001B[39;00m\n\u001B[1;32m--> 179\u001B[0m thucnews_lr_result \u001B[38;5;241m=\u001B[39m run_logistic_regression(thucnews_data, stopwords, thucnews_label_map)\n\u001B[0;32m    181\u001B[0m \u001B[38;5;66;03m# 运行K近邻模型 - 新闻标题数据集\u001B[39;00m\n\u001B[0;32m    182\u001B[0m thucnews_knn_result \u001B[38;5;241m=\u001B[39m run_knn(thucnews_data, stopwords, thucnews_label_map)\n",
      "Cell \u001B[1;32mIn[1], line 120\u001B[0m, in \u001B[0;36mrun_logistic_regression\u001B[1;34m(data, stopwords, label_map)\u001B[0m\n\u001B[0;32m    118\u001B[0m \u001B[38;5;66;03m# 特征提取\u001B[39;00m\n\u001B[0;32m    119\u001B[0m vectorizer \u001B[38;5;241m=\u001B[39m TfidfVectorizer(max_features\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m10000\u001B[39m, ngram_range\u001B[38;5;241m=\u001B[39m(\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m2\u001B[39m))\n\u001B[1;32m--> 120\u001B[0m X_train \u001B[38;5;241m=\u001B[39m vectorizer\u001B[38;5;241m.\u001B[39mfit_transform(preprocess_texts(train_texts, stopwords))\n\u001B[0;32m    121\u001B[0m X_test \u001B[38;5;241m=\u001B[39m vectorizer\u001B[38;5;241m.\u001B[39mtransform(preprocess_texts(test_texts, stopwords))\n\u001B[0;32m    123\u001B[0m \u001B[38;5;66;03m# 初始化逻辑回归模型\u001B[39;00m\n",
      "Cell \u001B[1;32mIn[1], line 41\u001B[0m, in \u001B[0;36mpreprocess_texts\u001B[1;34m(texts, stopwords)\u001B[0m\n\u001B[0;32m     39\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m text \u001B[38;5;129;01min\u001B[39;00m tqdm(texts, desc\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m文本预处理\u001B[39m\u001B[38;5;124m\"\u001B[39m):\n\u001B[0;32m     40\u001B[0m     words \u001B[38;5;241m=\u001B[39m jieba\u001B[38;5;241m.\u001B[39mcut(text, HMM\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mTrue\u001B[39;00m)\n\u001B[1;32m---> 41\u001B[0m     filtered \u001B[38;5;241m=\u001B[39m [word \u001B[38;5;28;01mfor\u001B[39;00m word \u001B[38;5;129;01min\u001B[39;00m words \u001B[38;5;28;01mif\u001B[39;00m word \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m stopwords \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(word) \u001B[38;5;241m>\u001B[39m\u001B[38;5;241m=\u001B[39m \u001B[38;5;241m2\u001B[39m]\n\u001B[0;32m     42\u001B[0m     processed\u001B[38;5;241m.\u001B[39mappend(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m \u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;241m.\u001B[39mjoin(filtered))\n\u001B[0;32m     43\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m processed\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\jieba\\__init__.py:325\u001B[0m, in \u001B[0;36mTokenizer.cut\u001B[1;34m(self, sentence, cut_all, HMM, use_paddle)\u001B[0m\n\u001B[0;32m    323\u001B[0m     \u001B[38;5;28;01mcontinue\u001B[39;00m\n\u001B[0;32m    324\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m re_han\u001B[38;5;241m.\u001B[39mmatch(blk):\n\u001B[1;32m--> 325\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m word \u001B[38;5;129;01min\u001B[39;00m cut_block(blk):\n\u001B[0;32m    326\u001B[0m         \u001B[38;5;28;01myield\u001B[39;00m word\n\u001B[0;32m    327\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\jieba\\__init__.py:250\u001B[0m, in \u001B[0;36mTokenizer.__cut_DAG\u001B[1;34m(self, sentence)\u001B[0m\n\u001B[0;32m    249\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m__cut_DAG\u001B[39m(\u001B[38;5;28mself\u001B[39m, sentence):\n\u001B[1;32m--> 250\u001B[0m     DAG \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mget_DAG(sentence)\n\u001B[0;32m    251\u001B[0m     route \u001B[38;5;241m=\u001B[39m {}\n\u001B[0;32m    252\u001B[0m     \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mcalc(sentence, DAG, route)\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\jieba\\__init__.py:188\u001B[0m, in \u001B[0;36mTokenizer.get_DAG\u001B[1;34m(self, sentence)\u001B[0m\n\u001B[0;32m    186\u001B[0m i \u001B[38;5;241m=\u001B[39m k\n\u001B[0;32m    187\u001B[0m frag \u001B[38;5;241m=\u001B[39m sentence[k]\n\u001B[1;32m--> 188\u001B[0m \u001B[38;5;28;01mwhile\u001B[39;00m i \u001B[38;5;241m<\u001B[39m N \u001B[38;5;129;01mand\u001B[39;00m frag \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mFREQ:\n\u001B[0;32m    189\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mFREQ[frag]:\n\u001B[0;32m    190\u001B[0m         tmplist\u001B[38;5;241m.\u001B[39mappend(i)\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "execution_count": 1
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
