{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## pyspark协同过滤"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### user-based协同过滤"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#-*- coding:utf8 -*-\n",
    "# pySpark实现的基于用户的协同过滤\n",
    "# 使用的余弦相似度\n",
    "\n",
    "import sys\n",
    "from collections import defaultdict\n",
    "from itertools import combinations\n",
    "import random\n",
    "import numpy as np\n",
    "import pdb\n",
    "\n",
    "from pyspark import SparkContext\n",
    "\n",
    "# user item rating timestamp\n",
    "def parseVectorOnUser(line):\n",
    "    '''\n",
    "        Parse one 'user|item|rating|...' record, keyed on the user:\n",
    "        returns (user_id, (item_id, rating)).\n",
    "    '''\n",
    "    line = line.split(\"|\")\n",
    "    return line[0],(line[1],float(line[2]))\n",
    "\n",
    "def parseVectorOnItem(line):\n",
    "    '''\n",
    "        Parse one 'user|item|rating|...' record, keyed on the item:\n",
    "        returns (item_id, (user_id, rating)).\n",
    "    '''\n",
    "    line = line.split(\"|\")\n",
    "    return line[1],(line[0],float(line[2]))\n",
    "\n",
    "def sampleInteractions(item_id,users_with_rating,n):\n",
    "    '''\n",
    "        Down-sample an item that has very many user interactions: keep at\n",
    "        most n randomly chosen (user, rating) pairs.\n",
    "\n",
    "        The input is materialized to a plain list first, so this also\n",
    "        works when groupByKey() hands over a non-sequence iterable\n",
    "        (random.sample requires a sequence) and downstream len()/slicing\n",
    "        keep working. Returns (item_id, list_of_pairs).\n",
    "    '''\n",
    "    users_with_rating = list(users_with_rating)\n",
    "    if len(users_with_rating) > n:\n",
    "        return item_id, random.sample(users_with_rating,n)\n",
    "    return item_id, users_with_rating\n",
    "\n",
    "def findUserPairs(item_id,users_with_rating):\n",
    "    '''\n",
    "        For one item, pair up users who both rated it:\n",
    "        returns ((user1_id, user2_id), (rating1, rating2)).\n",
    "\n",
    "        NOTE(review): the return statement sits inside the loop, so only\n",
    "        the FIRST user pair of each item is emitted. Emitting every\n",
    "        combination would require yielding here and flatMap() in the\n",
    "        driver -- confirm whether this truncation is intentional.\n",
    "    '''\n",
    "    for user1,user2 in combinations(users_with_rating,2):\n",
    "        return (user1[0],user2[0]),(user1[1],user2[1])\n",
    "\n",
    "def calcSim(user_pair,rating_pairs):\n",
    "    '''\n",
    "        For one user pair, compute the cosine similarity of their rating\n",
    "        vectors and return ((user1, user2), (similarity, n)), where n is\n",
    "        the number of co-rated items.\n",
    "    '''\n",
    "    # np.float was a deprecated alias of the builtin float and was\n",
    "    # removed in NumPy 1.20+; use float directly. The unused sum_x/sum_y\n",
    "    # accumulators of the original were dropped.\n",
    "    sum_xx, sum_xy, sum_yy, n = 0.0, 0.0, 0.0, 0\n",
    "\n",
    "    for rating_pair in rating_pairs:\n",
    "        x, y = float(rating_pair[0]), float(rating_pair[1])\n",
    "        sum_xx += x * x\n",
    "        sum_yy += y * y\n",
    "        sum_xy += x * y\n",
    "        n += 1\n",
    "\n",
    "    cos_sim = cosine(sum_xy,np.sqrt(sum_xx),np.sqrt(sum_yy))\n",
    "    return user_pair, (cos_sim,n)\n",
    "\n",
    "def cosine(dot_product,rating_norm_squared,rating2_norm_squared):\n",
    "    '''\n",
    "        Cosine similarity of two vectors A and B:\n",
    "        dotProduct(A, B) / (norm(A) * norm(B)); returns 0.0 when either\n",
    "        norm is zero.\n",
    "\n",
    "        NOTE(review): despite the parameter names, the caller passes the\n",
    "        norms themselves (np.sqrt of the squared sums), not squared norms.\n",
    "    '''\n",
    "    numerator = dot_product\n",
    "    denominator = rating_norm_squared * rating2_norm_squared\n",
    "\n",
    "    return (numerator / (float(denominator))) if denominator else 0.0\n",
    "\n",
    "def keyOnFirstUser(user_pair,item_sim_data):\n",
    "    '''\n",
    "        Re-key a (user1, user2) similarity record on the first user:\n",
    "        returns (user1_id, (user2_id, similarity_payload)).\n",
    "    '''\n",
    "    user1_id, user2_id = user_pair\n",
    "    return user1_id, (user2_id, item_sim_data)\n",
    "\n",
    "def nearestNeighbors(user,users_and_sims,n):\n",
    "    '''\n",
    "        Keep the n neighbours with the highest similarity for this user.\n",
    "\n",
    "        Uses sorted() instead of in-place list.sort() so the iterable\n",
    "        produced by groupByKey() is accepted as-is.\n",
    "    '''\n",
    "    ranked = sorted(users_and_sims, key=lambda x: x[1][0], reverse=True)\n",
    "    return user, ranked[:n]\n",
    "\n",
    "def topNRecommendations(user_id,user_sims,users_with_rating,n):\n",
    "    '''\n",
    "        Recommend up to n items for user_id from its nearest neighbours.\n",
    "\n",
    "        user_sims         -- [(neighbor_id, (similarity, co_count)), ...]\n",
    "        users_with_rating -- dict user_id -> [(item_id, rating), ...]\n",
    "\n",
    "        Fix(review): the original accumulated scores per *neighbour*\n",
    "        (totals[neighbor]) and therefore ranked neighbours instead of\n",
    "        items; scores are now accumulated per item, and items the target\n",
    "        user has already rated are excluded from the candidates.\n",
    "    '''\n",
    "    totals = defaultdict(float)\n",
    "    sim_sums = defaultdict(float)\n",
    "\n",
    "    # Items the target user has already rated must not be recommended.\n",
    "    already_rated = set(item for item,_ in users_with_rating.get(user_id, []))\n",
    "\n",
    "    for (neighbor,(sim,count)) in user_sims:\n",
    "\n",
    "        # Walk the neighbour's rated items.\n",
    "        neighbor_items = users_with_rating.get(neighbor,None)\n",
    "\n",
    "        if neighbor_items:\n",
    "            for (item,rating) in neighbor_items:\n",
    "                if item not in already_rated:\n",
    "\n",
    "                    # Weight the neighbour's rating by its similarity.\n",
    "                    totals[item] += sim * rating\n",
    "                    sim_sums[item] += sim\n",
    "\n",
    "    # Normalise each item score by the accumulated similarity mass.\n",
    "    scored_items = [(total/sim_sums[item],item) for item,total in totals.items()]\n",
    "\n",
    "    # Highest predicted score first.\n",
    "    scored_items.sort(reverse=True)\n",
    "\n",
    "    ranked_items = [x[1] for x in scored_items]\n",
    "\n",
    "    return user_id,ranked_items[:n]\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Kernel metadata says Python 3: the Python 2 print statement used\n",
    "    # by the original is a SyntaxError there, so print() is used.\n",
    "    if len(sys.argv) < 3:\n",
    "        print(\"Usage: PythonUserCF <master> <file>\", file=sys.stderr)\n",
    "        exit(-1)\n",
    "\n",
    "    sc = SparkContext(sys.argv[1],\"PythonUserCF\")\n",
    "    lines = sc.textFile(sys.argv[2])\n",
    "\n",
    "    '''\n",
    "        Build the sparse item-user matrix:\n",
    "        item_id -> ((user_1,rating),(user2,rating))\n",
    "    '''\n",
    "    item_user_pairs = lines.map(parseVectorOnItem).groupByKey().map(\n",
    "        lambda p: sampleInteractions(p[0],p[1],500)).cache()\n",
    "\n",
    "    '''\n",
    "        All rating pairs of two users on their common items:\n",
    "        (user1_id,user2_id) -> [(rating1,rating2),\n",
    "                                (rating1,rating2),\n",
    "                                ...]\n",
    "    '''\n",
    "    pairwise_users = item_user_pairs.filter(\n",
    "        lambda p: len(p[1]) > 1).map(\n",
    "        lambda p: findUserPairs(p[0],p[1])).groupByKey()\n",
    "\n",
    "    '''\n",
    "        Cosine similarity per user pair, then keep the N nearest neighbours:\n",
    "        (user1,user2) -> (similarity,co_raters_count)\n",
    "    '''\n",
    "    user_sims = pairwise_users.map(\n",
    "        lambda p: calcSim(p[0],p[1])).map(\n",
    "        lambda p: keyOnFirstUser(p[0],p[1])).groupByKey().map(\n",
    "        lambda p: nearestNeighbors(p[0],p[1],50))\n",
    "\n",
    "    '''\n",
    "        Collect every user's rating history as\n",
    "        user_id -> [(item_id_1, rating_1), (item_id_2, rating_2), ...]\n",
    "    '''\n",
    "    user_item_hist = lines.map(parseVectorOnUser).groupByKey().collect()\n",
    "\n",
    "    ui_dict = {}\n",
    "    for (user,items) in user_item_hist:\n",
    "        ui_dict[user] = items\n",
    "\n",
    "    uib = sc.broadcast(ui_dict)\n",
    "\n",
    "    '''\n",
    "        Top-N recommendations per user:\n",
    "        user_id -> [item1,item2,item3,...]\n",
    "    '''\n",
    "    user_item_recs = user_sims.map(lambda p: topNRecommendations(p[0],p[1],uib.value,100)).collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### item-based协同过滤"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#-*- coding:utf8 -*-\n",
    "# pySpark实现的基于物品的协同过滤\n",
    "\n",
    "import sys\n",
    "from collections import defaultdict\n",
    "from itertools import combinations\n",
    "import numpy as np\n",
    "import random\n",
    "import csv\n",
    "import pdb\n",
    "\n",
    "from pyspark import SparkContext\n",
    "\n",
    "def parseVector(line):\n",
    "    '''\n",
    "        Parse one 'user|item|rating|...' record, keyed on the user:\n",
    "        returns (user_id, (item_id, rating)).\n",
    "\n",
    "        NOTE(review): the original docstring claimed the key is the item,\n",
    "        but the code returns line[0] (the user) as the key.\n",
    "    '''\n",
    "    line = line.split(\"|\")\n",
    "    return line[0],(line[1],float(line[2]))\n",
    "\n",
    "def sampleInteractions(user_id,items_with_rating,n):\n",
    "    '''\n",
    "        Down-sample a user with very many ratings: keep at most n randomly\n",
    "        chosen (item, rating) pairs.\n",
    "\n",
    "        The input is materialized to a plain list first, so this also\n",
    "        works when groupByKey() hands over a non-sequence iterable\n",
    "        (random.sample requires a sequence) and downstream len()/slicing\n",
    "        keep working. Returns (user_id, list_of_pairs).\n",
    "    '''\n",
    "    items_with_rating = list(items_with_rating)\n",
    "    if len(items_with_rating) > n:\n",
    "        return user_id, random.sample(items_with_rating,n)\n",
    "    return user_id, items_with_rating\n",
    "\n",
    "def findItemPairs(user_id,items_with_rating):\n",
    "    '''\n",
    "        For one user, pair up the items that user rated:\n",
    "        returns ((item1_id, item2_id), (rating1, rating2)).\n",
    "\n",
    "        NOTE(review): the return statement sits inside the loop, so only\n",
    "        the FIRST item pair of each user is emitted. Emitting every\n",
    "        combination would require yielding here and flatMap() in the\n",
    "        driver -- confirm whether this truncation is intentional.\n",
    "    '''\n",
    "    for item1,item2 in combinations(items_with_rating,2):\n",
    "        return (item1[0],item2[0]),(item1[1],item2[1])\n",
    "\n",
    "def calcSim(item_pair,rating_pairs):\n",
    "    '''\n",
    "        For one item pair, compute the cosine similarity of their rating\n",
    "        vectors and return ((item1, item2), (similarity, n)), where n is\n",
    "        the number of co-rating users.\n",
    "    '''\n",
    "    # np.float was a deprecated alias of the builtin float and was\n",
    "    # removed in NumPy 1.20+; use float directly. The unused sum_x/sum_y\n",
    "    # accumulators of the original were dropped.\n",
    "    sum_xx, sum_xy, sum_yy, n = 0.0, 0.0, 0.0, 0\n",
    "\n",
    "    for rating_pair in rating_pairs:\n",
    "        x, y = float(rating_pair[0]), float(rating_pair[1])\n",
    "        sum_xx += x * x\n",
    "        sum_yy += y * y\n",
    "        sum_xy += x * y\n",
    "        n += 1\n",
    "\n",
    "    cos_sim = cosine(sum_xy,np.sqrt(sum_xx),np.sqrt(sum_yy))\n",
    "    return item_pair, (cos_sim,n)\n",
    "\n",
    "def cosine(dot_product,rating_norm_squared,rating2_norm_squared):\n",
    "    '''\n",
    "        Cosine similarity of two vectors A and B:\n",
    "        dotProduct(A, B) / (norm(A) * norm(B)); returns 0.0 when either\n",
    "        norm is zero.\n",
    "\n",
    "        NOTE(review): despite the parameter names, the caller passes the\n",
    "        norms themselves (np.sqrt of the squared sums), not squared norms.\n",
    "    '''\n",
    "    numerator = dot_product\n",
    "    denominator = rating_norm_squared * rating2_norm_squared\n",
    "    return (numerator / (float(denominator))) if denominator else 0.0\n",
    "\n",
    "def correlation(size, dot_product, rating_sum, \\\n",
    "            rating2sum, rating_norm_squared, rating2_norm_squared):\n",
    "    '''\n",
    "        Pearson-style correlation of two vectors A and B:\n",
    "        [n * dotProduct(A, B) - sum(A) * sum(B)] /\n",
    "        sqrt{ [n * norm(A)^2 - sum(A)^2] [n * norm(B)^2 - sum(B)^2] }\n",
    "\n",
    "        Fix(review): the original called the bare name sqrt, which is not\n",
    "        imported anywhere in this cell (NameError when called); np.sqrt\n",
    "        is used instead, since numpy is already imported as np.\n",
    "    '''\n",
    "    numerator = size * dot_product - rating_sum * rating2sum\n",
    "    denominator = np.sqrt(size * rating_norm_squared - rating_sum * rating_sum) * \\\n",
    "                    np.sqrt(size * rating2_norm_squared - rating2sum * rating2sum)\n",
    "\n",
    "    return (numerator / (float(denominator))) if denominator else 0.0\n",
    "\n",
    "def keyOnFirstItem(item_pair,item_sim_data):\n",
    "    '''\n",
    "        Re-key an (item1, item2) similarity record on the first item:\n",
    "        returns (item1_id, (item2_id, similarity_payload)).\n",
    "    '''\n",
    "    item1_id, item2_id = item_pair\n",
    "    return item1_id, (item2_id, item_sim_data)\n",
    "\n",
    "def nearestNeighbors(item_id,items_and_sims,n):\n",
    "    '''\n",
    "        Keep the n neighbours with the highest similarity for this item.\n",
    "\n",
    "        Uses sorted() instead of in-place list.sort() so the iterable\n",
    "        produced by groupByKey() is accepted as-is.\n",
    "    '''\n",
    "    ranked = sorted(items_and_sims, key=lambda x: x[1][0], reverse=True)\n",
    "    return item_id, ranked[:n]\n",
    "\n",
    "def topNRecommendations(user_id,items_with_rating,item_sims,n):\n",
    "    '''\n",
    "        Recommend up to n items for user_id using item-item similarities.\n",
    "\n",
    "        items_with_rating -- [(item_id, rating), ...] rated by this user\n",
    "        item_sims         -- dict item_id -> [(neighbor_item, (sim, n)), ...]\n",
    "\n",
    "        Fix(review): every item the user has already rated is now excluded\n",
    "        from the candidate set; the original only skipped the seed item\n",
    "        itself, so already-seen items could be recommended.\n",
    "    '''\n",
    "    totals = defaultdict(float)\n",
    "    sim_sums = defaultdict(float)\n",
    "\n",
    "    # Items the user has already rated must not be recommended.\n",
    "    rated = set(item for item,_ in items_with_rating)\n",
    "\n",
    "    for (item,rating) in items_with_rating:\n",
    "\n",
    "        # Walk this item's most similar items.\n",
    "        nearest_neighbors = item_sims.get(item,None)\n",
    "\n",
    "        if nearest_neighbors:\n",
    "            for (neighbor,(sim,count)) in nearest_neighbors:\n",
    "                if neighbor not in rated:\n",
    "\n",
    "                    # Weight the user's rating by the item similarity.\n",
    "                    totals[neighbor] += sim * rating\n",
    "                    sim_sums[neighbor] += sim\n",
    "\n",
    "    # Normalise each item score by the accumulated similarity mass.\n",
    "    scored_items = [(total/sim_sums[item],item) for item,total in totals.items()]\n",
    "\n",
    "    # Highest predicted score first.\n",
    "    scored_items.sort(reverse=True)\n",
    "\n",
    "    ranked_items = [x[1] for x in scored_items]\n",
    "\n",
    "    return user_id,ranked_items[:n]\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Kernel metadata says Python 3: the Python 2 print statement used\n",
    "    # by the original is a SyntaxError there, so print() is used.\n",
    "    if len(sys.argv) < 3:\n",
    "        print(\"Usage: PythonItemCF <master> <file>\", file=sys.stderr)\n",
    "        exit(-1)\n",
    "\n",
    "    sc = SparkContext(sys.argv[1], \"PythonItemCF\")\n",
    "    lines = sc.textFile(sys.argv[2])\n",
    "\n",
    "    '''\n",
    "        Build the sparse user-item matrix:\n",
    "        user_id -> [(item_id_1, rating_1), (item_id_2, rating_2), ...]\n",
    "    '''\n",
    "    user_item_pairs = lines.map(parseVector).groupByKey().map(\n",
    "        lambda p: sampleInteractions(p[0],p[1],500)).cache()\n",
    "\n",
    "    '''\n",
    "        All item-item pairs with their rating pairs:\n",
    "        (item1,item2) -> [(item1_rating,item2_rating),\n",
    "                          (item1_rating,item2_rating),\n",
    "                          ...]\n",
    "    '''\n",
    "    pairwise_items = user_item_pairs.filter(\n",
    "        lambda p: len(p[1]) > 1).map(\n",
    "        lambda p: findItemPairs(p[0],p[1])).groupByKey()\n",
    "\n",
    "    '''\n",
    "        Cosine similarity per item pair, then keep the N nearest neighbours:\n",
    "        (item1,item2) -> (similarity,co_raters_count)\n",
    "    '''\n",
    "    item_sims = pairwise_items.map(\n",
    "        lambda p: calcSim(p[0],p[1])).map(\n",
    "        lambda p: keyOnFirstItem(p[0],p[1])).groupByKey().map(\n",
    "        lambda p: nearestNeighbors(p[0],p[1],50)).collect()\n",
    "\n",
    "    item_sim_dict = {}\n",
    "    for (item,data) in item_sims:\n",
    "        item_sim_dict[item] = data\n",
    "\n",
    "    isb = sc.broadcast(item_sim_dict)\n",
    "\n",
    "    '''\n",
    "        Top-N recommendations per user:\n",
    "        user_id -> [item1,item2,item3,...]\n",
    "    '''\n",
    "    user_item_recs = user_item_pairs.map(lambda p: topNRecommendations(p[0],p[1],isb.value,500)).collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Spark推荐系统"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### spark自带了用于推荐的算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#!/usr/bin/env python\n",
    "# 基于spark中ALS的推荐系统，针对movielens中电影打分数据做推荐\n",
    "# Edit：寒小阳(hanxiaoyang.ml@gmail.com)\n",
    "\n",
    "import sys\n",
    "import itertools\n",
    "from math import sqrt\n",
    "from operator import add\n",
    "from os.path import join, isfile, dirname\n",
    "\n",
    "from pyspark import SparkConf, SparkContext\n",
    "from pyspark.mllib.recommendation import ALS\n",
    "\n",
    "def parseRating(line):\n",
    "    \"\"\"\n",
    "        Parse a MovieLens rating line, userId::movieId::rating::timestamp,\n",
    "        into (timestamp % 10, (userId, movieId, rating)). The last digit\n",
    "        of the timestamp is later used to split train/validation/test.\n",
    "    \"\"\"\n",
    "    fields = line.strip().split(\"::\")\n",
    "    # long() does not exist in Python 3; int is unbounded there anyway.\n",
    "    return int(fields[3]) % 10, (int(fields[0]), int(fields[1]), float(fields[2]))\n",
    "\n",
    "def parseMovie(line):\n",
    "    \"\"\"\n",
    "        Parse a MovieLens movie line, movieId::movieTitle, into\n",
    "        (int movieId, str movieTitle).\n",
    "    \"\"\"\n",
    "    fields = line.strip().split(\"::\")\n",
    "    return int(fields[0]), fields[1]\n",
    "\n",
    "def loadRatings(ratingsFile):\n",
    "    \"\"\"\n",
    "        Load personal ratings from ratingsFile and return them as a list\n",
    "        of (userId, movieId, rating) tuples with rating > 0; exits the\n",
    "        program when the file is missing or contains no positive ratings.\n",
    "    \"\"\"\n",
    "    if not isfile(ratingsFile):\n",
    "        print(\"File %s does not exist.\" % ratingsFile)\n",
    "        sys.exit(1)\n",
    "    # Build a real list: in Python 3 filter() returns a lazy object whose\n",
    "    # truthiness is always True, which would break the emptiness check.\n",
    "    # 'with' also closes the file even if a line fails to parse.\n",
    "    with open(ratingsFile, 'r') as f:\n",
    "        ratings = [r for r in (parseRating(line)[1] for line in f) if r[2] > 0]\n",
    "    if not ratings:\n",
    "        print(\"No ratings provided.\")\n",
    "        sys.exit(1)\n",
    "    return ratings\n",
    "\n",
    "def computeRmse(model, data, n):\n",
    "    \"\"\"\n",
    "        Root-mean-squared error of the model's predictions against the\n",
    "        true ratings in data (an RDD of (user, movie, rating) tuples);\n",
    "        n is the number of ratings, passed in to avoid an extra count().\n",
    "    \"\"\"\n",
    "    predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n",
    "    # Join predicted and actual ratings on the (user, movie) key.\n",
    "    predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])) \\\n",
    "      .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n",
    "      .values()\n",
    "    return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Kernel metadata says Python 3: Python 2 print statements and\n",
    "    # xrange are replaced with their Python 3 equivalents.\n",
    "    if (len(sys.argv) != 3):\n",
    "        print(\"Usage: /path/to/spark/bin/spark-submit --driver-memory 2g \" +\n",
    "              \"MovieLensALS.py movieLensDataDir personalRatingsFile\")\n",
    "        sys.exit(1)\n",
    "\n",
    "    # Set up the Spark environment.\n",
    "    conf = SparkConf() \\\n",
    "      .setAppName(\"MovieLensALS\") \\\n",
    "      .set(\"spark.executor.memory\", \"2g\")\n",
    "    sc = SparkContext(conf=conf)\n",
    "\n",
    "    # Load the personal ratings file.\n",
    "    myRatings = loadRatings(sys.argv[2])\n",
    "    myRatingsRDD = sc.parallelize(myRatings, 1)\n",
    "\n",
    "    movieLensHomeDir = sys.argv[1]\n",
    "\n",
    "    # ratings: RDD of (last digit of timestamp, (userId, movieId, rating)).\n",
    "    ratings = sc.textFile(join(movieLensHomeDir, \"ratings.dat\")).map(parseRating)\n",
    "\n",
    "    # movies: dict of movieId -> movieTitle.\n",
    "    movies = dict(sc.textFile(join(movieLensHomeDir, \"movies.dat\")).map(parseMovie).collect())\n",
    "\n",
    "    numRatings = ratings.count()\n",
    "    numUsers = ratings.values().map(lambda r: r[0]).distinct().count()\n",
    "    numMovies = ratings.values().map(lambda r: r[1]).distinct().count()\n",
    "\n",
    "    print(\"Got %d ratings from %d users on %d movies.\" % (numRatings, numUsers, numMovies))\n",
    "\n",
    "    # Split by the last digit of the timestamp into training (60%),\n",
    "    # validation (20%) and test (20%) sets, each an RDD of\n",
    "    # (userId, movieId, rating).\n",
    "    numPartitions = 4\n",
    "    training = ratings.filter(lambda x: x[0] < 6) \\\n",
    "      .values() \\\n",
    "      .union(myRatingsRDD) \\\n",
    "      .repartition(numPartitions) \\\n",
    "      .cache()\n",
    "\n",
    "    validation = ratings.filter(lambda x: x[0] >= 6 and x[0] < 8) \\\n",
    "      .values() \\\n",
    "      .repartition(numPartitions) \\\n",
    "      .cache()\n",
    "\n",
    "    test = ratings.filter(lambda x: x[0] >= 8).values().cache()\n",
    "\n",
    "    numTraining = training.count()\n",
    "    numValidation = validation.count()\n",
    "    numTest = test.count()\n",
    "\n",
    "    print(\"Training: %d, validation: %d, test: %d\" % (numTraining, numValidation, numTest))\n",
    "\n",
    "    # Grid-search the ALS hyperparameters on the validation set.\n",
    "    ranks = [8, 12]\n",
    "    lambdas = [0.1, 10.0]\n",
    "    numIters = [10, 20]\n",
    "    bestModel = None\n",
    "    bestValidationRmse = float(\"inf\")\n",
    "    bestRank = 0\n",
    "    bestLambda = -1.0\n",
    "    bestNumIter = -1\n",
    "\n",
    "    for rank, lmbda, numIter in itertools.product(ranks, lambdas, numIters):\n",
    "        model = ALS.train(training, rank, numIter, lmbda)\n",
    "        validationRmse = computeRmse(model, validation, numValidation)\n",
    "        print(\"RMSE (validation) = %f for the model trained with \" % validationRmse +\n",
    "              \"rank = %d, lambda = %.1f, and numIter = %d.\" % (rank, lmbda, numIter))\n",
    "        if (validationRmse < bestValidationRmse):\n",
    "            bestModel = model\n",
    "            bestValidationRmse = validationRmse\n",
    "            bestRank = rank\n",
    "            bestLambda = lmbda\n",
    "            bestNumIter = numIter\n",
    "\n",
    "    testRmse = computeRmse(bestModel, test, numTest)\n",
    "\n",
    "    # Evaluate the best validation-set model on the test set.\n",
    "    print(\"The best model was trained with rank = %d and lambda = %.1f, \" % (bestRank, bestLambda)\n",
    "          + \"and numIter = %d, and its RMSE on the test set is %f.\" % (bestNumIter, testRmse))\n",
    "\n",
    "    # Baseline: always predict the mean rating of training + validation.\n",
    "    meanRating = training.union(validation).map(lambda x: x[2]).mean()\n",
    "    baselineRmse = sqrt(test.map(lambda x: (meanRating - x[2]) ** 2).reduce(add) / numTest)\n",
    "    improvement = (baselineRmse - testRmse) / baselineRmse * 100\n",
    "    print(\"The best model improves the baseline by %.2f\" % (improvement) + \"%.\")\n",
    "\n",
    "    # Personalised recommendations (predictions are made for user id 0,\n",
    "    # the id under which the personal ratings were loaded).\n",
    "    myRatedMovieIds = set([x[1] for x in myRatings])\n",
    "    candidates = sc.parallelize([m for m in movies if m not in myRatedMovieIds])\n",
    "    predictions = bestModel.predictAll(candidates.map(lambda x: (0, x))).collect()\n",
    "    recommendations = sorted(predictions, key=lambda x: x[2], reverse=True)[:50]\n",
    "\n",
    "    print(\"Movies recommended for you:\")\n",
    "    # xrange does not exist in Python 3, and the .encode('ascii','ignore')\n",
    "    # of the original (a Python 2 unicode workaround) would print a bytes\n",
    "    # repr (b'...') under Python 3, so titles are printed directly.\n",
    "    for i in range(len(recommendations)):\n",
    "        print(\"%2d: %s\" % (i + 1, movies[recommendations[i][1]]))\n",
    "\n",
    "    # clean up\n",
    "    sc.stop()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
