{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1、导入必要的工具包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
     "#数据量太大，pandas不能一次将所有数据读入\n",
    "#也可以用pandas,一次读取部分数据，可以参考：https://www.cnblogs.com/datablog/p/6127000.html\n",
    "#import pandas as pd\n",
    "\n",
    "import numpy as np\n",
    "import scipy.sparse as ss\n",
    "import scipy.io as sio\n",
    "\n",
    "#保存数据\n",
    "import cPickle\n",
    "\n",
    "#event的特征需要编码\n",
    "#from utils import FeatureEng\n",
    "from sklearn.preprocessing import normalize\n",
    "#相似度/距离\n",
    "import scipy.spatial.distance as ssd"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "2、探索活动总数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "number of records :3137972\n"
     ]
    }
   ],
   "source": [
    "#读取数据，并统计有多少不同的events\n",
    "#其实EDA.ipynb中用read_csv已经统计过了\n",
    "lines = 0\n",
    "fin = open(\"events.csv\", 'rb')\n",
    "#找到用C/C++的感觉了\n",
    "#字段：event_id, user_id,start_time, city, state, zip, country, lat, and lng， 101 columns of words count\n",
    "fin.readline() # skip header，列名行\n",
    "for line in fin:\n",
    "    cols = line.strip().split(\",\")\n",
    "    lines += 1\n",
    "fin.close()\n",
    "\n",
    "print(\"number of records :%d\" % lines)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "3、读取训练集和测试集中出现过的活动列表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "eventIndex = cPickle.load(open(\"PE_eventIndex.pkl\", 'rb'))\n",
    "n_events = len(eventIndex)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "4、特征编码工具FeatureEng"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Feature-encoding utilities.\n",
     "# Handles encoding of country, city, time and similar event/user attributes.\n",
     "\n",
     "# serialization (Python 2 pickle)\n",
     "import cPickle\n",
     "\n",
     "# feature encoding helpers\n",
     "import datetime\n",
     "import hashlib\n",
     "# POSIX locale database and functionality\n",
     "import locale\n",
     "# country codes, names, languages, currencies, etc.\n",
     "import pycountry\n",
     "\n",
     "import numpy as np\n",
     "import scipy.io as sio\n",
     "import scipy.sparse as ss\n",
     "\n",
     "from collections import defaultdict\n",
     "from sklearn.preprocessing import normalize\n",
     "\n",
     "# Categorical feature encoder.\n",
     "# Written as a class because the encoding dictionaries are shared across files.\n",
     "class FeatureEng:\n",
     "  def __init__(self):\n",
     "    \n",
     "    # Build locale -> id map; ids start at 1, the defaultdict yields 0\n",
     "    # for unknown locales.\n",
     "    self.localeIdMap = defaultdict(int)\n",
     "    for i, l in enumerate(locale.locale_alias.keys()):\n",
     "      self.localeIdMap[l] = i + 1\n",
     "    #print locale.locale_alias.keys()\n",
     "\n",
     "    # Build country-name -> id map; ids start at 1, 0 means unknown.\n",
     "    self.countryIdMap = defaultdict(int)\n",
     "    ctryIdx = defaultdict(int)\n",
     "    for i, c in enumerate(pycountry.countries):\n",
     "      self.countryIdMap[c.name.lower()] = i + 1\n",
     "      if c.name.lower() == \"usa\":\n",
     "        ctryIdx[\"US\"] = i\n",
     "      if c.name.lower() == \"canada\":\n",
     "        ctryIdx[\"CA\"] = i\n",
     "    # Map US/Canada subdivision (state/province) names to the parent\n",
     "    # country's id, so a state name in a location string resolves too.\n",
     "    for cc in ctryIdx.keys():\n",
     "      for s in pycountry.subdivisions.get(country_code=cc):\n",
     "        self.countryIdMap[s.name.lower()] = ctryIdx[cc] + 1\n",
     "        \n",
     "    # Gender -> id map; missing/unknown gender encodes as 0.\n",
     "    self.genderIdMap = defaultdict(int, {'NaN': 0, \"male\":1, \"female\":2})\n",
     "\n",
     "  def getLocaleId(self, locstr):\n",
     "    # Unknown locales fall through to 0 via the defaultdict.\n",
     "    return self.localeIdMap[locstr.lower()]\n",
     "\n",
     "  def getGenderId(self, genderStr):\n",
     "    # Unknown genders fall through to 0 via the defaultdict.\n",
     "    return self.genderIdMap[genderStr]\n",
     "\n",
     "  def getJoinedYearMonth(self, dateString):\n",
     "    # Encode an ISO-8601 timestamp as months elapsed since Jan 2010.\n",
     "    try:\n",
     "        dttm = datetime.datetime.strptime(dateString, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n",
     "        return (dttm.year-2010)*12 + dttm.month\n",
     "    except:  # missing/unparseable dates encode as 0\n",
     "        return 0\n",
     "\n",
     "  def getCountryId(self, location):\n",
     "    # The country name is assumed to follow the last double-space in the\n",
     "    # location string; anything else encodes as 0 (unknown).\n",
     "    if (isinstance(location, str)\n",
     "        and len(location.strip()) > 0\n",
     "        and location.rfind(\"  \") > -1):\n",
     "        return self.countryIdMap[location[location.rindex(\"  \") + 2:].lower()]\n",
     "    else:\n",
     "        return 0\n",
     "\n",
     "  def getBirthYearInt(self, birthYear):\n",
     "    # Birth year as int; the literal string \"None\" or any unparseable\n",
     "    # value encodes as 0.\n",
     "    try:\n",
     "      return 0 if birthYear == \"None\" else int(birthYear)\n",
     "    except:\n",
     "      return 0\n",
     "\n",
     "  def getTimezoneInt(self, timezone):\n",
     "    # Timezone offset as int; unparseable values encode as 0.\n",
     "    try:\n",
     "      return int(timezone)\n",
     "    except:\n",
     "      return 0\n",
     "\n",
     "  def getFeatureHash(self, value):\n",
     "    # Hash an arbitrary string into a 16-bit bucket (first 4 hex digits\n",
     "    # of its SHA-224); empty strings encode as -1.\n",
     "    if len(value.strip()) == 0:\n",
     "      return -1\n",
     "    else:\n",
     "      return int(hashlib.sha224(value).hexdigest()[0:4], 16)\n",
     "\n",
     "  def getFloatValue(self, value):\n",
     "    # Parse a float; empty strings encode as 0.0.\n",
     "    if len(value.strip()) == 0:\n",
     "      return 0.0\n",
     "    else:\n",
     "      return float(value)"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "5、特征工具编码处理，抽出只在训练集和测试集中出现的event"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "FE = FeatureEng()\n",
    "\n",
    "fin = open(\"events.csv\", 'rb')\n",
    "\n",
    "#字段：event_id, user_id,start_time, city, state, zip, country, lat, and lng， 101 columns of words count\n",
    "fin.readline() # skip header\n",
    "\n",
    "#start_time, city, state, zip, country, lat, and lng\n",
    "eventPropMatrix = ss.dok_matrix((n_events, 7))\n",
    "\n",
    "#词频特征\n",
    "eventContMatrix = ss.dok_matrix((n_events, 101))\n",
    "\n",
    "for line in fin.readlines():\n",
    "    cols = line.strip().split(\",\")\n",
    "    eventId = str(cols[0])\n",
    "    \n",
    "    if eventIndex.has_key(eventId):  #在训练集或测试集中出现\n",
    "        i = eventIndex[eventId]\n",
    "  \n",
    "        #event的特征编码，这里只是简单处理，其实开始时间，地点等信息很重要\n",
    "        eventPropMatrix[i, 0] = FE.getJoinedYearMonth(cols[2]) # start_time\n",
    "        eventPropMatrix[i, 1] = FE.getFeatureHash(cols[3]) # city\n",
    "        eventPropMatrix[i, 2] = FE.getFeatureHash(cols[4]) # state\n",
    "        eventPropMatrix[i, 3] = FE.getFeatureHash(cols[5]) # zip\n",
    "        eventPropMatrix[i, 4] = FE.getFeatureHash(cols[6]) # country\n",
    "        eventPropMatrix[i, 5] = FE.getFloatValue(cols[7]) # lat\n",
    "        eventPropMatrix[i, 6] = FE.getFloatValue(cols[8]) # lon\n",
    "        \n",
    "        #词频\n",
    "        for j in range(9, 110):\n",
    "            eventContMatrix[i, j-9] = cols[j]\n",
    "fin.close()\n",
    "\n",
    "#用L2模归一化,Kmeans聚类基于L2距离\n",
    "eventPropMatrix = normalize(eventPropMatrix,\n",
    "    norm=\"l2\", axis=0, copy=False)\n",
    "sio.mmwrite(\"EV_eventPropMatrix\", eventPropMatrix)\n",
    "\n",
    "#词频，可以考虑我们用这部分特征进行聚类，得到活动的genre\n",
    "eventContMatrix = normalize(eventContMatrix,\n",
    "    norm=\"l2\", axis=0, copy=False)\n",
    "sio.mmwrite(\"EV_eventContMatrix\", eventContMatrix)\n",
    "\n",
    "\n",
    "# calculate similarity between event pairs based on the two matrices\n",
    "eventPropSim = ss.dok_matrix((n_events, n_events))\n",
    "eventContSim = ss.dok_matrix((n_events, n_events))\n",
    "\n",
    "#读取在测试集和训练集中出现的活动对\n",
    "uniqueEventPairs = cPickle.load(open(\"PE_uniqueEventPairs.pkl\", 'rb'))\n",
    "\n",
    "for e1, e2 in uniqueEventPairs:\n",
    "    #i = eventIndex[e1]\n",
    "    #j = eventIndex[e2]\n",
    "    i = e1\n",
    "    j = e2\n",
    "    \n",
    "    #非词频特征，采用Person相关系数作为相似度\n",
    "    if not eventPropSim.has_key((i,j)):\n",
    "        epsim = ssd.correlation(eventPropMatrix.getrow(i).todense(),\n",
    "            eventPropMatrix.getrow(j).todense())\n",
    "        \n",
    "        eventPropSim[i, j] = epsim\n",
    "        eventPropSim[j, i] = epsim\n",
    "    \n",
    "    #对词频特征，采用余弦相似度，也可以用直方图交/Jacard相似度\n",
    "    if not eventContSim.has_key((i,j)):\n",
    "        ecsim = ssd.cosine(eventContMatrix.getrow(i).todense(),\n",
    "            eventContMatrix.getrow(j).todense())\n",
    "    \n",
    "        eventContSim[i, j] = epsim\n",
    "        eventContSim[j, i] = epsim\n",
    "    \n",
    "sio.mmwrite(\"EV_eventPropSim\", eventPropSim)\n",
    "sio.mmwrite(\"EV_eventContSim\", eventContSim)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "matrix([[0., 0., 0., ..., 0., 0., 0.]])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Sanity check: similarity row for event 0 (all zeros unless event 0\n",
     "# appeared in uniqueEventPairs above).\n",
     "eventPropSim.getrow(0).todense()"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "6、对活动进行聚类和CH_scores计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Reload the word-count feature matrix saved earlier (MatrixMarket format),\n",
     "# so this section can run without redoing the extraction above.\n",
     "import scipy.io as sio\n",
     "eventContMatrix = sio.mmread(\"EV_eventContMatrix\") "
    ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "K-means begin with clusters: 10\n",
      "CH_score: 0.103891393554\n",
      "K-means begin with clusters: 20\n",
      "CH_score: -0.0600639875604\n",
      "K-means begin with clusters: 30\n",
      "CH_score: -0.0238457206096\n",
      "K-means begin with clusters: 40\n",
      "CH_score: -0.11884731258\n",
      "K-means begin with clusters: 50\n",
      "CH_score: -0.182353973903\n",
      "K-means begin with clusters: 60\n",
      "CH_score: -0.0891900404738\n",
      "K-means begin with clusters: 70\n",
      "CH_score: -0.198736604276\n",
      "K-means begin with clusters: 80\n",
      "CH_score: -0.109745224638\n",
      "K-means begin with clusters: 90\n",
      "CH_score: -0.18972497216\n"
     ]
    }
   ],
   "source": [
    "from sklearn.cluster import MiniBatchKMeans\n",
    "\n",
    "# 一个参数点（聚类数据为K）的模型，并评价聚类算法性能\n",
    "def K_cluster_analysis(K, df):\n",
    "    print(\"K-means begin with clusters: {}\".format(K));\n",
    "    \n",
    "    #K-means,在训练集上训练\n",
    "    km = MiniBatchKMeans(n_clusters = K)\n",
    "    km.fit(df)\n",
    "    \n",
    "    #保存预测结果\n",
    "    cluster_result = km.predict(df)\n",
    "\n",
    "    # K值的评估标准\n",
    "    #常见的方法有轮廓系数Silhouette Coefficient和Calinski-Harabasz Index\n",
    "    #这两个分数值越大则聚类效果越好\n",
    "    #CH_score = metrics.calinski_harabaz_score(X_train,mb_kmeans.predict(X_train))\n",
    "    CH_score = metrics.silhouette_score(df,cluster_result)   \n",
    "    print(\"CH_score: {}\".format(CH_score))\n",
    "\n",
    "    return CH_score\n",
    "# 设置超参数（聚类数目K）搜索范围\n",
    "from sklearn import metrics\n",
    "CH_scores = []\n",
    "Ks = [10,20,30,40,50,60,70,80,90,100]\n",
    "for K in Ks:\n",
    "    ch = K_cluster_analysis(K, eventContMatrix)\n",
    "    CH_scores.append(ch)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "7、显示结果和分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print CH_scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 绘制不同聚类数目的模型的性能，找到最佳模型／参数（分数最高）\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "plt.plot(Ks, np.array(CH_scores), 'b-')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
