{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "\n",
     "# regular expressions\n",
     "import re\n",
     "\n",
     "# Chinese text processing (segmentation, POS tagging, keyword extraction)\n",
     "import jieba\n",
     "from jieba import posseg\n",
     "from jieba import analyse\n",
     "\n",
     "import cntext as ct\n",
     "from cntext.stats import term_freq, readability\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "outputs": [],
   "source": [
     "# List cntext's built-in dictionaries (pkl format).\n",
     "# ct.dict_pkl_list()\n",
     "\n",
     "# Load a pkl dictionary file.\n",
     "# print(ct.load_pkl_dict('ChineseFinancialFormalUnformalSentiment.pkl'))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "outputs": [
    {
     "data": {
      "text/plain": "       UserID  EI  NS  TF  JP  \\\n0  1330417035   1   1   1   0   \n\n                                                text  \n0  ['<span class=\"url-icon\"><img alt=[蜡烛] src=\"ht...  ",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>UserID</th>\n      <th>EI</th>\n      <th>NS</th>\n      <th>TF</th>\n      <th>JP</th>\n      <th>text</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>1330417035</td>\n      <td>1</td>\n      <td>1</td>\n      <td>1</td>\n      <td>0</td>\n      <td>['&lt;span class=\"url-icon\"&gt;&lt;img alt=[蜡烛] src=\"ht...</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load the merged weibo dataset; keep the MBTI label columns plus raw text.\n",
     "mbti_weibo_data_all = pd.read_csv('../data/mbti_weibo_data_all.csv')\n",
     "mbti_weibo_data_text = mbti_weibo_data_all[['UserID','EI','NS','TF','JP','text']]\n",
     "mbti_weibo_data_text.head(1)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [
    {
     "data": {
      "text/plain": "       UserID  EI  NS  TF  JP MBTI类型  粉丝数  关注数  性别  微博数  ...  社会时事  电影  娱乐明星  \\\n0  1330417035   1   1   1   0   ISFJ   71  276   0  655  ...     0   1     1   \n\n   法律  综艺节目  旅游出行  财经  健康医疗  电视剧  \\\n0   0     1     1   0     1    1   \n\n                                                text  \n0  ['<span class=\"url-icon\"><img alt=[蜡烛] src=\"ht...  \n\n[1 rows x 54 columns]",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>UserID</th>\n      <th>EI</th>\n      <th>NS</th>\n      <th>TF</th>\n      <th>JP</th>\n      <th>MBTI类型</th>\n      <th>粉丝数</th>\n      <th>关注数</th>\n      <th>性别</th>\n      <th>微博数</th>\n      <th>...</th>\n      <th>社会时事</th>\n      <th>电影</th>\n      <th>娱乐明星</th>\n      <th>法律</th>\n      <th>综艺节目</th>\n      <th>旅游出行</th>\n      <th>财经</th>\n      <th>健康医疗</th>\n      <th>电视剧</th>\n      <th>text</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>1330417035</td>\n      <td>1</td>\n      <td>1</td>\n      <td>1</td>\n      <td>0</td>\n      <td>ISFJ</td>\n      <td>71</td>\n      <td>276</td>\n      <td>0</td>\n      <td>655</td>\n      <td>...</td>\n      <td>0</td>\n      <td>1</td>\n      <td>1</td>\n      <td>0</td>\n      <td>1</td>\n      <td>1</td>\n      <td>0</td>\n      <td>1</td>\n      <td>1</td>\n      <td>['&lt;span class=\"url-icon\"&gt;&lt;img alt=[蜡烛] src=\"ht...</td>\n    </tr>\n  </tbody>\n</table>\n<p>1 rows × 54 columns</p>\n</div>"
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Reorder all 54 columns: labels, profile stats, interest-tag flags, then text.\n",
     "mbti_weibo_data_all_sorted=mbti_weibo_data_all[['UserID','EI','NS','TF','JP','MBTI类型','粉丝数','关注数','性别','微博数','注册年限','互动数','视频累积播放量','TOP1','TOP2','TOP3','TOP4','TOP5','数码','母婴育儿','时尚','美妆','颜值','搞笑幽默','历史','互联网','体育','人文艺术','公益','动漫','美食','情感生活','动物宠物','运动健身','科学科普','游戏','教育','音乐演出','军事','房产家居','汽车','宗教','摄影拍照','读书作家','社会时事','电影','娱乐明星','法律','综艺节目','旅游出行','财经','健康医疗','电视剧','text']]\n",
     "mbti_weibo_data_all_sorted.head(1)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "#清除微博文本，提取中文进行后续分析\n",
    "def clean(text):\n",
    "    text = re.sub(r\"(回复)?(//)?\\s*@\\S*?\\s*(:| |$)\", \" \", text)  # 去除正文中的@和回复/转发中的用户名\n",
    "    text = re.sub(r\"\\[\\S+\\]\", \"\", text)      # 去除表情符号\n",
    "    text = re.sub(r'<span.*?</span>', \"\", text)      # 去除span\n",
    "    # text = re.sub(r\"#\\S+#\", \"\", text)      # 保留话题内容\n",
    "    URL_REGEX = re.compile(\n",
    "        r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:\\'\".,<>?«»“”‘’]))',\n",
    "        re.IGNORECASE)\n",
    "    text = re.sub(URL_REGEX, \"\", text)       # 去除网址\n",
    "    text = text.replace(\"转发微博\", \"\")       # 去除无意义的词语\n",
    "    text = text.replace(\"微博网友\", \"\")       # 去除无意义的词语\n",
    "    text = text.replace(\"我参与\", \"\")       # 去除无意义的词语\n",
    "    text = re.sub(\"[\\.\\!\\/_$%^*(+\\\"\\']+|[+——!。？、~@#￥%……&*（））]+\",\"\",text)\n",
    "    # text = re.sub(\"[,]+|[，]+\",\"\",text)\n",
    "    text = re.sub(r'<a .*?>',\"\",text)\n",
    "    text=''.join(re.findall(r\"[\\u4e00-\\u9fff,，]{2,}\",text))#仅提取中文\n",
    "    text = text.replace(\",\", \"。\")       # 替换成句号\n",
    "    text = re.sub(r\"\\s+\", \" \", text) # 合并正文中过多的空格\n",
    "    return text.strip()\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 157,
   "outputs": [],
   "source": [
    "\n",
    "tText = mbti_weibo_data_text.loc[1000]['text']\n",
    "# print(tText)\n",
    "tText = clean(tText)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "outputs": [],
   "source": [
    "#获取所有文本\n",
    "def getAllText(data):\n",
    "    allText = ''\n",
    "    for index, row in data.iterrows():\n",
    "        text = row['text']\n",
    "        tText = clean(text)\n",
    "        # 替换成句号再做后续分析\n",
    "        tText = tText.replace(\",\", \"。\")\n",
    "        allText=allText+tText\n",
    "    return allText\n",
    "allText = getAllText(mbti_weibo_data_text)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def sentimentAna(tText):\n",
    "    sres = ct.sentiment(text=tText,diction=ct.load_pkl_dict('DUTIR.pkl')['DUTIR'],lang='chinese')\n",
    "    #增加一列总情绪数\n",
    "    all_num=sres['乐_num']+sres['好_num']+sres['怒_num']+sres['哀_num']+sres['惧_num']+sres['恶_num']+sres['惊_num']\n",
    "    sres['all_num']=all_num\n",
    "    return sres\n",
    "\n",
    "\n",
    "# 哈哈哈哈/o\n",
    "# 今天/t\n",
    "# 上课/v\n",
    "# 老师/n\n",
    "# 又/d\n",
    "def lcutAna(tText):\n",
    "    POSset = []\n",
    "    pos_word = posseg.lcut(tText)\n",
    "    # print(pos_word)\n",
    "    for word in pos_word:\n",
    "        POSset.append(str(word).split('/')[1])\n",
    "    return POSset\n",
    "\n",
    "# Counter({'哈哈哈哈': 1,\n",
    "#          '今天': 8,\n",
    "#          '上课': 1,\n",
    "#          '老师': 4,\n",
    "#          '仗': 1,\n",
    "#          '手大': 1,\n",
    "def TFAna(tText,count):\n",
    "    tfres  = sorted(dict(term_freq(tText)).items(),key= lambda  x:x[1],reverse=True)[0:count]\n",
    "    tflist = []\n",
    "    for tf in tfres:\n",
    "       tflist.append(tf[0])\n",
    "    return tflist\n",
    "\n",
    "\n",
    "# ['同事',\n",
    "#  '真的',\n",
    "#  '可爱',\n",
    "#  '自己',\n",
    "#  '领导',\n",
    "def TFIDFAna(tText,count,allowPOS):\n",
    "    return analyse.extract_tags(tText,allowPOS =allowPOS , topK=count)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "def TFDictCreate(allText,count):\n",
    "    TFDic = TFAna(allText,count)\n",
    "    return TFDic\n",
    "def TFIDFDictCreate(allText,count,allowPOS):\n",
    "    TFIDic = TFIDFAna(allText,count,allowPOS)\n",
    "    return TFIDic"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "outputs": [],
   "source": [
    "#获取所有文本\n",
    "def getEText(data,type,witch):\n",
    "    datas = data[data[type]==witch]\n",
    "    # print(datas.describe())\n",
    "    allText = ''\n",
    "    for index, row in datas.iterrows():\n",
    "        text = row['text']\n",
    "        tText = clean(text)\n",
    "        # 替换成句号再做后续分析\n",
    "        tText = tText.replace(\",\", \"。\")\n",
    "        allText=allText+tText\n",
    "    return allText\n",
    "\n",
    "typeList = ['EI','NS','TF','JP']\n",
    "text_list = []\n",
    "for type in typeList:\n",
    "    for witch in range(0,2):\n",
    "        ttext = getEText(mbti_weibo_data_text,type,witch)\n",
    "        tfres3 = TFDictCreate(ttext,200)\n",
    "        res3 = TFIDFDictCreate(ttext,200,\"\")\n",
    "        text_list = text_list+tfres3+res3\n",
    "# print(text_list)\n",
    "\n",
    "finallres3 = list(set(text_list))\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "outputs": [
    {
     "data": {
      "text/plain": "157"
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Size of the combined per-dimension TF / TF-IDF vocabulary.\n",
     "len(finallres3)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 168,
   "outputs": [],
   "source": [
     "\n",
     "\n",
     "\n",
     "# One TF pass over the full corpus; top terms overall plus per POS filter\n",
     "# ('a' = adjectives, 'n' = nouns, '' = unrestricted), deduplicated.\n",
     "tfres = TFDictCreate(allText,200)\n",
     "Ares = TFIDFDictCreate(allText,100,\"a\")\n",
     "Nres = TFIDFDictCreate(allText,100,\"n\")\n",
     "res = TFIDFDictCreate(allText,100,\"\")\n",
     "finallres = list(set(tfres+Ares+Nres+res))\n",
     "\n",
     "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [],
   "source": [
     "# The full set of POS tags jieba's posseg can emit; each becomes a feature column.\n",
     "allPOS = {'vg', 'nt', 'rz', 'l', 'vi', 'z', 't', 'uz', 'n', 'uj', 'i', 'c', 'vd', 'm', 'nz', 'dg', 'd', 'g', 'vq', 'zg', 'mg', 'r', 'nrt', 'v', 'o', 'ns', 'ug', 'ng', 'df', 'nrfg', 'e', 'y', 'uv', 'ul', 'ag', 'k', 'u', 'ud', 'ad', 'vn', 's', 'h', 'an', 'tg', 'nr', 'f', 'q', 'yg', 'rr', 'mq', 'rg', 'a', 'bg', 'p', 'x', 'b', 'j'}"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "outputs": [],
   "source": [
     "# Build the large (top-2000) TF + TF-IDF vocabulary used for presence features.\n",
     "tfres2 = TFDictCreate(allText,2000)\n",
     "res2 = TFIDFDictCreate(allText,2000,\"\")\n",
     "finallres2 = list(set(tfres2+res2))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "outputs": [],
   "source": [
    "\n",
    "#获得文本分析列进行分析\n",
    "def getTextTrainData(userid,userText):\n",
    "    allDict = {\"UserID\":userid}\n",
    "    #只保留中文和，。——用于可读性分析\n",
    "    tText = clean(userText)\n",
    "    #可读性分析 dict\n",
    "    res_read = ct.readability(tText, lang='chinese')  #{'readability1': 852.5, 'readability2': 53.5, 'readability3': 453.0}\n",
    "\n",
    "    # 去掉句号再做后续分析\n",
    "    tText = tText.replace(\"。\", \"\")\n",
    "    tText = tText.replace(\"，\", \"\")\n",
    "\n",
    "    #情绪分析 dict\n",
    "    res_sent = sentimentAna(tText)\n",
    "\n",
    "    #词性分析\n",
    "    textP = lcutAna(tText)\n",
    "    res_textp = {}\n",
    "    for pos in allPOS:\n",
    "        count = len([i for i in textP if i == pos])\n",
    "        res_textp[pos]=count\n",
    "        # print(\"值 {} 在列表中出现的次数为：\".format(pos), count)\n",
    "\n",
    "    #TF + TF-IDF分析\n",
    "    tfres_dict = {}\n",
    "    for t in finallres2:\n",
    "        if tText.count(t)>0:\n",
    "            w = 1\n",
    "        else:\n",
    "            w = 0\n",
    "        tfres_dict[t] = w\n",
    "\n",
    "    allDict.update(res_read)\n",
    "    allDict.update(res_sent)\n",
    "    allDict.update(res_textp)\n",
    "    allDict.update(tfres_dict)\n",
    "    # print(allDict)\n",
    "    return pd.DataFrame(allDict,index=[0])\n",
    "\n",
    "\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "100\n",
      "200\n",
      "300\n",
      "400\n",
      "500\n",
      "600\n",
      "700\n",
      "800\n",
      "900\n",
      "1000\n",
      "1100\n"
     ]
    }
   ],
   "source": [
    "def createAllFeaturesData(data):\n",
    "    pd_text = pd.DataFrame()\n",
    "    for index,row in data.iterrows():\n",
    "        if index%100==0:\n",
    "            print(index)\n",
    "        pd_text = pd.concat([pd_text,getTextTrainData(row['UserID'],row['text'])])\n",
    "    data = pd.merge(data,pd_text,on='UserID')\n",
    "    return data\n",
    "\n",
    "mbti_weibo_data_cantrain = createAllFeaturesData(mbti_weibo_data_all_sorted)\n",
    "# mbti_weibo_data_all_sorted"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "outputs": [
    {
     "data": {
      "text/plain": "(1197, 2625)"
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sanity check: users x feature columns (observed: 1197 x 2625).\n",
     "mbti_weibo_data_cantrain.shape"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "outputs": [],
   "source": [
     "# Drop the raw text column before exporting the training table.\n",
     "mbti_weibo_data_cantrain2 = mbti_weibo_data_cantrain.drop(columns=['text'])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "outputs": [],
   "source": [
     "# Persist the feature table for the downstream modelling notebooks.\n",
     "mbti_weibo_data_cantrain2.to_csv(\"../data/mbti_weibo_data_cantrain_2000.csv\")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 194,
   "outputs": [],
   "source": [
     "# Scratch cell: raw jieba segmentation, kept for reference (not used above).\n",
     "# cText = jieba.cut(tText,cut_all=False,HMM=False)\n",
     "# print(list(cText))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}