{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "df = pd.read_csv('selected-ann.csv')\n",
    "\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "# 'user_id' uniquely identifies an essay, 'content' holds the text and\n",
    "# 'score' the rating. Build 5-fold cross-validation splits stratified\n",
    "# by score; a fixed random_state keeps the folds identical across runs.\n",
    "skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n",
    "train_dfs = []\n",
    "test_dfs = []\n",
    "for train_index, test_index in skf.split(df['content'], df['score']):\n",
    "    # skf yields positional indices, so index with iloc; loc would\n",
    "    # misbehave if the frame ever had a non-default index.\n",
    "    train_dfs.append(df.iloc[train_index])\n",
    "    test_dfs.append(df.iloc[test_index])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache /tmp/jieba.cache\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading model cost 0.493 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n",
      "2\n",
      "3\n",
      "4\n",
      "Model 0 :\n",
      "Coefficients: \n",
      " [-0.00418627  0.03662068  0.00822377 -0.0047862  -0.00241734  0.06641793\n",
      "  0.00490553  0.00240476  0.01802271  0.47960648]\n",
      "Intercept: \n",
      " -0.17029244261762155\n",
      "R2: \n",
      " 0.22021820512983203\n",
      "Model 1 :\n",
      "Coefficients: \n",
      " [-0.00568805  0.07677755  0.00728801 -0.00208812 -0.00767011  0.10928824\n",
      "  0.02314468  0.00567956  0.04501041  0.45717305]\n",
      "Intercept: \n",
      " -0.22246298982946744\n",
      "R2: \n",
      " 0.2475516293496881\n",
      "Model 2 :\n",
      "Coefficients: \n",
      " [-0.00389477  0.03293367  0.00560601 -0.00387343 -0.00951521  0.07461051\n",
      "  0.02479802  0.0274025   0.04138507  0.42715234]\n",
      "Intercept: \n",
      " -0.03083086550459635\n",
      "R2: \n",
      " 0.2200876693838547\n",
      "Model 3 :\n",
      "Coefficients: \n",
      " [-0.00640468  0.03298682  0.01077683 -0.0116658  -0.0071925   0.08610526\n",
      "  0.01767583  0.01092979  0.00279208  0.55099001]\n",
      "Intercept: \n",
      " -0.0950970195552232\n",
      "R2: \n",
      " 0.21530956286798952\n",
      "Model 4 :\n",
      "Coefficients: \n",
      " [-3.95608832e-03  3.31872115e-02  6.61424390e-03 -4.24218216e-03\n",
      " -3.36515267e-04  6.48108493e-02  9.43391400e-03 -2.42474252e-04\n",
      "  5.27897628e-03  4.64353239e-01]\n",
      "Intercept: \n",
      " -0.047817391928573816\n",
      "R2: \n",
      " 0.1997972515905414\n",
      "QWK: \n",
      " 0.34837545126353786\n"
     ]
    }
   ],
   "source": [
    "# Extract shallow text features (character/word/sentence counts,\n",
    "# POS-based vocabulary sizes, dependency-tree depth), fit a linear\n",
    "# regression per fold, and report the pooled 5-fold QWK.\n",
    "from sklearn.linear_model import LinearRegression\n",
    "from sklearn.metrics import cohen_kappa_score\n",
    "import numpy as np\n",
    "import jieba\n",
    "import jieba.posseg\n",
    "import pyltp\n",
    "import spacy\n",
    "\n",
    "model = spacy.load('zh_core_web_sm')\n",
    "\n",
    "def get_tree_depth(text):\n",
    "    \"\"\"Return the mean dependency-tree depth over the sentences of text.\n",
    "\n",
    "    The depth of a sentence is the longest token-to-root path length;\n",
    "    a sentence that contains only its root has depth 0.\n",
    "    \"\"\"\n",
    "    doc = model(text)\n",
    "\n",
    "    depths = []\n",
    "    for sent in doc.sents:\n",
    "        root = sent.root\n",
    "\n",
    "        # Longest path from any token up to the root of this sentence.\n",
    "        depth = 0\n",
    "        for token in sent:\n",
    "            if token.dep_ != 'ROOT':\n",
    "                path_len = 1\n",
    "                current_token = token\n",
    "                while current_token.head != root:\n",
    "                    path_len += 1\n",
    "                    current_token = current_token.head\n",
    "                depth = max(depth, path_len)\n",
    "\n",
    "        depths.append(depth)\n",
    "\n",
    "    # Guard against texts where spacy segments no sentences.\n",
    "    return sum(depths) / len(depths) if depths else 0.0\n",
    "\n",
    "def getSimpleAttrs(text):\n",
    "    \"\"\"Return the 10-dimensional shallow feature vector for one essay.\"\"\"\n",
    "    attr = []\n",
    "    # Character count\n",
    "    attr.append(len(text))\n",
    "    # Sentence count\n",
    "    sentences = pyltp.SentenceSplitter.split(text)\n",
    "    attr.append(len(sentences))\n",
    "    # Token count\n",
    "    attr.append(len(jieba.lcut(text)))\n",
    "    # Mean characters per sentence\n",
    "    attr.append(attr[0] / attr[1])\n",
    "    # Vocabulary size (distinct tokens)\n",
    "    attr.append(len(set(jieba.cut(text))))\n",
    "    # POS-tag once instead of once per tag class (posseg is expensive).\n",
    "    tagged = list(jieba.posseg.cut(text))\n",
    "    # Distinct adjectives, verbs, nouns and conjunctions, in that order.\n",
    "    for prefix in ('a', 'v', 'n', 'c'):\n",
    "        attr.append(len(set(w for w, t in tagged if t.startswith(prefix))))\n",
    "    # Mean dependency-tree depth\n",
    "    attr.append(get_tree_depth(text))\n",
    "    return attr\n",
    "\n",
    "train_attrs = {}\n",
    "test_attrs = {}\n",
    "\n",
    "for i in range(5):\n",
    "    # Extract the shallow features for every essay in this fold.\n",
    "    print(i)\n",
    "    train_attrs[i] = np.array([getSimpleAttrs(text) for text in train_dfs[i]['content']])\n",
    "    test_attrs[i] = np.array([getSimpleAttrs(text) for text in test_dfs[i]['content']])\n",
    "\n",
    "# Fit a linear regression per fold and pool the rounded predictions\n",
    "# across folds into a single quadratic-weighted-kappa score.\n",
    "\n",
    "preds = []\n",
    "trues = []\n",
    "for i in range(5):\n",
    "    lr = LinearRegression()\n",
    "    lr.fit(train_attrs[i], train_dfs[i]['score'])\n",
    "    # Detailed fit diagnostics for this fold.\n",
    "    print('Model', i, \":\")\n",
    "    print('Coefficients: \\n', lr.coef_)\n",
    "    print('Intercept: \\n', lr.intercept_)\n",
    "    print('R2: \\n', lr.score(train_attrs[i], train_dfs[i]['score']))\n",
    "    # Vectorised rounding; the original per-element loop reused `i`,\n",
    "    # shadowing the fold counter.\n",
    "    pred = np.round(lr.predict(test_attrs[i]))\n",
    "    preds.extend(pred)\n",
    "    trues.extend(test_dfs[i]['score'])\n",
    "\n",
    "print('QWK: \\n', cohen_kappa_score(trues, preds, weights='quadratic'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model 0 :\n",
      "Coefficients: \n",
      " [[-0.00187691  0.04672855  0.00485837 -0.00306826 -0.00343717  0.13032148\n",
      "   0.00509994 -0.00054437  0.03965667  0.52519932]]\n",
      "Intercept: \n",
      " [-0.70913894]\n",
      "R2: \n",
      " 0.1911853691011408\n",
      "Model 1 :\n",
      "Coefficients: \n",
      " [[-2.27272290e-03  8.27597996e-02 -3.69417522e-05 -1.29280294e-03\n",
      "  -6.04725739e-03  1.51694626e-01  3.31831571e-02  1.24801756e-02\n",
      "   7.70094979e-02  4.56168505e-01]]\n",
      "Intercept: \n",
      " [-0.72689785]\n",
      "R2: \n",
      " 0.20699810738568758\n",
      "Model 2 :\n",
      "Coefficients: \n",
      " [[-0.00087556  0.05525834  0.00522616 -0.00305818 -0.01890633  0.13880422\n",
      "   0.02875635  0.02897431  0.05323417  0.45137663]]\n",
      "Intercept: \n",
      " [-0.5976344]\n",
      "R2: \n",
      " 0.1675527098515306\n",
      "Model 3 :\n",
      "Coefficients: \n",
      " [[ 2.65353869e-03  3.74046346e-02  1.98910191e-04 -1.71383572e-02\n",
      "  -7.84118490e-03  1.27059049e-01  7.24438884e-03  5.33375112e-03\n",
      "   2.60015197e-02  6.71344186e-01]]\n",
      "Intercept: \n",
      " [-0.6741221]\n",
      "R2: \n",
      " 0.17322781756978034\n",
      "Model 4 :\n",
      "Coefficients: \n",
      " [[-0.00139066  0.03650044  0.00098052 -0.00312666  0.00074631  0.13237737\n",
      "   0.01631349  0.00135258  0.04690536  0.48099228]]\n",
      "Intercept: \n",
      " [-0.62408872]\n",
      "R2: \n",
      " 0.1565445948905595\n",
      "QWK: \n",
      " 0.3878504672897196\n"
     ]
    }
   ],
   "source": [
    "# Fit an SVR (linear kernel) per fold on the same shallow features\n",
    "# and report the pooled 5-fold QWK.\n",
    "from sklearn.svm import SVR\n",
    "preds = []\n",
    "trues = []\n",
    "for fold in range(5):\n",
    "    svr = SVR(kernel='linear')\n",
    "    svr.fit(train_attrs[fold], train_dfs[fold]['score'])\n",
    "    # Detailed fit diagnostics for this fold.\n",
    "    print('Model', fold, \":\")\n",
    "    print('Coefficients: \\n', svr.coef_)\n",
    "    print('Intercept: \\n', svr.intercept_)\n",
    "    print('R2: \\n', svr.score(train_attrs[fold], train_dfs[fold]['score']))\n",
    "    # Vectorised rounding; the original per-element loop reused `i`,\n",
    "    # shadowing the fold counter.\n",
    "    pred = np.round(svr.predict(test_attrs[fold]))\n",
    "    preds.extend(pred)\n",
    "    trues.extend(test_dfs[fold]['score'])\n",
    "\n",
    "print('QWK: \\n', cohen_kappa_score(trues, preds, weights='quadratic'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "498.36\n",
      "376.8\n",
      "382.42\n",
      "375.54\n",
      "389.53\n",
      "397.81\n",
      "403.41\n",
      "1040.0100337977512\n",
      "125.13536670342243\n",
      "107.54005579317878\n",
      "115.30563039158147\n",
      "97.94564359888601\n",
      "109.64804558221728\n",
      "438.73320507266527\n",
      "------------------\n",
      "14.35\n",
      "14.65\n",
      "14.69\n",
      "13.93\n",
      "14.24\n",
      "14.42\n",
      "14.38\n",
      "4.8401962770119145\n",
      "4.52852072977479\n",
      "3.7032283213434196\n",
      "4.499455522616042\n",
      "3.4673332692430936\n",
      "4.190894892502078\n",
      "4.240157229789165\n",
      "------------------\n",
      "10.728784933735882\n",
      "3.668286685029796\n",
      "3.6756866172519693\n",
      "3.6826669607482296\n",
      "3.727790155412632\n",
      "3.826507428313001\n",
      "4.884953796748585\n",
      "68.67273556840846\n",
      "0.40768955181966826\n",
      "0.3622769229858414\n",
      "0.41793924582055964\n",
      "0.3391014791990142\n",
      "0.30606558278956214\n",
      "28.159140621738246\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import os\n",
    "import json\n",
    "import numpy as np\n",
    "import pyltp\n",
    "import spacy\n",
    "\n",
    "# Load the augmented essays. Filenames look like \"<index>-<time>.json\";\n",
    "# the score level is the hundreds digit of <index>, and only the first\n",
    "# generation (time == 1) of each essay is kept.\n",
    "records = []\n",
    "for filename in os.listdir('result/enhanced'):\n",
    "    stem = filename.split(\".\")[0]\n",
    "    index = int(stem.split(\"-\")[0])\n",
    "    time = int(stem.split(\"-\")[1])\n",
    "    if time == 1:\n",
    "        # Close the file handle instead of leaking it.\n",
    "        with open(\"./result/enhanced/\" + filename, encoding='utf8') as f:\n",
    "            fileObj = json.load(f)\n",
    "        records.append({\"score\": int(index / 100),\n",
    "                        \"content\": fileObj[\"response\"].replace(\"\\n\", \"\")})\n",
    "\n",
    "# Building the frame once from a list of records avoids the quadratic\n",
    "# cost of appending rows with df.loc[len(df.index)].\n",
    "df = pd.DataFrame(records, columns=[\"score\", \"content\"])\n",
    "\n",
    "model = spacy.load('zh_core_web_sm')\n",
    "\n",
    "def get_tree_depth(text):\n",
    "    \"\"\"Return the mean dependency-tree depth over the sentences of text.\n",
    "\n",
    "    The depth of a sentence is the longest token-to-root path length;\n",
    "    a sentence that contains only its root has depth 0.\n",
    "    \"\"\"\n",
    "    doc = model(text)\n",
    "\n",
    "    depths = []\n",
    "    for sent in doc.sents:\n",
    "        root = sent.root\n",
    "\n",
    "        # Longest path from any token up to the root of this sentence.\n",
    "        depth = 0\n",
    "        for token in sent:\n",
    "            if token.dep_ != 'ROOT':\n",
    "                path_len = 1\n",
    "                current_token = token\n",
    "                while current_token.head != root:\n",
    "                    path_len += 1\n",
    "                    current_token = current_token.head\n",
    "                depth = max(depth, path_len)\n",
    "\n",
    "        depths.append(depth)\n",
    "\n",
    "    # Guard against texts where spacy segments no sentences.\n",
    "    return sum(depths) / len(depths) if depths else 0.0\n",
    "\n",
    "# Compute each per-essay statistic once and reuse it below; the original\n",
    "# recomputed them (get_tree_depth in particular is expensive) for every\n",
    "# mean and std call.\n",
    "lengths = df['content'].apply(len)\n",
    "sentence_counts = df['content'].apply(lambda x: len(pyltp.SentenceSplitter.split(x)))\n",
    "tree_depths = df['content'].apply(get_tree_depth)\n",
    "\n",
    "# Mean text length per score level, then overall\n",
    "for i in range(6):\n",
    "    print(np.mean(lengths[df['score'] == i]))\n",
    "print(np.mean(lengths))\n",
    "# Std of text length per score level, then overall\n",
    "for i in range(6):\n",
    "    print(np.std(lengths[df['score'] == i]))\n",
    "print(np.std(lengths))\n",
    "\n",
    "print('------------------')\n",
    "\n",
    "# Mean sentence count per score level, then overall\n",
    "for i in range(6):\n",
    "    print(np.mean(sentence_counts[df['score'] == i]))\n",
    "print(np.mean(sentence_counts))\n",
    "# Std of sentence count per score level, then overall\n",
    "for i in range(6):\n",
    "    print(np.std(sentence_counts[df['score'] == i]))\n",
    "print(np.std(sentence_counts))\n",
    "\n",
    "print('------------------')\n",
    "\n",
    "# Mean dependency-tree depth per score level, then overall\n",
    "for i in range(6):\n",
    "    print(np.mean(tree_depths[df['score'] == i]))\n",
    "print(np.mean(tree_depths))\n",
    "# Std of dependency-tree depth per score level, then overall\n",
    "for i in range(6):\n",
    "    print(np.std(tree_depths[df['score'] == i]))\n",
    "print(np.std(tree_depths))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "jupyter",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.15"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
