{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 一些工具函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "def strQ2B(ustring):\n",
    "    \"\"\"把字符串全角转半角\"\"\"\n",
    "    ss = []\n",
    "    for s in ustring:\n",
    "        rstring = \"\"\n",
    "        for uchar in s:\n",
    "            inside_code = ord(uchar)\n",
    "            if inside_code == 12288:  # 全角空格直接转换\n",
    "                inside_code = 32\n",
    "            elif (inside_code >= 65281 and inside_code <= 65374):  # 全角字符（除空格）根据关系转化\n",
    "                inside_code -= 65248\n",
    "            rstring += chr(inside_code)\n",
    "        ss.append(rstring)\n",
    "    return ''.join(ss)\n",
    "\n",
    "def clean_str(input):\n",
    "    \"\"\"Normalise and clean one raw news string (title or body).\n",
    "\n",
    "    Replaces ASCII commas with full-width ones, strips control characters,\n",
    "    repeated question marks and {IMG:x} placeholders, then delegates to\n",
    "    clean_zh() for JS blobs, HTML entities and URL removal.\n",
    "\n",
    "    NOTE(review): the compiled `pattern` / `pattern_2` lists below are\n",
    "    built but never applied -- only clean_zh() actually runs.  Confirm\n",
    "    whether the substitution loop was removed intentionally.\n",
    "    NOTE(review): the parameter shadows the builtin `input`.\n",
    "    \"\"\"\n",
    "\n",
    "    input = input.replace(\",\", \"，\")\n",
    "    input = input.replace(\"\\xa0\", \"\")\n",
    "    input = input.replace(\"\\b\", \"\")\n",
    "    input = input.replace('\"', \"\")\n",
    "    input = re.sub(\"\\t|\\n|\\x0b|\\x1c|\\x1d|\\x1e\", \"\", input)\n",
    "    input = input.strip()\n",
    "    input = re.sub('\\?\\?+','',input)\n",
    "    input = re.sub('\\{IMG:.?.?.?\\}','',input)\n",
    "    input = re.sub('\\t|\\n','', input)\n",
    "    # pattern1 = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')  # 剔除链接\n",
    "    # URLs: optional scheme + domain-or-IP + optional port and path\n",
    "    pattern1 = re.compile(\n",
    "        r'((http|ftp|https)://)?(([a-zA-Z0-9\\._-]+\\.[a-zA-Z]{2,6})|([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}))(:[0-9]{1,4})*(/[a-zA-Z0-9\\&%_\\./-~-]*)?')\n",
    "\n",
    "    pattern2 = re.compile(\"\\{IMG.*?\\}\")  # strip {IMG:1}{IMG:2} etc.\n",
    "    # pattern3 = re.compile(\"（.*?\\）\") # 剔除括号等等\n",
    "    # pattern4 = re.compile(\"《.*?\\》\")  # 剔除括号等等\n",
    "    # pattern5 = re.compile(\"【.*?】\")  # 删除括号内容\n",
    "    pattern6 = re.compile(\"\\?{2,}\")  # runs of 2+ question marks\n",
    "    pattern7 = re.compile(\n",
    "        \"[\\w!#$%&'*+/=?^_`{|}~-]+(?:\\.[\\w!#$%&'*+/=?^_`{|}~-]+)*@(?:[\\w](?:[\\w-]*[\\w])?\\.)+[\\w](?:[\\w-]*[\\w])?\")  # e-mail addresses\n",
    "    pattern8 = re.compile(\"0\\d{2}-\\d{8}|0\\d{3}-\\d{7}|\\d{5}-\\d{5}|\\d{3}-\\d{3}-\\d{4}\")  # phone numbers\n",
    "    pattern9 = re.compile(\"(20\\d{2}([\\.\\-/|年月\\s]{1,3}\\d{1,2}){2}日?(\\s?\\d{2}:\\d{2}(:\\d{2})?)?)|(\\d{1,2}\\s?(分钟|小时|天)前)\")  # dates / relative times\n",
    "    pattern10 = re.compile(\"<.*?>\")  # HTML tags\n",
    "    punct = string.punctuation \n",
    "    pattern11 = re.compile(\"[^\\u4e00-\\u9fa5^a-z^A-Z^0-9%s]+\" % punct)  # runs outside CJK/alnum/punct (embedded '^'s are literal)\n",
    "    pattern12 = re.compile('^[?：！*]', re.S)  # leading punctuation\n",
    "    pattern13 = re.compile(\"#\")  # strip '#'\n",
    "    pattern14 = re.compile(\"\\(\\)|{}\")  # empty bracket pairs\n",
    "\n",
    "    # intended substitution order (currently unused -- see NOTE above)\n",
    "    pattern = [pattern11, pattern10, pattern2, pattern1, pattern6, pattern7, pattern8, pattern9, pattern14]\n",
    "    # pattern = [pattern11, pattern10, pattern2, pattern1, pattern6, pattern7, pattern8, pattern14]\n",
    "    pattern_2 = [pattern12, pattern13]\n",
    "\n",
    "\n",
    "    # pattern_2 = [pattern4, pattern5, pattern12]\n",
    "    def clean_zh(text):\n",
    "        '''Clean the text while keeping sentences readable (cannot handle decimal points).'''\n",
    "        # text = text.replace(\"（\", \"(\").replace(\"）\", \")\")\n",
    "        # punct = string.punctuation + punctuation\n",
    "        # punct = \"\".join([c for c in punct if c not in [\".\", \"、\", \"%\", \"“\", \"”\", \"(\", \")\", \"！\", \"。\", \"？\"]])\n",
    "        # text = re.sub(r\"[%s]+\" % punct, \" \", text)\n",
    "        # 将引号替换\n",
    "        # text = re.sub(r\"[%s]+\" % \"“”()\", \"\", text)\n",
    "        # text = re.sub(r\"[%s]+\" % \"：\", \" \", text)\n",
    "        # 多个空格替换成一个\n",
    "        # text = re.sub('/{2,}', '', text)\n",
    "        # text = re.sub('\\|{2,}', '', text)\n",
    "        text = re.sub('window.public=.*\\(window[,，]document\\);', ' ', text)  # inline JS blob\n",
    "        text = re.sub('varcontentConEle=.*AD_SURVEY_Add_AdPos\\(\\\"42974\\\"\\);', ' ', text)  # ad-survey JS blob\n",
    "        text = re.sub('&nbsp;|&quot;', '', text)\n",
    "        text = re.sub('　+', ' ', text)  # full-width spaces -> one ASCII space\n",
    "        text = re.sub(' +', ' ', text)  # collapse repeated spaces\n",
    "        text = re.sub('%+', '%', text)\n",
    "        text = re.sub('#+', '#', text)\n",
    "\n",
    "        # add ccp3\n",
    "        text = re.sub(\"\\[.*?\\]\", '', text)  # bracketed fragments\n",
    "        text = re.sub(\",+\", '，', text)\n",
    "\n",
    "        #add\n",
    "        text = text.replace(\"\\xa0\", \"\")\n",
    "        text = text.replace(\"\\b\", \"\")\n",
    "        text = re.sub(\"\\t|\\n|\\x0b|\\x1c|\\x1d|\\x1e\", \"\", text)\n",
    "\n",
    "        # NOTE(review): the last entry (r'&', '&') is a no-op\n",
    "        pattern = [(r'&amp;', '&'), (r'&lt;', '<'), (r'&gt;', '>'), (r'&quot;', ''),\n",
    "               (r'&nbsp;', ''), (r'br/', ''), (r'&', '&')]\n",
    "\n",
    "        # substitute/strip HTML entities according to `pattern`\n",
    "        for p in pattern:\n",
    "            text = re.sub(p[0], p[1], text)\n",
    "        \n",
    "        html1 = re.compile(r'(https?://)([\\da-z\\.-]+)\\.([a-z\\.]{2,6})([/\\w \\.-]*)*/?')\n",
    "        html2 = re.compile(r'(www)\\.([\\da-z\\.-]+)\\.([a-z\\.]{2,6})([/\\w \\.-]*)*/?')\n",
    "\n",
    "        text = re.sub(html1, ' ', text)\n",
    "        text = re.sub(html2, ' ', text)\n",
    "        return text\n",
    "    input=clean_zh(input)\n",
    "    return input\n",
    "    \n",
    "def juhao(x):\n",
    "    if x==\"\":\n",
    "        return x\n",
    "    elif x[-1] not in '.。？！!?':\n",
    "        return x+'。'\n",
    "    else:\n",
    "        return x\n",
    "\n",
    "def loadStopWords(path):\n",
    "    stopwords = []\n",
    "    # 加载停止词\n",
    "    with open(path, 'r', encoding='utf8') as f:\n",
    "        for line in f:\n",
    "            stopwords.append(line.strip())\n",
    "    return stopwords\n",
    "\n",
    "\n",
    "\n",
    "def ngram(news,stopwords):\n",
    "    if news['title_cut']!='':\n",
    "        fenci = news['title_cut'].split(',')#########词列表\n",
    "    else:\n",
    "        fenci = [word.strip() for word in jieba.cut(news['text'][:int(len(news['text'])/4)]) if (word.strip() not in stopwords and word.strip())]\n",
    "\n",
    "    text = news['title'] + '\\n' + news['text']############title和content之间用\\n分割开，，记得 text\n",
    "    result = copy.deepcopy(fenci)\n",
    "    length = len(fenci)\n",
    "    appear = []\n",
    "    for i in range(length):\n",
    "        if i < length - 1:\n",
    "            combine = fenci[i] + fenci[i + 1]#######相邻两个词组成一个词 2-gram，用空格分一下\n",
    "            count = text.count(combine)\n",
    "            if count > 1 and combine not in appear:\n",
    "                appear.append(combine)\n",
    "                result.append(combine)\n",
    "\n",
    "            combine = fenci[i] + ' ' + fenci[i + 1]#######相邻两个词组成一个词 2-gram，用空格分一下\n",
    "            count = text.count(combine)\n",
    "            if count > 1 and combine not in appear:############如果这个词在title+content中出现的次数大于等于2次，那么就认为这是个新词\n",
    "                appear.append(combine)\n",
    "                result.append(combine)\n",
    "\n",
    "            if i < length - 2:\n",
    "                ## 分词合并\n",
    "                combine = fenci[i] + fenci[i + 1] + fenci[i + 2]\n",
    "                count = text.count(combine)\n",
    "                if count > 1 and combine not in appear:\n",
    "                    appear.append(combine)\n",
    "                    result.append(combine)\n",
    "\n",
    "                ## 分词加空格合并\n",
    "                combine = fenci[i] + ' ' + fenci[i + 1] + fenci[i + 2]#######相邻三个词组成一个词 3-gram，用空格分一下\n",
    "                count = text.count(combine)\n",
    "                if count > 1 and combine not in appear:\n",
    "                    appear.append(combine)\n",
    "                    result.append(combine)\n",
    "\n",
    "                ## 分词加空格合并\n",
    "                combine = fenci[i] + fenci[i + 1] + ' ' + fenci[i + 2]\n",
    "                count = text.count(combine)\n",
    "                if count > 1 and combine not in appear:\n",
    "                    appear.append(combine)\n",
    "                    result.append(combine)\n",
    "    return ';'.join(result)#############result是分好了词 且进行了ngram的结果\n",
    "\n",
    "def get_cixing(s):\n",
    "    \"\"\"Part-of-speech tag every token of a ';'-joined token string.\n",
    "\n",
    "    Each token's flag comes from the first segment produced by\n",
    "    jieba.posseg (`pseg.cut`).  Returns the flags ';'-joined, aligned\n",
    "    1:1 with the input tokens; a tagging failure is printed and then\n",
    "    trips the length assert below.\n",
    "    \"\"\"\n",
    "    tokens = s.split(';')\n",
    "    cixing = []\n",
    "    for word in tokens:\n",
    "        try:\n",
    "            p = next(pseg.cut(word)).flag\n",
    "            cixing.append(p)\n",
    "        except Exception:  # was a bare except: it also swallowed KeyboardInterrupt/SystemExit\n",
    "            print(word, s)\n",
    "    if len(tokens) != len(cixing):\n",
    "        # dump both lists for debugging before the assert fires\n",
    "        print(cixing,'*****************')\n",
    "        print(tokens,\"*****************\")\n",
    "        assert len(tokens) == len(cixing), '长度不匹配'\n",
    "    return ';'.join(cixing)\n",
    "\n",
    "#     print(cixing,'*****************')\n",
    "#     print(tokens,\"*****************\")\n",
    "#     assert len(tokens) == len(cixing), '长度不匹配'\n",
    "#     return ';'.join(cixing)\n",
    "\n",
    "def add_bert_to_tokens(all_df):\n",
    "    \"\"\"Inject candidate-entity tokens into each row's segmented text.\n",
    "\n",
    "    For every row, each ';'-separated token from `tokens` that does not\n",
    "    already occur as a substring of `tokens_with_sw` (the '/split'-joined\n",
    "    jieba segmentation) is wrapped around the text as\n",
    "    '/split' + word + <text> + '/split' + word -- presumably so the\n",
    "    '/split' tokenisers downstream see it as a token (confirm intent).\n",
    "    Returns the list of augmented strings, one per row.\n",
    "    \"\"\"\n",
    "    result = []\n",
    "    for i in tqdm(range(all_df.shape[0])):\n",
    "        news = all_df.iloc[i,:]\n",
    "        all_tokens = news['tokens_with_sw']  # '/split'-joined segmentation of title+content\n",
    "        if not pd.isnull(news['tokens']):\n",
    "            tokens = news['tokens'].split(';')  # candidate entities (BERT + ground truth)\n",
    "            for word in tokens:\n",
    "                if word not in all_tokens:  # NOTE: substring test, not token-level membership\n",
    "                    all_tokens = '/split' + word + all_tokens + '/split' + word #/split A BCD /split A  \n",
    "                    ##### '/split' E  /split A BCD /split A   '/split' E\n",
    "        result.append(all_tokens)\n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# lda和kmeans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import pickle\n",
    "import os\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn import decomposition\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from gensim.models import Word2Vec, Doc2Vec\n",
    "from gensim.models.doc2vec import TaggedDocument\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from sklearn.decomposition import LatentDirichletAllocation\n",
    "from sklearn.cluster import KMeans\n",
    "\n",
    "model_path='temp2'  # directory where fitted models (Tfidf/lda/kmeans/word2vec/doc2vec) are saved\n",
    "data_gen='temp2'  # directory holding generated data (all_tokens.csv)\n",
    "# Train the LDA topic model\n",
    "def train_lda(all_df, n_topics=15):\n",
    "    \"\"\"Fit an LDA topic model on the (stop-word-free) 'tokens' column,\n",
    "    pickle it to <model_path>/lda.pkl, and return each document's most\n",
    "    probable topic index.\"\"\"\n",
    "    # bag-of-words counts over the token strings\n",
    "    bow = CountVectorizer().fit_transform(all_df['tokens'])\n",
    "\n",
    "    lda = LatentDirichletAllocation(n_components=n_topics, max_iter=150)\n",
    "    print(\"正在训练LDA主题模型...\")\n",
    "    doc_topic = lda.fit_transform(bow)\n",
    "\n",
    "    # persist the fitted model\n",
    "    with open(os.path.join(model_path, 'lda.pkl'), 'wb') as f:\n",
    "        pickle.dump(lda, f)\n",
    "    print(\"LDA主题模型已保存...\")\n",
    "\n",
    "    # hardest-assignment topic per document\n",
    "    return np.argmax(doc_topic, axis=1)\n",
    "\n",
    "# Train the KMeans clusterer\n",
    "def train_kmeans(all_df, n_clusters=15):\n",
    "    \"\"\"Cluster the documents' doc2vec vectors into `n_clusters` groups,\n",
    "    pickle the fitted clusterer to <model_path>/kmeans.pkl and return\n",
    "    the per-document cluster labels.\"\"\"\n",
    "    # look up every document's vector in the previously trained doc2vec model\n",
    "    doc2vec = Doc2Vec.load(os.path.join(model_path, 'doc2vec.model'))\n",
    "    vectors = [doc2vec[ID] for ID in all_df['doc_id']]\n",
    "\n",
    "    clusterer = KMeans(n_clusters=n_clusters)\n",
    "    print(\"正在训练KMeans聚类结果...\")\n",
    "    labels = clusterer.fit_predict(vectors)\n",
    "\n",
    "    # persist the fitted clusterer\n",
    "    with open(os.path.join(model_path, 'kmeans.pkl'), 'wb') as f:\n",
    "        pickle.dump(clusterer, f)\n",
    "    print(\"KMean聚类模型已保存...\")\n",
    "\n",
    "    return labels\n",
    "\n",
    "\n",
    "# Train truncated-SVD (LSI) topic features\n",
    "def train_tsvd(all_df,  n_topics=15):\n",
    "    \"\"\"Project the TF-IDF matrix of the 'tokens' column onto `n_topics`\n",
    "    latent dimensions and return them as a DataFrame with columns\n",
    "    lsi_0..lsi_{n_topics-1}.\n",
    "\n",
    "    Loads the TfidfVectorizer previously pickled by train_tfidf().\n",
    "    Fix: the original hard-coded n_components=15 and silently ignored\n",
    "    the `n_topics` parameter; it is honoured now.  The pickle file is\n",
    "    also opened via a context manager so it is always closed.\n",
    "    \"\"\"\n",
    "    corpus = all_df['tokens']\n",
    "    tfidf_path = os.path.join(model_path, 'Tfidf.pkl')\n",
    "    with open(tfidf_path, 'rb') as f:\n",
    "        tfidf_model = pickle.load(f)\n",
    "    print(\"开始构建tsvd矩阵\")\n",
    "    tfidf_all = tfidf_model.transform(corpus)\n",
    "    tsvd_pred = decomposition.TruncatedSVD(n_components=n_topics).fit_transform(tfidf_all)\n",
    "    tsvd_col = ['lsi_' + str(i) for i in range(len(tsvd_pred[0]))]\n",
    "    return pd.DataFrame(tsvd_pred, columns=tsvd_col)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练w2v和tfidf还有doc2vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 --*--\n",
    "# @Author: Zessay\n",
    "# @time: 2019.05.05 20:21\n",
    "# @File: train_models.py\n",
    "# @Software: PyCharm\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import pickle\n",
    "import os\n",
    "\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from gensim.models import Word2Vec, Doc2Vec\n",
    "from gensim.models.doc2vec import TaggedDocument\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from sklearn.decomposition import LatentDirichletAllocation\n",
    "from sklearn.cluster import KMeans\n",
    "\n",
    "\n",
    "'''\n",
    "# 读取生成的分词文件给后面模型训练使用\n",
    "token_path = '../data/gen/all_tokens.csv'\n",
    "all_df = pd.read_csv(token_path)\n",
    "'''\n",
    "\n",
    "\n",
    "# Train the TF-IDF vectorizer\n",
    "def train_tfidf(all_df):\n",
    "    \"\"\"Fit a TfidfVectorizer on the 'tokens' column and pickle it to\n",
    "    <model_path>/Tfidf.pkl.\"\"\"\n",
    "    print(\"正在训练TFIDF模型...\")\n",
    "    tfidf_model = TfidfVectorizer().fit(all_df['tokens'].values)\n",
    "\n",
    "    # persist the fitted vectorizer\n",
    "    with open(os.path.join(model_path, 'Tfidf.pkl'), 'wb') as f:\n",
    "        pickle.dump(tfidf_model, f)\n",
    "    print(\"TFIDF模型已保存...\")\n",
    "\n",
    "# Define the word2vec training routine\n",
    "def train_word2vec(all_df):\n",
    "    \"\"\"Train a Word2Vec model on the '/split'-separated token lines and\n",
    "    save it to <model_path>/word2vec.model.\n",
    "\n",
    "    NOTE(review): uses gensim-3.x parameter names (size=, iter=) while\n",
    "    train_doc2vec uses the 4.x-style names (vector_size=, epochs=) --\n",
    "    confirm the installed gensim version accepts both before unifying.\n",
    "    \"\"\"\n",
    "    sentences = [line.strip().split('/split') for line in all_df['tokens_with_sw']]\n",
    "    # train 200-dimensional word vectors\n",
    "    word2vec = Word2Vec(size=200, window=5, min_count=1, iter=20,workers=8)\n",
    "    word2vec.build_vocab(sentences)\n",
    "    print(\"正在训练Word2Vec模型...\")\n",
    "    word2vec.train(sentences, total_examples=word2vec.corpus_count, epochs=word2vec.epochs)\n",
    "    word2vec.save(os.path.join(model_path, 'word2vec.model'))\n",
    "    print(\"Word2Vec模型已保存...\")\n",
    "\n",
    "# Define the doc2vec training routine\n",
    "def train_doc2vec(all_df):\n",
    "    \"\"\"Train a 200-dim Doc2Vec model, tagging every document with its\n",
    "    doc_id, and save it to <model_path>/doc2vec.model.\"\"\"\n",
    "    corpus = [line.strip().split('/split') for line in all_df['tokens_with_sw']]\n",
    "    # pair each token list with its doc_id tag\n",
    "    tagged = [TaggedDocument(doc, [ID]) for doc, ID in zip(corpus, all_df['doc_id'])]\n",
    "\n",
    "    model = Doc2Vec(vector_size=200, window=5, min_count=1, epochs=20)\n",
    "    model.build_vocab(tagged)\n",
    "    print(\"正在训练Doc2Vec模型...\")\n",
    "    model.train(tagged, total_examples=model.corpus_count, epochs=model.epochs)\n",
    "    model.save(os.path.join(model_path, 'doc2vec.model'))\n",
    "    print(\"Doc2Vec模型已保存...\")\n",
    "\n",
    "\n",
    "def train_all_models():\n",
    "    \"\"\"Load the generated token file and fit TF-IDF, word2vec and doc2vec.\"\"\"\n",
    "    all_df = pd.read_csv(os.path.join(data_gen, 'all_tokens.csv'))\n",
    "    # run the three trainers in sequence on the same frame\n",
    "    for trainer in (train_tfidf, train_word2vec, train_doc2vec):\n",
    "        trainer(all_df)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#!/usr/bin/python\n",
    "# -*- coding: utf-8 -*-\n",
    "\n",
    "import os\n",
    "import math\n",
    "import re\n",
    "import datetime\n",
    "import sys, getopt\n",
    "\n",
    "\n",
    "import jieba\n",
    "import re\n",
    "import pandas as pd\n",
    "import re\n",
    "import os\n",
    "import sys\n",
    "import numpy as np\n",
    "import string\n",
    "from random import choice\n",
    "import jieba\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "# Load round-2 and round-1 data (utf-8-sig strips any BOM)\n",
    "train_df_r2 = pd.read_csv(\"data/Round2_train_huibiao.csv\", encoding=\"utf-8-sig\")\n",
    "test_df_r2 = pd.read_csv(\"data/Round2_Test.csv\", encoding=\"utf-8-sig\")\n",
    "\n",
    "train_df_r1 = pd.read_csv(\"data/r1_train_hand.csv\", encoding=\"utf-8-sig\")\n",
    "test_df_r1 = pd.read_csv(\"data/Round1_Test_Data.csv\", encoding=\"utf-8-sig\")\n",
    "\n",
    "# stack the two rounds: round-2 rows first, round-1 second\n",
    "train_df=pd.concat([train_df_r2,train_df_r1],axis=0)\n",
    "test_df=pd.concat([test_df_r2,test_df_r1],axis=0)\n",
    "\n",
    "train_df['title']=train_df['title'].fillna('')\n",
    "test_df['title']=test_df['title'].fillna('')\n",
    "\n",
    "train_df['text'] =  train_df['text'].fillna('')\n",
    "test_df['text'] =   test_df['text'].fillna('')\n",
    "\n",
    "# ensure every title ends with sentence-final punctuation\n",
    "train_df[['title']]=train_df[['title']].applymap(lambda x:juhao(x))\n",
    "test_df[['title']]=test_df[['title']].applymap(lambda x:juhao(x))\n",
    "\n",
    "\n",
    "train_df['text'] = train_df['text'].apply(clean_str)\n",
    "test_df['text'] = test_df['text'].apply(clean_str)\n",
    "\n",
    "train_df['title'] = train_df['title'].apply(clean_str)\n",
    "test_df['title'] = test_df['title'].apply(clean_str)\n",
    "# collect every character that is not CJK, ASCII letter/digit, or '*'\n",
    "additional_chars = set()\n",
    "for t in list(train_df.text) + list(test_df.text) +list(train_df.title) + list(test_df.title):\n",
    "    additional_chars.update(re.findall(u'[^\\u4e00-\\u9fa5a-zA-Z0-9\\*]', t))\n",
    "\n",
    "# symbols that must be kept despite being non-alphanumeric\n",
    "extra_chars = set(\"!#$%&\\()*+,-./:;<=>?@[\\\\]^_`{|}~！#￥%&？《》{}“”，：‘’。（）·、；【】\")\n",
    "additional_chars = additional_chars.difference(extra_chars)\n",
    "def remove_additional_chars(input):\n",
    "    \"\"\"Strip every character collected in `additional_chars` from `input`.\"\"\"\n",
    "    for x in additional_chars:\n",
    "        input = input.replace(x, \"\")\n",
    "    return input\n",
    "train_df[\"text\"] = train_df[\"text\"].apply(remove_additional_chars)\n",
    "test_df[\"text\"] = test_df[\"text\"].apply(remove_additional_chars)\n",
    "train_df[\"title\"] = train_df[\"title\"].apply(remove_additional_chars)\n",
    "test_df[\"title\"] = test_df[\"title\"].apply(remove_additional_chars)\n",
    "\n",
    "rawwords = loadStopWords('stopwords.txt')\n",
    "stopwords = rawwords  # stop-word dictionary (sentiment + common words? -- unverified)\n",
    "\n",
    "\n",
    "# lengths used later to split the concatenated frames back apart\n",
    "train_len=len(train_df)\n",
    "test_len=len(test_df)\n",
    "train_r2_len=len(train_df_r2)\n",
    "test_r2_len=len(test_df_r2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 生成去停用词和不去停用词的数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import multiprocessing\n",
    "import jieba\n",
    "import jieba.posseg as pseg\n",
    "import pandas as pd\n",
    "import gc\n",
    "import os\n",
    "import re\n",
    "import codecs\n",
    "import copy\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "## Merge the train and test frames\n",
    "print(\"合并训练集和测试集...\")\n",
    "all_df = pd.concat([train_df[['id', 'title', 'text']] ,test_df], axis=0, ignore_index=True)\n",
    "all_df.columns=['id','title','content']\n",
    "all_df['text']=all_df['title']+all_df['content']\n",
    "# train and test rows are now stacked in all_df (train first)\n",
    "\n",
    "# register known entities as a jieba user dictionary so they stay unsplit\n",
    "jieba.load_userdict(\"all_entities_bert_train.txt\")\n",
    "all_ent_bert_train=pd.read_csv(\"all_entities_bert_train.txt\",header=None)\n",
    "all_ent_bert_train=list(set([i for i in all_ent_bert_train[0]]))\n",
    "\n",
    "# drop the NaN that read_csv may have produced from blank lines\n",
    "try:\n",
    "    all_ent_bert_train.remove(np.nan)\n",
    "except:\n",
    "    pass\n",
    "## Load the BERT results\n",
    "# bert_train = pd.read_csv(\"data/train_fu.csv\")\n",
    "# bert_test=pd.read_csv(\"data/submit.csv\")\n",
    "# bert_all=pd.concat([bert_train['unknownEntities'],bert_test['unknownEntities']],axis=0).reset_index(drop=True)\n",
    "# every known entity that occurs verbatim in the row's text, ';'-joined\n",
    "all_df['bert_result']=all_df['text'].apply(lambda x:\";\".join([i for i in all_ent_bert_train if i in x]))\n",
    "# all_df['title_cut'] = all_df['title'].apply(lambda s: ','.join([word.strip() for word in jieba.cut(s) if (word.strip() not in stopwords and word.strip())]))\n",
    "print(\"cut end\")\n",
    "# title = []\n",
    "# for i in tqdm(range(all_df.shape[0])):\n",
    "#     news = all_df.iloc[i, :]\n",
    "#     result = ngram(news,stopwords)#################进行了n-gram（2、3-gram），词和词之间以逗号分割，，，是title的分词结果，，如果title的分词结果在text中出现了两次以上 那么就认为这是一个新词\n",
    "#     title.append(result)\n",
    "# all_df['title_ngram'] = title############这里虽然写了title_ngram但实际上是 title+content\n",
    "\n",
    "# ground-truth entities exist only for the training rows; NaN for test rows\n",
    "all_unknownEntities=[i for i in train_df['unknownEntities']]+[np.nan for i in range(len(test_df))]\n",
    "all_df['unknownEntities']=all_unknownEntities\n",
    "\n",
    "\n",
    "# NOTE(review): these slices are views of all_df; the assignments below can\n",
    "# trigger pandas SettingWithCopyWarning -- consider .copy() if it matters\n",
    "train_df = all_df[:len(train_df)]  # split the train rows back out\n",
    "test_df = all_df[len(train_df):]  # and the test rows\n",
    "train_df['unknownEntities']=train_df['unknownEntities'].fillna('')\n",
    "train_df['bert_result']=train_df['bert_result'].fillna('')\n",
    "# train_df['title_ngram']=train_df['title_ngram'].fillna('')\n",
    "test_df['bert_result']=test_df['bert_result'].fillna('')\n",
    "# test_df['title_ngram']=test_df['title_ngram'].fillna('')\n",
    "\n",
    "# train_df['bert_and_title'] = train_df['bert_result'] + ';' + train_df['unknownEntities'] + ';' + train_df['title_ngram']\n",
    "train_df['bert_and_title'] = train_df['bert_result'] + ';' + train_df['unknownEntities']\n",
    "# candidate set = BERT-predicted entities plus the official ground-truth ones\n",
    "test_df['bert_and_title'] = test_df['bert_result'] \n",
    "\n",
    "all_df = pd.concat([train_df, test_df], axis=0, sort=False, ignore_index=True)\n",
    "\n",
    "\n",
    "# de-duplicate the candidate set and drop empty / single-character tokens\n",
    "all_df['tokens'] = all_df['bert_and_title'].apply(\n",
    "    lambda s: ';'.join([w.strip() for w in set(s.split(';')) if w!='' and len(w.strip()) > 1]))\n",
    "    # 'tokens' = all candidate entities joined as 'A;B;C'\n",
    "all_df['tokens']=all_df['tokens'].apply(lambda x:'无' if x=='' else x)  # '无' = placeholder for rows with no candidates\n",
    "\n",
    "all_df['cixing'] = all_df['tokens'].apply(lambda s: get_cixing(s) if not pd.isnull(s) else s)  # POS flag per token, ';'-joined\n",
    "\n",
    "# all_df.drop(['bert_result', 'title_cut', 'title_ngram', 'bert_and_title'], axis=1, inplace=True)\n",
    "all_df.drop(['bert_result', 'bert_and_title'], axis=1, inplace=True)\n",
    "# columns now: id, title, content, text, unknownEntities, tokens, cixing\n",
    "\n",
    "all_df['tokens_with_sw'] = all_df['text'].apply(lambda s: '/split'.join(jieba.cut(s)))  # raw segmentation (stop words kept)\n",
    "all_df['tokens_with_sw'] = add_bert_to_tokens(all_df)  # inject candidate entities into the segmentation\n",
    "\n",
    "\n",
    "all_df['doc_id']=[i for i in range(train_len+test_len)]\n",
    "\n",
    "\n",
    "# Train TF-IDF first: train_tsvd below loads its pickle\n",
    "train_tfidf(all_df)\n",
    "pool = multiprocessing.Pool(processes=15)\n",
    "print(\"start model make\")\n",
    "\n",
    "\n",
    "# train word2vec in a worker process\n",
    "pool.apply_async(train_word2vec, [all_df])\n",
    "# train doc2vec (its saved model file is needed by train_kmeans below)\n",
    "pool.apply_async(train_doc2vec, [all_df])\n",
    "\n",
    "pool.close()\n",
    "pool.join()\n",
    "\n",
    "pool = multiprocessing.Pool(processes=15)\n",
    "#### lda / kmeans / tsvd features computed in parallel\n",
    "model_fea=[pool.apply_async(train_lda, [all_df]),pool.apply_async(train_kmeans, [all_df]),pool.apply_async(train_tsvd, [all_df])]\n",
    "pool.close()\n",
    "pool.join()\n",
    "\n",
    "\n",
    "\n",
    "all_df['lda_classes'] = model_fea[0].get()\n",
    "all_df['kmeans_classes'] = model_fea[1].get()\n",
    "lsi_df=model_fea[2].get()\n",
    "all_df=pd.concat([all_df,lsi_df],axis=1)\n",
    "\n",
    "all_df.to_csv(\"temp2/all_tokens.csv\",index=None)\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "\n",
    "# bert_dict=pd.read_csv(\"data/submit.csv\",usecols=['unknownEntities'])\n",
    "# fu_dict=pd.read_csv(\"data/train_fu.csv\",usecols=['unknownEntities'])\n",
    "# bert_dict=pd.DataFrame(list(set([i for i in bert_dict['unknownEntities']]+[i for i in fu_dict['unknownEntities']]+[i for i in train_df['unknownEntities']])))\n",
    "# bert_dict.to_csv(\"temp2/bert_dict.txt\",index=None,header=None)\n",
    "# jieba.load_userdict(\"temp2/bert_dict.txt\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 特征生成"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 下面是图特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 根据在tokens中的共现关系来构建边"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import networkx as nx\n",
    "from networkx.readwrite.gpickle import write_gpickle, read_gpickle\n",
    "import itertools\n",
    "\n",
    "# Undirected co-occurrence graph: two tokens that appear in the same\n",
    "# document's token string get an edge.\n",
    "graph = nx.Graph()\n",
    "for idx, token_str in enumerate(all_df['tokens']):\n",
    "    tokens = token_str.split(';')\n",
    "    if len(tokens) == 1:\n",
    "        # a lone token still contributes an isolated node\n",
    "        graph.add_node(tokens[0])\n",
    "        continue\n",
    "    for left, right in itertools.combinations(tokens, 2):\n",
    "        graph.add_edge(left, right)\n",
    "    if idx % 1000 == 0:\n",
    "        print(idx)\n",
    "print('finish generate test graph, temporaray graph size ->', graph.number_of_nodes(), graph.number_of_edges())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 图特征函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%%time\n",
    "\n",
    "# ShortestPath: distance between an edge's endpoints with that edge removed\n",
    "def calculate_shortestpath(row):\n",
    "    \"\"\"Temporarily drop the edge (row[0], row[1]) and measure the shortest\n",
    "    remaining path between its endpoints (node count; -1 if disconnected).\n",
    "    NOTE(review): the edge is not restored if an exception escapes.\"\"\"\n",
    "    u, v = row[0], row[1]\n",
    "    graph.remove_edge(u, v)\n",
    "    if nx.has_path(graph, u, v):\n",
    "        dist = len(nx.shortest_path(graph, u, v))\n",
    "    else:\n",
    "        dist = -1\n",
    "    graph.add_edge(u, v)\n",
    "    return [dist]\n",
    "def gen_degrees(graph):\n",
    "    max_degrees = {}\n",
    "    edges = graph.edges()\n",
    "    for edge in edges:\n",
    "        for n in edge:\n",
    "            max_degrees[n] = max_degrees.get(n, 0) + 1\n",
    "    return max_degrees\n",
    "\n",
    "def gen_components(graph):\n",
    "    \"\"\"Map every node to the size of its connected component.\"\"\"\n",
    "    max_components = {}\n",
    "    for comp in nx.connected_components(graph):\n",
    "        comp_size = len(comp)\n",
    "        for node in comp:\n",
    "            # max() kept from the original, though each node occurs in\n",
    "            # exactly one component\n",
    "            max_components[node] = max(max_components.get(node, 0), comp_size)\n",
    "    return max_components\n",
    "\n",
    "def gen_hits(graph):\n",
    "    \"\"\"Run HITS (up to 500 iterations) on the graph and return the\n",
    "    (hub_scores, authority_scores) dicts.\"\"\"\n",
    "    hits_h, hits_a = nx.hits(graph, max_iter=500)\n",
    "    return hits_h, hits_a\n",
    "\n",
    "################################ per-token graph-feature helpers\n",
    "# Neighbour count\n",
    "def calculate_single_neighbour(tokens):\n",
    "    \"\"\"Number of distinct neighbours in the global co-occurrence `graph`\n",
    "    for every ';'-separated token, returned ';'-joined in order.\"\"\"\n",
    "    words = tokens.split(';')\n",
    "    counts = [str(len(set(graph.neighbors(w)))) for w in words]\n",
    "    assert len(words) == len(counts), '长度不匹配'\n",
    "    return ';'.join(counts)\n",
    "        \n",
    "\n",
    "def calculate_single_hits_h(tokens):\n",
    "    \"\"\"HITS hub score (scaled by 1e6) of every ';'-separated token,\n",
    "    read from the global `hits_h` dict.\"\"\"\n",
    "    words = tokens.split(';')\n",
    "    scores = [str(hits_h[w] * 1e6) for w in words]\n",
    "    assert len(words) == len(scores), '长度不匹配'\n",
    "    return ';'.join(scores)\n",
    "        \n",
    "\n",
    "def calculate_single_hits_a(tokens):\n",
    "    \"\"\"HITS authority score (scaled by 1e6) of every ';'-separated token,\n",
    "    read from the global `hits_a` dict.\"\"\"\n",
    "    words = tokens.split(';')\n",
    "    scores = [str(hits_a[w] * 1e6) for w in words]\n",
    "    assert len(words) == len(scores), '长度不匹配'\n",
    "    return ';'.join(scores)\n",
    "        \n",
    "\n",
    "def calculate_single_statistics_degrees(tokens):\n",
    "    \"\"\"Precomputed degree (global `max_degrees` dict) of every\n",
    "    ';'-separated token; \"0\" for tokens missing from the graph.\"\"\"\n",
    "    words = tokens.split(';')\n",
    "    vals = [str(max_degrees[w]) if w in max_degrees else \"0\" for w in words]\n",
    "    assert len(words) == len(vals), '长度不匹配'\n",
    "    return ';'.join(vals)\n",
    "        \n",
    "\n",
    "\n",
    "def calculate_single_statistics_components(tokens):\n",
    "    \"\"\"Connected-component size (global `max_components` dict) of every\n",
    "    ';'-separated token, ';'-joined in order.\"\"\"\n",
    "    words = tokens.split(';')\n",
    "    sizes = [str(max_components[w]) for w in words]\n",
    "    assert len(words) == len(sizes), '长度不匹配'\n",
    "    return ';'.join(sizes)\n",
    "        \n",
    "def calculate_single_pagerank(tokens):\n",
    "    \"\"\"PageRank score (scaled by 1e6, from the global `page_rank` dict)\n",
    "    of every ';'-separated token, ';'-joined in order.\"\"\"\n",
    "    words = tokens.split(';')\n",
    "    ranks = [str(page_rank[w] * 1e6) for w in words]\n",
    "    assert len(words) == len(ranks), '长度不匹配'\n",
    "    return ';'.join(ranks)\n",
    "        \n",
    "pool = multiprocessing.Pool(processes=4)\n",
    "print(\"start model make\")\n",
    "\n",
    "\n",
    "#### degrees / pagerank / components / hits computed in parallel\n",
    "#### (the original comment said \"lda kmeans tsvd\" -- copy-paste leftover)\n",
    "model_graph=[pool.apply_async(gen_degrees, [graph]),pool.apply_async(nx.pagerank_scipy, [graph]),pool.apply_async(gen_components, [graph]),pool.apply_async(gen_hits, [graph])]\n",
    "\n",
    "pool.close()\n",
    "pool.join()\n",
    "    \n",
    "    \n",
    "# unpack the four async results into module-level globals read by the\n",
    "# calculate_single_* helpers above\n",
    "max_degrees = model_graph[0].get()\n",
    "page_rank = model_graph[1].get()\n",
    "max_components = model_graph[2].get()\n",
    "hits_h, hits_a = model_graph[3].get()\n",
    "\n",
    "def all_grapha_process(part_index):\n",
    "    \"\"\"Build the six per-token graph-feature columns for the rows of\n",
    "    `all_df_r2` selected by `part_index` (a list of positional indices).\n",
    "\n",
    "    NOTE(review): reads the global `all_df_r2`, which is only assigned\n",
    "    further down this cell -- it works because the function is called\n",
    "    after that assignment, but the ordering is fragile.\n",
    "    \"\"\"\n",
    "    one_part_df=pd.DataFrame()\n",
    "    one_part_df['neighbour'] = all_df_r2['tokens'].iloc[part_index].apply(lambda s: calculate_single_neighbour(s) if not pd.isnull(s) else s)  # neighbour count per token\n",
    "    one_part_df['hits_h'] = all_df_r2['tokens'].iloc[part_index].apply(lambda s: calculate_single_hits_h(s) if not pd.isnull(s) else s)  # hub scores\n",
    "    one_part_df['hits_a'] = all_df_r2['tokens'].iloc[part_index].apply(lambda s: calculate_single_hits_a(s) if not pd.isnull(s) else s)  # authority scores\n",
    "    one_part_df['degrees'] = all_df_r2['tokens'].iloc[part_index].apply(lambda s: calculate_single_statistics_degrees(s) if not pd.isnull(s) else s)  # degrees\n",
    "    one_part_df['components'] = all_df_r2['tokens'].iloc[part_index].apply(lambda s: calculate_single_statistics_components(s) if not pd.isnull(s) else s)  # component sizes\n",
    "    one_part_df['pagerank'] = all_df_r2['tokens'].iloc[part_index].apply(lambda s: calculate_single_pagerank(s) if not pd.isnull(s) else s)  # pagerank scores\n",
    "    return one_part_df\n",
    "    \n",
    "# Build the round-2 frame (head of train + head of test), compute the graph\n",
    "# features in a 40-worker pool, and write the result to temp2/.\n",
    "all_df_train=all_df[:train_len]\n",
    "all_df_test=all_df[train_len:]\n",
    "all_df_r2=pd.concat([all_df_train[:train_r2_len],all_df_test[:test_r2_len]],axis=0).reset_index(drop=True)\n",
    "\n",
    "pool = multiprocessing.Pool(processes=40)\n",
    "grapha_fea=[]\n",
    "\n",
    "# split the row indices into ~40 equally sized chunks for the worker pool\n",
    "split_size=int(len(all_df_r2)/40)\n",
    "all_index=list(range(len(all_df_r2)))\n",
    "df_split_index=[all_index[i:i+split_size] for i in range(0,len(all_index),split_size)]  \n",
    "\n",
    "for part_id,part_index in enumerate(df_split_index):    \n",
    "    grapha_fea.append(pool.apply_async(all_grapha_process, [part_index]))\n",
    "    print(part_id)\n",
    "pool.close()\n",
    "pool.join()\n",
    "grapha_fea=[i.get() for i in grapha_fea]\n",
    "grapha_df=pd.concat(grapha_fea,axis=0).reset_index(drop=True)\n",
    "all_df_r2=pd.concat([all_df_r2,grapha_df],axis=1)\n",
    "print(\"模型训练完成\")\n",
    "\n",
    "train_df = all_df_r2[:train_r2_len]  # split back into the train part\n",
    "test_df = all_df_r2[train_r2_len:]  # ... and the test part\n",
    "train_df.to_csv(\"temp2/train_cut.csv\",index=None)\n",
    "# fix: removed a stray trailing 'b' after the call below, which made this\n",
    "# cell a SyntaxError and prevented the whole cell from running\n",
    "test_df.to_csv(\"temp2/test_cut.csv\",index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_df.columns"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Other (non-graph) features below"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import LabelEncoder\n",
    "from collections import Counter\n",
    "import pickle\n",
    "import jieba\n",
    "import jieba.posseg as pseg\n",
    "import jieba.analyse\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import json\n",
    "import os\n",
    "import re\n",
    "import codecs\n",
    "import math\n",
    "from gensim.models import Word2Vec, Doc2Vec, KeyedVectors\n",
    "from tqdm import tqdm\n",
    "\n",
    "import gc\n",
    "import math\n",
    "\n",
    "from collections import Counter\n",
    "\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.preprocessing import normalize\n",
    "import scipy.stats as stats\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "\n",
    "\n",
    "def load_models( df_):\n",
    "    \"\"\"Load the tf-idf, word2vec and doc2vec models from `model_path`,\n",
    "    training a model first (via train_tfidf / train_word2vec / train_doc2vec)\n",
    "    when its file does not exist yet.\n",
    "\n",
    "    :param df_: frame handed to the train_* helpers when a model is missing\n",
    "    :return: (tfidf_model, w2v_model, d2v_model)\n",
    "    \"\"\"\n",
    "    # model file locations\n",
    "    tfidf_path = os.path.join(model_path, 'Tfidf.pkl')\n",
    "    w2v_path = os.path.join(model_path, 'word2vec.model')\n",
    "    d2v_path = os.path.join(model_path, 'doc2vec.model')\n",
    "    # load (or train, then load) the tf-idf model\n",
    "    try:\n",
    "        f = open(tfidf_path, 'rb')\n",
    "    except:\n",
    "        train_tfidf(df_)\n",
    "        # fix: pickle files must be opened in binary mode -- 'rb' was missing\n",
    "        # here, so pickle.load() failed after a fresh train_tfidf() run\n",
    "        f = open(tfidf_path, 'rb')\n",
    "    finally:\n",
    "        tfidf_model = pickle.load(f)\n",
    "        f.close()\n",
    "\n",
    "\n",
    "    # load (or train, then load) the word2vec and doc2vec models\n",
    "    try:\n",
    "        w2v_model = Word2Vec.load(w2v_path)\n",
    "    except:\n",
    "        train_word2vec(df_)\n",
    "        w2v_model = Word2Vec.load(w2v_path)\n",
    "\n",
    "    try:\n",
    "        d2v_model = Doc2Vec.load(d2v_path)\n",
    "    except:\n",
    "        train_doc2vec(df_)\n",
    "        d2v_model = Doc2Vec.load(d2v_path)\n",
    "\n",
    "    return tfidf_model, w2v_model, d2v_model\n",
    "\n",
    "# Similarity measures between a word vector and a document vector.\n",
    "## cosine similarity\n",
    "def Cosine(wordvec, docvec):\n",
    "    \"\"\"Cosine similarity: dot product divided by the product of the norms.\"\"\"\n",
    "    vec_a, vec_b = np.array(wordvec), np.array(docvec)\n",
    "    norm_product = math.sqrt((vec_a**2).sum()) * math.sqrt((vec_b**2).sum())\n",
    "    return vec_a.dot(vec_b) / norm_product\n",
    "\n",
    "## euclidean (L2) distance\n",
    "def Euclidean(wordvec, docvec):\n",
    "    \"\"\"Euclidean distance between the two vectors.\"\"\"\n",
    "    delta = np.array(wordvec) - np.array(docvec)\n",
    "    return math.sqrt((delta**2).sum())\n",
    "\n",
    "# numeric-string check\n",
    "def is_number(x):\n",
    "    \"\"\"Return True when `x` can be parsed by float(), else False.\"\"\"\n",
    "    try:\n",
    "        float(x)\n",
    "        return True\n",
    "    except (TypeError, ValueError):\n",
    "        # narrowed from a bare except: float() raises exactly these for bad\n",
    "        # input, and a bare except would also swallow KeyboardInterrupt etc.\n",
    "        return False\n",
    "\n",
    "\n",
    "def extract_word_property(news, tfidf_model, w2v_model, d2v_model, train=True):\n",
    "    \"\"\"Build the per-candidate-word feature frame for a single news row.\n",
    "\n",
    "    :param news: a Series with at least 'title', 'content', 'tokens', 'cixing',\n",
    "        the graph columns ('neighbour', 'hits_h', 'hits_a', 'degrees',\n",
    "        'components', 'pagerank'), 'doc_id', 'lda_classes', 'kmeans_classes'\n",
    "        and the 'lsi*' columns (and 'unknownEntities' when train=True)\n",
    "    :param tfidf_model, w2v_model, d2v_model: fitted models from load_models()\n",
    "    :param train: when True, attach the 0/1 'label' column\n",
    "    :return: DataFrame with one row per surviving candidate word\n",
    "    \"\"\"\n",
    "    tmp = pd.DataFrame()\n",
    "    text = news['title'] + '\\n' + news['content']\n",
    "    # words = pseg.cut(text)\n",
    "    # print(list(zip(*words)))\n",
    "    # li = list(zip(*words))\n",
    "    # print(li[0])\n",
    "    assert len(news['tokens'].split(';')) == len(news['cixing'].split(';')), '长度不匹配'\n",
    "\n",
    "    tmp['word'] = news['tokens'].split(';')\n",
    "    tmp['word'] = tmp['word'].apply(lambda w: w.strip())\n",
    "    \n",
    "    tmp['cixing'] = news['cixing'].split(';')\n",
    "\n",
    "    tmp['neighbour'] = news['neighbour'].split(';')\n",
    "    tmp['hits_h'] = news['hits_h'].split(';')\n",
    "    tmp['hits_a'] = news['hits_a'].split(';')\n",
    "    tmp['degrees'] = news['degrees'].split(';')\n",
    "    tmp['components'] = news['components'].split(';')\n",
    "    tmp['pagerank'] = news['pagerank'].split(';')\n",
    "\n",
    "    tmp[['neighbour','hits_h','hits_a','degrees','components','pagerank']]=tmp[['neighbour','hits_h','hits_a','degrees','components','pagerank']].astype(np.float64)\n",
    "\n",
    "    tmp['doc_id'] = news['doc_id']\n",
    "    tmp['lda_classes'] = news['lda_classes']\n",
    "    tmp['kmeans_classes'] = news['kmeans_classes']\n",
    "    tsvd_col=[i for i in list(news.index) if 'lsi' in i]\n",
    "    tsvd=[i for i in news[tsvd_col].values] \n",
    "    tsvd=[tsvd for tsvd_row_num in range(len(tmp))]  # replicate the doc-level lsi vector onto every word row\n",
    "    tsvd=pd.DataFrame(tsvd,columns=tsvd_col)\n",
    "    \n",
    "    tmp=pd.concat([tmp,tsvd],axis=1)\n",
    "    # raw term-frequency column\n",
    "    tmp.dropna(axis=0, subset=['word'], inplace=True)\n",
    "    tmp['tf'] = tmp['word'].apply(lambda w: text.count(w))\n",
    "    tmp.drop_duplicates(inplace=True)\n",
    "    \n",
    "\n",
    "    stopwords = loadStopWords('stopwords.txt')\n",
    "    no_core_words = loadStopWords('no_core_stopword.txt')\n",
    "    no_core_words.sort(key=lambda x:len(x),reverse=True)  # longest stop-word first so the longest match wins\n",
    "    def no_core_word_in_word(word):\n",
    "        # return the first (longest) non-core stop-word contained in `word`,\n",
    "        # or the sentinel string when none matches\n",
    "        for no_core in no_core_words:\n",
    "            if no_core in word:\n",
    "                return no_core\n",
    "        return '没有停用'\n",
    "        \n",
    "    def cut_sent(para):\n",
    "        # split a paragraph into sentences on Chinese/ASCII terminators\n",
    "        para = re.sub('([。！？\\?])([^”’])', r\"\\1\\n\\2\", para)  # single-character sentence enders\n",
    "        para = re.sub('(\\.{6})([^”’])', r\"\\1\\n\\2\", para)  # English ellipsis (......)\n",
    "        para = re.sub('(\\…{2})([^”’])', r\"\\1\\n\\2\", para)  # Chinese ellipsis\n",
    "        para = re.sub('([。！？\\?][”’])([^，。！？\\?])', r'\\1\\n\\2', para)\n",
    "        # when a closing quote follows a terminator the quote ends the sentence,\n",
    "        # so the split marker \\n goes after the quote (the rules above keep quotes)\n",
    "        para = para.rstrip()  # drop any trailing \\n at the end of the paragraph\n",
    "        # semicolons, dashes and double quotes are deliberately ignored here;\n",
    "        # extend the rules above if they should split sentences too\n",
    "        return para.split(\"\\n\")\n",
    "\n",
    "    # flag stop-words and pure numbers\n",
    "    tmp['flag'] = tmp['word'].apply(lambda w: int((w in stopwords) or (is_number(w))))\n",
    "    # keep only the non-stop-word candidates\n",
    "    tmp = tmp[tmp['flag'] == 0]\n",
    "    tmp.drop(['flag'], axis=1, inplace=True)\n",
    "\n",
    "    tmp['stop_in_word'] = tmp['word'].apply(lambda w: no_core_word_in_word(w))  # which stop-word (if any) the word contains\n",
    "\n",
    "    sen_list=cut_sent(news['content'])\n",
    "    sen_num=len(sen_list)\n",
    "    def is_in_first_sent(word):\n",
    "        if word in sen_list[0]:\n",
    "            return 1\n",
    "        else:\n",
    "            return 0\n",
    "    def is_in_last_sent(word):\n",
    "        if word in sen_list[-1]:\n",
    "            return 1\n",
    "        else:\n",
    "            return 0\n",
    "    def is_in_other_sent(word):\n",
    "        if word not in sen_list[0] and word not in sen_list[-1]:\n",
    "            return 1\n",
    "        else:\n",
    "            return 0\n",
    "    tmp['is_in_first_sent'] = tmp['word'].apply(lambda w: is_in_first_sent(w))  # word occurs in the first sentence\n",
    "    tmp['is_in_last_sent'] = tmp['word'].apply(lambda w: is_in_last_sent(w))  # word occurs in the last sentence\n",
    "    tmp['is_in_other_sent'] = tmp['word'].apply(lambda w: is_in_other_sent(w))  # word occurs only elsewhere\n",
    "    tmp['sen_num']=sen_num\n",
    "    \n",
    "    ################### in-group (per-document) features\n",
    "    ###### relative term-frequency features\n",
    "\n",
    "    \n",
    "    tmp['word_count_xiangdui_max']=tmp['word'].apply(lambda w: text.count(w))  # raw count; becomes count / group max below\n",
    "\n",
    "    tmp['word_count_xiangdui_max']=tmp['word_count_xiangdui_max']/max(tmp['word_count_xiangdui_max'].values)\n",
    "    tmp['word_count_xiangdui_min']=tmp['word'].apply(lambda w: text.count(w))\n",
    "    tmp['word_count_xiangdui_min']=tmp['word_count_xiangdui_min']/min(tmp['word_count_xiangdui_min'].values)\n",
    "    tmp['word_count_group_max']=tmp['word'].apply(lambda w: text.count(w))\n",
    "    tmp['word_count_group_max']=max(tmp['word_count_group_max'].values)\n",
    "    tmp['word_count_group_min']=tmp['word'].apply(lambda w: text.count(w))\n",
    "    # fix: was min(tmp['word_count_xiangdui_min'].values), i.e. the min of a\n",
    "    # column already divided by its own min -- always 1.0; take the min of the\n",
    "    # raw counts instead, mirroring the group_max feature above\n",
    "    tmp['word_count_group_min']=min(tmp['word_count_group_min'].values)\n",
    "    tmp['word_count_group_avg']=tmp['word'].apply(lambda w: text.count(w))\n",
    "    tmp['word_count_group_avg']=sum(tmp['word_count_group_avg'].values)/len(tmp['word_count_group_avg'].values)\n",
    "    tmp['word_count_group_min_xiangdui_max']=tmp['word'].apply(lambda w: text.count(w))\n",
    "    tmp['word_count_group_min_xiangdui_max']=min(tmp['word_count_group_min_xiangdui_max'].values)/max(tmp['word_count_group_min_xiangdui_max'].values)\n",
    "    \n",
    "    ##### group-size feature\n",
    "    tmp['all_word_count_group']=tmp['word'].apply(lambda w: len(list(tmp['word'])))  # number of candidate words in this document\n",
    "\n",
    "\n",
    "    ###### company-related group features\n",
    "    def gongsi_count_group(group_word_list):\n",
    "        # count the company-like candidates (containing 公司/集团/有限/股份/科技)\n",
    "        gongsi_num=0\n",
    "        for word in group_word_list:\n",
    "            if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[]:\n",
    "                gongsi_num+=1\n",
    "        return gongsi_num\n",
    "\n",
    "    tmp['gongsi_count_group']=tmp['word'].apply(lambda w: gongsi_count_group(list(tmp['word'])))  # how many company-like candidates in the group\n",
    "\n",
    "\n",
    "    def gongsi_self_div_max_index_group(self_word,group_word_list):\n",
    "        # own first index over the latest last-index among other company-like words\n",
    "        if [i for i in ['公司','集团','有限','股份','科技'] if i in self_word]==[]:\n",
    "            return -1\n",
    "        self_gongsi_first_index=text.find(self_word)\n",
    "        other_gongsi_last_index=self_gongsi_first_index\n",
    "        for word in group_word_list:\n",
    "            if word!=self_word:\n",
    "                if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[]:\n",
    "                    other_gongsi_last_index=max(text.rfind(word),other_gongsi_last_index)\n",
    "\n",
    "        return self_gongsi_first_index/(other_gongsi_last_index+2)\n",
    "\n",
    "    tmp['gongsi_self_div_max_index_group']=tmp['word'].apply(lambda w: gongsi_self_div_max_index_group(w,list(tmp['word'])))\n",
    "\n",
    "    def gongsi_self_div_min_index_group(self_word,group_word_list):\n",
    "        # own first index over the earliest first-index among other company-like words\n",
    "        if [i for i in ['公司','集团','有限','股份','科技'] if i in self_word]==[]:\n",
    "            return -1\n",
    "        self_gongsi_first_index=text.find(self_word)\n",
    "        other_gongsi_last_index=self_gongsi_first_index\n",
    "        for word in group_word_list:\n",
    "            if word!=self_word:\n",
    "                if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[] :\n",
    "                    other_gongsi_last_index=min(text.find(word),other_gongsi_last_index)\n",
    "\n",
    "        return self_gongsi_first_index/(other_gongsi_last_index+2)\n",
    "    tmp['gongsi_self_div_min_index_group']=tmp['word'].apply(lambda w: gongsi_self_div_min_index_group(w,list(tmp['word'])))\n",
    "\n",
    "    def is_gongsi_in_title(self_word,group_word_list):\n",
    "        # 1 when another company-like candidate appears in the title, else 0\n",
    "        if [i for i in ['公司','集团','有限','股份','科技'] if i in self_word]==[]:\n",
    "            return -1\n",
    "        for word in group_word_list:\n",
    "            if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[] :\n",
    "                if word != self_word and word in news['title']:\n",
    "                    return 1\n",
    "        return 0\n",
    "    tmp['is_gongsi_in_title']=tmp['word'].apply(lambda w: is_gongsi_in_title(w,list(tmp['word'])))\n",
    "\n",
    "    def is_gongsi_in_first_two_sen(self_word,group_word_list):\n",
    "        # 1 when another company-like candidate appears in the first two sentences\n",
    "        if [i for i in ['公司','集团','有限','股份','科技'] if i in self_word]==[]:\n",
    "            return -1\n",
    "        title_tmp=sen_list[0]\n",
    "        if len(sen_list)>=2:\n",
    "            title_tmp+=sen_list[1]\n",
    "        for word in group_word_list:\n",
    "            if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[] :\n",
    "                if word != self_word and word in title_tmp:\n",
    "                    return 1\n",
    "        return 0\n",
    "    tmp['is_gongsi_in_first_two_sen']=tmp['word'].apply(lambda w: is_gongsi_in_first_two_sen(w,list(tmp['word'])))\n",
    "\n",
    "\n",
    "    def gongsi_avg_index(group_word_list):\n",
    "        # damped average first-index of company-like candidates\n",
    "        all_first_index=[]\n",
    "        for word in group_word_list:\n",
    "            if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[] :\n",
    "                all_first_index.append(text.find(word))\n",
    "        if all_first_index==[]:\n",
    "            return -1\n",
    "        return sum(all_first_index)/(len(all_first_index)+2)\n",
    "    tmp['gongsi_avg_index']=tmp['word'].apply(lambda w: gongsi_avg_index(list(tmp['word'])))\n",
    "\n",
    "\n",
    "    def gongsi_max_count(group_word_list):\n",
    "        # highest raw occurrence count among company-like candidates\n",
    "        all_gongsi_count=[]\n",
    "        for word in group_word_list:\n",
    "            if [i for i in ['公司','集团','有限','股份','科技'] if i in word]!=[] :\n",
    "                all_gongsi_count.append(text.count(word))\n",
    "        if all_gongsi_count==[]:\n",
    "            return -1\n",
    "        return max(all_gongsi_count)\n",
    "    tmp['gongsi_max_count']=tmp['word'].apply(lambda w: gongsi_max_count(list(tmp['word'])))\n",
    "\n",
    "\n",
    "    \n",
    "    ################\n",
    "    \n",
    "    \n",
    "#     if len(tmp['word'])==0:\n",
    "#         return pd.Series([np.nan])\n",
    "    # word-length features\n",
    "    tmp['word_len'] = tmp['word'].apply(lambda w: len(w))\n",
    "    ## length relative to the longest candidate\n",
    "    \n",
    "    max_len = max(tmp['word_len'].values)\n",
    "    tmp['word_len_ratio'] = tmp['word_len'].apply(lambda x: x / max_len)\n",
    "    # drop single-character words\n",
    "    tmp = tmp[tmp.word_len > 1]\n",
    "\n",
    "    # term-frequency ratio within the document\n",
    "    tmp['tf_ratio'] = tmp['tf'] / sum(tmp['tf'])\n",
    "\n",
    "    ## vocabulary of the fitted tf-idf model\n",
    "    feature_name = tfidf_model.get_feature_names()\n",
    "    # tf-idf scores for this document\n",
    "    tfidf_fetures = tfidf_model.transform([news['tokens']])\n",
    "    tfidf_map = {feature_name[k]: v for k, v in zip(tfidf_fetures.indices, tfidf_fetures.data)}\n",
    "    tmp['tfidf'] = tmp['word'].map(tfidf_map)\n",
    "    ## normalise tf-idf by the within-document maximum\n",
    "    tmp['tfidf'] = normalize(tmp['tfidf'].fillna(0).values.reshape(1, -1), 'max')[0]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    # suffix indicator features (loan/treasure/coin/company/securities/bracket endings)\n",
    "    tmp['dai_in_word'] = tmp['word'].apply(lambda w: int((1 if [i for i in ['贷'] if  w.endswith(i)] !=[] else 0)))\n",
    "    tmp['bao_in_word'] = tmp['word'].apply(lambda w: int((1 if [i for i in ['宝'] if  w.endswith(i)] !=[] else 0)))\n",
    "    tmp['bi_in_word'] = tmp['word'].apply(lambda w: int((1 if [i for i in ['币'] if  w.endswith(i)] !=[] else 0)))\n",
    "    tmp['gongsi_in_word'] = tmp['word'].apply(lambda w: int((1 if [i for i in ['公司','科技','控股','集团'] if  w.endswith(i)] !=[] else 0)))\n",
    "    tmp['zhengquan_in_word'] = tmp['word'].apply(lambda w: int((1 if [i for i in ['证券','债券'] if  w.endswith(i)] !=[] else 0)))\n",
    "    tmp['kuohao_in_word'] = tmp['word'].apply(lambda w: int((1 if [i for i in [')','）'] if  w.endswith(i)] !=[] else 0)))\n",
    "\n",
    "\n",
    "    # textrank scores over (almost) all POS tags\n",
    "    allow_cixing=['a', 'ad', 'ag', 'an', 'b', 'c', 'd',\n",
    "                'df', 'dg', 'e', 'eng', 'f', 'g', 'h',\n",
    "                'i', 'j', 'k', 'l', 'm', 'mg', 'mq',\n",
    "                'n', 'ng', 'nr', 'nrfg', \n",
    "                'nrt', 'ns', 'nt', 'nz',\n",
    "                'o', 'p', 'q', 'r', 'rg',\n",
    "                'rr', 'rz', 's', 't', 'tg',\n",
    "                'u', 'ud', 'ug', 'uj', 'ul', 'uv', 'uz',\n",
    "                'v', 'vd', 'vg', 'vi',\n",
    "                'vn', 'vq', 'w', 'x', \n",
    "                'y', 'z', 'zg']\n",
    "    tr_score = jieba.analyse.textrank(text, topK=1000, withWeight=True, allowPOS=list(allow_cixing))\n",
    "    ##### number of words that took part in textrank\n",
    "    \n",
    "    #### textrank value and rank within the whole article\n",
    "    tr_map = {k: v for k, v in tr_score}\n",
    "    tr_rank_map = {k: rank for rank, (k,v) in enumerate(tr_score)}\n",
    "    tmp['text_rank'] = tmp['word'].map(tr_map)\n",
    "    tmp['text_rank_geshu']=len(tr_score)\n",
    "    tmp['text_rank_rank'] = tmp['word'].map(tr_rank_map)\n",
    "    # (optional) keep only the top-100 rows\n",
    "    #tmp = tmp if tmp.shape[0] <= 100 else tmp.iloc[:100, :]\n",
    "\n",
    "    ###### containment features among candidates: length of the longest / shortest\n",
    "    ###### containing word, and count ratios between contained and containing words\n",
    "    all_group_word=list(tmp['word'])\n",
    "    def baohan_max_len(word):\n",
    "        all_group_word.sort(key=lambda x:len(x),reverse=True)  # longest first\n",
    "        for other_word in all_group_word:\n",
    "            if word in other_word and word!=other_word:\n",
    "                return len(other_word)\n",
    "        return -1\n",
    "\n",
    "    def baohan_min_len(word):\n",
    "        all_group_word.sort(key=lambda x:len(x))  # shortest first\n",
    "        for other_word in all_group_word:\n",
    "            if word in other_word and word!=other_word:\n",
    "                return len(other_word)\n",
    "        return -1\n",
    "\n",
    "    def baohan_self_div_other_count(word):\n",
    "        all_group_word.sort(key=lambda x:len(x))  # shortest containing word wins\n",
    "        for other_word in all_group_word:\n",
    "            if word in other_word and word!=other_word:\n",
    "                return text.count(word)/(text.count(other_word)+1)\n",
    "        return -1\n",
    "\n",
    "    def baohan_self_div_other_count_reserve(word):\n",
    "        all_group_word.sort(key=lambda x:len(x),reverse=True)  # longest containing word wins\n",
    "        for other_word in all_group_word:\n",
    "            if word in other_word and word!=other_word:\n",
    "                return text.count(word)/(text.count(other_word)+1)\n",
    "        return -1\n",
    "\n",
    "    tmp['baohan_max_len'] = tmp['word'].apply(lambda w: baohan_max_len(w))\n",
    "    tmp['baohan_min_len'] = tmp['word'].apply(lambda w: baohan_min_len(w))\n",
    "    tmp['baohan_self_div_other_count'] = tmp['word'].apply(lambda w: baohan_self_div_other_count(w))\n",
    "    tmp['baohan_self_div_other_count_reserve'] = tmp['word'].apply(lambda w: baohan_self_div_other_count_reserve(w))\n",
    "\n",
    "    def find_repeat(source,elmt): # The source may be a list or string.\n",
    "            elmt_index=[]\n",
    "            s_index = 0;e_index = len(source)\n",
    "            while(s_index < e_index):\n",
    "                    try:\n",
    "                        temp = source.index(elmt,s_index,e_index)\n",
    "                        elmt_index.append(temp)\n",
    "                        s_index = temp + 1\n",
    "                    except ValueError:\n",
    "                        break\n",
    "            return elmt_index\n",
    "        \n",
    "    def get_avg_pos(wenben,ci):\n",
    "        # damped mean position of `ci` inside `wenben` (sum / (n occurrences + 1))\n",
    "        all_pos=find_repeat(wenben,ci)\n",
    "        avg_pos=sum(all_pos)/(len(all_pos)+1)\n",
    "        return avg_pos\n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "    ## document-length features\n",
    "    tmp['text_len'] = len(text)\n",
    "    tmp['title_len'] = len(news['title'])\n",
    "    tmp['content_len'] = len(news['content'])\n",
    "    tmp['title_div_text_len']=len(news['title'])/(len(text)+1)\n",
    "    ##### average position of the word in the title, scaled by title length\n",
    "    tmp['avg_ocur_title'] = tmp['word'].apply(lambda w: get_avg_pos(news['title'],w)/(len(news['title'])+1))\n",
    "    ##### average position of the word in the body, scaled by body length\n",
    "    tmp['avg_ocur_content'] = tmp['word'].apply(lambda w: get_avg_pos(news['content'],w)/(len(news['content'])+1))\n",
    "    \n",
    "\n",
    "    # position of the first occurrence\n",
    "    tmp['first_ocur'] = tmp['word'].apply(lambda w: text.find(w) + len(w) - 1)\n",
    "    # position of the last occurrence (found via the reversed text)\n",
    "    reverse_text = text[-1::-1]\n",
    "    all_len = len(text)\n",
    "    tmp['last_ocur'] = tmp['word'].apply(lambda w: all_len - (reverse_text.find(w[-1::-1]) + len(w)))\n",
    "    # word span: distance between first and last occurrence\n",
    "    tmp['word_distance'] = (tmp['last_ocur'] - tmp['first_ocur']).apply(lambda d: 0 if d < 0 else d)\n",
    "    # normalise the span by the group maximum\n",
    "    tmp['word_distance_norm'] = normalize(tmp['word_distance'].values.reshape(1, -1), 'max')[0]\n",
    "    tmp.drop(['first_ocur', 'last_ocur', 'word_distance'], axis=1, inplace=True)\n",
    "\n",
    "    # cosine similarity between word vector and document vector\n",
    "    tmp['Cosine'] = tmp['word'].apply(\n",
    "        lambda w: np.nan if w not in w2v_model.wv.vocab else Cosine(w2v_model.wv[w], d2v_model[news['doc_id']]))\n",
    "\n",
    "    # euclidean distance between word vector and document vector\n",
    "    tmp['Euclidean'] = tmp['word'].apply(\n",
    "        lambda w: np.nan if w not in w2v_model.wv.vocab else Euclidean(w2v_model.wv[w], d2v_model[news['doc_id']]))\n",
    "\n",
    "    # pearson correlation between word vector and document vector\n",
    "    tmp['pearson_cor'] = tmp['word'].apply(\n",
    "        lambda w: np.nan if w not in w2v_model.wv.vocab else stats.pearsonr(w2v_model.wv[w], d2v_model[news['doc_id']])[\n",
    "            0])\n",
    "    tmp['pearson_pvalue'] = tmp['word'].apply(\n",
    "        lambda w: np.nan if w not in w2v_model.wv.vocab else stats.pearsonr(w2v_model.wv[w], d2v_model[news['doc_id']])[\n",
    "            1])\n",
    "\n",
    "    # occurs in the title?\n",
    "    tmp['ocur_in_title'] = tmp['word'].apply(lambda w: int((1 if news['title'].find(w) != -1 else 0)))\n",
    "    # occurs in the body?\n",
    "    tmp['ocur_in_content'] = tmp['word'].apply(lambda w: int((1 if news['content'].find(w) != -1 else 0)))\n",
    "\n",
    "    # contains digits? contains latin letters?\n",
    "    tmp['has_num'] = tmp['word'].apply(lambda w: int(bool(re.search(r'\\d', w))))\n",
    "    tmp['has_char'] = tmp['word'].apply(lambda w: int(bool(re.search(r'[a-zA-Z]+', w))))\n",
    "\n",
    "    # co-occurrence matrix features\n",
    "    sentences = [news['title']]  # the title counts as its own sentence\n",
    "    for seq in re.split(r'[\\n。？！?!.]', news['content']):\n",
    "        # strip leading characters that are not CJK, latin or digits\n",
    "        seq = re.sub(r'^[^\\u4e00-\\u9fa5A-Za-z0-9]+', '', seq)\n",
    "        # keep the sentence when anything is left after stripping\n",
    "        if len(seq) > 0:\n",
    "            sentences.append(seq)\n",
    "\n",
    "    num_tokens = len(tmp['word'])\n",
    "    words_list = tmp['word'].tolist()\n",
    "    arr = np.zeros((num_tokens, num_tokens))\n",
    "    # build the symmetric co-occurrence matrix (sentence-level co-occurrence)\n",
    "    for i in range(num_tokens):\n",
    "        for j in range(i + 1, num_tokens):\n",
    "            count = 0\n",
    "            for sentence in sentences:\n",
    "                if (words_list[i] in sentence) and (words_list[j] in sentence):\n",
    "                    count += 1\n",
    "            arr[i, j] = count\n",
    "            arr[j, i] = count\n",
    "\n",
    "    ## skewness of each word's co-occurrence column\n",
    "    tmp['coocur_skew'] = stats.skew(arr)\n",
    "    ## variance / mean / kurtosis / std per word\n",
    "    tmp['coocur_var'] = np.var(arr, axis=0)\n",
    "    tmp['coocur_mean'] = np.mean(arr, axis=0)\n",
    "    tmp['coocur_kurt'] = stats.kurtosis(arr, axis=0)\n",
    "    tmp['coocur_std'] = np.std(arr, axis=0)\n",
    "    #tmp['coocur_median'] = np.median(arr, axis=0)\n",
    "    #tmp['coocur_variation'] = stats.variation(arr, axis=0)\n",
    "\n",
    "    ## first-difference statistics of the co-occurrence matrix\n",
    "    co_diff1 = np.diff(arr, n=1, axis=1)\n",
    "    tmp['diff_coocur_mean'] = np.mean(co_diff1, axis=1)\n",
    "    tmp['diff_coocur_var'] = np.var(co_diff1, axis=1)\n",
    "    tmp['diff_coocur_std'] = np.std(co_diff1, axis=1)\n",
    "    #tmp['diff_coocur_median'] = np.median(co_diff1, axis=1)\n",
    "    tmp['diff_coocur_skew'] = stats.skew(co_diff1, axis=1)\n",
    "    tmp['diff_coocur_kurt'] = stats.kurtosis(co_diff1, axis=1)\n",
    "\n",
    "    '''\n",
    "    ## 共现矩阵二阶差分的统计特性\n",
    "    co_diff2 = np.diff(arr, n=2, axis=1)\n",
    "    tmp['diff2_coocur_mean'] = np.mean(co_diff2, axis=1)\n",
    "    tmp['diff2_coocur_var'] = np.var(co_diff2, axis=1)\n",
    "    tmp['diff2_coocur_std'] = np.std(co_diff2, axis=1)\n",
    "    tmp['diff2_coocur_median'] = np.median(co_diff2, axis=1)\n",
    "    tmp['diff2_coocur_skew'] = stats.skew(co_diff2, axis=1)\n",
    "    tmp['diff2_coocur_kurt'] = stats.kurtosis(co_diff2, axis=1)\n",
    "    '''\n",
    "\n",
    "    # pairwise word2vec similarity matrix and its statistics\n",
    "    ## sim_tags_arr: candidate-word similarity matrix (0 for OOV words)\n",
    "    sim_tags_arr = np.zeros((num_tokens, num_tokens))\n",
    "\n",
    "    for i in range(num_tokens):\n",
    "        for j in range(i, num_tokens):\n",
    "            sim_tags_arr[i][j] = 0 if (words_list[i] not in w2v_model.wv.vocab or words_list[\n",
    "                j] not in w2v_model.wv.vocab) else w2v_model.wv.similarity(words_list[i], words_list[j])\n",
    "            if i != j:\n",
    "                sim_tags_arr[j][i] = sim_tags_arr[i][j]\n",
    "    # statistics of the similarity matrix\n",
    "    ## mean similarity\n",
    "    tmp['mean_sim_tags'] = np.mean(sim_tags_arr, axis=1)\n",
    "    ## skewness\n",
    "    tmp['skew_sim_tags'] = stats.skew(sim_tags_arr, axis=1)\n",
    "    ## kurtosis\n",
    "    tmp['kurt_sim_tags'] = stats.kurtosis(sim_tags_arr, axis=1)\n",
    "    tmp['var_sim_tags'] = np.var(sim_tags_arr, axis=1)\n",
    "    tmp['std_sim_tags'] = np.std(sim_tags_arr, axis=1, ddof=1)\n",
    "    #tmp['median_sim_tags'] = np.median(sim_tags_arr, axis=1)\n",
    "    #tmp['variation_sim_tags'] = stats.variation(sim_tags_arr, axis=1)\n",
    "\n",
    "    ## first-difference statistics of the similarity matrix\n",
    "    diff1 = np.diff(sim_tags_arr, n=1, axis=1)\n",
    "    tmp['diff_median_sim_tags'] = np.median(diff1, axis=1)\n",
    "    tmp['diff_var_sim_tags'] = np.var(diff1, axis=1)\n",
    "    tmp['diff_std_sim_tags'] = np.std(diff1, axis=1)\n",
    "    ## mean of the differences\n",
    "    tmp['diff_mean_sim_tags'] = np.mean(diff1, axis=1)\n",
    "    ## skewness of the differences\n",
    "    tmp['diff_skew_sim_tags'] = stats.skew(diff1, axis=1)\n",
    "    ## kurtosis of the differences\n",
    "    tmp['diff_kurt_sim_tags'] = stats.kurtosis(diff1, axis=1)\n",
    "\n",
    "    '''\n",
    "    ## 二阶差分统计特征\n",
    "    diff2 = np.diff(sim_tags_arr, n=2, axis=1)\n",
    "    tmp['diff2_mean_sim_tags'] = np.mean(diff2, axis=1)\n",
    "    tmp['diff2_median_sim_tags'] = np.median(diff2, axis=1)\n",
    "    tmp['diff2_var_sim_tags'] = np.var(diff2, axis=1)\n",
    "    tmp['diff2_std_sim_tags'] = np.std(diff2, axis=1)\n",
    "    tmp['diff2_skew_sim_tags'] = stats.skew(diff2, axis=1)\n",
    "    tmp['diff2_kurt_sim_tags'] = stats.kurtosis(diff2, axis=1)\n",
    "    '''\n",
    "\n",
    "    # containment matrix: in_arr[i][j] = 1 when word i is a substring of word j\n",
    "    in_arr = np.zeros((num_tokens, num_tokens))\n",
    "    ## fill the containment matrix\n",
    "    for i in range(num_tokens):\n",
    "        for j in range(num_tokens):\n",
    "            if i != j:\n",
    "                if words_list[i] in words_list[j]:\n",
    "                    in_arr[i][j] = 1\n",
    "\n",
    "    ### how many words contain this word\n",
    "    tmp['be_include_sum'] = np.sum(in_arr, axis=1)\n",
    "    tmp['be_include_mean'] = np.mean(in_arr, axis=1)\n",
    "    tmp['be_include_var'] = np.var(in_arr, axis=1)\n",
    "    tmp['be_include_std'] = np.std(in_arr, axis=1)\n",
    "    tmp['be_include_skew'] = stats.skew(in_arr, axis=1)\n",
    "    tmp['be_include_kurt'] = stats.kurtosis(in_arr, axis=1)\n",
    "\n",
    "    ### how many words this word contains\n",
    "    tmp['include_sum'] = np.sum(in_arr, axis=0)\n",
    "    tmp['include_mean'] = np.mean(in_arr, axis=0)\n",
    "    tmp['include_var'] = np.var(in_arr, axis=0)\n",
    "    tmp['include_std'] = np.std(in_arr, axis=0)\n",
    "    tmp['include_skew'] = stats.skew(in_arr, axis=0)\n",
    "    tmp['include_kurt'] = stats.kurtosis(in_arr, axis=0)\n",
    "    ###### TODO: add a corpus-wide tf-idf rank over all words in the article\n",
    "    \n",
    "    # rank by tf-idf\n",
    "    tmp = tmp.sort_values(by='tfidf', ascending=False)\n",
    "    tmp['tfidf_index'] = range(tmp.shape[0])\n",
    "    # normalise the tf-idf rank\n",
    "    tmp['tfidf_index'] = normalize(tmp['tfidf_index'].values.reshape(1, -1), 'max')[0]\n",
    "\n",
    "    # rank by term frequency\n",
    "    tmp = tmp.sort_values(by='tf', ascending=False)\n",
    "    tmp['tf_index'] = range(tmp.shape[0])\n",
    "\n",
    "    # rank by word span\n",
    "    tmp = tmp.sort_values(by='word_distance_norm', ascending=False)\n",
    "    tmp['word_distance_norm_index'] = range(tmp.shape[0])\n",
    "    # tmp['word_distance_norm_index'] = normalize(tmp['word_distance_norm_index'].values.reshape(1, -1), 'max')[0]\n",
    "\n",
    "    # rank by cosine similarity\n",
    "    tmp = tmp.sort_values(by='Cosine', ascending=False)\n",
    "    tmp['cosine_index'] = range(tmp.shape[0])\n",
    "\n",
    "    # rank by euclidean distance (ascending: closer is better)\n",
    "    tmp = tmp.sort_values(by='Euclidean', ascending=True)\n",
    "    tmp['euclidean_index'] = range(tmp.shape[0])\n",
    "\n",
    "    \n",
    "    # for the training set, attach the 0/1 label from the known entities\n",
    "    if train:\n",
    "        true_entity = news['unknownEntities'].split(';')\n",
    "        # print(train_news['doc_id'], true_entity, \"文章长度为%d\" % all_len)\n",
    "        tmp['label'] = tmp['word'].apply(lambda w: int(w in true_entity))\n",
    "        tmp = tmp.sort_values(by=['label', 'tfidf', 'tf'], ascending=False)\n",
    "    else:\n",
    "        tmp = tmp.sort_values(['tfidf', 'tf'], ascending=False)\n",
    "\n",
    "        \n",
    "        \n",
    "    return tmp.reset_index(drop=True)\n",
    "\n",
    "\n",
    "def add_features(train_df, test_df):\n",
    "    '''\n",
    "    Add derived features / encode existing ones on the concatenated\n",
    "    train+test frame, then split the frame back apart.\n",
    "    :param train_df: per-word feature frame for the training rows\n",
    "    :param test_df: per-word feature frame for the test rows\n",
    "    :return: (train_df, test_df) with the added columns\n",
    "    '''\n",
    "    import datetime  # fix: datetime is used below but was never imported in this notebook\n",
    "    documents=pd.DataFrame()\n",
    "\n",
    "    # NOTE(review): reads the module-level `all_df`, not the arguments -- confirm intended\n",
    "    documents['text']=all_df['title']+all_df['content']\n",
    "\n",
    "\n",
    "    def getBayesSmoothParam(origion_rate):\n",
    "        # moment-matching estimate of Beta(alpha, beta) prior parameters\n",
    "        origion_rate_mean = origion_rate.mean()\n",
    "        origion_rate_var = origion_rate.var()\n",
    "        alpha = origion_rate_mean / origion_rate_var * (origion_rate_mean * (1 - origion_rate_mean) - origion_rate_var)\n",
    "        beta = (1 - origion_rate_mean) / origion_rate_var * (origion_rate_mean * (1 - origion_rate_mean) - origion_rate_var)\n",
    "        return alpha, beta\n",
    "    \n",
    "    all_df_fea = pd.concat([train_df, test_df], axis=0, sort=False, ignore_index=True)\n",
    "    all_df_fea['idf'] = all_df_fea['word'].map(Counter(all_df_fea['word']))  # how often the word was picked as a candidate\n",
    "    ## label-encode the POS tag and the contained-stop-word column\n",
    "    lb = LabelEncoder()\n",
    "    all_df_fea['cixing'] = lb.fit_transform(all_df_fea['cixing'])\n",
    "    lb2 = LabelEncoder()\n",
    "    all_df_fea['stop_in_word'] = lb2.fit_transform(all_df_fea['stop_in_word'])\n",
    "    #### document frequency of every word across all articles (idf computed below)\n",
    "    all_word=set([i for i in all_df_fea['word']])\n",
    "    id_freq = {}\n",
    "    i=0\n",
    "    for doc in documents['text']:\n",
    "        for word in all_word:\n",
    "            if word in doc:\n",
    "                id_freq[word] = id_freq.get(word, 0) + 1\n",
    "        if i % 1000 == 0:\n",
    "            print('Documents processed: ', i, ', time: ',\n",
    "                datetime.datetime.now())\n",
    "        i += 1\n",
    "    id_freq = [[word,word_freq] for word,word_freq in id_freq.items()]\n",
    "    id_freq = pd.DataFrame(id_freq,columns=['word','word_occor_times'])\n",
    "    id_freq['idf_in_all_text']= id_freq['word_occor_times'].apply(lambda x:math.log(i / x, 2))\n",
    "    id_freq=id_freq[['word','idf_in_all_text']]\n",
    "\n",
    "    ###### TODO: probability of being selected as a candidate\n",
    "    all_df_fea=pd.merge(all_df_fea,id_freq,on=['word'],how='left')\n",
    "    ## inverse document frequency merged in above\n",
    "    \n",
    "    ## split back into train and test (lengths are unchanged by the merge)\n",
    "    train_df = all_df_fea[:len(train_df)]\n",
    "    test_df = all_df_fea[len(train_df):]\n",
    "\n",
    "    return train_df, test_df\n",
    "\n",
    "\n",
    "\n",
    "tfidf_model, w2v_model, d2v_model = load_models(train_df)\n",
    "\n",
    "def train_fea_make(train_index_all_list):\n",
    "    '''\n",
    "    Extract word-level features for the given train_df row positions.\n",
    "\n",
    "    :param train_index_all_list: iterable of integer positions into train_df\n",
    "    :return: one DataFrame concatenating the features of every row that\n",
    "             was processed successfully (failed rows are skipped)\n",
    "    '''\n",
    "    fea_train = []\n",
    "    for train_index in tqdm(train_index_all_list):\n",
    "        try:\n",
    "            train_single = extract_word_property(train_df.iloc[train_index, :], tfidf_model, w2v_model, d2v_model, train=True)\n",
    "            fea_train.append(train_single)\n",
    "        except Exception:  # was a bare except, which also swallowed KeyboardInterrupt/SystemExit\n",
    "            continue\n",
    "    return pd.concat(fea_train, axis=0)\n",
    "\n",
    "def test_fea_make(test_index_all_list):\n",
    "    '''\n",
    "    Extract word-level features for the given test_df row positions.\n",
    "\n",
    "    :param test_index_all_list: iterable of integer positions into test_df\n",
    "    :return: one DataFrame concatenating the features of every row that\n",
    "             was processed successfully (failed rows are skipped)\n",
    "    '''\n",
    "    fea_test = []\n",
    "    for test_index in test_index_all_list:\n",
    "        try:\n",
    "            test_single = extract_word_property(test_df.iloc[test_index, :], tfidf_model, w2v_model, d2v_model, train=False)\n",
    "            fea_test.append(test_single)\n",
    "        except Exception:  # was a bare except, which also swallowed KeyboardInterrupt/SystemExit\n",
    "            continue\n",
    "    return pd.concat(fea_test, axis=0)\n",
    "\n",
    "##############\n",
    "def _parallel_feature_extract(worker, n_rows, n_procs=40):\n",
    "    '''\n",
    "    Run `worker` over contiguous index chunks of range(n_rows) in a process pool.\n",
    "\n",
    "    Replaces two copy-pasted pool-driver blocks (train and test) with one helper.\n",
    "    :param worker: function taking a list of integer row positions, returning a DataFrame\n",
    "    :param n_rows: total number of rows to split across the workers\n",
    "    :param n_procs: pool size; also the target number of chunks\n",
    "    :return: list with one DataFrame per chunk, in chunk order\n",
    "    '''\n",
    "    pool = multiprocessing.Pool(processes=n_procs)\n",
    "    # max(1, ...) guards the degenerate case n_rows < n_procs, where the\n",
    "    # original int(n_rows / 40) chunk size of 0 made range() raise ValueError.\n",
    "    split_size = max(1, int(n_rows / n_procs))\n",
    "    all_index = list(range(n_rows))\n",
    "    df_split_index = [all_index[i:i + split_size] for i in range(0, len(all_index), split_size)]\n",
    "    async_results = []\n",
    "    for part_id, part_index in enumerate(df_split_index):\n",
    "        async_results.append(pool.apply_async(worker, [part_index]))\n",
    "        print(part_id)  # progress: chunk submitted\n",
    "    pool.close()\n",
    "    pool.join()\n",
    "    return [r.get() for r in async_results]\n",
    "\n",
    "# `fea_train` / `fea_test` stay module-level: later cells read them (e.g. fea_train[0].columns).\n",
    "fea_train = _parallel_feature_extract(train_fea_make, len(train_df))\n",
    "print(\"模型训练完成\")\n",
    "#############\n",
    "\n",
    "fea_test = _parallel_feature_extract(test_fea_make, len(test_df))\n",
    "print(\"模型训练完成\")\n",
    "#############\n",
    "\n",
    "train_yuan=train_df\n",
    "test_yuan=test_df\n",
    "\n",
    "train_all2=pd.concat(fea_train).reset_index(drop=True)\n",
    "test_all2=pd.concat(fea_test).reset_index(drop=True)\n",
    "\n",
    "train_all, test_all = add_features(train_all2, test_all2)\n",
    "\n",
    "\n",
    "# train_all=train_test[~train_test['label'].isnull()]\n",
    "# test_all=train_test[train_test['label'].isnull()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "fea_train[0].columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_all.to_csv(\"temp2/train_data.csv\",index=None)\n",
    "test_all.to_csv(\"temp2/test_data.csv\",index=None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载已经做好的特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# stdlib (duplicated imports from the original cell deduplicated: re x3, os x2, sys x2, jieba x2)\n",
    "import os\n",
    "import re\n",
    "import sys\n",
    "import math\n",
    "import string\n",
    "import getopt\n",
    "import datetime\n",
    "from random import choice\n",
    "\n",
    "# third-party\n",
    "import jieba\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "# Reload the features that the cells above wrote to disk.\n",
    "train_all=pd.read_csv(\"temp2/train_data.csv\")\n",
    "test_all=pd.read_csv(\"temp2/test_data.csv\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 在训练前对特征进行预处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 去掉那些干扰词：在手动改训练集的时候，引入了一些 京东淘宝之类的，进去"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Zero the label of known distractor words: hand-editing the training set\n",
    "# introduced well-known brands that are not unknown entities.\n",
    "for _noise_word in ['京东', '阿里巴巴', '莱特币', '蚂蚁金服', '唯品会', '淘宝', '天猫']:\n",
    "    train_all.loc[train_all['word'] == _noise_word, 'label'] = 0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 特征更新"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "all_df=pd.read_csv(\"temp2/all_tokens.csv\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ctr_fea"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>word</th>\n",
       "      <th>cixing</th>\n",
       "      <th>neighbour</th>\n",
       "      <th>hits_h</th>\n",
       "      <th>hits_a</th>\n",
       "      <th>degrees</th>\n",
       "      <th>components</th>\n",
       "      <th>pagerank</th>\n",
       "      <th>doc_id</th>\n",
       "      <th>lda_classes</th>\n",
       "      <th>...</th>\n",
       "      <th>tf_index</th>\n",
       "      <th>word_distance_norm_index</th>\n",
       "      <th>cosine_index</th>\n",
       "      <th>euclidean_index</th>\n",
       "      <th>label</th>\n",
       "      <th>idf</th>\n",
       "      <th>idf_in_all_text</th>\n",
       "      <th>occr_times</th>\n",
       "      <th>select_times</th>\n",
       "      <th>ent_ctr</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>钱嗨娱乐</td>\n",
       "      <td>28</td>\n",
       "      <td>233.0</td>\n",
       "      <td>3.202750</td>\n",
       "      <td>3.202750</td>\n",
       "      <td>233.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>6.660880</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>26</td>\n",
       "      <td>17</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>1.0</td>\n",
       "      <td>16</td>\n",
       "      <td>8.966938</td>\n",
       "      <td>16.0</td>\n",
       "      <td>16.0</td>\n",
       "      <td>0.959648</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>躺赚</td>\n",
       "      <td>28</td>\n",
       "      <td>11535.0</td>\n",
       "      <td>83.274707</td>\n",
       "      <td>83.274707</td>\n",
       "      <td>11535.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>120.246907</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>13</td>\n",
       "      <td>3</td>\n",
       "      <td>14</td>\n",
       "      <td>16</td>\n",
       "      <td>1.0</td>\n",
       "      <td>278</td>\n",
       "      <td>5.045692</td>\n",
       "      <td>141.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.014418</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>明星周末</td>\n",
       "      <td>28</td>\n",
       "      <td>27.0</td>\n",
       "      <td>0.243519</td>\n",
       "      <td>0.243519</td>\n",
       "      <td>27.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>2.470474</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>18</td>\n",
       "      <td>21</td>\n",
       "      <td>21</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1</td>\n",
       "      <td>13.288866</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>全明星</td>\n",
       "      <td>15</td>\n",
       "      <td>130.0</td>\n",
       "      <td>2.238660</td>\n",
       "      <td>2.238660</td>\n",
       "      <td>130.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>3.493467</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>1</td>\n",
       "      <td>13</td>\n",
       "      <td>15</td>\n",
       "      <td>4</td>\n",
       "      <td>0.0</td>\n",
       "      <td>2</td>\n",
       "      <td>12.703904</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>钱嗨</td>\n",
       "      <td>28</td>\n",
       "      <td>239.0</td>\n",
       "      <td>3.296492</td>\n",
       "      <td>3.296492</td>\n",
       "      <td>239.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>6.713562</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>25</td>\n",
       "      <td>8</td>\n",
       "      <td>9</td>\n",
       "      <td>3</td>\n",
       "      <td>0.0</td>\n",
       "      <td>17</td>\n",
       "      <td>8.931314</td>\n",
       "      <td>16.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.002593</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430060</th>\n",
       "      <td>交易</td>\n",
       "      <td>13</td>\n",
       "      <td>69297.0</td>\n",
       "      <td>195.982491</td>\n",
       "      <td>195.982491</td>\n",
       "      <td>69297.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>915.531194</td>\n",
       "      <td>5005</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>4</td>\n",
       "      <td>4</td>\n",
       "      <td>8</td>\n",
       "      <td>8</td>\n",
       "      <td>0.0</td>\n",
       "      <td>4276</td>\n",
       "      <td>1.360718</td>\n",
       "      <td>1815.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.000024</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430061</th>\n",
       "      <td>中国PE基金</td>\n",
       "      <td>28</td>\n",
       "      <td>8.0</td>\n",
       "      <td>0.154935</td>\n",
       "      <td>0.154935</td>\n",
       "      <td>8.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>1.739767</td>\n",
       "      <td>5005</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>5</td>\n",
       "      <td>5</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1</td>\n",
       "      <td>13.288866</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430062</th>\n",
       "      <td>PE基金</td>\n",
       "      <td>28</td>\n",
       "      <td>229.0</td>\n",
       "      <td>3.434037</td>\n",
       "      <td>3.434037</td>\n",
       "      <td>229.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>3.972357</td>\n",
       "      <td>5005</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>7</td>\n",
       "      <td>7</td>\n",
       "      <td>5</td>\n",
       "      <td>2</td>\n",
       "      <td>0.0</td>\n",
       "      <td>3</td>\n",
       "      <td>12.288866</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430063</th>\n",
       "      <td>中国P</td>\n",
       "      <td>28</td>\n",
       "      <td>901.0</td>\n",
       "      <td>12.265358</td>\n",
       "      <td>12.265358</td>\n",
       "      <td>901.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>10.534478</td>\n",
       "      <td>5005</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>8</td>\n",
       "      <td>8</td>\n",
       "      <td>2</td>\n",
       "      <td>3</td>\n",
       "      <td>0.0</td>\n",
       "      <td>7</td>\n",
       "      <td>10.381975</td>\n",
       "      <td>6.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.006453</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430064</th>\n",
       "      <td>PE</td>\n",
       "      <td>28</td>\n",
       "      <td>12766.0</td>\n",
       "      <td>98.240910</td>\n",
       "      <td>98.240910</td>\n",
       "      <td>12766.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>106.394818</td>\n",
       "      <td>5005</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>3</td>\n",
       "      <td>5</td>\n",
       "      <td>0.0</td>\n",
       "      <td>108</td>\n",
       "      <td>6.922544</td>\n",
       "      <td>31.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.001367</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>430065 rows × 122 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "          word  cixing  neighbour      hits_h      hits_a  degrees  \\\n",
       "0         钱嗨娱乐      28      233.0    3.202750    3.202750    233.0   \n",
       "1           躺赚      28    11535.0   83.274707   83.274707  11535.0   \n",
       "2         明星周末      28       27.0    0.243519    0.243519     27.0   \n",
       "3          全明星      15      130.0    2.238660    2.238660    130.0   \n",
       "4           钱嗨      28      239.0    3.296492    3.296492    239.0   \n",
       "...        ...     ...        ...         ...         ...      ...   \n",
       "430060      交易      13    69297.0  195.982491  195.982491  69297.0   \n",
       "430061  中国PE基金      28        8.0    0.154935    0.154935      8.0   \n",
       "430062    PE基金      28      229.0    3.434037    3.434037    229.0   \n",
       "430063     中国P      28      901.0   12.265358   12.265358    901.0   \n",
       "430064      PE      28    12766.0   98.240910   98.240910  12766.0   \n",
       "\n",
       "        components    pagerank  doc_id  lda_classes  ...  tf_index  \\\n",
       "0         101329.0    6.660880       0            4  ...        26   \n",
       "1         101329.0  120.246907       0            4  ...        13   \n",
       "2         101329.0    2.470474       0            4  ...         0   \n",
       "3         101329.0    3.493467       0            4  ...         1   \n",
       "4         101329.0    6.713562       0            4  ...        25   \n",
       "...            ...         ...     ...          ...  ...       ...   \n",
       "430060    101329.0  915.531194    5005            5  ...         4   \n",
       "430061    101329.0    1.739767    5005            5  ...         5   \n",
       "430062    101329.0    3.972357    5005            5  ...         7   \n",
       "430063    101329.0   10.534478    5005            5  ...         8   \n",
       "430064    101329.0  106.394818    5005            5  ...         6   \n",
       "\n",
       "        word_distance_norm_index  cosine_index  euclidean_index  label   idf  \\\n",
       "0                             17             6                6    1.0    16   \n",
       "1                              3            14               16    1.0   278   \n",
       "2                             18            21               21    0.0     1   \n",
       "3                             13            15                4    0.0     2   \n",
       "4                              8             9                3    0.0    17   \n",
       "...                          ...           ...              ...    ...   ...   \n",
       "430060                         4             8                8    0.0  4276   \n",
       "430061                         5             1                0    0.0     1   \n",
       "430062                         7             5                2    0.0     3   \n",
       "430063                         8             2                3    0.0     7   \n",
       "430064                         6             3                5    0.0   108   \n",
       "\n",
       "        idf_in_all_text  occr_times  select_times   ent_ctr  \n",
       "0              8.966938        16.0          16.0  0.959648  \n",
       "1              5.045692       141.0           2.0  0.014418  \n",
       "2             13.288866         NaN           NaN       NaN  \n",
       "3             12.703904         NaN           NaN       NaN  \n",
       "4              8.931314        16.0           0.0  0.002593  \n",
       "...                 ...         ...           ...       ...  \n",
       "430060         1.360718      1815.0           0.0  0.000024  \n",
       "430061        13.288866         NaN           NaN       NaN  \n",
       "430062        12.288866         NaN           NaN       NaN  \n",
       "430063        10.381975         6.0           0.0  0.006453  \n",
       "430064         6.922544        31.0           0.0  0.001367  \n",
       "\n",
       "[430065 rows x 122 columns]"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Merge entity click-through-rate statistics onto both splits\n",
    "# (left join keeps every candidate row; unmatched words get NaN).\n",
    "ctr_df = pd.read_csv(\"data/ctr_over_four.csv\")\n",
    "ctr_df.columns = ['word', 'occr_times', 'select_times', 'ent_ctr']\n",
    "train_all = pd.merge(train_all, ctr_df, on=['word'], how='left')\n",
    "test_all = pd.merge(test_all, ctr_df, on=['word'], how='left')\n",
    "# Only the last expression of a cell is displayed; the original mid-cell\n",
    "# bare `train_all` / `test_all` lines were no-ops and have been removed.\n",
    "train_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# train_all[['word','doc_id','pagerank','hits_h','neighbour','ent_ctr','occr_times','select_times']].to_csv(\"train_need_group_fea.csv\",index=None)\n",
    "# test_all[['word','doc_id','pagerank','hits_h','neighbour','ent_ctr','occr_times','select_times']].to_csv(\"test_need_group_fea.csv\",index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Remove the over-05 count feature from both splits; the commented-out\n",
    "# lines below show how it was originally merged in.\n",
    "train_all = train_all.drop(columns=['over_05_times'])\n",
    "test_all = test_all.drop(columns=['over_05_times'])\n",
    "# test_over_05=pd.read_csv(\"data/over_05.csv\")\n",
    "# train_all=pd.merge(train_all,test_over_05,on=['word'],how='left')\n",
    "# train_all\n",
    "# test_all=pd.merge(test_all,test_over_05,on=['word'],how='left')\n",
    "# test_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Group-statistic features to discard from both splits.\n",
    "# Fixed: the original list contained 'select_times_group_avg' twice.\n",
    "drop_fea = ['ctr_group_rank', 'select_times_group_rank',\n",
    "            'select_times_group_avg', 'ctr_group_avg']\n",
    "train_all = train_all.drop(drop_fea, axis=1)\n",
    "test_all = test_all.drop(drop_fea, axis=1)\n",
    "# train_add_fea=pd.read_csv(\"add_fea/group_fea_add_train.csv\")\n",
    "# test_add_fea=pd.read_csv(\"add_fea/group_fea_add_test.csv\")\n",
    "# train_all=pd.merge(train_all,train_add_fea,on=['word','doc_id'],how='left')\n",
    "# train_all\n",
    "# test_all=pd.merge(test_all,test_add_fea,on=['word','doc_id'],how='left')\n",
    "# test_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_all.columns"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# val_using_df=fea_train_df[fea_train_df['label']==1][0:20]\n",
    "# val_using_df\n",
    "# val_using_df=pd.concat([val_using_df,fea_train_df[fea_train_df['label']==0][0:20]])\n",
    "# train_all['label']=train_all['label'].astype(int)\n",
    "# train_all=train_all.reset_index(drop=True)\n",
    "train_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "fea_train_df_over_sam.sample(frac=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 125,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>word</th>\n",
       "      <th>cixing</th>\n",
       "      <th>neighbour</th>\n",
       "      <th>hits_h</th>\n",
       "      <th>hits_a</th>\n",
       "      <th>degrees</th>\n",
       "      <th>components</th>\n",
       "      <th>pagerank</th>\n",
       "      <th>doc_id</th>\n",
       "      <th>lda_classes</th>\n",
       "      <th>...</th>\n",
       "      <th>tf_index</th>\n",
       "      <th>word_distance_norm_index</th>\n",
       "      <th>cosine_index</th>\n",
       "      <th>euclidean_index</th>\n",
       "      <th>label</th>\n",
       "      <th>idf</th>\n",
       "      <th>idf_in_all_text</th>\n",
       "      <th>occr_times</th>\n",
       "      <th>select_times</th>\n",
       "      <th>ent_ctr</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>钱嗨娱乐</td>\n",
       "      <td>28</td>\n",
       "      <td>233.0</td>\n",
       "      <td>3.202750</td>\n",
       "      <td>3.202750</td>\n",
       "      <td>233.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>6.660880</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>26</td>\n",
       "      <td>17</td>\n",
       "      <td>6</td>\n",
       "      <td>6</td>\n",
       "      <td>1.0</td>\n",
       "      <td>16</td>\n",
       "      <td>8.966938</td>\n",
       "      <td>16.0</td>\n",
       "      <td>16.0</td>\n",
       "      <td>0.938298</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>躺赚</td>\n",
       "      <td>28</td>\n",
       "      <td>11535.0</td>\n",
       "      <td>83.274707</td>\n",
       "      <td>83.274707</td>\n",
       "      <td>11535.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>120.246907</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>13</td>\n",
       "      <td>3</td>\n",
       "      <td>14</td>\n",
       "      <td>16</td>\n",
       "      <td>1.0</td>\n",
       "      <td>278</td>\n",
       "      <td>5.045692</td>\n",
       "      <td>141.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.014413</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>钱嗨娱乐</td>\n",
       "      <td>28</td>\n",
       "      <td>233.0</td>\n",
       "      <td>3.202750</td>\n",
       "      <td>3.202750</td>\n",
       "      <td>233.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>6.660880</td>\n",
       "      <td>1</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>1</td>\n",
       "      <td>11</td>\n",
       "      <td>1</td>\n",
       "      <td>3</td>\n",
       "      <td>1.0</td>\n",
       "      <td>16</td>\n",
       "      <td>8.966938</td>\n",
       "      <td>16.0</td>\n",
       "      <td>16.0</td>\n",
       "      <td>0.938298</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>躺赚</td>\n",
       "      <td>28</td>\n",
       "      <td>11535.0</td>\n",
       "      <td>83.274707</td>\n",
       "      <td>83.274707</td>\n",
       "      <td>11535.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>120.246907</td>\n",
       "      <td>1</td>\n",
       "      <td>4</td>\n",
       "      <td>...</td>\n",
       "      <td>13</td>\n",
       "      <td>4</td>\n",
       "      <td>9</td>\n",
       "      <td>11</td>\n",
       "      <td>1.0</td>\n",
       "      <td>278</td>\n",
       "      <td>5.045692</td>\n",
       "      <td>141.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.014413</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44</th>\n",
       "      <td>上证50etf期权</td>\n",
       "      <td>28</td>\n",
       "      <td>33.0</td>\n",
       "      <td>0.539496</td>\n",
       "      <td>0.539496</td>\n",
       "      <td>33.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>1.849637</td>\n",
       "      <td>2</td>\n",
       "      <td>3</td>\n",
       "      <td>...</td>\n",
       "      <td>3</td>\n",
       "      <td>8</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1</td>\n",
       "      <td>13.288866</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430015</th>\n",
       "      <td>启航</td>\n",
       "      <td>26</td>\n",
       "      <td>7510.0</td>\n",
       "      <td>71.275247</td>\n",
       "      <td>71.275247</td>\n",
       "      <td>7510.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>64.845111</td>\n",
       "      <td>5002</td>\n",
       "      <td>7</td>\n",
       "      <td>...</td>\n",
       "      <td>8</td>\n",
       "      <td>8</td>\n",
       "      <td>6</td>\n",
       "      <td>3</td>\n",
       "      <td>1.0</td>\n",
       "      <td>58</td>\n",
       "      <td>7.547399</td>\n",
       "      <td>31.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.063798</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430025</th>\n",
       "      <td>ing</td>\n",
       "      <td>28</td>\n",
       "      <td>25158.0</td>\n",
       "      <td>138.128603</td>\n",
       "      <td>138.128603</td>\n",
       "      <td>25158.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>230.100278</td>\n",
       "      <td>5003</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>12</td>\n",
       "      <td>3</td>\n",
       "      <td>9</td>\n",
       "      <td>15</td>\n",
       "      <td>1.0</td>\n",
       "      <td>423</td>\n",
       "      <td>4.858414</td>\n",
       "      <td>181.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.005756</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430026</th>\n",
       "      <td>金融服务</td>\n",
       "      <td>13</td>\n",
       "      <td>22769.0</td>\n",
       "      <td>116.528001</td>\n",
       "      <td>116.528001</td>\n",
       "      <td>22769.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>238.894914</td>\n",
       "      <td>5003</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>20</td>\n",
       "      <td>20</td>\n",
       "      <td>6</td>\n",
       "      <td>14</td>\n",
       "      <td>1.0</td>\n",
       "      <td>566</td>\n",
       "      <td>4.112693</td>\n",
       "      <td>308.0</td>\n",
       "      <td>23.0</td>\n",
       "      <td>0.074564</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430046</th>\n",
       "      <td>rnsfx</td>\n",
       "      <td>28</td>\n",
       "      <td>9.0</td>\n",
       "      <td>0.179206</td>\n",
       "      <td>0.179206</td>\n",
       "      <td>9.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>1.580474</td>\n",
       "      <td>5004</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1</td>\n",
       "      <td>13.288866</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>430056</th>\n",
       "      <td>易智堂</td>\n",
       "      <td>28</td>\n",
       "      <td>8.0</td>\n",
       "      <td>0.154935</td>\n",
       "      <td>0.154935</td>\n",
       "      <td>8.0</td>\n",
       "      <td>101329.0</td>\n",
       "      <td>1.739767</td>\n",
       "      <td>5005</td>\n",
       "      <td>5</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1</td>\n",
       "      <td>13.288866</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>46764 rows × 122 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "             word  cixing  neighbour      hits_h      hits_a  degrees  \\\n",
       "0            钱嗨娱乐      28      233.0    3.202750    3.202750    233.0   \n",
       "1              躺赚      28    11535.0   83.274707   83.274707  11535.0   \n",
       "28           钱嗨娱乐      28      233.0    3.202750    3.202750    233.0   \n",
       "29             躺赚      28    11535.0   83.274707   83.274707  11535.0   \n",
       "44      上证50etf期权      28       33.0    0.539496    0.539496     33.0   \n",
       "...           ...     ...        ...         ...         ...      ...   \n",
       "430015         启航      26     7510.0   71.275247   71.275247   7510.0   \n",
       "430025        ing      28    25158.0  138.128603  138.128603  25158.0   \n",
       "430026       金融服务      13    22769.0  116.528001  116.528001  22769.0   \n",
       "430046      rnsfx      28        9.0    0.179206    0.179206      9.0   \n",
       "430056        易智堂      28        8.0    0.154935    0.154935      8.0   \n",
       "\n",
       "        components    pagerank  doc_id  lda_classes  ...  tf_index  \\\n",
       "0         101329.0    6.660880       0            4  ...        26   \n",
       "1         101329.0  120.246907       0            4  ...        13   \n",
       "28        101329.0    6.660880       1            4  ...         1   \n",
       "29        101329.0  120.246907       1            4  ...        13   \n",
       "44        101329.0    1.849637       2            3  ...         3   \n",
       "...            ...         ...     ...          ...  ...       ...   \n",
       "430015    101329.0   64.845111    5002            7  ...         8   \n",
       "430025    101329.0  230.100278    5003            5  ...        12   \n",
       "430026    101329.0  238.894914    5003            5  ...        20   \n",
       "430046    101329.0    1.580474    5004            5  ...         1   \n",
       "430056    101329.0    1.739767    5005            5  ...         0   \n",
       "\n",
       "        word_distance_norm_index  cosine_index  euclidean_index  label  idf  \\\n",
       "0                             17             6                6    1.0   16   \n",
       "1                              3            14               16    1.0  278   \n",
       "28                            11             1                3    1.0   16   \n",
       "29                             4             9               11    1.0  278   \n",
       "44                             8             1                1    1.0    1   \n",
       "...                          ...           ...              ...    ...  ...   \n",
       "430015                         8             6                3    1.0   58   \n",
       "430025                         3             9               15    1.0  423   \n",
       "430026                        20             6               14    1.0  566   \n",
       "430046                         1             2                1    1.0    1   \n",
       "430056                         0             0                1    1.0    1   \n",
       "\n",
       "        idf_in_all_text  occr_times  select_times   ent_ctr  \n",
       "0              8.966938        16.0          16.0  0.938298  \n",
       "1              5.045692       141.0           2.0  0.014413  \n",
       "28             8.966938        16.0          16.0  0.938298  \n",
       "29             5.045692       141.0           2.0  0.014413  \n",
       "44            13.288866         NaN           NaN       NaN  \n",
       "...                 ...         ...           ...       ...  \n",
       "430015         7.547399        31.0           2.0  0.063798  \n",
       "430025         4.858414       181.0           1.0  0.005756  \n",
       "430026         4.112693       308.0          23.0  0.074564  \n",
       "430046        13.288866         NaN           NaN       NaN  \n",
       "430056        13.288866         NaN           NaN       NaN  \n",
       "\n",
       "[46764 rows x 122 columns]"
      ]
     },
     "execution_count": 125,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "fea_train_df_over_sam[fea_train_df_over_sam['label']==1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "start lgb\n",
      "Training until validation scores don't improve for 4000 rounds.\n",
      "[30]\tvalid_0's auc: 1\n",
      "[60]\tvalid_0's auc: 1\n",
      "[90]\tvalid_0's auc: 1\n",
      "[120]\tvalid_0's auc: 1\n",
      "[150]\tvalid_0's auc: 1\n",
      "[180]\tvalid_0's auc: 1\n",
      "[210]\tvalid_0's auc: 1\n",
      "[240]\tvalid_0's auc: 1\n",
      "[270]\tvalid_0's auc: 1\n",
      "[300]\tvalid_0's auc: 1\n",
      "[330]\tvalid_0's auc: 1\n",
      "[360]\tvalid_0's auc: 1\n",
      "[390]\tvalid_0's auc: 1\n",
      "[420]\tvalid_0's auc: 1\n",
      "[450]\tvalid_0's auc: 1\n",
      "[480]\tvalid_0's auc: 1\n",
      "[510]\tvalid_0's auc: 1\n",
      "[540]\tvalid_0's auc: 1\n",
      "[570]\tvalid_0's auc: 1\n",
      "[600]\tvalid_0's auc: 1\n",
      "[630]\tvalid_0's auc: 1\n",
      "[660]\tvalid_0's auc: 1\n",
      "[690]\tvalid_0's auc: 1\n",
      "[720]\tvalid_0's auc: 1\n",
      "[750]\tvalid_0's auc: 1\n",
      "[780]\tvalid_0's auc: 1\n",
      "[810]\tvalid_0's auc: 1\n",
      "[840]\tvalid_0's auc: 1\n",
      "[870]\tvalid_0's auc: 1\n",
      "[900]\tvalid_0's auc: 1\n",
      "[930]\tvalid_0's auc: 1\n",
      "[960]\tvalid_0's auc: 1\n",
      "[990]\tvalid_0's auc: 1\n",
      "[1020]\tvalid_0's auc: 1\n",
      "[1050]\tvalid_0's auc: 1\n",
      "[1080]\tvalid_0's auc: 1\n",
      "[1110]\tvalid_0's auc: 1\n",
      "[1140]\tvalid_0's auc: 1\n",
      "[1170]\tvalid_0's auc: 1\n",
      "[1200]\tvalid_0's auc: 1\n",
      "[1230]\tvalid_0's auc: 1\n",
      "[1260]\tvalid_0's auc: 1\n",
      "[1290]\tvalid_0's auc: 1\n",
      "[1320]\tvalid_0's auc: 1\n",
      "[1350]\tvalid_0's auc: 1\n",
      "[1380]\tvalid_0's auc: 1\n",
      "[1410]\tvalid_0's auc: 1\n",
      "[1440]\tvalid_0's auc: 1\n",
      "[1470]\tvalid_0's auc: 1\n",
      "[1500]\tvalid_0's auc: 1\n",
      "[1530]\tvalid_0's auc: 1\n",
      "[1560]\tvalid_0's auc: 1\n",
      "[1590]\tvalid_0's auc: 1\n",
      "[1620]\tvalid_0's auc: 1\n",
      "[1650]\tvalid_0's auc: 1\n",
      "[1680]\tvalid_0's auc: 1\n",
      "[1710]\tvalid_0's auc: 1\n",
      "[1740]\tvalid_0's auc: 1\n",
      "[1770]\tvalid_0's auc: 1\n",
      "[1800]\tvalid_0's auc: 1\n",
      "[1830]\tvalid_0's auc: 1\n",
      "[1860]\tvalid_0's auc: 1\n",
      "[1890]\tvalid_0's auc: 1\n",
      "[1920]\tvalid_0's auc: 1\n",
      "[1950]\tvalid_0's auc: 1\n",
      "[1980]\tvalid_0's auc: 1\n",
      "[2010]\tvalid_0's auc: 1\n",
      "[2040]\tvalid_0's auc: 1\n",
      "[2070]\tvalid_0's auc: 1\n",
      "[2100]\tvalid_0's auc: 1\n",
      "[2130]\tvalid_0's auc: 1\n",
      "[2160]\tvalid_0's auc: 1\n",
      "[2190]\tvalid_0's auc: 1\n",
      "[2220]\tvalid_0's auc: 1\n",
      "[2250]\tvalid_0's auc: 1\n",
      "[2280]\tvalid_0's auc: 1\n",
      "[2310]\tvalid_0's auc: 1\n",
      "[2340]\tvalid_0's auc: 1\n",
      "[2370]\tvalid_0's auc: 1\n",
      "[2400]\tvalid_0's auc: 1\n",
      "[2430]\tvalid_0's auc: 1\n",
      "[2460]\tvalid_0's auc: 1\n",
      "[2490]\tvalid_0's auc: 1\n",
      "Did not meet early stopping. Best iteration is:\n",
      "[3]\tvalid_0's auc: 1\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.7096774193548387"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import lightgbm as lgb\n",
    "from sklearn.metrics import f1_score\n",
    "fea_train_df=train_all\n",
    "fea=list(fea_train_df.columns)\n",
    "fea.remove(\"label\")\n",
    "fea.remove(\"doc_id\")\n",
    "fea.remove(\"word\")\n",
    "# fea.remove(\"occr_times\")[fea_train_df['label']==1]\n",
    "# fea.remove(\"select_times\")\n",
    "# fea.remove(\"ent_ctr\")\n",
    "# 'neighbour', 'hits_h', 'hits_a', 'degrees', 'components', 'pagerank',\n",
    "#fea=[i for i in fea if i not in ['neighbour', 'hits_h', 'hits_a', 'degrees', \n",
    "#                                 'components', 'pagerank']]\n",
    "val_using_df=fea_train_df[fea_train_df['label']==1][0:20]\n",
    "# val_using_df=fea_train_df[fea_train_df['label']==1]\n",
    "val_using_df=pd.concat([val_using_df,fea_train_df[0:20]])\n",
    "fea_train_df_over_sam=pd.concat([fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "\n",
    "                                fea_train_df],axis=0)\n",
    "# fea_train_df_over_sam=fea_train_df\n",
    "\n",
    "# fea_train_df_over_sam=fea_train_df_over_sam.sample(frac=1)\n",
    "train_x=fea_train_df_over_sam[fea]\n",
    "train_y=fea_train_df_over_sam['label']\n",
    "\n",
    "\n",
    "\n",
    "X_train = train_x\n",
    "y_train = train_y\n",
    "X_val = val_using_df[fea]\n",
    "y_val = val_using_df['label']\n",
    "\n",
    "\n",
    "params = {\n",
    "    'boosting_type': 'gbdt',\n",
    "    'objective': 'binary',\n",
    "    'metric': {'auc'}, \n",
    "    'num_leaves': 45,\n",
    "    'max_depth': -1,\n",
    "     'subsample_freq':1,\n",
    "     'feature_fraction':0.8,\n",
    "    'learning_rate': 0.01,\n",
    "    'colsample_bytree':0.8,\n",
    "    'subsample':0.9,\n",
    "     'reg_alpha':0.0,\n",
    "     'reg_lambda':1,\n",
    "    'min_child_weight':40,\n",
    "    \n",
    "}\n",
    "\n",
    "\n",
    "\n",
    "print(\"start lgb\")\n",
    "# cate_fra=['cixing','stop_in_word','lda_classes','kmeans_classes']\n",
    "lgb_train = lgb.Dataset(train_x,train_y)\n",
    "lgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train)\n",
    "\n",
    "gbm = lgb.train(params,\n",
    "                lgb_train,\n",
    "                num_boost_round=2500,\n",
    "                valid_sets=lgb_eval,\n",
    "                early_stopping_rounds=4000,\n",
    "               verbose_eval=30)\n",
    "\n",
    "\n",
    "preds = gbm.predict(X_val, num_iteration=gbm.best_iteration)\n",
    "pre=[]\n",
    "for i in preds:\n",
    "    if i>0.4:\n",
    "        pre.append(1)\n",
    "    else:\n",
    "        pre.append(0)\n",
    "f1_score(y_val,pre)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cixing, 2\n",
      "neighbour, 2\n",
      "hits_h, 7\n",
      "hits_a, 1\n",
      "degrees, 2\n",
      "components, 0\n",
      "pagerank, 6\n",
      "lda_classes, 3\n",
      "kmeans_classes, 4\n",
      "lsi_0, 2\n",
      "lsi_1, 0\n",
      "lsi_2, 9\n",
      "lsi_3, 2\n",
      "lsi_4, 0\n",
      "lsi_5, 0\n",
      "lsi_6, 2\n",
      "lsi_7, 1\n",
      "lsi_8, 0\n",
      "lsi_9, 1\n",
      "lsi_10, 4\n",
      "lsi_11, 0\n",
      "lsi_12, 0\n",
      "lsi_13, 2\n",
      "lsi_14, 5\n",
      "tf, 1\n",
      "stop_in_word, 8\n",
      "is_in_first_sent, 2\n",
      "is_in_last_sent, 0\n",
      "is_in_other_sent, 0\n",
      "sen_num, 0\n",
      "word_count_xiangdui_max, 4\n",
      "word_count_xiangdui_min, 2\n",
      "word_count_group_max, 0\n",
      "word_count_group_min, 0\n",
      "word_count_group_avg, 4\n",
      "word_count_group_min_xiangdui_max, 0\n",
      "all_word_count_group, 1\n",
      "gongsi_count_group, 0\n",
      "gongsi_self_div_max_index_group, 0\n",
      "gongsi_self_div_min_index_group, 0\n",
      "is_gongsi_in_title, 0\n",
      "is_gongsi_in_first_two_sen, 0\n",
      "gongsi_avg_index, 2\n",
      "gongsi_max_count, 0\n",
      "word_len, 0\n",
      "word_len_ratio, 0\n",
      "tf_ratio, 1\n",
      "tfidf, 0\n",
      "dai_in_word, 0\n",
      "bao_in_word, 0\n",
      "bi_in_word, 0\n",
      "gongsi_in_word, 1\n",
      "zhengquan_in_word, 0\n",
      "kuohao_in_word, 0\n",
      "text_rank, 1\n",
      "text_rank_geshu, 0\n",
      "text_rank_rank, 3\n",
      "baohan_max_len, 0\n",
      "baohan_min_len, 0\n",
      "baohan_self_div_other_count, 0\n",
      "baohan_self_div_other_count_reserve, 1\n",
      "text_len, 1\n",
      "title_len, 0\n",
      "content_len, 0\n",
      "title_div_text_len, 0\n",
      "avg_ocur_title, 0\n",
      "avg_ocur_content, 0\n",
      "word_distance_norm, 0\n",
      "Cosine, 1\n",
      "Euclidean, 1\n",
      "pearson_cor, 0\n",
      "pearson_pvalue, 0\n",
      "ocur_in_title, 0\n",
      "ocur_in_content, 0\n",
      "has_num, 0\n",
      "has_char, 0\n",
      "coocur_skew, 1\n",
      "coocur_var, 0\n",
      "coocur_mean, 3\n",
      "coocur_kurt, 0\n",
      "coocur_std, 0\n",
      "diff_coocur_mean, 0\n",
      "diff_coocur_var, 0\n",
      "diff_coocur_std, 0\n",
      "diff_coocur_skew, 0\n",
      "diff_coocur_kurt, 1\n",
      "mean_sim_tags, 11\n",
      "skew_sim_tags, 0\n",
      "kurt_sim_tags, 0\n",
      "var_sim_tags, 3\n",
      "std_sim_tags, 0\n",
      "diff_median_sim_tags, 0\n",
      "diff_var_sim_tags, 0\n",
      "diff_std_sim_tags, 0\n",
      "diff_mean_sim_tags, 0\n",
      "diff_skew_sim_tags, 0\n",
      "diff_kurt_sim_tags, 0\n",
      "be_include_sum, 0\n",
      "be_include_mean, 0\n",
      "be_include_var, 0\n",
      "be_include_std, 0\n",
      "be_include_skew, 0\n",
      "be_include_kurt, 0\n",
      "include_sum, 1\n",
      "include_mean, 0\n",
      "include_var, 0\n",
      "include_std, 0\n",
      "include_skew, 0\n",
      "include_kurt, 0\n",
      "tfidf_index, 1\n",
      "tf_index, 0\n",
      "word_distance_norm_index, 0\n",
      "cosine_index, 3\n",
      "euclidean_index, 1\n",
      "idf, 4\n",
      "idf_in_all_text, 5\n",
      "occr_times, 3\n",
      "select_times, 3\n",
      "ent_ctr, 3\n"
     ]
    }
   ],
   "source": [
    "importance = gbm.feature_importance()\n",
    "names = fea\n",
    "for index, im in enumerate(importance):\n",
    "    string = names[index] + ', ' + str(im)\n",
    "    print(string)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## xgboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-logloss:0.62714\teval-logloss:0.643876\n",
      "Multiple eval metrics have been passed: 'eval-logloss' will be used for early stopping.\n",
      "\n",
      "Will train until eval-logloss hasn't improved in 1000 rounds.\n",
      "[100]\ttrain-logloss:0.119728\teval-logloss:0.258903\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-116-fb498ebb619c>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     76\u001b[0m     \u001b[0mevals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mxgb_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'train'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mxgb_eval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'eval'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     77\u001b[0m     \u001b[0mearly_stopping_rounds\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1000\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 78\u001b[0;31m     \u001b[0mverbose_eval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     79\u001b[0m )\n\u001b[1;32m     80\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.5/site-packages/xgboost/training.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks, learning_rates)\u001b[0m\n\u001b[1;32m    202\u001b[0m                            \u001b[0mevals\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mevals\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    203\u001b[0m                            \u001b[0mobj\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeval\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeval\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 204\u001b[0;31m                            xgb_model=xgb_model, callbacks=callbacks)\n\u001b[0m\u001b[1;32m    205\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    206\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.5/site-packages/xgboost/training.py\u001b[0m in \u001b[0;36m_train_internal\u001b[0;34m(params, dtrain, num_boost_round, evals, obj, feval, xgb_model, callbacks)\u001b[0m\n\u001b[1;32m     72\u001b[0m         \u001b[0;31m# Skip the first update if it is a recovery step.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     73\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mversion\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;36m2\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 74\u001b[0;31m             \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     75\u001b[0m             \u001b[0mbst\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_rabit_checkpoint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     76\u001b[0m             \u001b[0mversion\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.5/site-packages/xgboost/core.py\u001b[0m in \u001b[0;36mupdate\u001b[0;34m(self, dtrain, iteration, fobj)\u001b[0m\n\u001b[1;32m   1019\u001b[0m         \u001b[0;32mif\u001b[0m \u001b[0mfobj\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1020\u001b[0m             _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, ctypes.c_int(iteration),\n\u001b[0;32m-> 1021\u001b[0;31m                                                     dtrain.handle))\n\u001b[0m\u001b[1;32m   1022\u001b[0m         \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1023\u001b[0m             \u001b[0mpred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtrain\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
     "# XGBoost variant of the LightGBM cell above: oversample positives, train on\n",
     "# GPU, and monitor logloss on a small held-out slice.\n",
     "from sklearn.model_selection import StratifiedKFold\n",
     "import xgboost as xgb\n",
     "# Shuffle the full training frame before slicing/oversampling.\n",
     "fea_train_df=train_all.sample(frac=1)\n",
     "# Feature columns: everything except the label / id / raw-text columns.\n",
     "fea=list(fea_train_df.columns)\n",
     "fea.remove(\"label\")\n",
     "fea.remove(\"doc_id\")\n",
     "fea.remove(\"word\")\n",
     "# fea.remove(\"occr_times\")\n",
     "# fea.remove(\"select_times\")\n",
     "# fea.remove(\"ent_ctr\")\n",
     "# 'neighbour', 'hits_h', 'hits_a', 'degrees', 'components', 'pagerank',\n",
     "#fea=[i for i in fea if i not in ['neighbour', 'hits_h', 'hits_a', 'degrees', \n",
     "#                                 'components', 'pagerank']]\n",
     "\n",
     "# Validation slice: 20 positives + 20 negatives.\n",
     "# NOTE(review): these rows also appear in the (oversampled) training data, so\n",
     "# eval-logloss is optimistic (train/validation leakage) -- confirm intent.\n",
     "val_using_df=fea_train_df[fea_train_df['label']==1][0:20]\n",
     "# val_using_df=fea_train_df[fea_train_df['label']==1]\n",
     "val_using_df=pd.concat([val_using_df,fea_train_df[fea_train_df['label']==0][0:20]])\n",
     "# Oversample: 6 extra copies of the positive class on top of the full frame.\n",
     "fea_train_df_over_sam=pd.concat([fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
     "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
     "                                fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
     "          \n",
     "                                fea_train_df],axis=0)\n",
     "fea_train_df_over_sam=fea_train_df_over_sam.sample(frac=1)\n",
     "train_x=fea_train_df_over_sam[fea]\n",
     "train_y=fea_train_df_over_sam['label']\n",
     "\n",
     "# Negative slice index: drop the last 0.1% of rows from training.\n",
     "val_size=int(-len(train_x)*0.001)\n",
     "\n",
     "X_train = train_x[:val_size].values\n",
     "y_train = train_y[:val_size].values\n",
     "X_val = val_using_df[fea].values\n",
     "y_val = val_using_df['label'].values\n",
     "\n",
     "params = {\n",
     "    'booster': 'gbtree',\n",
     "    'objective': 'binary:logistic',\n",
     "\n",
     "    'eval_metric': 'logloss',\n",
     "\n",
     "    'learning_rate': 0.0894,\n",
     "    'max_depth': 9,\n",
     "    # NOTE(review): 'max_leaves' only takes effect with grow_policy='lossguide'\n",
     "    # -- confirm it is doing anything here.\n",
     "    'max_leaves': 20,\n",
     "\n",
     "    'lambda': 2,\n",
     "    'alpha': 1,\n",
     "    'subsample': 0.8,\n",
     "    'colsample_bytree': 0.8,\n",
     "    'silent': 1,\n",
     "    'seed': 2019,\n",
     "\n",
     "    # Train on GPU.\n",
     "    'gpu_id': 0,\n",
     "    'tree_method': 'gpu_hist'\n",
     "}\n",
     "\n",
     "\n",
     "X_test=test_all[fea].values\n",
     "xgb_test=xgb.DMatrix(X_test)\n",
     "pred = np.zeros(len(test_all[fea]))\n",
     "\n",
     "\n",
     "## Single train/eval run (the K-fold import above is not used in this cell).\n",
     "\n",
     "train_X, train_y = X_train, y_train\n",
     "eval_X, eval_y = X_val, y_val\n",
     "\n",
     "xgb_train = xgb.DMatrix(train_X, train_y)\n",
     "xgb_eval = xgb.DMatrix(eval_X, eval_y)\n",
     "\n",
     "\n",
     "model = xgb.train(\n",
     "    params,\n",
     "    xgb_train,\n",
     "    num_boost_round=2000,\n",
     "    evals=[(xgb_train, 'train'), (xgb_eval, 'eval')],\n",
     "    early_stopping_rounds=1000,\n",
     "    verbose_eval=100\n",
     ")\n",
     "\n",
     "# pred += model.predict(xgb_test)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "importance = model.get_fscore().items()\n",
    "importance=[i[1] for i in importance]\n",
    "names = fea\n",
    "for index, im in enumerate(importance):\n",
    "    string = names[index] + ', ' + str(im)\n",
    "    print(string)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## catboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# NOTE(review): '!' commands run in a throwaway subshell, so 'source activate'\n",
     "# cannot change the running Jupyter kernel's environment -- this line has no\n",
     "# lasting effect.  Select a kernel from the tf_bisai env instead.\n",
     "!source activate tf_bisai"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import catboost as ctb\n",
    "\n",
    "\n",
    "from sklearn.metrics import f1_score\n",
    "fea_train_df=train_all.sample(frac=1)\n",
    "fea=list(fea_train_df.columns)\n",
    "fea.remove(\"label\")\n",
    "fea.remove(\"doc_id\")\n",
    "fea.remove(\"word\")\n",
    "# fea.remove(\"occr_times\")\n",
    "# fea.remove(\"select_times\")\n",
    "# fea.remove(\"ent_ctr\")\n",
    "# 'neighbour', 'hits_h', 'hits_a', 'degrees', 'components', 'pagerank',\n",
    "#fea=[i for i in fea if i not in ['neighbour', 'hits_h', 'hits_a', 'degrees', \n",
    "#                                 'components', 'pagerank']]\n",
    "val_using_df=fea_train_df[fea_train_df['label']==1][0:20]\n",
    "# val_using_df=fea_train_df[fea_train_df['label']==1]\n",
    "val_using_df=pd.concat([val_using_df,fea_train_df[fea_train_df['label']==0][0:20]])\n",
    "fea_train_df_over_sam=pd.concat([fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                 fea_train_df[fea_train_df['label']==1],fea_train_df[fea_train_df['label']==1],\n",
    "                                fea_train_df[fea_train_df['label']==0]],axis=0)\n",
    "\n",
    "\n",
    "\n",
    "fea_train_df_over_sam=fea_train_df_over_sam.sample(frac=1)\n",
    "train_x=fea_train_df_over_sam[fea]\n",
    "train_y=fea_train_df_over_sam['label']\n",
    "\n",
    "val_size=int(-len(train_x)*0.001)\n",
    "\n",
    "X_train = train_x[:val_size]\n",
    "y_train = train_y[:val_size]\n",
    "X_val = val_using_df[fea]\n",
    "y_val = val_using_df['label']\n",
    "\n",
    "\n",
    "ctb_model = ctb.CatBoostClassifier(iterations=500,learning_rate=0.05,max_depth=11,l2_leaf_reg=1,verbose=10)\n",
    "ctb_model.fit(train_x,train_y,eval_set=(X_val,y_val))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "importance = ctb_model.feature_importances_\n",
    "\n",
    "test_pre = ctb_model.predict_proba(test_features)[:,1]\n",
    "\n",
    "\n",
    "importance = gbm.feature_importance()\n",
    "names = fea\n",
    "for index, im in enumerate(importance):\n",
    "    string = names[index] + ', ' + str(im)\n",
    "    print(string)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预测"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## lgb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 137,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def get_is_new(ent_msg):\n",
    "    preds = gbm.predict(ent_msg, num_iteration=gbm.best_iteration)\n",
    "    pre=[]\n",
    "    for i in preds:\n",
    "        if i>0.4:\n",
    "            pre.append(1)\n",
    "        else:\n",
    "            pre.append(0)\n",
    "    return pre\n",
    "\n",
    "result=get_is_new(test_all[fea])\n",
    "\n",
    "# def get_is_new(ent_msg):\n",
    "#     preds = gbm.predict(ent_msg, num_iteration=gbm.best_iteration)\n",
    "#     pre=[i for i in preds]\n",
    "#     pre=pd.DataFrame(pre)\n",
    "#     return pre\n",
    "\n",
    "# result=get_is_new(test_all[fea])\n",
    "# result=pd.concat([test_all['word'],result],axis=1)\n",
    "# result.to_csv(\"test_score.csv\",index=None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## xgb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): dead cell kept for reference -- would binarize the xgboost\n",
     "# scores at 0.5; superseded by the averaged ensemble cells further down.\n",
     "# def get_is_new(preds):\n",
     "#     pre=[]\n",
     "#     for i in preds:\n",
     "#         if i>0.5:\n",
     "#             pre.append(1)\n",
     "#         else:\n",
     "#             pre.append(0)\n",
     "#     return pre\n",
     "# result=get_is_new(pred)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 模型融合 (model ensemble)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "preds_lgb = gbm.predict(test_all[fea], num_iteration=gbm.best_iteration)\n",
    "preds_lgb=[i for i in preds_lgb]\n",
    "preds_xgb = model.predict(xgb_test)\n",
    "preds_xgb=[i for i in preds_xgb]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "result=[(i+j)/2 for i,j in zip(preds_lgb,preds_xgb)]\n",
    "result=[1 if i>0.3 else 0 for i in result]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 138,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>unknownEntities</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>芒果小V;芒果达人;淘宝</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>四川恩威集团;恩威道源商城;恩威集团;恩威道源</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3</td>\n",
       "      <td>月月救急</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>4</td>\n",
       "      <td>墨菲;素店晨晨;晨晨</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>5</td>\n",
       "      <td>民间投资;insisted;拍雅安;民间投;钱旺实业;Qbo网</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4994</th>\n",
       "      <td>5000</td>\n",
       "      <td>前海云轩互联网金融服务有限公司;原沪深理财;深圳前海云轩互联网金融服务有限公司;理财官网;首...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4995</th>\n",
       "      <td>5001</td>\n",
       "      <td>阿里巴巴;BHEX;顶级投资;TokenFund;BHEX平台;BHT;了得资本;Blueh...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4996</th>\n",
       "      <td>5002</td>\n",
       "      <td>Agen;期货投资平台;移动交易;文华财经;逸富国际期货交易;华尔街交易员;国际期货交易;金...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4997</th>\n",
       "      <td>5003</td>\n",
       "      <td>想排油;广药王老吉;anta522;广药吉悠</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4998</th>\n",
       "      <td>5004</td>\n",
       "      <td>一鱼仔;趣渔乐</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>4999 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        id                                    unknownEntities\n",
       "0        1                                       芒果小V;芒果达人;淘宝\n",
       "1        2                            四川恩威集团;恩威道源商城;恩威集团;恩威道源\n",
       "2        3                                               月月救急\n",
       "3        4                                         墨菲;素店晨晨;晨晨\n",
       "4        5                    民间投资;insisted;拍雅安;民间投;钱旺实业;Qbo网\n",
       "...    ...                                                ...\n",
       "4994  5000  前海云轩互联网金融服务有限公司;原沪深理财;深圳前海云轩互联网金融服务有限公司;理财官网;首...\n",
       "4995  5001  阿里巴巴;BHEX;顶级投资;TokenFund;BHEX平台;BHT;了得资本;Blueh...\n",
       "4996  5002  Agen;期货投资平台;移动交易;文华财经;逸富国际期货交易;华尔街交易员;国际期货交易;金...\n",
       "4997  5003                             想排油;广药王老吉;anta522;广药吉悠\n",
       "4998  5004                                            一鱼仔;趣渔乐\n",
       "\n",
       "[4999 rows x 2 columns]"
      ]
     },
     "execution_count": 138,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Collect every entity string already labelled in the training data;\n",
    "# predicted candidates that merely repeat a known entity are dropped below.\n",
    "old_entities = []\n",
    "train_df = pd.read_csv(\"data/Round2_train.csv\", encoding=\"utf-8-sig\")\n",
    "for x in list(train_df[\"unknownEntities\"].fillna(\"\")):\n",
    "    old_entities.extend(x.split(\";\"))\n",
    "old_entities = set(old_entities)\n",
    "# Punctuation that must never appear inside a legitimate entity name.\n",
    "add_char = {']', '：', '~', '！', '%', '[', '《', '】', ';', '”', ':', '》', '？', '>', '/', '#', '。', '；', '&', '=', '，', '“', '【'}\n",
    "\n",
    "def islegitimate(x):\n",
    "    \"\"\"Return True when candidate entity ``x`` contains none of the banned\n",
    "    punctuation in ``add_char`` and was not already seen in the training\n",
    "    labels (``old_entities``).\"\"\"\n",
    "    # Builds an alternation of backslash-escaped characters, i.e.\n",
    "    # \"\\\\c1|\\\\c2|...\"; every member of add_char is punctuation, so the\n",
    "    # backslash escaping is regex-safe here.\n",
    "    if re.findall(\"\\\\\"+\"|\\\\\".join(add_char), x):\n",
    "        return False\n",
    "    if x in old_entities:\n",
    "        return False\n",
    "\n",
    "    return True\n",
    "\n",
    "# Assemble the (doc_id, word) pairs the ensemble classified as positive.\n",
    "result_all=pd.DataFrame()\n",
    "result_all['result']=result\n",
    "result_all['word']=test_all['word'].reset_index(drop=True)\n",
    "result_all['doc_id']=test_all['doc_id'].reset_index(drop=True)\n",
    "result_all=result_all[result_all['result']==1]\n",
    "result_all=result_all[['doc_id','word']]\n",
    "result_all=result_all.values.tolist()\n",
    "# Group accepted words per test document. doc_id - 10019 is used as a\n",
    "# 0-based slot index, which implies test doc ids start at 10019.\n",
    "# NOTE(review): confirm the 10019 offset matches the Round2 test ids.\n",
    "result_df=[[] for i in range(test_r2_len)]\n",
    "for i in result_all:\n",
    "    if islegitimate(i[1]):\n",
    "        result_df[i[0]-10019].append(i[1])\n",
    "# Deduplicate each document's entities and join with ';';\n",
    "# documents with no surviving entity become NaN.\n",
    "result_df=[';'.join(list(set(i))) if i !=[] else np.nan for i in result_df]\n",
    "result_df=pd.DataFrame(result_df)\n",
    "# \n",
    "\n",
    "\n",
    "# Attach the original test ids and name the submission columns.\n",
    "id_df=pd.read_csv('data/Round2_Test.csv',usecols=['id'])\n",
    "result_df=pd.concat([id_df,result_df],axis=1)\n",
    "result_df.columns=['id','unknownEntities']\n",
    "result_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 139,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the final submission (id, unknownEntities) without the row index.\n",
    "result_df.to_csv(\"new_submit/all_bert.csv\",index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the raw [doc_id, word] pairs kept by the classifier.\n",
    "result_all"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
