#!/usr/bin/env python
# -*- coding: utf-8  -*-
# Training script: obtain word vectors with fastText

import os
import re
from html import unescape

import pandas as pd
from fasttext import load_model, train_supervised, train_unsupervised
from sklearn.model_selection import train_test_split

def cleanReview(subject):
    """Normalize one raw text string: strip HTML, drop punctuation, lower-case.

    Bug fix: the original body called ``BeautifulSoup``, which is never
    imported anywhere in this file, so the function raised ``NameError``
    whenever ``config['isClean']`` was True.  HTML stripping is now done
    with the standard library (regex tag removal + entity unescaping),
    which needs no third-party dependency.

    :param subject: raw text, possibly containing HTML markup
    :return: cleaned, lower-cased, single-space-separated text
    """
    # Replace HTML tags with a space, then decode entities such as &amp;
    text = unescape(re.sub(r"<[^>]+>", " ", subject))
    # Delete the same punctuation characters the original removed
    # (\ ' / " , . ? ( )) in a single C-level pass instead of 9 chained
    # .replace() calls.
    text = text.translate(str.maketrans("", "", "\\'/\",.?()"))
    # Lower-case and re-join on single spaces (split() also collapses the
    # extra whitespace introduced by tag removal).
    return " ".join(word.lower() for word in text.split())

if __name__ == '__main__':
    # --- Configuration ----------------------------------------------------
    config = {}
    config['isClean'] = False        # whether to run cleanReview on the text column
    config['model'] = 'skipgram'     # fastText unsupervised training mode
    config['modeldir'] = "../vec/"
    config['status'] = '_re'         # suffix shared by data/model/vector files
    config['textColName'] = "text"
    # Demo toggle. NOTE(review): the original assigned True and then
    # immediately overwrote it with False; the effective value is False.
    config['isDemo'] = False

    if config['isDemo']:
        config['commonPath'] = "../data/"
        config['trainPath'] = "{0}train_set_demo{1}.csv".format(config['commonPath'], config['status'])
        config['testPath'] = "{0}test_set_demo{1}.csv".format(config['commonPath'], config['status'])
        config['allPath'] = "{0}all{1}.csv".format(config['commonPath'], config['status'])
    else:
        config['commonPath'] = "D:/project/python_wp/nlp/team-learning-nlp/NewsTextClassification/data/"
        config['trainPath'] = "{0}train_set{1}.csv".format(config['commonPath'], config['status'])
        config['testPath'] = "{0}test_a{1}.csv".format(config['commonPath'], config['status'])
        config['allPath'] = "{0}all{1}.csv".format(config['commonPath'], config['status'])

    config['modelpath'] = "{0}{1}{2}.bin".format(config['modeldir'], config['model'], config['status'])
    config['vectorpath'] = "{0}{1}{2}.vector".format(config['modeldir'], config['model'], config['status'])
    print("config:{}".format(config))

    # --- Merge labelled and unlabelled text into one corpus file ----------
    labelPd = pd.read_csv(config['trainPath'], encoding="utf-8", sep="\t")
    unlabelPd = pd.read_csv(config['testPath'], encoding="utf-8", sep="\t")

    if config['isClean']:
        unlabelPd[config['textColName']] = unlabelPd[config['textColName']].apply(cleanReview)
        labelPd[config['textColName']] = labelPd[config['textColName']].apply(cleanReview)

    newDf = pd.concat([labelPd[config['textColName']], unlabelPd[config['textColName']]], axis=0)
    newDf.to_csv(config['allPath'], encoding="utf-8", index=None, header=0)

    # --- Train unsupervised word vectors ----------------------------------
    model = train_unsupervised(config['allPath'], model=config['model'])
    model.save_model(config['modelpath'])

    # model = load_model(config['modelpath'])

    # --- Export vectors in word2vec text format ---------------------------
    # Bug fix: the original zipped get_words() with get_output_matrix() rows.
    # The output matrix holds the context (output) vectors used for negative
    # sampling, not the per-word embeddings; the official bin-to-vec export
    # uses get_word_vector() instead.  Also: open the file with an explicit
    # encoding inside a `with` block so it is closed even on error, and write
    # the header once before the loop instead of testing `num == 0` each turn.
    words = model.get_words()
    with open(config['vectorpath'], "w", encoding="utf-8") as fo:
        # word2vec-style header: vocabulary size and vector dimension
        fo.write(f"{len(words)} {model.get_dimension()}\n")
        for word in words:
            vec = " ".join(str(v) for v in model.get_word_vector(word))
            fo.write(f"{word} {vec}\n")