# -*- coding:utf8 -*-
# @Time : 2022/12/2 3:39 下午
# @Author : WanJie Wu

import os.path
import fasttext
from tqdm import tqdm
import _pickle as pickle
from gensim.models import Word2Vec
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

from app.src.embedding import nltk


class TrainEncodingModel(object):
    """Train text-encoding models from a raw-text corpus and save them to disk.

    Supported backends: tf-idf, tf-idf + truncated SVD, word2vec, fasttext.
    Internal training data format: [[word1, word2, word3, ...], ...]
    (one word list per corpus sentence).
    """

    def __init__(self, corpus_path, output_model_dir, emb_size=64, segment_type="ltp"):
        """
        :param corpus_path: UTF-8 text file, one sentence per line
        :param output_model_dir: directory where trained models are written
        :param emb_size: embedding dimension (used by word2vec)
        :param segment_type: segmenter backend, one of "lac"/"ltp"/"hanlp"/"jieba"
        :raises ValueError: if segment_type is not a supported backend
        """
        # Validate with an explicit raise instead of `assert`, which is
        # silently stripped when Python runs with -O.
        if segment_type not in ("lac", "ltp", "hanlp", "jieba"):
            raise ValueError(
                "segment_type must be one of lac/ltp/hanlp/jieba, got %r" % (segment_type,)
            )
        self.output_model_dir = output_model_dir
        # Make sure the save target exists before any train_by_* call.
        os.makedirs(output_model_dir, exist_ok=True)
        self.corpus = self.read_src_corpus(corpus_path)

        self.segment = self.load_segment(segment_type)
        self.train_data = self.segment_words()
        self.emb_size = emb_size

    @staticmethod
    def load_segment(segment_type):
        """Instantiate the word-segmentation backend for ``segment_type``.

        Any value other than lac/ltp/hanlp falls back to the jieba segmenter,
        matching the original if/elif chain.
        """
        segmenters = {
            "lac": nltk.SegmentLAC,
            "ltp": nltk.SegmentLTP,
            "hanlp": nltk.SegmentHLP,
        }
        return segmenters.get(segment_type, nltk.SegmentJB)()

    @staticmethod
    def read_src_corpus(corpus_path):
        """Read the source corpus: one sentence per line, stripped.

        Blank lines are skipped so they do not become empty training
        sentences downstream.
        """
        with open(corpus_path, "r", encoding="utf8") as f:
            # Iterate the file lazily instead of materializing f.readlines().
            return [stripped for line in f if (stripped := line.strip())]

    @staticmethod
    def load_stop_words(stop_words_path):
        """Read stop words (one per line); return [] when no path is given."""
        if not stop_words_path:
            return []
        with open(stop_words_path, "r", encoding="utf8") as f:
            return [line.strip() for line in f]

    def segment_words(self):
        """Segment every corpus sentence into a list of words."""
        return [self.segment.ss_segment(sentence) for sentence in tqdm(self.corpus)]

    def train_by_tfidf(self):
        """Fit a tf-idf vectorizer on the corpus and pickle it as tfidf.pkl."""
        train_data = [" ".join(words) for words in self.train_data]
        model = TfidfVectorizer(
            analyzer="word",
            max_df=1.0,
            min_df=1,
            ngram_range=(1, 1),
        )
        model.fit_transform(train_data)
        with open(os.path.join(self.output_model_dir, "tfidf.pkl"), "wb") as f:
            pickle.dump(model, f)

    def train_by_tfidf_svd(self):
        """Fit tf-idf, then reduce it to 128 dimensions with truncated SVD.

        Saves the vectorizer as svd_tfidf.pkl and the SVD model as svd.pkl.
        """
        train_data = [" ".join(words) for words in self.train_data]
        # NOTE(review): integer max_df/min_df are treated by sklearn as
        # absolute document counts, not proportions — confirm these
        # thresholds are intentional for the target corpus size.
        mid_model = TfidfVectorizer(
            analyzer="word",
            max_df=40,
            min_df=20,
            ngram_range=(1, 1),
        )
        result = mid_model.fit_transform(train_data)
        with open(os.path.join(self.output_model_dir, "svd_tfidf.pkl"), "wb") as f:
            pickle.dump(mid_model, f)
        tsd = TruncatedSVD(
            n_components=128,
        )
        # Feed the sparse tf-idf matrix directly: TruncatedSVD is designed
        # for scipy sparse input, and densifying via .toarray() would
        # allocate n_docs * vocab_size * 8 bytes (float64) for nothing.
        tsd.fit_transform(result)
        with open(os.path.join(self.output_model_dir, "svd.pkl"), "wb") as f:
            pickle.dump(tsd, f)

    def train_by_word2vec(self, min_count=2, worker=4, window=5):
        """Train a CBOW word2vec model and save it as word2vec.bin.

        :param min_count: minimum word frequency for a word to be kept
        :param worker: number of worker threads
        :param window: context window size
        """
        model = Word2Vec(
            sentences=self.train_data,
            vector_size=self.emb_size,
            min_count=min_count,
            workers=worker,
            window=window,
            sg=0,        # 0 = CBOW algorithm, 1 = skip-gram
            hs=1,        # 1 = hierarchical softmax ENABLED
            negative=0,  # 0 = negative sampling disabled
        )
        model.save(os.path.join(self.output_model_dir, "word2vec.bin"))

    def train_by_fasttext(self):
        """Train an unsupervised fasttext (skip-gram) model, saved as fasttext.bin.

        fasttext only trains from a file on disk, so the segmented corpus is
        written to a temporary text file that is always removed afterwards.
        """
        tmp_file = "fasttext.txt"
        with open(tmp_file, "w", encoding="utf8") as f1:
            for sentence in self.train_data:
                f1.write(" ".join(sentence) + "\n")
        try:
            model = fasttext.train_unsupervised(
                tmp_file,
                model="skipgram",
                lr=0.1,
                epoch=50,
                dim=128,
                ws=5,  # window size
            )
        finally:
            # Clean up the temp file even if training raises.
            os.remove(tmp_file)
        model.save_model(os.path.join(self.output_model_dir, "fasttext.bin"))


if __name__ == "__main__":
    # Demo entry point: train 64-dim word2vec embeddings over the
    # competition corpus, segmenting with jieba.
    trainer = TrainEncodingModel(
        corpus_path="/note/nlp_algo/app/data/competition/content.txt",
        output_model_dir="/data/output",
        emb_size=64,
        segment_type="jieba",
    )
    trainer.train_by_word2vec()
