# -*- coding: utf-8 -*-

import os
import wget
import gzip
import fasttext
import pandas as pd
from gensim.models.fasttext import FastText

from utils import *
from auto_correct import Speller

logger = logging.getLogger(__name__)

# v2.0: add the spelling-correction module
spell = Speller('ug')


# Decompress a .gz archive next to itself (same path, ".gz" suffix removed).
def un_gz(file_name):
    """Decompress the gzip archive *file_name* to the same path without ".gz".

    The archive itself is left in place; the caller deletes it if desired.
    Note: ``str.replace`` strips every ".gz" occurrence, not just the suffix —
    kept for backward compatibility with the original behavior.
    """
    f_name = file_name.replace(".gz", "")
    # Context managers guarantee both handles are closed even on error
    # (the original never closed the output file handle).
    with gzip.open(file_name, "rb") as g_file, open(f_name, "wb") as out:
        out.write(g_file.read())


# Sort key helper: pull the similarity score out of a (word, score) pair.
def takeSecond(elem):
    """Return the second item of *elem* (the similarity score)."""
    _word, score = elem[0], elem[1]
    return score


# Train a FastText embedding model.
def train(args):
    """Train a FastText model selected by ``args.fromPreTrainedModel``.

    Modes:
      * ``'fasttext'`` — fine-tune the official fastText pretrained vectors
        for ``args.lang`` (downloaded and decompressed on first use);
      * ``'custom'``   — train a fresh model from scratch on the project corpus.

    Returns:
        The trained gensim ``FastText`` model.

    Raises:
        ValueError: for any other ``fromPreTrainedModel`` value (the original
        fell through and crashed later with ``UnboundLocalError``).
    """
    if args.fromPreTrainedModel == 'fasttext':  # fine-tune the fastText pretrained model
        logger.info("Load from pretrained model...")
        if not os.path.exists(fasttext_model_file[args.lang]):
            logger.info("Downloading fastText pretrained model...")
            model_gz = fasttext_model_file[args.lang] + '.gz'
            wget.download(fasttext_model_url[args.lang], out=fasttext_path)
            un_gz(model_gz)
            os.remove(model_gz)  # keep only the decompressed model file
            logger.info("Done.")

        # Load the pretrained model for the requested language.
        # NOTE(review): `load_fasttext_format` is the gensim 3.x API while
        # `vector_size` below is gensim 4.x naming — confirm the installed
        # gensim version (4.x replaced this with `load_facebook_model`).
        model = FastText.load_fasttext_format(model_file=fasttext_model_file[args.lang])
        # Extend the vocabulary with the project-specific word list.
        model.build_vocab(corpus_file=vocab_file[args.lang], update=True)
    elif args.fromPreTrainedModel == 'custom':  # train from scratch
        logger.info("Training model from ground zero...")
        model = FastText(vector_size=10, alpha=0.025, window=5, min_count=1, min_n=3, max_n=6, word_ngrams=1)
        model.build_vocab(corpus_file=vocab_file[args.lang])
    else:
        raise ValueError(
            f"Unsupported fromPreTrainedModel: {args.fromPreTrainedModel!r} "
            "(expected 'fasttext' or 'custom')")

    logger.info("***** Running training *****")
    model.train(corpus_file=corpus_file[args.lang], epochs=args.num_train_epochs,
                total_examples=model.corpus_count, total_words=model.corpus_total_words)

    return model


# Evaluate the model: build a synonym table for every vocabulary word and save it as CSV.
def evaluate(model, args):
    """Generate a top-10 similar-word table for each vocabulary word and save it.

    Two modes, selected by ``args.exists_word``:
      * falsy  — take the model's raw top-10 neighbors per word, spell-correct
        each candidate (v2.0), and drop empty/duplicate corrections;
      * truthy — restrict candidates to words that exist in the vocabulary,
        keeping only pairs above ``args.sim_ratio``, best first, top 10.

    The resulting table is written as CSV under
    ``table_path/<mode>/table_save_file[args.lang]``.
    """
    vocab_df = pd.read_csv(vocab_file[args.lang], header=None)
    vocab_df.columns = ['words']
    similar = []

    if not args.exists_word:  # unrestricted best-match table
        sim_type = 'all'
        for word in vocab_df['words']:
            # v2.0: run each candidate through spelling correction, skipping
            # empty corrections and duplicates.
            sim = []
            seen = set()  # O(1) duplicate check (was an O(k) list scan per candidate)
            for cand, score in model.wv.most_similar(word, topn=10):
                corrected = spell.autocorrect_sentence(cand, mode='exist')
                if corrected != '' and corrected not in seen:
                    seen.add(corrected)
                    sim.append((corrected, score))
            similar.append(sim)

    else:  # best-match table limited to words that exist on the server
        sim_type = 'exists'
        for wi in vocab_df['words']:
            sim = []
            for wj in vocab_df['words']:
                sim_score = model.wv.similarity(wi, wj)
                if sim_score > args.sim_ratio:  # keep only pairs above the threshold
                    sim.append((wj, sim_score))
            sim.sort(key=takeSecond, reverse=True)  # highest similarity first
            sim = sim[:10]  # keep the top 10 results
            similar.append(sim)

    similar_df = pd.DataFrame(similar)
    # Align each word with its matches by row index, keeping words with no match.
    simvocab_df = pd.merge(vocab_df, similar_df, how='outer', left_index=True, right_index=True)

    # Save the synonym table; utf-8-sig adds a BOM so Excel renders non-ASCII text.
    # `index`/`header` take booleans per the to_csv API (the original passed 0).
    simvocab_df.to_csv(os.path.join(table_path, sim_type, table_save_file[args.lang]),
                       encoding='utf-8-sig', index=False, header=False)


def save_model(model, args):
    """Persist *model* to ``model_save_file[args.lang]``.

    Both training modes save to the same path; the original duplicated the
    identical ``model.save`` call in two branches and silently did nothing
    for any other mode — now an explicit ValueError.
    """
    # Validate before logging so a bad mode fails fast and loudly.
    if args.fromPreTrainedModel not in ('fasttext', 'custom'):
        raise ValueError(
            f"Unsupported fromPreTrainedModel: {args.fromPreTrainedModel!r} "
            "(expected 'fasttext' or 'custom')")
    logger.info("Saving trained model...")
    model.save(model_save_file[args.lang])


def load_model(args):
    """Load a previously saved model from ``model_save_file[args.lang]``.

    Both training modes load from the same path; the original duplicated the
    identical ``FastText.load`` call and, for any other mode, crashed on the
    final ``return`` with ``UnboundLocalError`` — now an explicit ValueError.
    """
    # Validate before logging so a bad mode fails fast and loudly.
    if args.fromPreTrainedModel not in ('fasttext', 'custom'):
        raise ValueError(
            f"Unsupported fromPreTrainedModel: {args.fromPreTrainedModel!r} "
            "(expected 'fasttext' or 'custom')")
    logger.info("Loading trained model...")
    return FastText.load(model_save_file[args.lang])


if __name__ == '__main__':
    # Entry-point placeholder — training/evaluation is presumably driven by
    # another script that imports this module (TODO confirm the actual driver).
    pass
