import os
import jieba

from epoch_logger import EpochLogger
from file_loader import FileLoader
from model_trainer import ModelTrainer


# Main program: train a Word2Vec model on company annual-report text,
# expand a set of core topic words by cosine similarity, and export results.
if __name__ == "__main__":
    # --- Configuration ---
    root_path = "./annual_reports"  # directory containing annual-report text files
    stopwords_path = "./stop_words/cn_stopwords.txt"  # Chinese stop-word list
    core_words = ["先进生产力", "高科技", "高效能", "高质量"]  # core/seed topic words
    similarity_threshold = 0.5  # cosine-similarity cutoff for word expansion
    similar_words_count = 80  # number of similar words to retrieve
    result_path = "./result"  # output directory
    model_save_path = "./model"  # model checkpoint directory
    # exist_ok=True avoids the racy check-then-create pattern
    os.makedirs(result_path, exist_ok=True)
    os.makedirs(model_save_path, exist_ok=True)

    # Register the core words with jieba BEFORE loading/tokenizing the texts
    # so they are segmented as single tokens. jieba.load_userdict expects a
    # file path or file-like object, not a list of words -- jieba.add_word is
    # the documented API for registering words held in memory.
    for word in core_words:
        jieba.add_word(word)

    # Load Chinese stop words and the raw report texts
    file_loader = FileLoader(root_path=root_path,
                             stopwords_path=stopwords_path,
                             years=("2023",),
                             max_no=100)
    # Load and preprocess the text data.
    # Presumably {company: {year: [tokens, ...]}} -- verify against FileLoader.
    documents_dict = file_loader.load_annual_reports()

    # Flatten to one token list per company for training:
    # [[company-1 tokens], [company-2 tokens], ...]
    # (keys are unused here, so iterate the values directly)
    documents = []
    for company_dict in documents_dict.values():
        company_data = []
        for company_annual_report_content in company_dict.values():
            company_data.extend(company_annual_report_content)
        documents.append(company_data)

    # Train the Word2Vec model (sg=1 => skip-gram; hs=0 => negative sampling)
    epoch_logger = EpochLogger(model_save_path)  # per-epoch logging callback
    model_trainer = ModelTrainer(sentences=documents,
                                 vector_size=128,
                                 window=5,
                                 min_count=2,
                                 workers=8,
                                 sg=1,
                                 epochs=100,
                                 alpha=0.025,
                                 min_alpha=0.0001,
                                 hs=0,
                                 compute_loss=True,
                                 callbacks=[epoch_logger])

    # Save the trained model (optional)
    model_trainer.save(f"{model_save_path}/word2vec.model")

    # Expand the core words via similarity and save the result
    similar_words = model_trainer.expand_and_save(core_words, f"{result_path}/core_and_expanded_words.csv",
                                                  similarity_threshold, similar_words_count)

    # Compute the deti measure and save as an Excel workbook
    model_trainer.calc_and_save_excel(
        documents_dict, similar_words, f"{result_path}/result.xlsx")
