package com.campus.counseling.service.impl;

import com.campus.counseling.config.LSTMConfig;
import com.campus.counseling.service.Word2VecTrainer;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.WordDictionary;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.deeplearning4j.models.word2vec.Word2Vec;
import org.deeplearning4j.text.sentenceiterator.BasicLineIterator;
import org.deeplearning4j.text.sentenceiterator.SentenceIterator;
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor;
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory;
import org.deeplearning4j.text.tokenization.tokenizerfactory.TokenizerFactory;
import org.springframework.core.io.ClassPathResource;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

@Slf4j
@Service
@RequiredArgsConstructor
public class Word2VecTrainerImpl implements Word2VecTrainer {

    private final LSTMConfig lstmConfig;

    // Created in init() after dependency injection, hence not final.
    private JiebaSegmenter segmenter;

    // Baseline emotion lexicon used by validateTrainedModel() as a qualitative
    // sanity check on the trained embeddings.
    private static final List<String> EMOTION_WORDS = Arrays.asList(
        "开心", "快乐", "兴奋", "愉悦",
        "悲伤", "难过", "痛苦", "压抑",
        "焦虑", "紧张", "不安", "恐惧",
        "平静", "放松", "安心", "满意"
    );

    // Dictionary resource paths. NOTE(review): currently unused — intended for
    // stop-word filtering / custom-dictionary loading; wire them up or remove.
    private static final String STOP_WORDS_PATH = "dict/stop_words.txt";
    private static final String USER_DICT_PATH = "dict/user_dict.txt";

    /** Creates the Jieba segmenter once the Spring bean has been constructed. */
    @PostConstruct
    public void init() {
        segmenter = new JiebaSegmenter();
        log.info("结巴分词器初始化成功");
    }

    /**
     * Trains a Word2Vec model on the given raw (unsegmented) sentences.
     *
     * <p>Each sentence is segmented with Jieba and written to a temporary
     * UTF-8 corpus file (one space-joined sentence per line), which is then
     * fed to DL4J's Word2Vec trainer. The temp file is always removed,
     * including on failure.
     *
     * @param sentences raw sentences; entries whose segmentation is blank are skipped
     * @return the trained model
     * @throws RuntimeException wrapping any I/O or training failure
     */
    @Override
    public Word2Vec trainWord2Vec(List<String> sentences) {
        File tempFile = null;
        try {
            // 1. Segment the corpus and write it to a temp file.
            tempFile = File.createTempFile("word2vec-training", ".txt");
            writeSegmentedCorpus(sentences, tempFile);

            // 2. Corpus iterator + token normalization for training.
            SentenceIterator iterator = new BasicLineIterator(tempFile);
            TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();
            tokenizerFactory.setTokenPreProcessor(token -> {
                token = token.toLowerCase().trim();
                // Keep only Chinese words and digit runs; everything else
                // is collapsed to "" and dropped from the vocabulary.
                if (token.matches("[\\u4e00-\\u9fa5]+|\\d+")) {
                    return token;
                }
                return "";
            });

            // 3. Train — parameters tuned for large-scale corpora.
            Word2Vec vec = new Word2Vec.Builder()
                .minWordFrequency(5)
                .iterations(10)
                .layerSize(lstmConfig.getVectorSize())
                .seed(42)
                .windowSize(7)
                .workers(Runtime.getRuntime().availableProcessors())
                .iterate(iterator)
                .tokenizerFactory(tokenizerFactory)
                .build();
            vec.fit();

            // 4. Report and sanity-check the result (validateTrainedModel was
            // previously dead code; it only logs, so calling it is safe).
            log.info("训练完成 - 词汇表大小: {}", vec.getVocab().numWords());
            log.info("向量维度: {}", vec.getLayerSize());
            validateTrainedModel(vec);

            return vec;
        } catch (Exception e) {
            log.error("Word2Vec训练失败: {}", e.getMessage(), e);
            throw new RuntimeException("Word2Vec训练失败", e);
        } finally {
            // Clean up the temp corpus on success AND failure paths
            // (the original leaked the file whenever training threw).
            if (tempFile != null && !tempFile.delete()) {
                log.warn("临时文件删除失败: {}", tempFile.getAbsolutePath());
            }
        }
    }

    /**
     * Segments each sentence with Jieba and writes the non-blank results to
     * {@code corpus}, one space-joined sentence per line, explicitly UTF-8
     * encoded (the original FileWriter used the platform default charset,
     * which corrupts Chinese text on non-UTF-8 platforms).
     *
     * @param sentences raw input sentences
     * @param corpus    destination file, overwritten
     * @throws IOException if writing fails
     */
    private void writeSegmentedCorpus(List<String> sentences, File corpus) throws IOException {
        int batchSize = 1000;
        int processed = 0;
        try (BufferedWriter writer =
                 Files.newBufferedWriter(corpus.toPath(), StandardCharsets.UTF_8)) {
            for (String sentence : sentences) {
                String segmentedSentence = segmentSentence(sentence);
                if (!segmentedSentence.trim().isEmpty()) {
                    writer.write(segmentedSentence + "\n");
                    processed++;
                    // Progress log every 1000 accepted lines.
                    if (processed % batchSize == 0) {
                        log.info("已处理 {} 行数据", processed);
                    }
                }
            }
        }
    }

    /** Segments one sentence with Jieba and joins the tokens with single spaces. */
    private String segmentSentence(String sentence) {
        return String.join(" ", segmenter.sentenceProcess(sentence));
    }

    /**
     * Logs whether each baseline emotion word made it into the vocabulary
     * and, if so, its three nearest neighbours — a quick qualitative check
     * that training produced sensible embeddings. Log-only; never throws.
     */
    private void validateTrainedModel(Word2Vec model) {
        for (String word : EMOTION_WORDS) {
            if (!model.hasWord(word)) {
                log.warn("基础情感词 '{}' 未能加入词汇表", word);
            } else {
                Collection<String> nearest = model.wordsNearest(word, 3);
                log.info("词语 '{}' 的近义词: {}", word, nearest);
            }
        }
        int vocabSize = model.getVocab().numWords();
        log.info("训练后的词汇表大小: {}", vocabSize);
    }
}