const Goods = require("../model/goodsModel");
const Comment = require("../model/commentModel");
const { Op } = require("sequelize");
const tf = require('@tensorflow/tfjs');

// 简单 tokenizer 实现
// Minimal Keras-style tokenizer: builds a frequency-ranked vocabulary and
// converts texts to sequences of integer token ids.
class Tokenizer {
  constructor(vocabSize = 1000) {
    this.wordIndex = {};
    this.indexWord = {};
    this.vocabSize = vocabSize;
    this.currentIndex = 1; // index 0 is reserved for padding
  }

  // Build the vocabulary from an array of texts: count token frequencies,
  // rank by count, and keep the top (vocabSize - 1) tokens.
  fitOnTexts(texts) {
    const counts = {};
    for (const text of texts) {
      for (const word of this._tokenize(text)) {
        counts[word] = (counts[word] || 0) + 1;
      }
    }

    // Most frequent first; cap at vocabSize - 1 (slot 0 stays padding).
    const ranked = Object.keys(counts)
      .sort((a, b) => counts[b] - counts[a])
      .slice(0, this.vocabSize - 1);

    for (const word of ranked) {
      if (!this.wordIndex[word]) {
        this.wordIndex[word] = this.currentIndex;
        this.indexWord[this.currentIndex] = word;
        this.currentIndex += 1;
      }
    }
  }

  // Map each text to its token-id sequence; out-of-vocabulary tokens are
  // dropped entirely (no OOV placeholder index).
  textsToSequences(texts) {
    const sequences = [];
    for (const text of texts) {
      const seq = [];
      for (const word of this._tokenize(text)) {
        const idx = this.wordIndex[word] || 0;
        if (idx !== 0) {
          seq.push(idx);
        }
      }
      sequences.push(seq);
    }
    return sequences;
  }

  // Naive Chinese tokenization: one token per character, whitespace dropped.
  // (For English one would split on /\s+/ after lowercasing instead.)
  _tokenize(text) {
    return text.split('').filter(char => char.trim() !== '');
  }
}

// Goods recommendation service: ranks goods by comment volume and LSTM-based
// sentiment, with a keyword fallback while the model is untrained.
class RecommendService {
  constructor() {
    this.model = null;
    this.tokenizer = new Tokenizer(1000); // vocabulary size, shared with the embedding layer
    this.maxLen = 50; // maximum number of tokens kept per comment
    this.isModelTrained = false;
    this.initializeModel();
    // Fire-and-forget bootstrap training; loadInitialData() catches and logs
    // its own errors, so this floating promise cannot reject unhandled.
    this.loadInitialData();
  }

  // Build and compile the LSTM binary sentiment classifier.
  initializeModel() {
    this.model = tf.sequential();

    // Embedding layer: token ids -> dense 32-dim vectors.
    this.model.add(tf.layers.embedding({
      inputDim: this.tokenizer.vocabSize,
      outputDim: 32,
      inputLength: this.maxLen
    }));

    // LSTM layer.
    this.model.add(tf.layers.lstm({ units: 32 }));

    // Dense head: sigmoid score in [0, 1] (1 = positive sentiment).
    this.model.add(tf.layers.dense({ units: 1, activation: 'sigmoid' }));

    this.model.compile({
      optimizer: tf.train.adam(0.001),
      loss: 'binaryCrossentropy',
      metrics: ['accuracy']
    });

    console.log("LSTM 模型初始化完成");
  }

  // Seed the tokenizer and model with a tiny hand-labelled dataset so the
  // service can predict before any real training data arrives.
  async loadInitialData() {
    try {
      const initialComments = [
        '好', '舒适', '满意', '划算', '推荐', '喜欢', '不错', '优秀',
        '差', '不舒服', '失望', '贵', '不推荐', '退货', '一般'
      ];
      const initialLabels = [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0];

      // The tokenizer vocabulary must be fitted before texts can be encoded.
      this.tokenizer.fitOnTexts(initialComments);

      await this.trainModel(initialComments, initialLabels);
      console.log("初始训练完成");
    } catch (error) {
      console.error("初始训练失败:", error);
    }
  }

  // Train the model on parallel arrays of texts and 0/1 sentiment labels.
  // Throws when the inputs are empty or misaligned.
  async trainModel(texts, labels) {
    if (texts.length === 0 || texts.length !== labels.length) {
      throw new Error("训练数据和标签数量不匹配");
    }

    try {
      // Encode texts as fixed-length integer sequences.
      const sequences = this.tokenizer.textsToSequences(texts);
      const paddedSequences = this.padSequences(sequences, this.maxLen);

      const xTrain = tf.tensor2d(paddedSequences, [paddedSequences.length, this.maxLen]);
      const yTrain = tf.tensor1d(labels, 'float32');

      try {
        await this.model.fit(xTrain, yTrain, {
          epochs: 10,
          batchSize: 8,
          validationSplit: 0.2,
          callbacks: {
            onEpochEnd: (epoch, logs) => {
              // tfjs reports the metric as `acc` or `accuracy` depending on
              // version; guard so logging can never crash the training loop.
              const acc = logs.acc !== undefined ? logs.acc : logs.accuracy;
              const accStr = acc !== undefined ? acc.toFixed(4) : 'n/a';
              console.log(`Epoch ${epoch + 1}: loss = ${logs.loss.toFixed(4)}, acc = ${accStr}`);
            }
          }
        });

        this.isModelTrained = true;
      } finally {
        // Always release tensor memory — the original disposed only on the
        // success path, leaking xTrain/yTrain whenever fit() threw.
        xTrain.dispose();
        yTrain.dispose();
      }
    } catch (error) {
      console.error("训练模型失败:", error);
      throw error;
    }
  }

  // Right-pad each sequence with 0 (the padding index) to exactly maxLen,
  // truncating longer sequences from the tail.
  padSequences(sequences, maxLen) {
    return sequences.map(seq => {
      const padded = new Array(maxLen).fill(0);
      const len = Math.min(seq.length, maxLen);
      for (let i = 0; i < len; i++) {
        padded[i] = seq[i];
      }
      return padded;
    });
  }

  // Score each comment's sentiment with the LSTM. Falls back to the keyword
  // analyzer when the model is untrained, the input is empty, or prediction
  // fails. Returns plain objects: {...attributes, sentiment, sentimentScore}.
  async analyzeSentiment(comments) {
    if (!this.isModelTrained || comments.length === 0) {
      return this.simpleSentimentAnalysis(comments);
    }

    try {
      // Encode comment bodies (missing content is treated as empty).
      const sequences = this.tokenizer.textsToSequences(comments.map(c => c.content || ''));
      const paddedSequences = this.padSequences(sequences, this.maxLen);

      const inputTensor = tf.tensor2d(paddedSequences, [paddedSequences.length, this.maxLen]);
      const outputTensor = this.model.predict(inputTensor);
      let predictions;
      try {
        predictions = await outputTensor.array();
      } finally {
        inputTensor.dispose();
        // Fix: the prediction tensor was never disposed before — a memory
        // leak on every call (only inputTensor was released).
        outputTensor.dispose();
      }

      // Threshold the sigmoid score at 0.5 for the binary label.
      return comments.map((comment, index) => ({
        ...this._toPlain(comment),
        sentiment: predictions[index][0] > 0.5 ? 1 : 0,
        sentimentScore: predictions[index][0]
      }));
    } catch (error) {
      console.error("LSTM 情感分析失败:", error);
      return this.simpleSentimentAnalysis(comments);
    }
  }

  // Keyword fallback: a comment is negative iff it contains any known
  // negative keyword. Scores are fixed priors (0.2 negative / 0.8 positive).
  simpleSentimentAnalysis(comments) {
    const negativeWords = ["差", "糟", "烂", "假", "退", "不喜欢", "垃圾"];
    return comments.map(comment => {
      const content = comment.content || '';
      const isNegative = negativeWords.some(word => content.includes(word));
      return {
        ...this._toPlain(comment),
        sentiment: isNegative ? 0 : 1,
        sentimentScore: isNegative ? 0.2 : 0.8
      };
    });
  }

  // Sequelize model instances must be flattened to plain attribute objects
  // before spreading: spreading the instance directly copies internals such
  // as dataValues/_previousDataValues instead of the row's attributes.
  _toPlain(record) {
    return record && typeof record.toJSON === 'function' ? record.toJSON() : record;
  }

  // Paginated goods list, enriched with comment count, positive-sentiment
  // rate and the three most recent analyzed comments, ordered by a simple
  // popularity score (commentCount * sentimentRate, highest first).
  async findGoods(pageNum, pageSize) {
    const offset = (pageNum - 1) * pageSize;

    const { count, rows: goodsList } = await Goods.findAndCountAll({
      offset: offset,
      limit: Number(pageSize), // coerce: pageSize may arrive as a string from the query layer
    });

    const enhancedGoods = await Promise.all(goodsList.map(async (goods) => {
      // Only the 10 most recent comments feed the sentiment rate.
      const comments = await Comment.findAll({
        where: { goods_id: goods.id },
        limit: 10,
        order: [['comment_time', 'DESC']]
      });

      const analyzedComments = await this.analyzeSentiment(comments);
      const positiveCount = analyzedComments.filter(c => c.sentiment === 1).length;
      // 0.7 is a neutral-optimistic prior for goods with no comments yet.
      const sentimentRate = comments.length > 0
        ? positiveCount / comments.length
        : 0.7;

      return {
        ...goods.toJSON(),
        commentCount: comments.length,
        sentimentRate: parseFloat(sentimentRate.toFixed(2)),
        recentComments: analyzedComments.slice(0, 3)
      };
    }));

    enhancedGoods.sort((a, b) => {
      const scoreA = a.commentCount * a.sentimentRate;
      const scoreB = b.commentCount * b.sentimentRate;
      return scoreB - scoreA;
    });

    return {
      pageNum,
      pageSize,
      total: count,
      list: enhancedGoods,
    };
  }
}

// Export a shared singleton; constructing it immediately builds the model and
// kicks off the asynchronous bootstrap training in loadInitialData().
module.exports = new RecommendService();