from sentence_transformers import SentenceTransformer
import numpy as np
from sklearn.cluster import DBSCAN
from datetime import datetime
import time
from collections import defaultdict
import threading
import logging
from peewee import *
from api.utils import get_uuid
from api.db.db_models import DB, DataBaseModel

# Logging configuration
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

# Clustering configuration
SIMILARITY_THRESHOLD = 0.3  # DBSCAN eps: max cosine distance for two questions to be neighbors
MIN_SAMPLES = 1  # DBSCAN min_samples: with 1 every point is a core point, so no noise label (-1) is produced
MODEL_PATH = '/ragflow/api/db/services/model'  # local SentenceTransformer model directory

# Database model definitions
class Track(DataBaseModel):
    """One logged user question (and its answer), stored in 'user_questions'."""
    id = CharField(primary_key=True)
    anonymous_user_id = CharField(null=True, index=True)  # set for anonymous visitors
    conversation_id=CharField(null=True, index=True)  # owning conversation, if any
    login_user_email = CharField(null=True, index=True)  # set for logged-in users
    question = TextField(null=False)
    answer = TextField(null=True)
    timestamp = DateTimeField(null=False, index=True)
    # create_time/update_time: epoch timestamps, compared in milliseconds by
    # QuestionClusterer (see fetch_questions_by_time_range) — confirm the writer
    # stores milliseconds here.
    create_time = BigIntegerField(null=True)
    create_date = CharField(null=True)
    update_time = BigIntegerField(null=True)
    update_date = CharField(null=True)


    class Meta:
        db_table = 'user_questions'

class Cluster(DataBaseModel):
    """A group of semantically similar questions, stored in 'user_question_clusters'."""
    cluster_id = CharField(primary_key=True)
    user_identifier = CharField(null=False, index=True)  # anonymous id or login email (see get_user_identifier)
    conversation_id=CharField(null=True, index=True)  # conversation of the cluster's first record
    question_pattern = TextField(null=False)  # representative question text for the cluster
    semantic_hash = CharField(null=False, index=True)  # embedding-derived hash used for dedup on save
    question_count = IntegerField(default=1)  # number of questions merged into this cluster
    first_question_id = CharField(null=True)  # Track.id of the cluster's first/representative record
    create_time = BigIntegerField(null=True)
    create_date = CharField(null=True)
    update_time = BigIntegerField(null=True)  # used as the incremental-clustering watermark
    update_date = CharField(null=True)

    class Meta:
        db_table = 'user_question_clusters'

class QuestionClusterer:
    """Clusters user questions by semantic similarity and persists the clusters.

    Questions are embedded with a local SentenceTransformer model and grouped
    per user with DBSCAN over cosine distance. Supports three modes
    (full / time-range / incremental) plus an optional timer-based scheduler.
    """

    def __init__(self):
        self.model = SentenceTransformer(MODEL_PATH)  # local embedding model
        self.running = False          # scheduler on/off flag
        self.lock = threading.Lock()  # prevents overlapping clustering runs
        self.timer = None             # active threading.Timer, if any

    @DB.connection_context()
    def fetch_questions_by_time_range(self, start_timestamp=None, end_timestamp=None):
        """Fetch questions whose update_time (epoch ms) lies in the given range.

        Defaults to the last 180 days up to now. Returns a list of row dicts
        ordered newest-first, or None if the query fails.
        """
        try:
            query = Track.select()

            # Default window: the last 180 days (millisecond timestamps).
            if start_timestamp is None:
                start_timestamp = int(time.time() * 1000) - 180 * 24 * 60 * 60 * 1000

            if end_timestamp is None:
                end_timestamp = int(time.time() * 1000)  # now

            query = query.where(
                (Track.update_time >= start_timestamp) &
                (Track.update_time <= end_timestamp)
            ).order_by(Track.update_time.desc())

            questions = list(query.dicts())
            logging.info(f"获取到 {len(questions)} 条问题（时间范围: {start_timestamp} 至 {end_timestamp}）")
            return questions
        except Exception as e:
            logging.error(f"查询错误: {e}")
            return None

    @DB.connection_context()
    def fetch_incremental_questions(self):
        """Fetch questions updated since the most recent cluster update.

        Uses MAX(Cluster.update_time) (epoch ms — see save_clusters) as the
        low watermark; falls back to the last 180 days on the very first run.
        Returns a list of row dicts, or None on error.
        """
        try:
            # Latest cluster update time, in milliseconds.
            last_cluster_time = (
                Cluster.select(fn.MAX(Cluster.update_time))
                .scalar()
            )

            current_time = int(time.time() * 1000)

            # First run ever: no clusters yet, so look back 180 days.
            if last_cluster_time is None:
                last_cluster_time = current_time - 180 * 24 * 60 * 60 * 1000

            query = Track.select().where(
                (Track.update_time > last_cluster_time) &
                (Track.update_time <= current_time)
            ).order_by(Track.update_time.desc())

            questions = list(query.dicts())
            logging.info(f"获取到 {len(questions)} 条新增问题（自上次聚类后）")
            return questions
        except Exception as e:
            logging.error(f"查询新增问题错误: {e}")
            return None

    @DB.connection_context()
    def clear_existing_clusters(self):
        """Delete every stored cluster; returns True on success."""
        try:
            deleted_count = Cluster.delete().execute()
            logging.info(f"已删除 {deleted_count} 条旧聚类记录")
            return True
        except Exception as e:
            logging.error(f"删除聚类记录错误: {e}")
            return False

    @DB.connection_context()
    def fetch_unclustered_questions(self):
        """Fetch questions not referenced as any cluster's first_question_id.

        NOTE(review): the LEFT JOIN only matches Track.id against
        Cluster.first_question_id, so clustered questions that are not the
        representative one are still returned as "unclustered" — confirm
        whether that is intended.
        """
        try:
            query = (Track
                     .select()
                     .join(Cluster, JOIN.LEFT_OUTER, on=(Track.id == Cluster.first_question_id))
                     .where(Cluster.cluster_id.is_null())
                     .order_by(Track.timestamp))

            questions = list(query.dicts())
            logging.info(f"获取到 {len(questions)} 条未聚类的问题")
            return questions
        except Exception as e:
            logging.error(f"查询错误: {e}")
            return None

    def get_user_identifier(self, question_record):
        """Return the anonymous user id if present, otherwise the login email."""
        return question_record['anonymous_user_id'] or question_record['login_user_email']

    def get_representative_question(self, questions):
        """Return the question closest to the semantic center of the group.

        Picks the text with the highest mean pairwise similarity. np.inner is
        a true cosine similarity only if the model emits normalized embeddings
        — presumably it does for this MODEL_PATH; confirm.
        """
        if len(questions) == 1:
            return questions[0]

        embeddings = self.model.encode(questions, convert_to_tensor=True)
        embeddings_np = embeddings.cpu().numpy()
        sim_matrix = np.inner(embeddings_np, embeddings_np)
        avg_sims = np.mean(sim_matrix, axis=1)

        return questions[np.argmax(avg_sims)]

    def cluster_questions(self, user_questions):
        """Group questions per user with DBSCAN and build cluster dicts.

        Returns a list of dicts with keys: user_identifier, conversation_id,
        question_pattern (representative text), question_count,
        first_question_id, question_records.
        """
        if not user_questions:
            return []

        # Cluster each user's questions independently.
        user_groups = defaultdict(list)
        for q in user_questions:
            user_groups[self.get_user_identifier(q)].append(q)

        all_clusters = []

        for user_id, questions in user_groups.items():
            question_texts = [q['question'] for q in questions]

            embeddings = self.model.encode(question_texts, convert_to_tensor=True)
            embeddings_np = embeddings.cpu().numpy()

            clustering = DBSCAN(
                eps=SIMILARITY_THRESHOLD,
                min_samples=MIN_SAMPLES,
                metric='cosine'
            ).fit(embeddings_np)

            labels = clustering.labels_
            unique_labels = set(labels)

            logging.info(f"用户 {user_id} - 总问题数: {len(questions)}")
            logging.info(f"用户 {user_id} - 聚类数: {len(unique_labels) - (1 if -1 in unique_labels else 0)}")

            clusters = defaultdict(list)
            for idx, label in enumerate(labels):
                clusters[label].append(questions[idx])

            for label, records in clusters.items():
                if label == -1:
                    # Noise points become singleton clusters. With
                    # MIN_SAMPLES == 1 DBSCAN emits no noise, so this branch
                    # is defensive.
                    for r in records:
                        all_clusters.append({
                            'user_identifier': user_id,
                            'conversation_id': r.get('conversation_id'),  # .get: safe if column missing
                            'question_pattern': r['question'],
                            'question_count': 1,
                            'first_question_id': r['id'],
                            'question_records': [r]
                        })
                else:
                    texts_in_cluster = [r['question'] for r in records]
                    representative = self.get_representative_question(texts_in_cluster)

                    all_clusters.append({
                        'user_identifier': user_id,
                        'conversation_id': records[0].get('conversation_id'),
                        'question_pattern': representative,
                        'question_count': len(records),
                        'first_question_id': records[0]['id'],
                        'question_records': records
                    })

        return all_clusters

    def generate_semantic_hash(self, text):
        """Derive a deterministic hash string from the text's embedding.

        hash() of a tuple of numbers is stable across processes (hash
        randomization only affects str/bytes), but any tiny embedding
        difference changes the hash, so only byte-identical model outputs
        will dedupe.
        """
        embedding = self.model.encode(text, convert_to_tensor=False)
        return str(hash(tuple(embedding.tolist())))[:64]

    @DB.connection_context()
    def save_clusters(self, clusters):
        """Insert or merge the cluster dicts produced by cluster_questions.

        Clusters matching an existing (user_identifier, semantic_hash) pair
        have their question_count incremented; others are inserted. Returns
        True on success, False on any error.
        """
        if not clusters:
            logging.info("没有需要保存的聚类结果")
            return True

        # Epoch milliseconds, consistent with Track.update_time. The original
        # stored seconds here, so fetch_incremental_questions' watermark was
        # always ~1000x too small and every incremental run re-processed (and
        # re-counted) all questions.
        current_time = int(time.time() * 1000)
        current_date = datetime.now().strftime("%Y-%m-%d")

        try:
            for cluster in clusters:
                semantic_hash = self.generate_semantic_hash(cluster['question_pattern'])

                # Merge with an existing cluster of the same semantics, if any.
                existing = (Cluster
                            .select()
                            .where(
                                (Cluster.user_identifier == cluster['user_identifier']) &
                                (Cluster.semantic_hash == semantic_hash)
                            )
                            .first())

                if existing:
                    # Update the existing cluster in place.
                    (Cluster
                     .update(
                         question_count=Cluster.question_count + cluster['question_count'],
                         update_time=current_time,
                         update_date=current_date
                     )
                     .where(Cluster.cluster_id == existing.cluster_id)
                     .execute())
                else:
                    # Insert a brand-new cluster row.
                    Cluster.create(
                        cluster_id=str(get_uuid()),
                        user_identifier=cluster['user_identifier'],
                        conversation_id=cluster['conversation_id'],
                        question_pattern=cluster['question_pattern'],
                        semantic_hash=semantic_hash,
                        question_count=cluster['question_count'],
                        first_question_id=cluster['first_question_id'],
                        create_time=current_time,
                        create_date=current_date,
                        update_time=current_time,
                        update_date=current_date
                    )

            logging.info(f"成功保存 {len(clusters)} 个聚类结果")
            return True
        except Exception as e:
            logging.error(f"保存聚类结果错误: {e}")
            return False

    @classmethod
    @DB.connection_context()
    def get_clusters_by_user(cls, user_identifier=None, conversation_id=None, page=1, page_size=10):
        """Return a paginated listing of clusters, optionally filtered.

        Returns {'list', 'total', 'page', 'page_size'}, ordered by
        question_count descending.
        """
        query = Cluster.select()

        if user_identifier:
            query = query.where(Cluster.user_identifier == user_identifier)

        if conversation_id:
            query = query.where(Cluster.conversation_id == conversation_id)

        query = query.order_by(Cluster.question_count.desc())

        total = query.count()
        clusters = query.paginate(page, page_size).dicts()

        return {
            'list': list(clusters),
            'total': total,
            'page': page,
            'page_size': page_size
        }

    @classmethod
    @DB.connection_context()
    def get_top_clusters_by_user(cls, user_identifier, conversation_id=None, limit=3):
        """Return the user's most frequent clusters with their Q&A examples.

        Clusters whose first_question_id no longer resolves to a Track row are
        silently skipped, so fewer than `limit` items may be returned.
        """
        query = Cluster.select().where(
            Cluster.user_identifier == user_identifier
        )

        if conversation_id:
            query = query.where(Cluster.conversation_id == conversation_id)

        clusters = query.order_by(Cluster.question_count.desc()).limit(limit).dicts()

        result = []
        for cluster in clusters:
            first_question = (Track
                            .select()
                            .where(Track.id == cluster['first_question_id'])
                            .first())

            if first_question:
                result.append({
                    'cluster_id': cluster['cluster_id'],
                    'conversation_id': cluster['conversation_id'],
                    'question_pattern': cluster['question_pattern'],
                    'question_count': cluster['question_count'],
                    'representative_question': first_question.question,
                    'representative_answer': first_question.answer
                })

        return result

    def run_clustering(self, mode='incremental', time_range=None):
        """Run one clustering pass.

        Args:
            mode: 'full' (wipe and re-cluster the default window), 'range'
                (wipe and re-cluster a given window), or 'incremental'
                (default: keep old clusters, process only new questions).
            time_range: (start_timestamp, end_timestamp) in epoch ms, used
                only when mode == 'range'.
        """
        # Non-blocking acquire. The original did `if self.lock.locked(): return`
        # followed by `with self.lock:` — a check-then-act race where two
        # callers could both pass the check and end up serialized instead of
        # the second one skipping.
        if not self.lock.acquire(blocking=False):
            logging.warning("聚类任务已在运行中，跳过本次执行")
            return

        try:
            logging.info(f"开始聚类任务，模式: {mode}")

            # Pick the question set according to the mode.
            if mode == 'full':
                # Full re-cluster: drop old clusters, process the default window.
                self.clear_existing_clusters()
                questions = self.fetch_questions_by_time_range()
            elif mode == 'range' and time_range:
                # Windowed re-cluster: drop old clusters, process the range.
                self.clear_existing_clusters()
                start_ts, end_ts = time_range
                questions = self.fetch_questions_by_time_range(start_ts, end_ts)
            else:
                # Incremental (default): keep old clusters, process new data.
                questions = self.fetch_incremental_questions()

            if not questions:
                logging.info("没有需要聚类的问题")
                return

            clusters = self.cluster_questions(questions)
            if clusters:
                self.save_clusters(clusters)
            logging.info("聚类任务完成")
        except Exception as e:
            logging.error(f"聚类任务出错: {e}")
        finally:
            self.lock.release()

    def start_scheduler(self, interval_minutes=60):
        """Start a repeating timer that runs clustering every interval_minutes.

        The first run fires after one full interval, not immediately. Timers
        are daemonized so they don't block interpreter shutdown.
        """
        if self.running:
            logging.warning("调度器已在运行中")
            return

        self.running = True
        interval_seconds = interval_minutes * 60
        logging.info(f"启动定时聚类任务，间隔: {interval_minutes}分钟")

        def schedule_run():
            if not self.running:
                return

            try:
                self.run_clustering()
            finally:
                # Re-arm the timer after each run completes.
                self.timer = threading.Timer(interval_seconds, schedule_run)
                self.timer.daemon = True
                self.timer.start()

        # Arm the first timer.
        self.timer = threading.Timer(interval_seconds, schedule_run)
        self.timer.daemon = True
        self.timer.start()

    def stop_scheduler(self):
        """Stop the repeating timer (a run already in progress finishes)."""
        self.running = False
        if self.timer:
            self.timer.cancel()
        logging.info("已停止定时聚类任务")

    def trigger_manually(self):
        """Kick off a single clustering run on a background daemon thread."""
        logging.info("手动触发聚类任务")
        threading.Thread(target=self.run_clustering, daemon=True).start()


# Usage example
if __name__ == "__main__":
    question_clusterer = QuestionClusterer()

    # Option 1: run a single clustering pass right now.
    question_clusterer.run_clustering()

    # Option 2: start the periodic scheduler (once per hour).
    # question_clusterer.start_scheduler(interval_minutes=60)

    # Option 3: fire a one-off run on a background thread.
    # question_clusterer.trigger_manually()

    # Keep the main thread alive when the scheduler is active:
    # while True:
    #     time.sleep(1)