# coding: utf-8
import _thread as thread
import atexit
import base64
import datetime
import hashlib
import hmac
import json
import random
import re
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlencode, urlparse
from wsgiref.handlers import format_date_time

import jieba
import pandas as pd
import websocket
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, jsonify, request
from neo4j import GraphDatabase
from surprise import Dataset, Reader, KNNBasic

class RecommendationSystem:
    """User-based collaborative-filtering recommender backed by Neo4j ratings.

    Expects a graph of (USER)-[:rating]->(CASE|ARTICLE|VIDEO) relations where
    a resource's ``title`` is its unique identifier.
    """

    # Labels that may be safely interpolated into Cypher queries below.
    _ALLOWED_TYPES = {"CASE", "ARTICLE", "VIDEO"}

    def __init__(self, uri, user, password):
        self.driver = GraphDatabase.driver(uri, auth=(user, password))
        self.model = None  # trained surprise.KNNBasic, set by train_model()
        self.data = None   # surprise.Dataset built from the ratings frame

    def close(self):
        """Close the underlying Neo4j driver."""
        self.driver.close()

    def load_data_from_neo4j(self):
        """Load all ratings into a DataFrame (user_id, title, score, type)."""
        query = """
        MATCH (user:USER)-[r:rating]->(resource)
        WHERE resource:CASE OR resource:ARTICLE OR resource:VIDEO
        RETURN user.id AS user_id, resource.title AS title, 
               toFloat(r.score) AS score, 
               LABELS(resource) AS resource_type
        """
        with self.driver.session() as session:
            result = session.run(query)
            data = [{
                "user_id": record["user_id"],
                "title": record["title"],
                "score": record["score"],
                "type": record["resource_type"][0]  # first label = resource type
            } for record in result]
        return pd.DataFrame(data)

    def train_model(self):
        """Train a user-based cosine-similarity KNN model on current ratings.

        Raises:
            ValueError: when no rating data exists in the graph.
        """
        ratings = self.load_data_from_neo4j()
        if ratings.empty:
            raise ValueError("No rating data available for training.")

        # Ratings are assumed to lie on a 1-5 scale.
        reader = Reader(rating_scale=(1, 5))
        self.data = Dataset.load_from_df(ratings[['user_id', 'title', 'score']], reader)
        trainset = self.data.build_full_trainset()

        sim_options = {
            'name': 'cosine',   # similarity metric: 'cosine' or 'pearson'
            'user_based': True  # True = user-based CF, False = item-based
        }
        self.model = KNNBasic(sim_options=sim_options)
        self.model.fit(trainset)

        # BUG FIX: this previously read the module-global `recommendation_system`
        # instead of `self`, coupling the method to module state.
        print("Users in training set:", trainset.n_users)
        print("Items in training set:", trainset.n_items)

    def recommend_resources_by_type(self, user_id, resource_type, min_count=6):
        """Return up to ``min_count`` top-predicted resources of one type.

        Args:
            user_id: raw user id as stored in the graph.
            resource_type: one of CASE / ARTICLE / VIDEO.
            min_count: maximum number of recommendations returned.

        Returns:
            A list of resource dicts including ``predicted_score``; empty when
            the user was not part of the training data.

        Raises:
            ValueError: if the model is untrained or the type is unknown.
        """
        if self.model is None or self.data is None:
            raise ValueError("Model is not trained yet.")
        # SECURITY: the label is interpolated into Cypher, so whitelist it.
        if resource_type not in self._ALLOWED_TYPES:
            raise ValueError(f"Invalid resource type: {resource_type!r}")

        trainset = self.model.trainset

        # Use the public API instead of the private _raw2inner_id_users map:
        # to_inner_uid raises ValueError for users unseen during training.
        try:
            inner_user_id = trainset.to_inner_uid(user_id)
        except ValueError:
            return []
        print("inner_user_id", inner_user_id)

        # Titles the user has already rated are never re-recommended.
        user_rated_titles = {trainset.to_raw_iid(item) for item, _ in trainset.ur[inner_user_id]}

        # Predict scores for every unrated resource of the requested type.
        predictions = []
        with self.driver.session() as session:
            query = f"""
            MATCH (resource:{resource_type})
            RETURN resource.title AS title
            """
            result = session.run(query)
            resource_titles = [record["title"] for record in result]

            for title in resource_titles:
                if title not in user_rated_titles:
                    predictions.append((title, self.model.predict(user_id, title).est))

        # Highest predicted score first, capped at min_count entries.
        predictions = sorted(predictions, key=lambda x: x[1], reverse=True)[:min_count]
        print("predictions", predictions)

        # Hydrate the recommended titles with their full node properties.
        recommended_resources = []
        with self.driver.session() as session:
            for title, score in predictions:
                query = """
                MATCH (resource {title: $title})
                RETURN resource.title AS title, resource.content AS content, 
                       resource.image AS image, resource.time AS time, 
                       resource.introduction AS introduction, resource.url AS url,
                       COALESCE(resource.video, '') AS video,
                       COALESCE(resource.id, '') AS id,
                       LABELS(resource) AS type
                """
                resource = session.run(query, title=title).single()
                if resource:
                    recommended_resources.append({
                        "title": resource["title"],
                        "content": resource["content"],
                        "image": resource["image"],
                        "time": resource["time"],
                        "url": resource["url"],
                        "introduction": resource["introduction"],
                        "video": resource.get("video", "No video available"),
                        "type": resource["type"][0],
                        "predicted_score": score,
                        "id": resource["id"],
                    })

        return recommended_resources

    def recommend_resources(self, user_id):
        """Return up to six recommendations for each resource category."""
        categories = ["CASE", "ARTICLE", "VIDEO"]
        final_recommendations = []
        for category in categories:
            final_recommendations.extend(
                self.recommend_resources_by_type(user_id, category, min_count=6))
        return final_recommendations

class LearningPlanRecommender:
    """Course and learning-plan queries over the Neo4j course graph."""

    def __init__(self, uri, user, password):
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        """Close the underlying Neo4j driver."""
        self.driver.close()

    def recommend_courses(self, question):
        """Return a course graph matching keywords segmented from ``question``."""
        keywords = list(jieba.cut(question))
        with self.driver.session() as session:
            # FIX: these are read-only queries; they previously ran as write
            # transactions, forcing writer routing in clustered deployments.
            return session.read_transaction(self._find_courses_and_relations, keywords)

    def find_related_courses(self, course_id):
        """Return the prerequisite graph for one course id."""
        with self.driver.session() as session:
            return session.read_transaction(self._find_related_courses, course_id)

    def get_related_directories(self, course_id):
        """Return the table of contents (with branch children) for one course."""
        with self.driver.session() as session:
            return session.read_transaction(self._find_contents_and_branches, course_id)

    def get_random_courses_by_theme(self, course_id):
        """Return up to three random courses sharing the given course's theme."""
        with self.driver.session() as session:
            return session.read_transaction(self._find_random_courses, course_id)

    @staticmethod
    def _node_payload(course, symbol_size, category):
        """Build one echarts-style graph node dict from a COURSE node."""
        return {
            "id": course["id"],
            "name": course["title"],
            "description": course["description"],
            "image": course["image"],
            "symbolSize": symbol_size,
            "category": category,
        }

    @staticmethod
    def _find_random_courses(tx, course_id):
        """Fetch all same-theme siblings of a course and sample at most three."""
        query = """
        MATCH (course:COURSE {id: $course_id})-[:belong]->(theme:THEME)
        WITH theme
        MATCH (other_course:COURSE)-[:belong]->(theme)
        WHERE other_course.id <> $course_id
        RETURN other_course.id AS id, 
               other_course.title AS title, 
               other_course.description AS description, 
               other_course.image AS image,
               theme.theme AS theme
        """
        records = tx.run(query, course_id=course_id)
        all_courses = [record.data() for record in records]
        # Randomly pick up to 3 of the matches.
        return random.sample(all_courses, min(3, len(all_courses)))

    @staticmethod
    def _find_courses_and_relations(tx, keywords):
        """Match courses whose title contains any keyword plus prerequisites.

        Returns an echarts-compatible {"nodes": [...], "links": [...]} graph.
        """
        query = """
        MATCH (course:COURSE)
        WHERE ANY(keyword IN $keywords WHERE course.title CONTAINS keyword)
        OPTIONAL MATCH (course)<-[:pre]-(preCourse:COURSE)
        RETURN course, collect(preCourse) AS preCourses
        """
        records = tx.run(query, keywords=keywords)

        nodes = []
        links = []
        seen_ids = set()  # dedupe by the node's `id` property

        for record in records:
            course = record["course"]
            if course["id"] not in seen_ids:
                nodes.append(LearningPlanRecommender._node_payload(course, 70, 0))
                seen_ids.add(course["id"])

            for pre_course in record["preCourses"]:
                if not pre_course:
                    continue
                if pre_course["id"] not in seen_ids:
                    nodes.append(LearningPlanRecommender._node_payload(pre_course, 50, 1))
                    seen_ids.add(pre_course["id"])
                links.append({
                    "source": pre_course["id"],
                    "target": course["id"],
                    "name": "进阶知识"
                })

        return {"nodes": nodes, "links": links}

    @staticmethod
    def _find_related_courses(tx, course_id):
        """Return only the prerequisite nodes/links of one course.

        Note: the root course itself is intentionally not included as a node.
        """
        query = """
        MATCH (course:COURSE {id: $course_id})
        OPTIONAL MATCH (course)<-[:pre]-(preCourse:COURSE)
        RETURN course, collect(preCourse) AS preCourses
        """
        records = tx.run(query, course_id=course_id)

        nodes = []
        links = []
        seen_ids = set()

        for record in records:
            course = record["course"]
            for pre_course in record["preCourses"]:
                if not pre_course:
                    continue
                if pre_course["id"] not in seen_ids:
                    nodes.append(LearningPlanRecommender._node_payload(pre_course, 50, 1))
                    seen_ids.add(pre_course["id"])
                links.append({
                    "source": pre_course["id"],
                    "target": course["id"],
                    "name": "进阶知识"
                })

        return {"nodes": nodes, "links": links}

    @staticmethod
    def _find_contents_and_branches(tx, course_id):
        """Return each CONTENTS entry of a course with its BRANCH children."""
        query = """
           MATCH (content:CONTENTS)-[:component]->(course:COURSE {id: $course_id})
           OPTIONAL MATCH (content)<-[:branch]-(child:BRANCH)
           RETURN content.id AS content_id, 
                  content.title AS content_title, 
                  collect(child { .id, .title, .video }) AS children
           """
        records = tx.run(query, course_id=course_id)
        return [{
            "id": record["content_id"],
            "title": record["content_title"],
            "children": record["children"]
        } for record in records]

class RecommendationByCorrelation:
    """Content-to-content recommendations via shared TAGS nodes in Neo4j."""

    # Labels that may be safely interpolated into the Cypher query below.
    _ALLOWED_TYPES = {"CASE", "ARTICLE", "VIDEO"}

    def __init__(self, uri, user, password):
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        """Close the underlying Neo4j driver."""
        self.driver.close()

    def get_related_resources_by_theme(self, resource_title, resource_type):
        """Return same-type resources sharing a TAGS node with the given one.

        Args:
            resource_title: title of the source resource.
            resource_type: one of CASE / ARTICLE / VIDEO.

        Raises:
            ValueError: if ``resource_type`` is not a known label.
        """
        # SECURITY FIX: resource_type is spliced into the query as a label,
        # so it must come from a fixed whitelist to prevent Cypher injection.
        if resource_type not in self._ALLOWED_TYPES:
            raise ValueError(f"Invalid resource type: {resource_type!r}")

        with self.driver.session() as session:
            query = f"""
            MATCH (resource:{resource_type} {{title: $resource_title}})-[:relation]->(theme:TAGS)<-[:relation]-(related_resource:{resource_type})
            WHERE related_resource:CASE OR related_resource:ARTICLE OR related_resource:VIDEO
            RETURN related_resource.title AS title, related_resource.content AS content, 
                   related_resource.image AS image, related_resource.time AS time, 
                   related_resource.introduction AS introduction, related_resource.url AS url,
                   COALESCE(related_resource.video, '') AS video,
                   LABELS(related_resource) AS type
            """
            result = session.run(query, resource_title=resource_title)
            return [{
                "title": record["title"],
                "content": record["content"],
                "image": record["image"],
                "time": record["time"],
                "url": record["url"],
                "introduction": record["introduction"],
                "video": record.get("video", "No video available"),
                "type": record["type"],
            } for record in result]

    def recommend_case_resources(self, resource_title):
        """Up to 3 cases related to the given case."""
        return self.get_related_resources_by_theme(resource_title, 'CASE')[:3]

    def recommend_article_resources(self, resource_title):
        """Up to 3 articles related to the given article."""
        return self.get_related_resources_by_theme(resource_title, 'ARTICLE')[:3]

    def recommend_video_resources(self, resource_title):
        """Up to 3 videos related to the given video."""
        return self.get_related_resources_by_theme(resource_title, 'VIDEO')[:3]
# Study-plan recommender built on tag overlap in the knowledge graph.
class RecommendationSystem2:
    """Recommends paired study plans (case + article + video) for a title."""

    def __init__(self, uri, user, password):
        self.uri = uri
        self.user = user
        self.password = password
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def get_recommendations(self, title):
        """Tokenize the title, fetch matching resources, and build study plans."""
        tokens = self.get_keywords_from_title(title)
        resources_by_type = self.get_resources_grouped_by_type(tokens)
        return self.generate_study_plans(resources_by_type)

    def get_keywords_from_title(self, title):
        """Segment the title with jieba, dropping single-character tokens."""
        return [token for token in jieba.cut(title) if len(token) > 1]

    def get_resources_grouped_by_type(self, tokens):
        """Query the graph for tag-related resources, bucketed by type."""
        query = """
        MATCH (t:TAGS)<-[:relation]-(r)
        WHERE t.tag IN $tokens
        WITH r, count(t) AS tag_count
        ORDER BY tag_count DESC
        RETURN r, tag_count
        """
        buckets = {"CASE": [], "ARTICLE": [], "VIDEO": []}
        with self.driver.session() as session:
            for record in session.run(query, tokens=tokens):
                node = record["r"]
                hits = record["tag_count"]
                # Classification heuristic: a `video` property marks videos,
                # an `origin` property marks cases, everything else is an article.
                if node["video"]:
                    buckets["VIDEO"].append(self.get_video_info(node, hits))
                elif node["origin"]:
                    buckets["CASE"].append(self.get_resource_info(node, hits))
                else:
                    buckets["ARTICLE"].append(self.get_resource_info(node, hits))
            return buckets

    def get_video_info(self, video_node, tag_count):
        """Extract the fields exposed for a video node."""
        info = {
            "title": video_node["title"],
            "content": video_node.get("content"),
            "url": video_node.get("url"),
            "length": video_node.get("length"),
            "tags_connected": tag_count,
            "introduction": video_node.get("introduction"),
            "video": video_node.get("video"),
        }
        print(info)
        return info

    def get_resource_info(self, resource_node, tag_count):
        """Extract the fields exposed for a case/article node."""
        return {
            "title": resource_node["title"],
            "content": resource_node.get("content"),
            "type": resource_node["type"],
            "url": resource_node.get("url"),
            "tags_connected": tag_count,
            "introduction": resource_node.get("introduction"),
        }

    def generate_study_plans(self, resources_by_type):
        """Pick the top two of each resource type and pair them into two plans.

        Missing slots are padded with empty placeholder objects so callers
        always receive exactly two complete plans.
        """
        def padded(items, filler):
            picked = list(items[:2])
            while len(picked) < 2:
                picked.append(dict(filler))
            return picked

        cases = padded(resources_by_type["CASE"],
                       {"title": None, "content": None, "type": "CASE", "url": None,
                        "tags_connected": 0, "introduction": None})
        articles = padded(resources_by_type["ARTICLE"],
                          {"title": None, "content": None, "type": "ARTICLE", "url": None,
                           "tags_connected": 0, "introduction": None})
        videos = padded(resources_by_type["VIDEO"],
                        {"title": None, "content": None, "url": None, "length": None,
                         "tags_connected": 0, "introduction": None, "video": None})

        return [
            {"case": cases[0], "article": articles[0], "video": videos[0]},
            {"case": cases[1], "article": articles[1], "video": videos[1]},
        ]

# Global conversation state for the Spark LLM chat endpoint.
# NOTE(review): module-level mutable state shared by all requests — not safe
# under concurrent Flask requests; confirm the deployment is single-threaded.
text = []  # running chat history: [{"role": ..., "content": ...}, ...]
answer = ""  # accumulates the streamed completion for the current question

# Spark (Xunfei) LLM websocket authentication helper.
class Ws_Param(object):
    """Builds the HMAC-SHA256-signed websocket URL required by the Spark API."""

    def __init__(self, APPID, APIKey, APISecret, gpt_url):
        self.APPID = APPID
        self.APIKey = APIKey
        self.APISecret = APISecret
        parsed = urlparse(gpt_url)
        self.host = parsed.netloc
        self.path = parsed.path
        self.gpt_url = gpt_url

    def create_url(self):
        """Return the websocket URL carrying RFC-1123 date and auth params."""
        date = format_date_time(mktime(datetime.now().timetuple()))
        # The signature covers host, date and the request line, per the API spec.
        raw = f"host: {self.host}\ndate: {date}\nGET {self.path} HTTP/1.1"
        digest = hmac.new(
            self.APISecret.encode('utf-8'),
            raw.encode('utf-8'),
            digestmod=hashlib.sha256
        ).digest()
        signature = base64.b64encode(digest).decode('utf-8')
        auth_raw = (
            f'api_key="{self.APIKey}", algorithm="hmac-sha256", '
            f'headers="host date request-line", signature="{signature}"'
        )
        auth = base64.b64encode(auth_raw.encode('utf-8')).decode('utf-8')
        params = {
            "authorization": auth,
            "date": date,
            "host": self.host
        }
        return self.gpt_url + '?' + urlencode(params)

def on_error(ws, error):
    """Websocket error callback: log the error and carry on."""
    print("### error:", error)


def on_close(ws, *args):
    """Websocket close callback."""
    print("### closed ###")


def on_open(ws):
    """On connect, send the chat request from a background thread."""
    thread.start_new_thread(run, (ws,))


def run(ws, *args):
    """Serialize the chat request and send it over the open websocket."""
    payload = gen_params(appid=ws.appid, query=ws.query, domain=ws.domain)
    ws.send(json.dumps(payload))

def on_message(ws, message):
    """Accumulate streamed completion chunks into the global ``answer``.

    Closes the socket on any API error, or once the final chunk
    (status == 2) has arrived.
    """
    global answer
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        print(f'请求错误: {code}, {data}')
        ws.close()
        return
    choices = data["payload"]["choices"]
    answer += choices["text"][0]["content"]
    if choices["status"] == 2:
        ws.close()

def gen_params(appid, query, domain):
    """Build the Spark chat request payload for the given app/domain/query."""
    header = {"app_id": appid, "uid": "1234"}
    chat_config = {
        "domain": domain,
        "temperature": 0.5,
        "max_tokens": 4096,
        "auditing": "default",
    }
    return {
        "header": header,
        "parameter": {"chat": chat_config},
        "payload": {"message": {"text": query}},
    }

def getText(role, content):
    """Append one message to the global chat history and return the history."""
    text.append({"role": role, "content": content})
    return text


def getlength(text):
    """Total character count over all message contents in the history."""
    total = 0
    for message in text:
        total += len(message["content"])
    return total


def checklen(text):
    """Drop oldest messages in place until the history fits 8000 characters."""
    while getlength(text) > 8000:
        text.pop(0)
    return text

def main(appid, api_secret, api_key, gpt_url, domain, query):
    """Open the Spark websocket and block until the conversation finishes.

    NOTE(review): certificate verification is disabled (ssl.CERT_NONE) —
    confirm this is intentional before production use.
    """
    ws_param = Ws_Param(appid, api_key, api_secret, gpt_url)
    websocket.enableTrace(False)
    ws = websocket.WebSocketApp(
        ws_param.create_url(),
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
        on_open=on_open,
    )
    ws.appid = appid
    ws.query = query
    ws.domain = domain
    ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})


def clean_response(raw_response):
    """Normalize article text before returning it as a chat answer.

    Strips literal escape sequences (a backslash followed by r/n/t),
    collapses whitespace runs to single spaces, and trims the ends.
    """
    # Remove literal "\r", "\n", "\t" two-character escape sequences.
    # (Actual control characters are handled by the \s+ collapse below.)
    clean_text = re.sub(r"\\[rnt]", "", raw_response)

    # BUG FIX: the previous encode('utf-8').decode('utf-8') round-trip was a
    # no-op on str and did NOT decode unicode escapes as its comment claimed;
    # removing it does not change behavior.

    # Collapse any whitespace run (spaces, tabs, newlines) to one space.
    clean_text = re.sub(r"\s+", " ", clean_text)

    return clean_text.strip()

# Create the Flask application.
app = Flask(__name__)

# Recommendation backends, all sharing one Neo4j instance.
# NOTE(review): credentials are hard-coded — move to env/config before deploy.
neo4j_uri = "bolt://localhost:7687"  # Neo4j bolt endpoint
username = "neo4j"
password = "12345678"  # Neo4j password
recommendation_system = RecommendationSystem(neo4j_uri, username, password)
recommendation_by_correlation = RecommendationByCorrelation(neo4j_uri, username, password)
recommendation_system2 = RecommendationSystem2(neo4j_uri, username, password)
recommendation_learning = LearningPlanRecommender(neo4j_uri, username, password)

# Background scheduler for periodic model retraining.
scheduler = BackgroundScheduler()

# Periodic job body: retrain the collaborative-filtering model.
def scheduled_train():
    """Retrain the recommendation model, logging failures instead of raising."""
    try:
        print("Triggering model training...")
        recommendation_system.train_model()
        print("Model training completed.")
    except Exception as e:
        print(f"Error during scheduled training: {e}")

# Retrain once per hour in the background.
scheduler.add_job(scheduled_train, 'interval', hours=1)

# Start the scheduler alongside the Flask app.
scheduler.start()

@app.route("/train", methods=["POST"])
def train_model():
    """
    手动触发训练模型。
    """
    try:
        recommendation_system.train_model()
        return jsonify({"message": "Model trained successfully."}), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route("/recommend/<user_id>", methods=["GET"])
def get_recommendations(user_id):
    """
    获取推荐资源。
    """
    try:
        recommendations = recommendation_system.recommend_resources(user_id)
        if recommendations:
            return jsonify(recommendations), 200
        else:
            return jsonify({"message": "No recommendations available."}), 404
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route("/recommend_related", methods=["GET"])
def recommend_related():
    """
    根据资源类型推荐相关资源
    前端传入 title 和 type（CASE/ARTICLE/VIDEO）
    """
    resource_title = request.args.get('title')  # 获取传递的资源标题
    resource_type = request.args.get('type')   # 获取传递的资源类型（CASE/ARTICLE/VIDEO）

    if not resource_title or not resource_type:
        return jsonify({"error": "Missing resource title or type"}), 400

    try:
        if resource_type == "CASE":
            recommendations = recommendation_by_correlation.recommend_case_resources(resource_title)
        elif resource_type == "ARTICLE":
            recommendations = recommendation_by_correlation.recommend_article_resources(resource_title)
        elif resource_type == "VIDEO":
            recommendations = recommendation_by_correlation.recommend_video_resources(resource_title)
        else:
            return jsonify({"error": "Invalid resource type"}), 400

        return jsonify(recommendations), 200
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route('/recommend', methods=['GET'])
def recommend():
    """Return study-plan recommendations for the ``title`` query parameter."""
    title = request.args.get('title')
    if not title:
        return jsonify({'error': 'Title is required'}), 400
    try:
        return jsonify(recommendation_system2.get_recommendations(title)), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/learning', methods=["GET"])
def learning():
    """Recommend a course graph matching the ``schedule`` query parameter."""
    schedule = request.args.get('schedule')
    if not schedule:
        return jsonify({'error': 'schedule is required'}), 400
    try:
        return jsonify(recommendation_learning.recommend_courses(schedule)), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/learning/<id>', methods=["GET"])
def learnings(id):
    """Return the prerequisite graph for the given course id."""
    print(id)
    try:
        return jsonify(recommendation_learning.find_related_courses(id)), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/directory/<id>', methods=["GET"])
def directory(id):
    """Return the table of contents for the given course id."""
    print(id)
    try:
        return jsonify(recommendation_learning.get_related_directories(id)), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/random/<id>', methods=["GET"])
def randoms(id):
    """Return up to three random courses sharing the given course's theme."""
    print(id)
    try:
        return jsonify(recommendation_learning.get_random_courses_by_theme(id)), 200
    except Exception as e:
        return jsonify({'error': str(e)}), 500

# Shared driver for the ad-hoc query endpoints below.
driver = GraphDatabase.driver(neo4j_uri, auth=(username, password))


@app.route('/chat', methods=['POST'])
def chat():
    """Answer a question from the ARTICLE graph, falling back to Spark AI.

    First looks for an ARTICLE whose title shares at least 80% of the
    question's tokens; otherwise forwards the question to the Spark LLM.
    NOTE(review): relies on module-global ``text``/``answer`` state — not
    safe under concurrent requests.
    """
    data = request.get_json()
    question = data.get("question", "")
    if not question:
        return jsonify({"error": "Question is required"}), 400

    # Tokenize the question once for the overlap similarity below.
    question_words = set(jieba.lcut(question))

    # Fetch every ARTICLE's title and content from the graph.
    with driver.session() as session:
        query = "MATCH (a:ARTICLE) RETURN a.title AS title, a.content AS content"
        results = session.run(query)
        articles = [{"title": record["title"], "content": record["content"]} for record in results]

    # Pick the article whose title shares the most tokens with the question.
    best_match = None
    max_similarity = 0
    for article in articles:
        title_words = set(jieba.lcut(article["title"]))
        common_words = question_words & title_words
        # Guard against an empty token set (e.g. degenerate question input).
        similarity = len(common_words) / len(question_words) if question_words else 0
        if similarity > max_similarity:
            max_similarity = similarity
            best_match = article

    threshold = 0.8  # minimum token-overlap ratio to answer from the graph
    if max_similarity >= threshold:
        return jsonify({
            "source": "Neo4j",
            "answer": clean_response(best_match["content"]),
            "similarity": max_similarity
        })

    # No close match: ask the Spark LLM instead.
    # BUG FIX: the request body was previously re-read here into the same
    # variables; `question` is already in scope.
    global text, answer
    text.clear()
    query = checklen(getText("user", question))
    answer = ""

    # NOTE(review): API credentials are hard-coded — move to configuration.
    main(
        appid="a43799fb",
        api_secret="Zjc3YjNmMmIzMGI4YTg3MGNjZjkzODQx",
        api_key="093c1a3e311a7e3f94bfea0fd2bf11fd",
        gpt_url="wss://spark-api.xf-yun.com/v4.0/chat",
        domain="4.0Ultra",
        query=query
    )

    getText("assistant", answer)

    return jsonify({"source": "AI", "answer": answer})

# Paginated node listing with optional fuzzy title search.
@app.route('/nodes', methods=['GET'])
def get_nodes():
    """List graph nodes with pagination and optional title search.

    Query params: ``page`` (1-based), ``page_size``, ``search``.
    """
    try:
        # Clamp to sane values so a bad page never produces a negative SKIP.
        page = max(1, int(request.args.get('page', 1)))
        page_size = max(1, int(request.args.get('page_size', 10)))
        keyword = request.args.get('search', '').strip()

        skip = (page - 1) * page_size

        # SECURITY FIX: the keyword (and skip/limit) were previously
        # interpolated directly into the Cypher string, allowing query
        # injection; they are now bound as parameters.
        query_condition = "WHERE n.title CONTAINS $keyword" if keyword else ""
        params = {"keyword": keyword, "skip": skip, "limit": page_size}

        with driver.session() as session:
            # Total matching node count for the pagination metadata.
            total_query = f"""
            MATCH (n)
            {query_condition}
            RETURN COUNT(n) AS total_count
            """
            total_count = session.run(total_query, params).single()["total_count"]

            # The requested page of nodes.
            query = f"""
            MATCH (n)
            {query_condition}
            RETURN id(n) AS id, 
                   n.title AS title, 
                   labels(n)[0] AS type, 
                   n.time AS time,
                   n.content AS content
            SKIP $skip LIMIT $limit
            """
            nodes = [{
                "id": record["id"],
                "title": record["title"],
                "type": record["type"],
                "time": record.get("time", None),  # property may be absent
                "content": record["content"]
            } for record in session.run(query, params)]

        return jsonify({
            "page": page,
            "page_size": page_size,
            "total": total_count,
            "nodes": nodes
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500

# Release global resources once, at process exit.
# BUG FIX: this was previously registered with @app.teardown_appcontext,
# which fires after EVERY request — the Neo4j driver and the scheduler were
# shut down as soon as the first request completed, breaking all later ones.
def shutdown_session(exception=None):
    """Close the recommender's Neo4j driver and stop the scheduler."""
    recommendation_system.close()
    # Only shut the scheduler down if it is still running.
    if scheduler.running:
        scheduler.shutdown()

atexit.register(shutdown_session)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
