import requests
import dashscope
import time
import schedule
from datetime import datetime
import mysql.connector
import json
import numpy as np


class knowlege_base:
    """Knowledge base for college (gaokao) data.

    Crawls school listings from the eol.cn API, embeds each record with
    DashScope, stores the vectors in a MySQL table (``vector_data``), and
    answers user questions via retrieval-augmented generation.

    NOTE(review): the class name keeps its original (misspelled, lowercase)
    form so existing callers keep working.
    """

    # Single DashScope credential shared by the embedding and generation
    # calls. BUG FIX: the original passed this key as the *model* argument
    # of TextEmbedding.call and duplicated it inline for Generation.call.
    # SECURITY: hardcoded secrets (this key and the DB password below)
    # should be moved to environment variables or a secrets manager.
    _DASHSCOPE_API_KEY = "sk-0af8d524292a4417bafd18ab726b2b4d"

    def __init__(self):
        """Set up HTTP crawl configuration and open the MySQL connection.

        Raises:
            ConnectionError: if the MySQL connection cannot be established,
                so callers never hold a half-initialized instance.
        """
        # API configuration for the eol.cn school-search endpoint.
        self.api_url = "https://api.eol.cn/web/api/"
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Referer': 'https://www.gaokao.cn/school/search',
            'Origin': 'https://www.gaokao.cn/school/search'
        }
        self.params = {
            'keyword': '',
            'uri': 'apidata/api/gk/school/lists',
            'size': 20,
            'page': 1
        }

        # Data sources: data_type -> base URL.
        self.data_sources = {
            'colleges': 'https://api.eol.cn/web/api/'
        }

        # Connect to the database; fail fast on error.
        try:
            self.db_connection = mysql.connector.connect(
                host="localhost",
                port=3306,
                user="root",
                password="940618",
                database="haolaoshi",
                charset="utf8"
            )
            self.cursor = self.db_connection.cursor()
            print(f"{datetime.now()}: Successfully connected to MySQL database")
        except Exception as e:
            raise ConnectionError(f"{datetime.now()}: Failed to connect to MySQL database: {e}")

    def find_data(self, url, data_type):
        """Crawl every page of *url* and return the accumulated record list.

        Args:
            url: base API endpoint to page through (uses ``self.params``).
            data_type: label used only in log messages.

        Returns:
            list: all records fetched; empty list on total failure.
        """
        print(f"开始爬取{data_type}数据...")
        all_universities = []

        # First request only determines how many pages exist.
        try:
            response = requests.get(url, params=self.params, headers=self.headers, timeout=10)
            response.raise_for_status()
            data = response.json()
            total_pages = data['data']['num_pages']
        except Exception as e:
            print(f"{datetime.now()}: 获取总页数失败: {e}")
            return all_universities

        if total_pages == 0:
            print(f"{datetime.now()}: 无法获取页数")
            return all_universities

        # Crawl each page; a failed page is logged and skipped, never fatal.
        for page in range(1, total_pages + 1):
            print(f"{datetime.now()}: 正在爬取第 {page}/{total_pages} 页...")
            universities = []

            try:
                self.params['page'] = page
                response = requests.get(url, params=self.params, headers=self.headers, timeout=10)
                response.raise_for_status()
                data = response.json()

                # '0000' is this API's success code.
                if data['code'] == '0000':
                    universities = data['data']['item']
                else:
                    print(f"{datetime.now()}: API返回错误: {data['message']}")

            except Exception as e:
                print(f"{datetime.now()}: 获取第{page}页数据失败: {e}")

            if universities:
                all_universities.extend(universities)
            time.sleep(1)  # polite crawl delay between pages

        print(f"{datetime.now()}: 成功获取 {len(all_universities)} 条{data_type}信息")
        return all_universities

    def vector_result(self, text):
        """Return the embedding vector for *text*, or None on any failure.

        BUG FIX: the original passed the API key string as ``model=``;
        the key goes in ``dashscope.api_key`` and ``model`` must name an
        embedding model.
        """
        try:
            dashscope.api_key = self._DASHSCOPE_API_KEY
            response = dashscope.TextEmbedding.call(
                # presumably the intended embedding model — TODO confirm
                model="text-embedding-v1",
                input=text
            )
            # 200 means a successful DashScope call.
            if response.status_code == 200:
                return response.output['embeddings'][0]['embedding']
            print(f"{datetime.now()}: Embedding API error: {response.message}")
            return None
        except Exception as e:
            print(f"{datetime.now()}: Error getting embedding: {str(e)}")
            return None

    def similarity(self, vec1, vec2):
        """Cosine similarity of two vectors.

        Returns 0.0 when either vector has zero norm (the original divided
        by zero there and produced NaN).
        """
        vec1 = np.asarray(vec1, dtype=float)
        vec2 = np.asarray(vec2, dtype=float)
        denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
        if denom == 0:
            return 0.0
        return float(np.dot(vec1, vec2) / denom)

    def vector_sjk(self, data, data_type):
        """Embed each record in *data* and upsert it into ``vector_data``.

        Args:
            data: list of record dicts from :meth:`find_data`.
            data_type: currently only 'colleges' is understood; other
                values are logged and skipped.

        Returns:
            bool: True if the batch committed, False on empty input or error.
        """
        if not data:
            return False

        try:
            inserted_count = 0

            for idx, item in enumerate(data):
                # Build the document text for this record.
                if data_type == 'colleges':
                    content = (
                        f"院校名称: {item.get('name', '')}, 学校代码: {item.get('school_id', '')} 所在地: {item.get('province_name', '')}, "
                        f"类型: {item.get('type_name', '')}, 性质: {item.get('nature_name', '')}, "
                        f"特色: {item.get('f211', '')} {item.get('f985', '')}, 排名: {item.get('rank', '')}")
                else:
                    # BUG FIX: the original left `content` undefined here,
                    # raising NameError for any other data_type.
                    print(f"{datetime.now()}: Unknown data_type: {data_type}, skipping")
                    continue

                embedding = self.vector_result(content)
                if not embedding:
                    continue  # embedding failed; skip this record

                # Timestamp-based id; idx keeps ids unique within one batch.
                item_id = f"{data_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{idx}"

                insert_query = """
                               INSERT INTO vector_data
                               (id, content, embedding, metadata, data_type,
                                university_name, university_code, university_region,
                                university_type, university_nature)
                               VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY \
                               UPDATE \
                                   content = \
                               VALUES (content), embedding = \
                               VALUES (embedding), metadata = \
                               VALUES (metadata), updated_at = CURRENT_TIMESTAMP \
                               """

                # Parameterized upsert (safe against SQL injection).
                self.cursor.execute(insert_query, (
                    item_id,
                    content,
                    json.dumps(embedding),
                    json.dumps(item),
                    data_type,
                    item.get('name'),
                    item.get('school_id'),
                    item.get('province_name'),
                    item.get('type_name'),
                    item.get('nature_name')
                ))

                inserted_count += 1

            # One commit for the whole batch; rollback on any failure below.
            self.db_connection.commit()
            print(f"{datetime.now()}: 成功插入 {inserted_count} 条记录")
            return True

        except Exception as e:
            print(f"{datetime.now()}: Error storing {data_type} data in MySQL: {str(e)}")
            self.db_connection.rollback()
            return False

    def search_in_vector(self, query_embedding, limit=5):
        """Brute-force cosine search over every row of ``vector_data``.

        Args:
            query_embedding: query vector (sequence of floats).
            limit: number of top matches to return.

        Returns:
            dict with parallel lists 'ids', 'documents', 'metadatas',
            'embeddings', 'similarities' (best match first), or None on error.

        NOTE(review): loads the entire table into memory on every call;
        fine for small datasets, will not scale.
        """
        try:
            select_query = "SELECT id, content, metadata, embedding FROM vector_data"
            self.cursor.execute(select_query)
            rows = self.cursor.fetchall()

            # Score every stored vector against the query.
            # (Renamed the original loop variable `id`, which shadowed the builtin.)
            scored_results = []
            for row_id, content, metadata, embedding_json in rows:
                db_embedding = json.loads(embedding_json)
                score = self.similarity(query_embedding, db_embedding)
                scored_results.append((row_id, content, metadata, db_embedding, score))

            scored_results.sort(key=lambda x: x[4], reverse=True)
            top_results = scored_results[:limit]

            return {
                'ids': [r[0] for r in top_results],
                'documents': [r[1] for r in top_results],
                'metadatas': [json.loads(r[2]) for r in top_results],
                'embeddings': [r[3] for r in top_results],
                'similarities': [r[4] for r in top_results]
            }

        except Exception as e:
            print(f"{datetime.now()}: Error searching in vector database: {str(e)}")
            return None

    def daily_update(self):
        """Re-crawl and re-store every data source (intended 04:00 daily job)."""
        print(f"{datetime.now()}: Starting daily data update...")

        for data_type, url in self.data_sources.items():
            print(f"{datetime.now()}: Updating {data_type} data...")
            data = self.find_data(url, data_type)
            if data:
                success = self.vector_sjk(data, data_type)
                if success:
                    print(f"{datetime.now()}: Successfully updated {data_type} data")
                else:
                    print(f"{datetime.now()}: Failed to update {data_type} data")
            else:
                print(f"{datetime.now()}: No data fetched for {data_type}")

        print(f"{datetime.now()}: Daily update completed")

    def ai(self, question):
        """Answer *question* using vector retrieval plus an LLM.

        Returns:
            str: the model's answer, or a Chinese fallback message on any
            failure (this method never raises).
        """
        try:
            # Embed the question; bail out early if embedding fails.
            question_embedding = self.vector_result(question)
            if not question_embedding:
                return "无法生成问题的向量表示，请稍后再试。"

            # Retrieve the two most similar stored documents.
            results = self.search_in_vector(question_embedding, limit=2)
            if not results or not results['documents']:
                return "未找到相关信息，请尝试其他问题。"

            # Build the retrieval context. BUG FIX: the original also
            # unpacked `metadatas` here but never used it.
            context = ""
            for doc in results['documents']:
                context += f"{doc}\n来源: 高考网, 更新时间: {datetime.now().strftime('%Y-%m-%d')}\n\n"

            # Shared credential (the original duplicated the raw key here).
            dashscope.api_key = self._DASHSCOPE_API_KEY

            prompt = f"""
            你是一个专业的高考教育问答助手，请根据以下参考信息回答用户问题。保持回答专业、简洁、清晰。

            参考信息:
            {context}

            用户问题:
            {question}
            """

            response = dashscope.Generation.call(
                model="qwen-plus",
                prompt=prompt
            )

            if response.status_code == 200:
                return response.output['text']
            return "抱歉，回答问题出错，请稍后再试。"

        except Exception as e:
            print(f"{datetime.now()}: Error answering question: {str(e)}")
            return "处理您的问题时出现错误，请稍后再试。"

    def close_data(self):
        """Close the cursor and the MySQL connection."""
        self.cursor.close()
        self.db_connection.close()
        print(f"{datetime.now()}: Database connection closed")

    def run(self, gjz=None):
        """Full pipeline: crawl + store all sources, register the daily job,
        answer *gjz*, then close the database.

        Args:
            gjz: optional question/keyword string forwarded to :meth:`ai`.

        Returns:
            str | None: the AI answer, or None if answering raised.
        """
        print(f"{datetime.now()}: 开始知识库数据更新流程")

        # Crawl and store every configured data source.
        for data_type, base_url in self.data_sources.items():
            print(f"\n{datetime.now()}: 处理 {data_type} 数据")

            data = self.find_data(base_url, data_type)
            if not data:
                print(f"{datetime.now()}: 未获取到 {data_type} 数据，跳过")
                continue

            success = self.vector_sjk(data, data_type)
            if success:
                print(f"{datetime.now()}: 成功存储 {len(data)} 条 {data_type} 数据")
            else:
                print(f"{datetime.now()}: 存储 {data_type} 数据失败")

        print(f"{datetime.now()}: 知识库数据更新完成")

        # Register the 04:00 daily refresh. NOTE(review): nothing ever calls
        # schedule.run_pending() in a loop, and the DB connection is closed
        # in the finally below, so this job can never actually execute —
        # confirm the intended scheduling design with the author.
        schedule.every().day.at("04:00").do(self.daily_update)
        print(f"{datetime.now()}: 已设置每日凌晨4点自动更新任务")

        try:
            # (Normalized the original's stray extra indent level here.)
            answer = self.ai(gjz)
            return answer
        except Exception as e:
            print(f"{datetime.now()}: 程序运行出错: {e}")
        finally:
            # Always release DB resources, success or failure.
            self.close_data()
            print(f"{datetime.now()}: 程序正常结束")