import json
import os
import sqlite3
from contextlib import closing
from typing import List, Dict, Any

import chromadb
import pandas as pd
from dotenv import load_dotenv
from langchain_community.vectorstores import Chroma
from langchain_ollama import OllamaEmbeddings

# Load environment variables from a local .env file (supplies EMBEDDING_MODEL below).
load_dotenv()

class DatabaseManager:
    """Keeps course records in SQLite and their description embeddings in ChromaDB.

    SQLite holds the full structured rows (table ``courses``); ChromaDB stores
    one embedding per course, keyed by ``course_id``, for similarity search.
    """

    # Column order here must match the INSERT statement in add_course().
    REQUIRED_FIELDS = (
        'course_id', 'name', 'description', 'grade_level',
        'subject', 'difficulty', 'course_url',
    )

    def __init__(self, sqlite_path: str = "courses.db", chroma_path: str = "chroma_db"):
        """
        Args:
            sqlite_path: Path of the SQLite database file.
            chroma_path: Directory for the persistent ChromaDB store.
        """
        self.sqlite_path = sqlite_path
        self.chroma_path = chroma_path
        self._init_databases()

    def _connect(self) -> sqlite3.Connection:
        """Open a new connection to the SQLite course database."""
        return sqlite3.connect(self.sqlite_path)

    def _init_databases(self) -> None:
        """Create the SQLite schema, the embedding model, and the ChromaDB collection."""
        # Initialize SQLite; closing() releases the connection even if DDL fails.
        with closing(self._connect()) as conn:
            conn.execute('''
                CREATE TABLE IF NOT EXISTS courses (
                    course_id TEXT PRIMARY KEY,
                    name TEXT NOT NULL,
                    description TEXT,
                    grade_level TEXT,
                    subject TEXT,
                    difficulty TEXT,
                    course_url TEXT
                )
            ''')
            conn.commit()

        # NOTE(review): os.getenv returns None when EMBEDDING_MODEL is unset,
        # which OllamaEmbeddings will reject — consider an explicit check/default.
        self.embedding_model = OllamaEmbeddings(
            model=os.getenv("EMBEDDING_MODEL"),
            base_url="http://localhost:11434"
        )
        # Initialize ChromaDB with a persistent on-disk client.
        self.chroma_client = chromadb.PersistentClient(path=self.chroma_path)
        self.collection_name = 'courses'
        try:
            self.collection = self.chroma_client.get_collection(name=self.collection_name)
            print(f"已连接到现有集合: {self.collection_name}")
        except Exception:
            # get_collection raises when the collection does not exist yet.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
            self.collection = self.chroma_client.create_collection(name=self.collection_name)
            print(f"已创建新集合: {self.collection_name}")

        # NOTE(review): client_settings normally expects a chromadb Settings
        # object; when an explicit client= is passed it appears to be stored
        # unused — confirm against the installed langchain_community version.
        self.vectorstore = Chroma(
            collection_name=self.collection_name,
            embedding_function=self.embedding_model,
            client=self.chroma_client,
            client_settings={"anonymized_telemetry": False}
        )

        self.retriever = self.vectorstore.as_retriever(
            search_type="similarity",
            search_kwargs={"k": 3}
        )

    def add_course(self, course_data: Dict[str, Any]) -> bool:
        """Insert or replace one course in SQLite and (re)index it in ChromaDB.

        Args:
            course_data: Mapping containing every key in REQUIRED_FIELDS.

        Returns:
            True on success, False if either store raised.
        """
        try:
            # Write the structured row first; the context manager guarantees
            # the connection is closed even if the insert fails.
            with closing(self._connect()) as conn:
                conn.execute('''
                    INSERT OR REPLACE INTO courses
                    (course_id, name, description, grade_level, subject, difficulty, course_url)
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                ''', tuple(course_data[field] for field in self.REQUIRED_FIELDS))
                conn.commit()

            # Index the description; course_id doubles as the vector id, so
            # re-adding a course overwrites its previous embedding.
            self.collection.add(
                embeddings=[self.embedding_model.embed_query(course_data['description'])],
                documents=[course_data['description']],
                metadatas=[{
                    field: course_data[field]
                    for field in self.REQUIRED_FIELDS
                    if field != 'description'
                }],
                ids=[course_data['course_id']]
            )
            return True
        except Exception as e:
            # NOTE(review): if the Chroma add fails after the SQLite commit the
            # two stores diverge; a compensating SQLite delete may be warranted.
            print(f"Error adding course: {e}")
            return False

    def get_courses(self, page: int = 1, per_page: int = 10) -> List[Dict[str, Any]]:
        """Return one page of courses as dicts keyed by column name.

        Args:
            page: 1-based page number; values below 1 are clamped to page 1.
            per_page: Number of rows per page.
        """
        # Clamp so page=0 (or negative) cannot produce a negative OFFSET.
        offset = max(page - 1, 0) * per_page
        with closing(self._connect()) as conn:
            # ORDER BY makes pagination deterministic; without it SQLite's row
            # order is unspecified and pages could overlap or skip rows.
            cursor = conn.execute('''
                SELECT * FROM courses
                ORDER BY course_id
                LIMIT ? OFFSET ?
            ''', (per_page, offset))
            columns = [description[0] for description in cursor.description]
            return [dict(zip(columns, row)) for row in cursor.fetchall()]

    def delete_course(self, course_id: str) -> bool:
        """Delete a course from both SQLite and ChromaDB.

        Returns:
            True on success (also when the id did not exist), False on error.
        """
        try:
            with closing(self._connect()) as conn:
                conn.execute('DELETE FROM courses WHERE course_id = ?', (course_id,))
                conn.commit()
            self.collection.delete(ids=[course_id])
            return True
        except Exception as e:
            print(f"Error deleting course: {e}")
            return False

    def search_similar_courses(self, query: str, top_k: int = 3) -> List[Dict[str, Any]]:
        """Return up to ``top_k`` courses whose descriptions best match ``query``.

        Embeds the query, asks ChromaDB for nearest neighbours, then resolves
        each returned course_id to its full SQLite row.
        """
        try:
            results = self.collection.query(
                query_embeddings=[self.embedding_model.embed_query(query)],
                n_results=top_k
            )

            courses = []
            with closing(self._connect()) as conn:
                cursor = conn.cursor()
                # results['ids'] holds one id-list per query; we issued one query.
                if results and 'ids' in results and len(results['ids']) > 0:
                    for course_id in results['ids'][0]:
                        cursor.execute('SELECT * FROM courses WHERE course_id = ?', (course_id,))
                        row = cursor.fetchone()
                        if row:  # skip ids present in Chroma but missing from SQLite
                            columns = [description[0] for description in cursor.description]
                            courses.append(dict(zip(columns, row)))
            return courses
        except Exception as e:
            print(f"Error searching similar courses: {e}")
            return []

    def import_from_file(self, file_path: str, file_type: str = 'csv') -> bool:
        """Bulk-import courses from a CSV or JSON file via add_course().

        Args:
            file_path: Path to the input file (UTF-8).
            file_type: 'csv' or 'json'. JSON may be a list of course objects
                or an object with a top-level 'courses' list.

        Returns:
            True if every row was imported, False on the first failure.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                if file_type == 'csv':
                    df = pd.read_csv(f)
                elif file_type == 'json':
                    data = json.load(f)
                    # BUG FIX: json.load() exhausts the file handle, so the old
                    # fallback pd.read_json(f) parsed an empty stream. Build the
                    # frame from the already-parsed data instead.
                    df = pd.DataFrame(data['courses'] if 'courses' in data else data)
                else:
                    raise ValueError("Unsupported file type")

            # Verify every required column is present before inserting anything.
            missing_fields = [field for field in self.REQUIRED_FIELDS if field not in df.columns]
            if missing_fields:
                print(f"Error: Missing required fields: {missing_fields}")
                return False

            # Normalize pandas NaN (e.g. empty CSV cells) to None so SQLite
            # stores NULL rather than a float nan.
            df = df.where(df.notna(), None)

            for _, row in df.iterrows():
                course_data = row.to_dict()
                if not self.add_course(course_data):
                    print(f"Failed to add course: {course_data.get('course_id', 'Unknown')}")
                    return False
            return True
        except Exception as e:
            print(f"Error importing file: {e}")
            return False