import os
import numpy as np

from src.utils import tokenCounter
import json
from tqdm import tqdm

from tinydb import TinyDB, Query
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
import logging
import datetime
import re
# # Logging configuration (currently disabled)
# log_dir = "logs"
# if not os.path.exists(log_dir):
#     os.makedirs(log_dir)

# Create the logger
# logger = logging.getLogger('atomgit_api')
# logger.setLevel(logging.INFO)

# Create a file handler named by date
# log_file = os.path.join(log_dir, f'atomgit_api_{datetime.datetime.now().strftime("%Y%m%d")}.log')
# file_handler = logging.FileHandler(log_file, encoding='utf-8')
# file_handler.setLevel(logging.INFO)

# Set the log record format
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# file_handler.setFormatter(formatter)

# Attach the handler to the logger
# logger.addHandler(file_handler)

class BasePaperDatabase(ABC):
    """Abstract interface that every paper-database backend must implement."""

    @abstractmethod
    def get_ids_from_query(self, query: str, num: int = 50, shuffle: bool = False) -> List[str]:
        """Return up to ``num`` paper ids matching the free-text ``query``."""
        ...

    @abstractmethod
    def get_paper_info_from_ids(self, ids: List[str]) -> List[Dict[str, Any]]:
        """Return the metadata records for the given paper ids."""
        ...

    @abstractmethod
    def get_ids_from_topic(self, topic: str, num: int = 50) -> List[str]:
        """Return up to ``num`` paper ids relevant to ``topic``."""
        ...

    @abstractmethod
    def get_titles_from_citations(self, citations: List[str]) -> List[str]:
        """Resolve each citation string to its best-matching entry."""
        ...

class LocalPaperDatabase(BasePaperDatabase):
    """Local paper database backed by TinyDB metadata plus FAISS vector indexes.

    Expects ``db_path`` to contain:
      - ``arxiv_paper_db.json``               (TinyDB metadata store)
      - ``faiss_paper_title_embeddings.bin``  (FAISS index over titles)
      - ``faiss_paper_abs_embeddings.bin``    (FAISS index over abstracts)
      - ``arxivid_to_index_abs.json``         (arXiv id -> FAISS row mapping)
    """

    def __init__(self, config: Optional[Dict] = None) -> None:
        """Initialize the local database.

        Args:
            config: Optional overrides for the defaults
                (keys: ``db_path``, ``embedding_model``, ``device``).
        """
        # Heavy dependencies are imported lazily so that merely importing this
        # module does not require torch/faiss to be installed.
        import torch
        from sentence_transformers import SentenceTransformer
        import faiss

        # Built per-instance (not at class level) because the default device
        # depends on the torch import above.
        self.DEFAULT_CONFIG = {
            "db_path": "./database",
            "embedding_model": "AI-ModelScope/nomic-embed-text-v1",
            "device": "cuda" if torch.cuda.is_available() else "cpu"
        }
        # Merge caller overrides on top of the defaults.
        self.config = self.DEFAULT_CONFIG.copy()
        if config:
            self.config.update(config)

        db_path = self.config["db_path"]

        # Sentence-embedding model used for both query and document encoding.
        self.embedding_model = SentenceTransformer(self.config["embedding_model"], trust_remote_code=True)
        self.embedding_model.to(torch.device(self.config["device"]))

        # TinyDB table holding the per-paper metadata records.
        self.db = TinyDB(f'{db_path}/arxiv_paper_db.json')
        self.table = self.db.table('cs_paper_info')

        # Query helpers.
        self.User = Query()
        self.token_counter = tokenCounter()

        # FAISS indexes over title and abstract embeddings.
        self.title_loaded_index = faiss.read_index(f'{db_path}/faiss_paper_title_embeddings.bin')
        self.abs_loaded_index = faiss.read_index(f'{db_path}/faiss_paper_abs_embeddings.bin')

        # Bidirectional arXiv-id <-> FAISS-row mapping.
        self.id_to_index, self.index_to_id = self.load_index_arxivid(db_path)

    def load_index_arxivid(self, db_path: str):
        """Load and return the ``(id_to_index, index_to_id)`` mappings from disk."""
        with open(f'{db_path}/arxivid_to_index_abs.json', 'r') as f:
            raw = json.load(f)  # json.load avoids an extra in-memory copy vs loads(f.read())
        id_to_index = {arxiv_id: int(index) for arxiv_id, index in raw.items()}
        index_to_id = {index: arxiv_id for arxiv_id, index in id_to_index.items()}
        return id_to_index, index_to_id

    def get_embeddings(self, batch_text: List[str]):
        """Embed texts as *queries* (the nomic model expects a task prefix)."""
        batch_text = ['search_query: ' + t for t in batch_text]
        return self.embedding_model.encode(batch_text)

    def get_embeddings_documents(self, batch_text: List[str]):
        """Embed texts as *documents* (the nomic model expects a task prefix)."""
        batch_text = ['search_document: ' + t for t in batch_text]
        return self.embedding_model.encode(batch_text)

    def batch_search(self, query_vectors, top_k: int = 1, title: bool = False):
        """Search a FAISS index for each query vector.

        Args:
            query_vectors: 2-D array-like of query embeddings.
            top_k: Number of neighbours to return per query.
            title: Search the title index instead of the abstract index.

        Returns:
            List[List[str]]: Matching arXiv ids per query.
        """
        query_vectors = np.array(query_vectors).astype('float32')
        index = self.title_loaded_index if title else self.abs_loaded_index
        distances, indices = index.search(query_vectors, top_k)
        results = []
        for i in tqdm(range(len(query_vectors))):
            # FAISS pads with -1 when fewer than top_k neighbours exist.
            results.append([self.index_to_id[idx] for idx in indices[i] if idx != -1])
        return results

    def search(self, query_vector, top_k: int = 1, title: bool = False):
        """Single-query version of :meth:`batch_search`; returns a flat id list."""
        query_vector = np.array([query_vector]).astype('float32')
        index = self.title_loaded_index if title else self.abs_loaded_index
        distances, indices = index.search(query_vector, top_k)
        return [self.index_to_id[idx] for idx in indices[0] if idx != -1]

    def get_ids_from_query(self, query: str, num: int = 50, shuffle: bool = False) -> List[str]:
        """Return up to ``num`` paper ids for a free-text query.

        ``num`` now defaults to 50 to match the BasePaperDatabase interface;
        ``shuffle`` is accepted for interface compatibility but is ignored.
        """
        q = self.get_embeddings([query])[0]
        return self.search(q, top_k=num)

    def get_titles_from_citations(self, citations: List[str]) -> List[str]:
        """Match each citation string against the *title* index.

        NOTE(review): despite the name, this returns one paper id per
        citation ("" when no hit), not title strings — confirm with callers.
        """
        q = self.get_embeddings_documents(citations)
        ids = self.batch_search(q, 1, True)
        # Guard against empty hit lists instead of raising IndexError,
        # mirroring AtomGitPaperDatabase's "" placeholder behaviour.
        return [hits[0] if hits else "" for hits in ids]

    def get_ids_from_queries(self, queries: List[str], num: int = 50, shuffle: bool = False):
        """Batch version of :meth:`get_ids_from_query`; returns one id list per query."""
        q = self.get_embeddings(queries)
        return self.batch_search(q, num)

    def get_date_from_ids(self, ids: List[str]) -> List[str]:
        """Return the 'date' field of each stored record matching ``ids``."""
        result = self.table.search(self.User.id.one_of(ids))
        return [r['date'] for r in result]

    def get_title_from_ids(self, ids: List[str]) -> List[str]:
        """Return the 'title' field of each stored record matching ``ids``."""
        result = self.table.search(self.User.id.one_of(ids))
        return [r['title'] for r in result]

    def get_abs_from_ids(self, ids: List[str]) -> List[str]:
        """Return the 'abs' (abstract) field of each stored record matching ``ids``."""
        result = self.table.search(self.User.id.one_of(ids))
        return [r['abs'] for r in result]

    def get_paper_info_from_ids(self, ids: List[str]) -> List[Dict[str, Any]]:
        """Return the full metadata records for ``ids``.

        NOTE(review): TinyDB returns records in storage order and silently
        drops missing ids, so the result may be shorter than / reordered
        relative to the input — confirm callers tolerate this.
        """
        return self.table.search(self.User.id.one_of(ids))

    def get_ids_from_topic(self, topic: str, num: int = 50) -> List[str]:
        """Return paper ids for a topic (delegates to get_ids_from_query)."""
        return self.get_ids_from_query(query=topic, num=num)

class AtomGitPaperDatabase(BasePaperDatabase):
    """Paper database backed by the remote AtomGit HTTP API."""

    DEFAULT_CONFIG = {
        "base_url": "http://180.184.65.98:38880/atomgit",
        "cache_size": 5000,
        "max_retries": 5,
        "retry_delay": 1.0,
        "timeout": 10,
        "headers": {
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
    }

    def __init__(self, config: Optional[Dict] = None) -> None:
        """Initialize the AtomGit database.

        Args:
            config: Optional overrides for DEFAULT_CONFIG. May additionally
                supply ``model_name`` / ``api_key`` / ``api_url`` for the LLM
                used as a fallback parser in :meth:`get_paper_info_from_ids`.
        """
        # Merge caller overrides on top of the defaults.
        self.config = self.DEFAULT_CONFIG.copy()
        if config:
            self.config.update(config)

        # LLM client used to parse conference/year strings when regex fails.
        # The hard-coded key is a placeholder; real credentials should be
        # passed via ``config`` (backward compatible: defaults unchanged).
        from src.model import APIModel
        self.model = self.config.get("model_name", 'glm-4-airx')
        self.api_key = self.config.get("api_key", 'xxxxxx')
        self.api_url = self.config.get("api_url", "https://open.bigmodel.cn/api/paas/v4")
        self.api_model = APIModel(self.model, self.api_key, self.api_url)

        # Bounded response cache for API requests (see _cached_request).
        self._cache = {}

        # Per-paper metadata cache keyed by str(paper_id).
        self.paper_info_list = {}

    def _make_request(self, endpoint: str, method: str = "GET", **kwargs) -> Optional[Dict]:
        """Send an HTTP request to the AtomGit API.

        Returns:
            The decoded JSON body, or None on any request failure
            (best-effort API; the caller treats None as a miss).
        """
        import requests
        from requests.adapters import HTTPAdapter
        from urllib3.util.retry import Retry

        url = f"{self.config['base_url']}/{endpoint}"

        # For GET requests a caller-supplied "json" payload is sent as the
        # query string rather than a request body.
        if method == "GET" and kwargs.get("json"):
            kwargs["params"] = kwargs.pop("json")

        # Retry transient server errors with exponential backoff.
        retries = Retry(
            total=self.config["max_retries"],
            backoff_factor=self.config["retry_delay"],
            status_forcelist=[500, 502, 503, 504]
        )
        # Context manager ensures the session (and its connection pool) is
        # closed; the original leaked one session per request.
        with requests.Session() as session:
            session.mount('http://', HTTPAdapter(max_retries=retries))
            session.mount('https://', HTTPAdapter(max_retries=retries))
            try:
                response = session.request(
                    method=method,
                    url=url,
                    headers=self.config["headers"],
                    timeout=self.config["timeout"],
                    **kwargs
                )
                response.raise_for_status()
                return response.json()
            except requests.exceptions.RequestException:
                # Swallow the error and signal failure via None.
                return None

    def _get_cache_key(self, endpoint: str, **kwargs) -> str:
        """Build a deterministic cache key from the endpoint and request kwargs."""
        import hashlib
        key_str = f"{endpoint}:{sorted(kwargs.items())}"
        return hashlib.md5(key_str.encode()).hexdigest()

    def _cached_request(self, endpoint: str, method: str = "GET", **kwargs) -> Optional[Dict]:
        """Request with an in-memory cache.

        NOTE(review): the cache key excludes the HTTP ``method``, so the same
        endpoint+payload issued with different methods would collide.
        """
        cache_key = self._get_cache_key(endpoint, **kwargs)

        if cache_key in self._cache:
            return self._cache[cache_key]

        result = self._make_request(endpoint, method, **kwargs)

        if result is not None:
            if len(self._cache) >= self.config["cache_size"]:
                # FIFO eviction: drop the oldest entry (dicts preserve
                # insertion order).
                self._cache.pop(next(iter(self._cache)))
            self._cache[cache_key] = result

        return result

    def get_ids_from_query(self, query: str, num: int = 50, shuffle: bool = False) -> List[str]:
        """Return up to ``num`` paper ids matching ``query``.

        ``shuffle`` is accepted for interface compatibility but is ignored.
        """
        response = self._cached_request("search_papers", method="GET", json={
            "query": query,
            "top_k": num
        })

        if not isinstance(response, list):
            return []
        paper_ids = []
        for item in response:
            # Each hit is expected to wrap its fields in an "entity" dict.
            if isinstance(item, dict) and "entity" in item:
                paper_id = item["entity"].get("paper_id")
                if paper_id:
                    paper_ids.append(paper_id)
        return paper_ids

    def get_paper_info_from_ids(self, ids: List[str]) -> List[Dict[str, Any]]:
        """Fetch metadata for each paper id via the API, with caching.

        Returns:
            One dict per input id ({} when the lookup fails) with keys:
            id, title, abs, chunk_id, date (year), journal_abbreviation.
        """
        # ``re`` is imported at module level; the original re-imported it here.

        def extract_json_to_dict(text):
            # Pull the last parseable {...} object out of an LLM response.
            json_matches = re.findall(r'\{([^}]+)\}', text)
            result_dict = {}
            for json_str in json_matches:
                try:
                    result_dict = json.loads('{' + json_str + '}')
                except json.JSONDecodeError:
                    print(f"Warning: Could not decode JSON: {{{json_str}}}")
            return result_dict

        def extract_conf_year(text):
            # Filenames look like "Data_<CONF>_<YYYY>" or "Data_<CONF><YYYY>".
            match = re.search(r'Data_([A-Z]+)(_?)(\d{4})', text)
            if match:
                return {"journal_abbreviation": match.group(1), "year": match.group(3)}
            # Fallback when the filename does not match the pattern.
            return {
                "journal_abbreviation": "UNKNOWN",
                "year": "2025"
            }

        results = []
        for paper_id in ids:
            # Serve from the per-paper cache when possible.
            if str(paper_id) in self.paper_info_list:
                results.append(self.paper_info_list[str(paper_id)])
                continue

            response = self._cached_request("query_by_paper_id", method="GET", json={
                "paper_id": paper_id,
                "top_k": 3
            })
            if isinstance(response, list) and response:
                # Fields live directly on the first hit (no "entity" wrapper).
                item = response[0]

                # Extract the journal abbreviation and year from the filename.
                json_str = extract_conf_year(item.get("original_filename", ""))
                # NOTE(review): extract_conf_year never returns {} (it has an
                # UNKNOWN/2025 fallback), so this LLM fallback branch is
                # currently dead code — confirm whether that is intended.
                if json_str == {}:
                    print(paper_id, "--- original_filename", item.get("original_filename", ""))
                    json_ex = '{"journal_abbreviation": "AAAI","year": 2024}'
                    prompt = f'''从{item.get("original_filename", "")}这里抽取出期刊简称以及年份并以json返回，只输出json即可。
                    输出json格式参考：
                    {json_ex}'''
                    response_zhipu = self.api_model.chat(prompt, temperature=0)
                    json_str = extract_json_to_dict(response_zhipu)

                insert_paper_info = {
                    "id": item.get("paper_id", ""),
                    "title": item.get("paper_title", ""),
                    "abs": item.get("chunk_text", ""),
                    "chunk_id": item.get("chunk_id", ""),
                    "date": json_str.get("year", ""),
                    "journal_abbreviation": json_str.get("journal_abbreviation", ""),
                }
                # Cache so repeated lookups skip the API.
                self.paper_info_list[str(paper_id)] = insert_paper_info

                results.append(insert_paper_info)
            else:
                # Lookup failed: keep positional alignment with an empty dict.
                results.append({})
        return results

    def get_ids_from_topic(self, topic: str, num: int = 50) -> List[str]:
        """Return paper ids for a topic (delegates to get_ids_from_query)."""
        return self.get_ids_from_query(query=topic, num=num)

    def get_titles_from_citations(self, citations: List[str]) -> List[str]:
        """Map each citation string to its best-matching paper id.

        Args:
            citations: Citation strings.

        Returns:
            List[str]: One paper id per citation ("" on no match or error).

        NOTE(review): despite the name, this returns paper ids, not titles —
        confirm with callers.
        """
        results = []

        for citation in citations:
            try:
                paper_ids = self.get_ids_from_query(query=citation, num=1)
                # Best-effort: a failed citation yields "" rather than aborting.
                results.append(paper_ids[0] if paper_ids else "")
            except Exception:
                results.append("")

        return results

class PaperDatabase:
    """Unified facade that delegates to a concrete paper-database backend."""

    def __init__(self, db_type: str = "local", **kwargs):
        """Construct the backend selected by ``db_type``.

        Args:
            db_type: Backend kind, either "local" or "atomgit".
            **kwargs: Must contain ``local_config`` or ``atomgit_config``
                matching the chosen backend.

        Raises:
            ValueError: Unknown ``db_type`` or missing backend config.
        """
        # Guard-clause dispatch: each branch validates its config then returns.
        if db_type == "local":
            if "local_config" not in kwargs:
                raise ValueError("使用local数据库时必须提供local_config")
            self.db = LocalPaperDatabase(kwargs["local_config"])
            return
        if db_type == "atomgit":
            if "atomgit_config" not in kwargs:
                raise ValueError("使用atomgit数据库时必须提供atomgit_config")
            self.db = AtomGitPaperDatabase(kwargs["atomgit_config"])
            return
        raise ValueError(f"不支持的数据库类型: {db_type}")

    def __getattr__(self, name):
        """Forward unknown attribute access to the wrapped backend instance."""
        return getattr(self.db, name)