"""
Hugging Face 数据源
获取最新的模型发布、数据集和AI社区动态
"""
import requests
from typing import List, Dict, Any
import json
import os
from datetime import datetime, timedelta
from bs4 import BeautifulSoup

CACHE_DIR = "./cache"
CACHE_FILE = os.path.join(CACHE_DIR, "hf_cache.json")
CACHE_EXPIRATION_HOURS = 1

def extract_huggingface_images(model_id: str) -> List[Dict[str, str]]:
    """
    Extract relevant images from a Hugging Face model page.

    Scrapes the public model page at https://huggingface.co/<model_id>,
    keeps <img> tags whose URL suggests model content (skipping UI
    icons/logos), and falls back to the repo's conventional avatar path
    when nothing matches.

    Args:
        model_id: Repository id, e.g. "org/model" (callers may also pass
            "datasets/org/name" to target a dataset page).

    Returns:
        At most 2 dicts with keys 'url', 'alt', 'type' ('model_image' for
        scraped images, 'avatar' for the fallback). On any error, returns
        whatever was collected before the failure (possibly empty).
    """
    images: List[Dict[str, str]] = []
    seen_urls = set()  # fix: the same src can appear in several <img> tags
    try:
        model_url = f"https://huggingface.co/{model_id}"

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(model_url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.content, 'html.parser')

        for img in soup.find_all('img'):
            src = img.get('src', '')
            alt = img.get('alt', '')

            # Keep only images whose URL hints at model content.
            if not any(keyword in src.lower()
                       for keyword in ('model', 'demo', 'example', 'sample', 'preview')):
                continue

            # Normalize protocol-relative / site-relative URLs; drop
            # anything that cannot be resolved to an absolute http(s) URL.
            if src.startswith('//'):
                src = 'https:' + src
            elif src.startswith('/'):
                src = 'https://huggingface.co' + src
            elif not src.startswith('http'):
                continue

            # Filter out small branding assets (usually icons) and duplicates.
            lowered = src.lower()
            if 'icon' in lowered or 'logo' in lowered or src in seen_urls:
                continue

            seen_urls.add(src)
            images.append({
                'url': src,
                'alt': alt or f'Hugging Face模型 {model_id} 图片',
                'type': 'model_image'
            })

        # Nothing matched: fall back to the repo's default avatar location.
        if not images:
            avatar_url = f"https://huggingface.co/{model_id}/resolve/main/avatar.jpg"
            images.append({
                'url': avatar_url,
                'alt': f'Hugging Face模型 {model_id} 头像',
                'type': 'avatar'
            })

    except Exception as e:
        # Best-effort scraper: log and return whatever we have so far.
        print(f"获取Hugging Face图片时出错 {model_id}: {e}")

    return images[:2]  # cap at 2 images

def fetch_huggingface_models(limit: int = 20) -> List[Dict[str, Any]]:
    """
    Fetch the most recently modified language models from Hugging Face.

    Results are cached on disk (CACHE_FILE) for CACHE_EXPIRATION_HOURS;
    the cache is reused only when it was built with the same ``limit``.

    Args:
        limit: Maximum number of models to fetch.

    Returns:
        A list of model-info dicts (title, model_id, url, published,
        authors, downloads, likes, tags, pipeline_tag, library_name,
        source, images, description). Empty list on failure.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)

    # Serve from cache when it is fresh and was built with the same limit.
    # Guard the read: a corrupt/truncated cache file must not crash us —
    # fall through and refetch instead.
    if os.path.exists(CACHE_FILE):
        try:
            with open(CACHE_FILE, "r", encoding="utf-8") as f:
                cache_data = json.load(f)
            cached_time = datetime.fromisoformat(cache_data["timestamp"])
            if (datetime.now() - cached_time < timedelta(hours=CACHE_EXPIRATION_HOURS) and
                    cache_data.get("limit") == limit):
                print(f"[DEBUG] Returning cached Hugging Face results.")
                return cache_data["models"]
        except (OSError, ValueError, KeyError):
            pass  # invalid cache: refetch below

    try:
        # Hugging Face models listing API.
        url = "https://huggingface.co/api/models"
        params = {
            'sort': 'lastModified',  # newest changes first
            'direction': -1,  # descending
            'limit': limit,
            'filter': 'text-generation,text2text-generation,conversational'  # language models only
        }

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(url, params=params, headers=headers, timeout=30)
        response.raise_for_status()

        models_data = response.json()
        models = []

        for model in models_data:
            model_id = model.get('modelId', '')

            # Scrape representative images for the model page.
            images = []
            if model_id:
                images = extract_huggingface_images(model_id)

            # Fix: the API returns 'author' as a string; the old
            # ', '.join(author) splattered it into per-character pieces.
            # Accept both a string and a list of strings defensively.
            author = model.get('author')
            if isinstance(author, str) and author:
                authors = author
            elif author:
                authors = ', '.join(author)
            else:
                authors = 'Hugging Face Community'

            model_info = {
                'title': f"新模型发布: {model_id}",
                'model_id': model_id,
                'url': f"https://huggingface.co/{model_id}",
                'published': model.get('lastModified', ''),
                'authors': authors,
                'downloads': model.get('downloads', 0),
                'likes': model.get('likes', 0),
                'tags': model.get('tags', []),
                'pipeline_tag': model.get('pipeline_tag', ''),
                'library_name': model.get('library_name', ''),
                'source': 'huggingface',
                'images': images
            }

            # Best-effort: enrich with the model-card description.
            model_info['description'] = ''
            try:
                model_detail_url = f"https://huggingface.co/api/models/{model_id}"
                detail_response = requests.get(model_detail_url, headers=headers, timeout=10)
                if detail_response.status_code == 200:
                    detail_data = detail_response.json()
                    model_info['description'] = detail_data.get('cardData', {}).get('description', '')
            except (requests.RequestException, ValueError):
                # Narrowed from a bare except: network/JSON errors only;
                # never swallow KeyboardInterrupt/SystemExit.
                pass

            models.append(model_info)

        # Persist results for subsequent calls.
        cache_data = {
            "timestamp": datetime.now().isoformat(),
            "limit": limit,
            "models": models
        }
        with open(CACHE_FILE, "w", encoding="utf-8") as f:
            json.dump(cache_data, f, ensure_ascii=False, indent=2)

        return models

    except Exception as e:
        print(f"获取Hugging Face数据时出错: {e}")
        return []

def fetch_trending_datasets(limit: int = 10) -> List[Dict[str, Any]]:
    """
    Fetch the most-downloaded datasets from Hugging Face.

    Args:
        limit: Maximum number of datasets to fetch.

    Returns:
        A list of dataset-info dicts (title, dataset_id, url, published,
        authors, downloads, likes, tags, source, images). Empty list on
        failure. Unlike fetch_huggingface_models, results are not cached.
    """
    try:
        url = "https://huggingface.co/api/datasets"
        params = {
            'sort': 'downloads',
            'direction': -1,
            'limit': limit
        }

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        response = requests.get(url, params=params, headers=headers, timeout=30)
        response.raise_for_status()

        datasets_data = response.json()
        datasets = []

        for dataset in datasets_data:
            dataset_id = dataset.get('id', '')

            # Scrape representative images from the dataset page.
            images = []
            if dataset_id:
                images = extract_huggingface_images(f"datasets/{dataset_id}")

            # Fix: the API returns 'author' as a string; the old
            # ', '.join(author) splattered it into per-character pieces.
            # Accept both a string and a list of strings defensively.
            author = dataset.get('author')
            if isinstance(author, str) and author:
                authors = author
            elif author:
                authors = ', '.join(author)
            else:
                authors = 'Hugging Face Community'

            dataset_info = {
                'title': f"热门数据集: {dataset_id}",
                'dataset_id': dataset_id,
                'url': f"https://huggingface.co/datasets/{dataset_id}",
                'published': dataset.get('lastModified', ''),
                'authors': authors,
                'downloads': dataset.get('downloads', 0),
                'likes': dataset.get('likes', 0),
                'tags': dataset.get('tags', []),
                'source': 'huggingface_datasets',
                'images': images
            }
            datasets.append(dataset_info)

        return datasets

    except Exception as e:
        print(f"获取热门数据集时出错: {e}")
        return []