from bs4 import BeautifulSoup
from datetime import datetime
from typing import Dict, List, Any
from .base_crawler import BaseCrawler
from .request_utils import RequestUtils
import requests
import json

class Kr36Crawler(BaseCrawler):
    """Crawler for 36kr articles.

    Fetches the article feed through the site's JSON API and provides
    HTML-parsing helpers for article-list and article-detail pages.
    """

    def __init__(self, config):
        """Initialize from *config*, which must provide 'base_url',
        'api_url' and 'headers' keys."""
        self.config = config
        self.base_url = config['base_url']
        self.api_url = config['api_url']
        self.headers = config['headers']
        self.request_utils = RequestUtils()

    def fetch_articles(self):
        """Fetch the article list from the JSON API.

        Returns:
            The list of raw article items, or [] on any HTTP, JSON or
            schema error (best-effort boundary: errors are logged, never
            propagated).
        """
        try:
            url = f"{self.base_url}{self.api_url}"
            # Timeout prevents a stalled server from hanging the crawler.
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                data = response.json()
                # Use .get so an unexpected payload shape falls through to
                # the empty-list return instead of raising KeyError.
                if data.get('code') == 0:
                    return data.get('data', {}).get('items', [])
            return []
        except Exception as e:
            print(f"Error fetching articles: {str(e)}")
            return []

    def parse_article_list(self, html: str) -> List[Dict[str, str]]:
        """Parse an article-list page.

        Args:
            html: Raw HTML of a list page.

        Returns:
            One dict per article with 'title', absolute 'link' and
            'publish_time'. Items missing any of the three pieces are
            skipped.
        """
        soup = BeautifulSoup(html, "html.parser")
        articles = []

        for item in soup.select(".article-item"):
            title = item.select_one(".article-title")
            if not title:
                continue

            link = item.select_one("a")
            if not link:
                continue

            info = item.select_one(".article-info")
            if not info:
                continue

            articles.append({
                "title": title.text.strip(),
                # Hrefs on the list page are site-relative.
                "link": f"{self.base_url}{link['href']}",
                "publish_time": info.text.strip()
            })

        return articles

    def parse_article_detail(self, html: str) -> Dict[str, str]:
        """Parse an article-detail page.

        Args:
            html: Raw HTML of a detail page.

        Returns:
            A dict with 'title', cleaned 'content' and ISO-format
            'publish_time', or {} when any required element is missing.
        """
        soup = BeautifulSoup(html, "html.parser")

        title = soup.select_one("h1")
        content = soup.select_one(".article-content")
        time_tag = soup.select_one(".article-time")

        if not all([title, content, time_tag]):
            return {}

        return {
            "title": title.text.strip(),
            "content": self._clean_content(content),
            "publish_time": self._parse_time(time_tag.text.strip())
        }

    def _clean_content(self, content) -> str:
        """Clean the article body: drop script/style/iframe tags and cut
        everything from the trailing publish-time marker onward.

        NOTE: this class previously defined _clean_content twice; the
        second definition (with the marker cut) silently shadowed the
        first, so that is the behavior kept here.
        """
        for tag in content.select("script, style, iframe"):
            tag.decompose()
        return content.get_text().strip().split("发布时间：")[0].strip()

    def _parse_time(self, time_str: str) -> str:
        """Parse a '%Y-%m-%d %H:%M:%S' timestamp into ISO format.

        Returns "" when *time_str* does not match the expected format.
        """
        try:
            dt = datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")
            return dt.isoformat()
        except ValueError:
            return ""

    def parse_article(self, article: Dict[str, Any]) -> Dict[str, str]:
        """Normalize a raw API article item to the output schema, filling
        missing fields with empty strings."""
        return {
            "title": article.get("title", ""),
            "content": article.get("content", ""),
            "publish_time": article.get("publish_time", ""),
            "url": article.get("url", "")
        }

    def crawl(self) -> List[Dict[str, Any]]:
        """Fetch and normalize all articles.

        Returns:
            Normalized article dicts; [] when the fetch yields nothing.
        """
        articles = self.fetch_articles()
        if not articles:
            return []

        results = []
        for article in articles:
            detail = self.parse_article(article)
            if detail:
                results.append(detail)

        return results
