import json
import re
from datetime import datetime
from typing import Any, Dict, Iterator, List, Optional
from urllib.parse import parse_qs, urlencode, urlparse

import scrapy

from ..base.base_spider import BaseSpider


class WeiboSpider(BaseSpider):
    """Spider that searches Weibo for a keyword and yields parsed statuses.

    Results come from Weibo's JSON search endpoint; pagination follows the
    ``page`` query parameter of the current URL. A ``keyword`` must be
    supplied at construction time or the spider logs an error and stops.
    """

    name = "weibo"
    allowed_domains = ["weibo.com", "api.weibo.com"]

    # Compiled once at class creation; matches Weibo-style "#tag#" hashtags
    # and captures the tag text between the '#' delimiters.
    HASHTAG_RE = re.compile(r'#([^#\s]+)#')

    def __init__(self, keyword=None, *args, **kwargs):
        """Store the search keyword and per-domain rate limits.

        Args:
            keyword: Search term to query Weibo for. May be None, in which
                case ``start_requests`` refuses to run.
        """
        super().__init__(*args, **kwargs)
        self.keyword = keyword
        self.rate_limits = {
            'weibo.com': 5,  # 5 requests per minute
            'api.weibo.com': 10
        }

    def start_requests(self):
        """Kick off the crawl with a search request for ``self.keyword``."""
        if not self.keyword:
            self.logger.error("No keyword provided for Weibo spider")
            return

        # Example Weibo search API endpoint (would need proper authentication
        # in production).
        base_url = "https://weibo.com/ajax/statuses/topic_band"
        params = {
            "q": self.keyword,
            "page": 1
        }

        # urlencode percent-escapes the keyword; the previous manual join
        # produced malformed URLs for keywords containing '&', '=', spaces,
        # or non-ASCII characters.
        yield self.fetch(f"{base_url}?{urlencode(params)}")

    def fetch(self, url: str, **kwargs) -> scrapy.Request:
        """Build a tracked Request for *url*.

        Increments the spider's request counter and tags the request meta
        with ``source="weibo"``; extra keyword arguments are merged into
        the meta dict.
        """
        self.stats["requests_made"] += 1
        self.log_progress(f"Fetching URL: {url}", level="debug")

        return scrapy.Request(
            url,
            callback=self.parse,
            errback=self.handle_error,
            meta={
                "source": "weibo",
                **kwargs
            }
        )

    def parse(self, response: scrapy.http.Response) -> Iterator[Dict[str, Any]]:
        """Parse one JSON search response; yield items and follow-up requests.

        This is a generator (it yields), so the return annotation is
        ``Iterator`` rather than ``List``; the old ``return items`` at the
        end of the method was a generator return value that Scrapy ignores,
        and has been dropped.
        """
        items_count = 0

        try:
            # Weibo's search endpoint returns JSON.
            data = json.loads(response.text)

            # Defensive navigation: "data" may be absent or null.
            statuses = (data.get("data") or {}).get("statuses") or []
            for status in statuses:
                item_data = self.extract_weibo_item(status)
                if not item_data:
                    continue

                item = self.create_item(response, item_data)
                # Only count and emit items that pass validation in store().
                if item and self.store(item):
                    items_count += 1
                    yield item

            # Follow pagination when the API reports more results.
            if "next_cursor" in data:
                next_url = self.build_next_url(response.url, data["next_cursor"])
                if next_url:
                    yield self.fetch(next_url)

        except json.JSONDecodeError as e:
            self.logger.error(
                "Failed to parse JSON response",
                url=response.url,
                error=str(e)
            )
        except Exception as e:
            # Boundary catch: one malformed response must not kill the crawl.
            self.logger.error(
                "Error parsing Weibo response",
                url=response.url,
                error=str(e)
            )

        self.log_progress(
            f"Parsed {items_count} items from {response.url}",
            items_count=items_count
        )

    def extract_weibo_item(self, status: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Map one raw Weibo status dict to the pipeline item shape.

        Returns None (and logs a warning) if extraction fails, so callers
        can skip bad statuses without aborting the page.
        """
        try:
            # Extract text content
            text = self.clean_text(status.get("text", ""))

            # Extract user information
            user = status.get("user", {})
            author = user.get("screen_name", "Unknown")

            # Collect the large-size URL of every attached picture.
            images = [
                pic["large"]["url"]
                for pic in status.get("pics", [])
                if "large" in pic
            ]

            # Engagement metrics (Weibo calls likes "attitudes").
            metrics = {
                "reposts": status.get("reposts_count", 0),
                "comments": status.get("comments_count", 0),
                "likes": status.get("attitudes_count", 0)
            }

            return {
                "content": {
                    "text": text,
                    "markdown": None,  # Could convert to markdown if needed
                    "images": images,
                    "metadata": {
                        "author": author,
                        "author_id": user.get("id"),
                        "publish_date": status.get("created_at"),
                        "weibo_id": status.get("id"),
                        "tags": self.extract_hashtags(text),
                        "metrics": metrics
                    }
                }
            }

        except Exception as e:
            self.logger.warning(
                "Failed to extract Weibo item",
                error=str(e)
            )
            return None

    def extract_hashtags(self, text: str) -> List[str]:
        """Return the unique hashtag names in *text* (without '#' delimiters).

        Order is unspecified (set-based dedup), matching prior behavior.
        """
        return list(set(self.HASHTAG_RE.findall(text)))

    def build_next_url(self, current_url: str, next_cursor: str) -> Optional[str]:
        """Return the URL of the next results page, or None when unknown.

        Pagination simply increments the ``page`` query parameter.
        NOTE(review): ``next_cursor`` is currently unused — if the endpoint
        expects a cursor parameter instead of page numbers, this needs to be
        confirmed against the API and wired in.
        """
        parsed = urlparse(current_url)
        params = parse_qs(parsed.query)

        if "page" not in params:
            # Previously the unchanged URL was returned here, which would
            # re-request the same page indefinitely; signal "no next page"
            # instead (the caller checks truthiness).
            return None

        params["page"] = [str(int(params["page"][0]) + 1)]
        new_query = urlencode(params, doseq=True)
        return f"{parsed.scheme}://{parsed.netloc}{parsed.path}?{new_query}"

    def store(self, item: Dict[str, Any]) -> bool:
        """Validate *item* and report success.

        Persistence would be handled by pipelines in production; here we
        only validate and log.
        """
        if self.validate_item(item):
            self.log_progress(
                "Item stored successfully",
                level="debug",
                url=item.get("url")
            )
            return True
        return False