from typing import Dict, Any

import hashlib
import os
from datetime import datetime, timedelta, timezone

import pymongo
import structlog
from pymongo import MongoClient


class MongoDBPipeline:
    """Scrapy item pipeline that persists crawled items into MongoDB.

    Documents are keyed by an MD5 hash of the item URL, so re-crawling the
    same page updates the existing record instead of inserting a duplicate.
    A TTL index on ``expire_at`` lets MongoDB purge stale documents
    automatically ~30 days after they were stored.
    """

    def __init__(self):
        self.logger = structlog.get_logger(self.__class__.__name__)
        # Set up in open_spider(); None until the spider starts.
        self.client = None
        self.db = None
        self.collection = None
        # Counters emitted as a summary log line in close_spider().
        self.stats = {
            "inserted": 0,
            "updated": 0,
            "duplicates": 0,
            "errors": 0
        }

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook; no crawler settings are consumed currently."""
        return cls()

    def open_spider(self, spider):
        """Connect to MongoDB and ensure the collection's indexes exist.

        Connection parameters come from the ``MONGODB_URI`` /
        ``MONGODB_DATABASE`` / ``MONGODB_COLLECTION`` environment variables,
        with docker-compose-style defaults.

        Raises:
            Exception: re-raised after logging on any connection/index
                failure, so the crawl aborts instead of silently dropping
                every item.
        """
        try:
            # MongoDB connection settings (env-overridable).
            mongo_uri = os.getenv("MONGODB_URI", "mongodb://mongodb:27017/")
            mongo_db = os.getenv("MONGODB_DATABASE", "ai_writing")
            mongo_collection = os.getenv("MONGODB_COLLECTION", "crawled_data")

            self.client = MongoClient(mongo_uri)
            self.db = self.client[mongo_db]
            self.collection = self.db[mongo_collection]

            # create_index is idempotent: a no-op if the index already exists.
            self.collection.create_index([("url_hash", 1)], unique=True)
            self.collection.create_index([("source", 1)])
            self.collection.create_index([("fetch_time", -1)])
            # TTL index: with expireAfterSeconds=0 the stored "expire_at"
            # value *is* the deletion moment (MongoDB compares in UTC).
            self.collection.create_index(
                [("expire_at", 1)],
                expireAfterSeconds=0  # TTL index
            )

            self.logger.info(
                "MongoDB pipeline connected",
                spider=spider.name,
                database=mongo_db,
                collection=mongo_collection
            )

        except Exception as e:
            self.logger.error(
                "Failed to connect to MongoDB",
                error=str(e),
                spider=spider.name
            )
            raise

    def process_item(self, item: Dict[str, Any], spider):
        """Upsert one crawled item into MongoDB, keyed by its URL hash.

        Storage errors never drop the item: they are counted, logged, and
        the item is passed through to any later pipeline stages unchanged.

        Returns:
            The (unmodified) item, per Scrapy pipeline convention.
        """
        try:
            # MD5 here is a compact dedup key, not a security primitive.
            url_hash = hashlib.md5(item["url"].encode()).hexdigest()

            # Timezone-aware UTC: pymongo treats naive datetimes as UTC and
            # the TTL monitor compares in UTC, so naive local time would skew
            # the 30-day expiry on non-UTC hosts.
            now = datetime.now(timezone.utc)

            fetch_time = item["fetch_time"]
            if isinstance(fetch_time, str):
                fetch_time = datetime.fromisoformat(fetch_time)

            # Update document. "_id" is deliberately absent: $set may not
            # touch the immutable _id, and on insert the upsert filter
            # supplies it anyway.
            fields = {
                "url": item["url"],
                "url_hash": url_hash,
                "source": item.get("source", spider.name),
                "content": self._prepare_content(item.get("content", {})),
                "fetch_time": fetch_time,
                "expire_at": now + timedelta(days=30),  # Auto-expire after 30 days
                "metadata": {
                    "spider": spider.name,
                    "spider_version": item.get("spider_version", "1.0.0"),
                    "crawl_id": item.get("crawl_id"),
                    "status_code": item.get("status_code"),
                    "headers": item.get("headers", {}),
                    "encoding": item.get("encoding")
                },
                "updated_at": now
            }

            # $setOnInsert preserves the original created_at on re-crawls;
            # a full replace_one would clobber it on every update.
            result = self.collection.update_one(
                {"_id": url_hash},
                {"$set": fields, "$setOnInsert": {"created_at": now}},
                upsert=True
            )

            if result.upserted_id:
                self.stats["inserted"] += 1
                self.logger.debug(
                    "Document inserted in MongoDB",
                    url=item["url"],
                    spider=spider.name
                )
            else:
                self.stats["updated"] += 1
                self.logger.debug(
                    "Document updated in MongoDB",
                    url=item["url"],
                    spider=spider.name
                )

        except pymongo.errors.DuplicateKeyError:
            # Possible under concurrent upserts racing on the same _id.
            self.stats["duplicates"] += 1
            self.logger.debug(
                "Duplicate document skipped",
                url=item.get("url"),
                spider=spider.name
            )
        except Exception as e:
            self.stats["errors"] += 1
            self.logger.error(
                "Failed to store item in MongoDB",
                url=item.get("url"),
                error=str(e),
                spider=spider.name
            )

        return item

    def _prepare_content(self, content):
        """Normalize the item's content field into a dict document.

        Dicts pass through unchanged; strings become a text document with
        empty companion fields; anything else is stringified under "raw".
        """
        if isinstance(content, dict):
            return content
        elif isinstance(content, str):
            return {
                "text": content,
                "markdown": None,
                "images": [],
                "metadata": {}
            }
        else:
            return {
                "raw": str(content),
                "metadata": {}
            }

    def close_spider(self, spider):
        """Log the run's insert/update/duplicate/error counters and disconnect."""
        if self.client:
            self.logger.info(
                "MongoDB pipeline statistics",
                spider=spider.name,
                **self.stats
            )
            self.client.close()