# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

import csv
import pymongo
from itemadapter import ItemAdapter
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine

# Make sure to import your Item and Model definitions correctly
from items import NovelItem
from database.models import Base, Novelsinfomodel

class CsvPipeline:
    """Write scraped ``NovelItem`` rows to a local CSV file.

    The file is opened once per spider run and one row is appended per
    accepted item. Items that are not ``NovelItem`` pass through
    unchanged so downstream pipelines can handle them.
    """

    def open_spider(self, spider):
        """Open the output file and emit the header row."""
        # newline='' lets the csv module control line endings (csv docs).
        self.file = open('novels_scrapy_output.csv', 'w', newline='', encoding='utf-8')
        self.writer = csv.writer(self.file)
        self.writer.writerow(['title', 'url', 'category'])
        spider.logger.info("CSV pipeline opened.")

    def close_spider(self, spider):
        """Flush and close the CSV file at spider shutdown."""
        self.file.close()
        spider.logger.info("CSV pipeline closed.")

    def process_item(self, item, spider):
        """Append one CSV row per ``NovelItem``; always return the item."""
        if isinstance(item, NovelItem):
            # Scrapy items raise KeyError when an unpopulated field is
            # indexed directly; ItemAdapter.get writes an empty cell
            # instead of dropping the item with an exception.
            adapter = ItemAdapter(item)
            self.writer.writerow([
                adapter.get('title', ''),
                adapter.get('url', ''),
                adapter.get('category', ''),
            ])
        return item

class MySQLPipeline:
    """Persist ``NovelItem``s to MySQL via SQLAlchemy, de-duplicated by URL."""

    def __init__(self, db_url):
        # Engine/session factory are created lazily in open_spider so
        # construction never touches the database.
        self.db_url = db_url
        self.engine = None
        self.SessionLocal = None

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from the MYSQL_CONNECTION_STRING setting."""
        return cls(
            db_url=crawler.settings.get('MYSQL_CONNECTION_STRING')
        )

    def open_spider(self, spider):
        """Create the engine, ensure tables exist, and prepare sessions."""
        self.engine = create_engine(self.db_url)
        Base.metadata.create_all(self.engine)
        self.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
        spider.logger.info("MySQL pipeline opened.")

    def close_spider(self, spider):
        """Dispose of the connection pool at spider shutdown.

        Guarded because open_spider may have failed (e.g. missing or bad
        connection string), leaving ``self.engine`` as ``None``; an
        unconditional dispose() would raise AttributeError and mask the
        original error during shutdown.
        """
        if self.engine is not None:
            self.engine.dispose()
        spider.logger.info("MySQL pipeline closed.")

    def process_item(self, item, spider):
        """Insert the item unless a row with the same URL already exists.

        Errors are logged and rolled back rather than raised, so one bad
        row does not abort the crawl. The item is always passed along.
        """
        if not isinstance(item, NovelItem):
            return item

        session = self.SessionLocal()
        try:
            # De-duplicate on URL before inserting.
            exists = session.query(Novelsinfomodel).filter_by(url=item['url']).first()
            if not exists:
                novel_db = Novelsinfomodel(
                    title=item['title'],
                    url=item['url'],
                    type=item['category']  # model column is 'type'; item field is 'category'
                )
                session.add(novel_db)
                session.commit()
        except Exception as e:
            session.rollback()
            spider.logger.error(f"Error storing item to MySQL: {e}")
        finally:
            session.close()
        return item

class MongoPipeline:
    """Upsert ``NovelItem``s into MongoDB, one collection per spider name."""

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings.

        The database name is configurable via MONGO_DATABASE and falls
        back to the previous hard-coded default 'spider_db'.
        """
        return cls(
            mongo_uri=crawler.settings.get('MONGO_CONNECTION_STRING'),
            mongo_db=crawler.settings.get('MONGO_DATABASE', 'spider_db')
        )

    def open_spider(self, spider):
        """Connect the client and select the target database."""
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
        spider.logger.info("Mongo pipeline opened.")

    def close_spider(self, spider):
        """Close the client connection at spider shutdown."""
        self.client.close()
        spider.logger.info("Mongo pipeline closed.")

    def process_item(self, item, spider):
        """Upsert the item keyed by URL; always return the item.

        Filters on NovelItem like the CSV and MySQL pipelines do — other
        item types may lack a 'url' field, and indexing one directly
        would raise KeyError and drop the item.
        """
        if not isinstance(item, NovelItem):
            return item

        adapter = ItemAdapter(item)
        collection_name = spider.name
        # upsert=True inserts on first sight and overwrites on re-crawl.
        self.db[collection_name].update_one(
            {'url': adapter.get('url')},
            {'$set': adapter.asdict()},
            upsert=True
        )
        return item
