"""
@function
Update anime episodes.

@cron
40 * * * *

@command
python3 update.py

@detail
This script fetches episodes from '末日動漫資源庫' via RSS
and saves them into csv file(s) for the matching date.
The order of the contents is descending by published_time.

The fields of csv files:
episode, published_time, author, link, magnet_uri

link: https://share.acgnx.net/
rss url: https://share.acgnx.net/rss.xml
log path: ~/.my-tools/log/anime/update.log
data path: ~/.my-tools/data/anime/episodes_{date}.csv
"""
import os
import csv
import time
import logging
import feedparser
from datetime import datetime
from logging.handlers import RotatingFileHandler

# Feed source and local storage locations (see module docstring).
rss_url = 'https://share.acgnx.net/rss.xml'
log_file = os.path.expanduser('~/.my-tools/log/anime/update.log')
data_dir = os.path.expanduser('~/.my-tools/data/anime/')

# Ensure the directories for logging and data exist
os.makedirs(os.path.dirname(log_file), exist_ok=True)
os.makedirs(data_dir, exist_ok=True)

# Set up logging: rotating file handler so the log cannot grow unbounded
# under the hourly cron schedule.
log_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler = RotatingFileHandler(log_file, maxBytes=100*1024*1024, backupCount=2)  # 100 MB per file, 2 backup
handler.setFormatter(log_formatter)
logger = logging.getLogger('update')
logger.setLevel(logging.INFO)  # per-entry logger.debug(...) calls below are filtered out
logger.addHandler(handler)

def fetch_episodes(rss_url):
    """Fetch episode entries from the RSS feed.

    Retries up to 5 times, 3 seconds apart, when the feed comes back
    empty (network hiccup or transient server error).

    Args:
        rss_url: URL of the RSS feed to parse.

    Returns:
        list[dict]: one dict per feed entry with keys
        episode, published_time, author, link, magnet_uri.
    """
    logger.info(f"Fetching episodes data from [{rss_url}]...")
    feed = feedparser.parse(rss_url)
    feed_len = len(feed.entries)
    retry_sec = 3
    retry_cnt = 5
    while feed_len == 0 and retry_cnt != 0:
        logger.error(f"Read RSS: {rss_url} failed. Retry in {retry_sec} second(s)")
        time.sleep(retry_sec)
        feed = feedparser.parse(rss_url)
        # Bug fix: refresh the count after re-parsing, otherwise a
        # successful retry kept looping until retry_cnt was exhausted
        # and the success log below reported a stale 0.
        feed_len = len(feed.entries)
        retry_cnt -= 1

    episodes = []
    for entry in feed.entries: # type(entries): list, type(entry): dict
        episode = {
            'episode': entry.title,
            'published_time': entry.published,
            'author': entry.get('author', 'Unknown'),
            'link': entry.id,
            # Second link is expected to be the magnet enclosure;
            # fall back to a placeholder when it is absent.
            'magnet_uri': entry.links[1].href if len(entry.links) > 1 else 'Nothing'
        }
        logger.debug(f"Fetched episode: {episode}")
        episodes.append(episode)

    logger.info(f"Fetching success, {feed_len} entries has been loaded.")
    return episodes

def read_existing_entries(filename):
    """Return the set of 'link' values already stored in *filename*.

    A missing file yields an empty set, so callers can treat a fresh
    date file as "nothing saved yet".
    """
    if not os.path.exists(filename):
        return set()

    with open(filename, mode='r', encoding='utf-8') as csvfile:
        # 'link' uniquely identifies an entry, so it serves as the dedup key
        return {row['link'] for row in csv.DictReader(csvfile)}

def save_to_csv(episodes):
    """Append new episodes to per-date csv files under data_dir.

    Episodes are routed to episodes_{YYYY-MM-DD}.csv by their published
    date; entries whose 'link' already exists in the target file are
    skipped.

    Args:
        episodes: list of dicts as produced by fetch_episodes().

    Returns:
        tuple[int, set[str]]: (number of entries saved, set of file
        paths that were modified).
    """
    logger.info("Saving episodes data into csv file...")
    fieldnames = ['episode', 'published_time', 'author', 'link', 'magnet_uri']
    entries_saved = []
    modified_files = set()
    # Cache of filename -> set of known links: the original re-read the
    # whole csv file for every episode, which was O(n^2) in file size.
    known_links = {}
    for ep in episodes:
        try:
            pub_time = datetime.strptime(ep['published_time'], '%a, %d %b %Y %H:%M:%S %z')
        except ValueError as e:
            logger.error(f"Error parsing date: {e}")
            continue

        date_str = pub_time.strftime('%Y-%m-%d')
        filename = os.path.join(data_dir, f"episodes_{date_str}.csv")
        if filename not in known_links:
            known_links[filename] = read_existing_entries(filename)

        # skip the entries saved before
        if ep['link'] in known_links[filename]:
            continue

        with open(filename, mode='a', newline='', encoding='utf-8') as csvfile:
            modified_files.add(filename)
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

            # The file always exists once opened in append mode; write
            # the header only while it is still empty (freshly created).
            if os.stat(filename).st_size == 0:
                writer.writeheader()

            writer.writerow(ep)
            known_links[filename].add(ep['link'])
            entries_saved.append(ep)
            logger.debug(f"Entry \"{ep['episode']}\" has been saved into file \"{filename}\".")

    logger.info(f"Saving successed, {len(entries_saved)} entries saved into csv file(s).")
    return len(entries_saved), modified_files

def sort_csv(modified_files):
    """Rewrite each modified csv file sorted by published_time, newest first.

    Args:
        modified_files: iterable of csv file paths to re-sort.

    Returns:
        bool: True when every file was sorted successfully, False if
        any file failed (the failure is logged and the rest continue).
    """
    success = True
    for filename in modified_files:
        try:
            with open(filename, mode='r', newline='', encoding='utf-8') as csvfile:
                entries = list(csv.DictReader(csvfile))

            entries.sort(key=lambda x: datetime.strptime(x['published_time'], '%a, %d %b %Y %H:%M:%S %z'), reverse=True)

            with open(filename, mode='w', newline='', encoding='utf-8') as csvfile:
                fieldnames = ['episode', 'published_time', 'author', 'link', 'magnet_uri']
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(entries)
            # Bug fix: the log messages contained a literal placeholder
            # instead of the file actually being processed.
            logger.info(f"Sort csv file by published_time: {filename}")

        except Exception as e:
            logger.error(f"Failed to sort csv file {filename}: {e}")
            success = False

    return success

def main():
    """Run one update cycle: fetch the feed, persist new entries, re-sort touched files."""
    logger.info("--------------------------Starting script: update_episodes_data.py--------------------------")
    episodes = fetch_episodes(rss_url)
    saved_len, modified_files = save_to_csv(episodes)
    if modified_files:
        logger.info(f"Sorting files {modified_files} by published_time...")
        sort_csv(modified_files)
    logger.info(f"Episodes read : {len(episodes)}")
    logger.info(f"Episodes saved: {saved_len}")
    logger.info("--------------------------Finished script: update_episodes_data.py--------------------------")

if __name__ == "__main__":
    main()