import os
import sys
import json
import time
import requests
import zipfile
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Load the crawler configuration from config.json located next to this script.
_config_file = os.path.join(os.path.dirname(__file__), 'config.json')
with open(_config_file, 'r', encoding='utf-8') as config_fp:
    # The JSON document nests the actual settings under a top-level "config" key.
    config = json.load(config_fp)['config']

class ComicDownloader:
    """Download every image of one comic album and pack them into a zip archive.

    Settings (headers, proxy, retry count, output directory) come from the
    module-level ``config`` loaded from config.json.
    """

    def __init__(self, comic_id):
        """
        Args:
            comic_id: Album identifier used in the site URL and output file names.
        """
        self.comic_id = comic_id
        self.headers = config.get('request_headers', {})
        self.proxy = config.get('proxy')
        self.max_retry = config.get('max_retry', 3)
        self.download_path = config.get('download_path', './tmp')
        self.zip_path = os.path.join(self.download_path, f'jm_{self.comic_id}.zip')
        # Album page URL; also the base for resolving relative image URLs.
        # (Fixes a bug where _parse_images referenced an undefined `base_url`.)
        self.base_url = f"https://jmcomic1.com/album/{self.comic_id}/"

    def _make_request(self, url):
        """GET *url* with up to ``max_retry`` attempts (2 s pause between tries).

        Returns:
            requests.Response: a successful (2xx) response.

        Raises:
            Exception: after all retries fail; chained to the last network error.
        """
        session = requests.Session()
        if self.proxy:
            session.proxies.update({'http': self.proxy, 'https': self.proxy})

        try:
            last_error = None
            for _ in range(self.max_retry):
                try:
                    resp = session.get(url, headers=self.headers, timeout=30)
                    resp.raise_for_status()
                    return resp
                except requests.RequestException as e:
                    # Narrowed from bare Exception: only network/HTTP errors
                    # should trigger a retry; programming errors must surface.
                    last_error = e
                    print(f'Request failed: {str(e)}, retrying...')
                    time.sleep(2)
            # Keep the generic Exception type so existing callers that catch
            # Exception still work; chain the root cause for debugging.
            raise Exception(f'Failed after {self.max_retry} retries') from last_error
        finally:
            # Close the session so pooled connections are not leaked.
            # Response bodies are fully read by then (no stream=True).
            session.close()

    def _parse_images(self, html, base_url=None):
        """Extract absolute image URLs from the album page HTML.

        Args:
            html: The album page markup.
            base_url: Base for resolving relative 'data-src' values;
                defaults to ``self.base_url``.

        Returns:
            list[str]: absolute image URLs in page order.
        """
        base = base_url if base_url is not None else self.base_url
        soup = BeautifulSoup(html, 'html.parser')
        # Adjust the selector to the site's actual DOM structure.
        # Lazy-loaded images keep the real URL in 'data-src'; skip tags
        # without it instead of raising KeyError.
        return [urljoin(base, img['data-src'])
                for img in soup.select('.image-item img')
                if img.has_attr('data-src')]

    def download(self):
        """Fetch the album page, download every image, and write the zip.

        Returns:
            str: path of the created zip file.

        Raises:
            Exception: on any failure; a partially written zip is removed
                first so callers never see a corrupt archive.
        """
        os.makedirs(self.download_path, exist_ok=True)

        try:
            # Fetch the album page and collect the image URLs.
            resp = self._make_request(self.base_url)
            img_urls = self._parse_images(resp.text)
            if not img_urls:
                # An empty list means the selector found nothing; failing is
                # better than silently reporting an empty zip as success.
                raise Exception(f'No images found for album {self.comic_id}')

            # Download each image and pack it straight into the archive.
            with zipfile.ZipFile(self.zip_path, 'w') as zipf:
                for idx, url in enumerate(img_urls, 1):
                    img_data = self._make_request(url).content
                    zipf.writestr(f'{self.comic_id}_{idx:03d}.jpg', img_data)

            return self.zip_path
        except Exception:
            if os.path.exists(self.zip_path):
                os.remove(self.zip_path)
            # Bare raise preserves the original traceback (was `raise e`).
            raise

if __name__ == '__main__':
    # CLI entry point: expect exactly one positional argument, the comic id.
    args = sys.argv[1:]
    if len(args) != 1:
        print("Usage: python jm_crawler.py <comic_id>")
        sys.exit(1)

    try:
        archive = ComicDownloader(args[0]).download()
    except Exception as exc:
        print(f'ERROR:{str(exc)}')
        sys.exit(1)
    else:
        print(f'SUCCESS:{archive}')