#!/usr/bin/env python3
import argparse
import requests
from bs4 import BeautifulSoup
import re
import csv
from tqdm import tqdm
import time

def scrape_zinc_page(page_number, catalog, retries=3, timeout=30):
    """Scrape one page of a ZINC20 catalog and return its ZINC IDs.

    Args:
        page_number: 1-based page index within the catalog listing.
        catalog: ZINC20 catalog short name (e.g. 'mcee').
        retries: number of attempts before giving up on this page.
        timeout: per-request timeout in seconds; without one a stalled
            connection would block the whole scrape forever.

    Returns:
        List of ZINC IDs in the canonical 12-digit form ('ZINC' + zero-padded
        digits), or an empty list if every attempt failed.
    """
    url = f'https://zinc20.docking.org/catalogs/{catalog}/substances/?page={page_number}'
    total_attempts = retries
    while retries > 0:
        try:
            # timeout= keeps a hung connection from stalling the run indefinitely
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()  # Raise an HTTPError if the HTTP request returned an unsuccessful status code

            soup = BeautifulSoup(response.content, 'html.parser')
            zinc_ids = []
            for element in soup.select('#print a'):
                match = re.search(r'ZINC(\d+)', element.get_text())
                if match:
                    # Zero-pad to 12 digits, the canonical ZINC20 ID width.
                    zinc_ids.append(f'ZINC{match.group(1).zfill(12)}')
            return zinc_ids
        except Exception as e:
            # Broad catch is deliberate: both network errors and parse errors
            # should trigger a retry rather than abort the whole scrape.
            print(f'Warning: Error occurred while scraping page {page_number}: {e}. Retrying...')
            retries -= 1
            time.sleep(1)  # Wait for 1 second before retrying
    print(f'Failed to scrape page {page_number} after {total_attempts} attempts.')
    return []

def main(args):
    """Scrape pages args.start..args.page_number of args.catalog and write
    the collected ZINC IDs as a one-column CSV to args.output.

    Args:
        args: argparse.Namespace with attributes `start`, `page_number`,
            `catalog`, `output`, and `verbose`.
    """
    all_zinc_ids = []
    for page_number in tqdm(range(args.start, args.page_number + 1), desc="Scraping pages"):
        zinc_ids = scrape_zinc_page(page_number, args.catalog)
        all_zinc_ids.extend(zinc_ids)
        if args.verbose:
            print(f'Page {page_number}: {len(zinc_ids)} IDs retrieved')

    # Explicit encoding avoids platform-dependent output; newline='' is the
    # documented requirement for csv.writer file objects.
    with open(args.output, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['ZINC_ID'])
        # Each ID goes on its own row under the single header column.
        writer.writerows([zinc_id] for zinc_id in all_zinc_ids)

    print(f'Total {len(all_zinc_ids)} ZINC IDs written to {args.output}')

if __name__ == '__main__':
    # Command-line interface: catalog/page-range selection plus output path.
    cli = argparse.ArgumentParser(description='Scrape ZINC catalog for IDs.')
    cli.add_argument('-c', '--catalog', type=str, default='mcee', help='ZINC catalog to scrape')
    cli.add_argument('-n', '--page-number', type=int, default=1, help='Number of page to scrape to')
    cli.add_argument('-s', '--start', type=int, default=1, help='Number of page to start')
    cli.add_argument('-o', '--output', type=str, required=True, help='CSV file path to store ZINC IDs')
    cli.add_argument('-v', '--verbose', action='store_true', help='Enable verbose mode')
    main(cli.parse_args())

