#!/usr/bin/env python3

import argparse
import pandas as pd
import requests
import time
from tqdm import tqdm
from requests.exceptions import RequestException
from concurrent.futures import ThreadPoolExecutor, as_completed

def query_concurrently(zinc_ids, max_workers=10, delay=0.1, inchi=False):
    """
    Query the UniChem API for many ZINC IDs using a thread pool.

    Parameters
    ----------
    zinc_ids : iterable of str
        ZINC identifiers to look up.
    max_workers : int
        Maximum number of worker threads.
    delay : float
        Seconds to sleep after collecting each completed result, to avoid
        throttling.  (Note: this paces result collection, not submission —
        all requests are submitted up front.)
    inchi : bool
        If True, fetch InChI strings instead of cross-reference sources.

    Returns
    -------
    list of dict
        Flattened list of per-compound result records from all queries.
    """
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {executor.submit(query_unichem_src, zinc_id, inchi): zinc_id
                   for zinc_id in zinc_ids}
        for future in tqdm(as_completed(futures), total=len(futures), desc="Concurrent queries"):
            # A single failed query (e.g. an unexpected response shape) must
            # not abort the whole batch: report it and keep collecting.
            try:
                results.extend(future.result())
            except Exception as e:
                print(f"Error processing ZINC ID {futures[future]}: {e}")
            time.sleep(delay)  # Add delay to avoid throttling
    return results

def query_unichem_src(zinc_id, inchi):
    """
    Query the UniChem API for a single ZINC ID.

    Parameters
    ----------
    zinc_id : str
        ZINC identifier (looked up as UniChem source ID 9).
    inchi : bool
        If True, return the first compound's InChI; otherwise return one
        record per cross-referenced source.

    Returns
    -------
    list of dict
        Result records; an empty list on request failure or no match.
    """
    url = "https://www.ebi.ac.uk/unichem/api/v1/compounds"
    headers = {"Accept": "application/json"}
    payload = {
        "compound": zinc_id,
        "sourceID": 9,  # UniChem source 9 corresponds to ZINC
        "type": "sourceID"
    }

    try:
        # A timeout prevents a stalled connection from hanging the worker
        # thread indefinitely (requests has no default timeout).
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        response.raise_for_status()  # Raise an error for bad status codes
        data = response.json()

        compounds = data.get("compounds", [])
        if not compounds:
            return []

        results = []
        for compound in compounds:
            if inchi:
                # Guard against a missing/None "inchi" object; the previous
                # chained .get() raised AttributeError on None.
                inchi_obj = compound.get("inchi") or {}
                results.append({
                    "zinc_id": zinc_id,
                    "InChI": inchi_obj.get("inchi", ""),
                    "uci": compound.get("uci", 0)
                })
                # In InChI mode only the first compound is reported
                # (matches the original behavior).
                return results
            for source in compound.get("sources", []):
                results.append({
                    "zinc_id": zinc_id,
                    "compoundId": source.get("compoundId"),
                    "shortName": source.get("shortName"),
                    "url": source.get("url")
                })
        return results
    except RequestException as e:
        print(f"Error querying UniChem for ZINC ID {zinc_id}: {e}")
        return []

def main():
    """Parse CLI arguments, query UniChem for every ZINC ID, and write TSVs."""
    # Argument parsing
    parser = argparse.ArgumentParser(description="Query UniChem for ZINC IDs and save source info or InChI.")
    parser.add_argument("-i", "--input", required=True, help="Input TSV/CSV file with zinc_id column")
    parser.add_argument("-o", "--output", required=True, help="Output TSV file prefix")
    parser.add_argument("-w", "--workers", default=10, type=int, help="Maximum number of threads for concurrent execution")
    parser.add_argument("-d", "--delay", default=0.06, type=float, help="Delay in seconds between completing one request and starting the next")
    parser.add_argument("-n", "--min_src", default=10, type=int, help="Minimum number of sources for non-chemicalbook domestic entries")
    parser.add_argument("-c", "--inchi", action="store_true", help="Query InChI instead of source info")
    args = parser.parse_args()

    # Read input table; sep=None lets pandas sniff TSV vs CSV delimiters.
    try:
        df = pd.read_csv(args.input, sep=None, engine="python")
        if "zinc_id" not in df.columns:
            raise ValueError("Input TSV must contain a 'zinc_id' column.")

        # Get unique ZINC IDs
        zinc_ids = df["zinc_id"].unique()
        print(f"Found unique zinc id: {len(zinc_ids)}")
    except Exception as e:
        print(f"Error reading input file: {e}")
        return

    # Query UniChem for each ZINC ID
    all_results = query_concurrently(zinc_ids, max_workers=args.workers, delay=args.delay, inchi=args.inchi)

    # Convert results to DataFrame
    results_df = pd.DataFrame(all_results)

    if args.inchi:
        print(f"Found zinc id with InChI: {len(results_df)}")
        results_df.to_csv(args.output + ".inchi.tsv", sep="\t", index=False)
        print(f"InChI results saved to {args.output + '.inchi.tsv'}")
        return

    # Guard: an empty frame has no 'zinc_id' column, so the filtering below
    # would raise KeyError (e.g. when every request failed).
    if results_df.empty:
        print("No results returned from UniChem; nothing to write.")
        return

    # Number of cross-reference sources per zinc_id, broadcast to each row.
    nsrc = results_df['zinc_id'].value_counts()
    results_df['nsrc'] = results_df['zinc_id'].map(nsrc)

    # IDs with a chemicalbook source qualify unconditionally.
    chembk = results_df[results_df.shortName == "chemicalbook"]
    chembk = chembk.drop_duplicates('zinc_id')
    print(f"Found zinc id with chemicalbook entry: {len(chembk)}")

    # Remaining IDs qualify only with at least min_src total sources.
    no_chembk = results_df[~results_df.zinc_id.isin(chembk.zinc_id.to_list())]
    no_chembk = (no_chembk[no_chembk.nsrc >= args.min_src]
                 .drop_duplicates('zinc_id'))
    print(f"Found zinc id with enough other sources: {len(no_chembk)}")

    domestic = pd.concat([chembk, no_chembk])

    # Write results to output TSV
    try:
        results_df.to_csv(args.output + ".complete.tsv", sep="\t", index=False)
        print(f"Complete results saved to {args.output + '.complete.tsv'}")
        domestic.to_csv(args.output + ".domestic.tsv", sep="\t", index=False)
        print(f"Purchasable results saved to {args.output + '.domestic.tsv'}")
    except Exception as e:
        print(f"Error writing output file: {e}")

# Run the CLI entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
