# main.py (Revised with Token Usage Tracking)
import json
import asyncio
import os
from openai import AsyncOpenAI
from tqdm import tqdm
import aiolimiter

# Import all settings from the config file
import config

# --- Helper functions ---

def load_prompt(file_path):
    """Load the system prompt text from *file_path*.

    Terminates the program with exit status 1 when the file does not
    exist, since the pipeline cannot run without its prompt.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
    except FileNotFoundError:
        print(f"Error: Prompt file not found at {file_path}")
        # raise SystemExit rather than calling the site-injected exit()
        # helper, which is not guaranteed to exist in every interpreter
        # (e.g. frozen apps, embedded interpreters, python -S).
        raise SystemExit(1)

def load_datasets(file_paths):
    """Load and merge all input JSON datasets into a single list.

    Each file must contain a JSON array; its elements are appended to the
    combined result. Unreadable, unparsable, or non-list files are skipped
    with a warning so one bad input cannot abort the whole run.
    """
    all_items = []
    for path in file_paths:
        try:
            with open(path, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            print(f"Warning: Could not process file {path}. Error: {e}")
            continue
        if isinstance(data, list):
            all_items.extend(data)
        else:
            # Previously a non-list root was dropped silently; surface it.
            print(f"Warning: Skipping {path}: expected a JSON list, "
                  f"got {type(data).__name__}.")
    return all_items

# ==================== MODIFIED SECTION START ====================
def prepare_model_input(item):
    """Build the minimal payload forwarded to the model.

    Only the four whitelisted citation fields are kept; any field absent
    from *item* defaults to an empty string.
    """
    wanted_keys = (
        "citation_sentence",
        "citation_for",
        "citation_post",
        "reference_abstract",
    )
    return {key: item.get(key, "") for key in wanted_keys}
# ==================== MODIFIED SECTION END ======================

def load_processed_ids(file_path):
    """Return the set of item IDs already processed successfully.

    Scans the JSONL output file for resume support. A line counts as
    processed when it carries an ``input_item_id`` and its
    ``model_response`` does not record an ``error``. Malformed lines are
    skipped, including lines whose root is valid JSON but not an object —
    the original ``data.get`` on a list/scalar raised ``AttributeError``,
    which was not caught and crashed resumption.
    """
    processed_ids = set()
    if not os.path.exists(file_path):
        return processed_ids
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                continue
            if not isinstance(data, dict) or "input_item_id" not in data:
                continue
            # Missing model_response defaults to {} (treated as success),
            # matching the original behavior; a non-dict response would
            # make the `in` test raise, so it is skipped instead.
            response = data.get("model_response", {})
            if isinstance(response, dict) and "error" not in response:
                processed_ids.add(data["input_item_id"])
    return processed_ids

# --- Worker function updated to handle token tracking ---

async def worker(name, queue, client, system_prompt, limiter, output_file, lock, pbar, total_tokens_used):
    """A worker that fetches items from the queue, processes them, tracks tokens, and writes results.

    Loops until it dequeues a ``None`` sentinel (the producer pushes one
    per worker). For each item it calls the chat-completions API under the
    rate ``limiter``, parses the JSON response, and appends one JSON line
    to the shared ``output_file``. All shared mutable state — the output
    file and the ``total_tokens_used`` counters — is touched only while
    holding ``lock``. Any failure is captured as an ``error`` entry in the
    result so a single bad request never kills the worker.
    """
    while True:
        item = await queue.get()
        # None is the shutdown sentinel — stop consuming and let gather() finish.
        if item is None:
            break

        result = {"input_item_id": item.get("id")}
        try:
            model_input = prepare_model_input(item)
            # The limiter context throttles how many requests start per window.
            async with limiter:
                response = await client.chat.completions.create(
                    model=config.MODEL_NAME,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": json.dumps(model_input, ensure_ascii=False, indent=4)}
                    ],
                    response_format={"type": "json_object"},
                    timeout=config.REQUEST_TIMEOUT
                )
            
            # json_object response_format means the content should parse as JSON;
            # a parse failure falls through to the except below as an error result.
            model_output = json.loads(response.choices[0].message.content)
            result["model_response"] = model_output
            
            # Extract and store token usage
            if response.usage:
                token_usage = {
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens
                }
                result["token_usage"] = token_usage
                # Update shared token counters safely
                async with lock:
                    total_tokens_used["prompt"] += token_usage["prompt_tokens"]
                    total_tokens_used["completion"] += token_usage["completion_tokens"]
                    total_tokens_used["total"] += token_usage["total_tokens"]

        except Exception as e:
            # Deliberate best-effort: record the failure in the output line so
            # load_processed_ids() will retry this id on the next run.
            print(f"\nWorker {name}: API call for id {item.get('id')} failed. Error: {e}")
            result["model_response"] = {"error": str(e)}

        # Acquire lock to safely write the result to the shared file
        async with lock:
            output_file.write(json.dumps(result, ensure_ascii=False) + '\n')
        
        pbar.update(1)
        queue.task_done()

# --- Main function updated to manage and report token totals ---

async def main_async():
    """Main asynchronous function to manage the producer-consumer workflow and track token usage.

    Pipeline: load prompt and datasets, skip items already recorded as
    successful in the raw-output JSONL (resume support), then fan the
    remaining items out to CONCURRENT_REQUESTS worker tasks through an
    asyncio.Queue. Results are appended to the raw output file; aggregate
    token usage is printed at the end.
    """
    print("Initializing...")
    system_prompt = load_prompt(config.PROMPT_FILE_PATH)
    all_items = load_datasets(config.INPUT_FILE_PATHS)
    
    if not all_items:
        print("No items to process. Exiting.")
        return

    # Resume support: ids with an error-free model_response are skipped.
    processed_ids = load_processed_ids(config.RAW_OUTPUT_FILE_PATH)
    if processed_ids:
        print(f"Found {len(processed_ids)} already processed items. Resuming...")

    pending_items = [item for item in all_items if item.get("id") not in processed_ids]
    
    if not pending_items:
        print("All items have already been processed successfully. Exiting.")
        return
        
    print(f"Loaded {len(all_items)} total items. {len(pending_items)} items pending processing.")

    client = AsyncOpenAI(api_key=config.API_KEY, base_url=config.BASE_URL)
    # NOTE(review): rate limit is hard-coded at 100 requests per 60 s here
    # rather than read from config — confirm this matches the API quota.
    limiter = aiolimiter.AsyncLimiter(100, 60)
    
    queue = asyncio.Queue()
    # Single lock guards both the shared output file and the token counters.
    lock = asyncio.Lock()

    # Initialize dictionary for tracking total tokens
    total_tokens_used = {"prompt": 0, "completion": 0, "total": 0}

    # Open in append mode so a resumed run extends the existing JSONL file.
    with open(config.RAW_OUTPUT_FILE_PATH, 'a', encoding='utf-8') as raw_f:
        with tqdm(total=len(pending_items), desc="Processing Citations") as pbar:
            # Create worker tasks, passing the token tracker to them
            workers = [
                asyncio.create_task(worker(f"worker-{i}", queue, client, system_prompt, limiter, raw_f, lock, pbar, total_tokens_used))
                for i in range(config.CONCURRENT_REQUESTS)
            ]

            # Producer: enqueue all work, then one None sentinel per worker
            # so each worker's loop terminates after the queue drains.
            for item in pending_items:
                await queue.put(item)
            for _ in range(config.CONCURRENT_REQUESTS):
                await queue.put(None)

            # Wait for every worker to hit its sentinel and return.
            await asyncio.gather(*workers)

    print("\nAll API calls completed. Raw responses saved.")
    
    # Print final token usage report
    print("\n--- Token Usage Summary ---")
    print(f"Total Prompt Tokens:     {total_tokens_used['prompt']:,}")
    print(f"Total Completion Tokens: {total_tokens_used['completion']:,}")
    print(f"Total Tokens Used:       {total_tokens_used['total']:,}")
    print("---------------------------")
    
    print("\nYou can now run the post-processing script to generate the final output.")

if __name__ == "__main__":
    # Entry point: drive the whole async pipeline to completion.
    asyncio.run(main_async())