{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# imports\n",
        "\n",
        "import os\n",
        "import re\n",
        "import math\n",
        "import json\n",
        "import random\n",
        "from dotenv import load_dotenv\n",
        "from huggingface_hub import login\n",
        "import matplotlib.pyplot as plt\n",
        "import numpy as np\n",
        "import pickle\n",
        "from collections import Counter, defaultdict\n",
        "from openai import OpenAI\n",
        "import pandas as pd\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# environment\n",
        "\n",
        "# Load keys from a .env file; override=True lets .env values win over stale shell vars\n",
        "load_dotenv(override=True)\n",
        "os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n",
        "os.environ['HF_TOKEN'] = os.getenv('HF_TOKEN', 'your-key-if-not-using-env')\n",
        "\n",
        "# Log in to HuggingFace\n",
        "hf_token = os.environ['HF_TOKEN']\n",
        "login(hf_token, add_to_git_credential=True)\n",
        "\n",
        "# Import custom classes from the repo root (two directories up)\n",
        "import sys\n",
        "sys.path.append('../../') \n",
        "from items import Item\n",
        "from testing import Tester\n",
        "\n",
        "# Setup\n",
        "openai = OpenAI()  # client reads OPENAI_API_KEY from the environment\n",
        "%matplotlib inline\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Configuration \n",
        "\n",
        "TRAINING_SIZE = 1000  # Options: 500, 1000, 2000 (the balanced sets built in Step 2; other values fall back to 2000)\n",
        "PROMPT_STRATEGY = 'expert'  # Options: 'baseline', 'detailed', 'concise', 'range_aware', 'expert' (keys of PROMPT_STRATEGIES)\n",
        "N_EPOCHS = 1  # single epoch; increase with care on small datasets\n",
        "VALIDATION_SPLIT = 0.1  # 10% for validation\n",
        "\n",
        "print(f\"\"\"\n",
        "Configuration:\n",
        "Training Size: {TRAINING_SIZE} examples\n",
        "Prompt Strategy: {PROMPT_STRATEGY}\n",
        "Epochs: {N_EPOCHS}\n",
        "Validation: {VALIDATION_SPLIT*100:.0f}%\n",
        "\"\"\")\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Prompt Strategies\n",
        "\n",
        "# Each entry maps a strategy name to its system prompt plus a short description;\n",
        "# the PROMPT_STRATEGY constant selects which one is used for training and inference.\n",
        "PROMPT_STRATEGIES = {\n",
        "    'baseline': {\n",
        "        'system': \"You estimate prices of items. Reply only with the price, no explanation\",\n",
        "        'description': 'Original baseline prompt'\n",
        "    },\n",
        "    'detailed': {\n",
        "        'system': \"\"\"You are an expert price estimator for retail products. \n",
        "Analyze the product description carefully and estimate its market price in USD. \n",
        "Consider factors like brand, features, specifications, and category. \n",
        "Reply only with the price in format: Price is $XX.XX\"\"\",\n",
        "        'description': 'Detailed instruction with context'\n",
        "    },\n",
        "    'concise': {\n",
        "        'system': \"Estimate product price from description. Return only: Price is $XX.XX\",\n",
        "        'description': 'Ultra-concise instruction'\n",
        "    },\n",
        "    'range_aware': {\n",
        "        'system': \"\"\"You estimate retail product prices (typically $1-999). \n",
        "Analyze the description and estimate the most likely market price. \n",
        "Reply only with: Price is $XX.XX\"\"\",\n",
        "        'description': 'Includes price range context'\n",
        "    },\n",
        "    'expert': {\n",
        "        'system': \"\"\"You are a pricing analyst with expertise in consumer electronics, appliances, and retail products.\n",
        "Based on product features, brand, and specifications, estimate the typical retail price.\n",
        "Format: Price is $XX.XX\"\"\",\n",
        "        'description': 'Expert persona with domain knowledge'\n",
        "    }\n",
        "}\n",
        "\n",
        "# List every strategy, marking the currently selected one\n",
        "print(\"Available Prompt Strategies:\\n\")\n",
        "for name, config in PROMPT_STRATEGIES.items():\n",
        "    indicator = \"👉 \" if name == PROMPT_STRATEGY else \"   \"\n",
        "    print(f\"{indicator}{name.upper()}: {config['description']}\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Step 1: Load and Analyze Data\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Load the pickle files \n",
        "\n",
        "# NOTE(review): pickle.load can execute arbitrary code -- these are assumed to be\n",
        "# the trusted train/test sets produced earlier in the project; verify provenance.\n",
        "with open('../../train.pkl', 'rb') as file:\n",
        "    train = pickle.load(file)\n",
        "\n",
        "with open('../../test.pkl', 'rb') as file:\n",
        "    test = pickle.load(file)\n",
        "\n",
        "print(f\"Loaded {len(train):,} training items\")\n",
        "print(f\"Loaded {len(test):,} test items\")\n",
        "\n",
        "# Quick stats on the price targets\n",
        "train_prices = [item.price for item in train]\n",
        "test_prices = [item.price for item in test]\n",
        "\n",
        "print(f\"\\nTraining: Mean=${np.mean(train_prices):.2f}, Median=${np.median(train_prices):.2f}\")\n",
        "print(f\"Test: Mean=${np.mean(test_prices):.2f}, Median=${np.median(test_prices):.2f}\")\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Visualize price distributions\n",
        "\n",
        "# Side-by-side histograms of train vs test prices, drawn in one loop so the\n",
        "# styling stays consistent between the two panels.\n",
        "fig, axes = plt.subplots(1, 2, figsize=(15, 5))\n",
        "\n",
        "panels = [\n",
        "    (train_prices, len(train), 'Training', 'skyblue'),\n",
        "    (test_prices, len(test), 'Test', 'lightcoral'),\n",
        "]\n",
        "for ax, (prices, n, label, color) in zip(axes, panels):\n",
        "    ax.hist(prices, bins=50, color=color, edgecolor='black', alpha=0.7)\n",
        "    ax.set_title(f'{label} Price Distribution (n={n:,})')\n",
        "    ax.set_xlabel('Price ($)')\n",
        "    ax.set_ylabel('Count')\n",
        "    ax.grid(alpha=0.3)\n",
        "\n",
        "plt.tight_layout()\n",
        "plt.show()\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Step 2: Create Balanced Training Sets\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Helper functions\n",
        "\n",
        "def categorize_price(price):\n",
        "    \"\"\"Return the price-range bucket label for a price.\"\"\"\n",
        "    if price < 50:\n",
        "        return '$0-50'\n",
        "    if price < 100:\n",
        "        return '$50-100'\n",
        "    if price < 300:\n",
        "        return '$100-300'\n",
        "    return '$300+'\n",
        "\n",
        "def create_balanced_dataset(items, size, validation_split=0.1, seed=42):\n",
        "    \"\"\"Sample ~size items evenly across price buckets, then split off validation.\n",
        "\n",
        "    Returns (train_items, val_items). A bucket smaller than its quota\n",
        "    contributes everything it has, so the total can fall short of `size`.\n",
        "    \"\"\"\n",
        "    random.seed(seed)  # deterministic sampling for reproducibility\n",
        "    \n",
        "    # Group items by their price bucket\n",
        "    buckets = defaultdict(list)\n",
        "    for item in items:\n",
        "        buckets[categorize_price(item.price)].append(item)\n",
        "    \n",
        "    # Draw an equal quota from every bucket (capped at the bucket's size)\n",
        "    quota = size // len(buckets)\n",
        "    chosen = []\n",
        "    for members in buckets.values():\n",
        "        chosen.extend(random.sample(members, min(quota, len(members))))\n",
        "    \n",
        "    # Shuffle, then carve the validation slice off the front\n",
        "    random.shuffle(chosen)\n",
        "    n_val = int(len(chosen) * validation_split)\n",
        "    return chosen[n_val:], chosen[:n_val]\n",
        "\n",
        "def show_balance(dataset, name):\n",
        "    \"\"\"Print how many items of `dataset` fall into each price bucket.\"\"\"\n",
        "    counts = Counter(categorize_price(item.price) for item in dataset)\n",
        "    print(f\"\\n{name}:\")\n",
        "    for cat in ['$0-50', '$50-100', '$100-300', '$300+']:\n",
        "        count = counts[cat]\n",
        "        pct = count/len(dataset)*100 if len(dataset) > 0 else 0\n",
        "        print(f\"  {cat}: {count:,} ({pct:.1f}%)\")\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Create different sized datasets\n",
        "\n",
        "# All three candidate sizes are built up front (cheap) so TRAINING_SIZE can be\n",
        "# changed in the configuration cell and this cell simply re-run.\n",
        "print(\"Creating balanced training sets...\\n\")\n",
        "\n",
        "train_500, val_500 = create_balanced_dataset(train, 500, VALIDATION_SPLIT)\n",
        "print(f\"500 examples: {len(train_500)} train + {len(val_500)} val\")\n",
        "\n",
        "train_1000, val_1000 = create_balanced_dataset(train, 1000, VALIDATION_SPLIT)\n",
        "print(f\"1000 examples: {len(train_1000)} train + {len(val_1000)} val\")\n",
        "\n",
        "train_2000, val_2000 = create_balanced_dataset(train, 2000, VALIDATION_SPLIT)\n",
        "print(f\"2000 examples: {len(train_2000)} train + {len(val_2000)} val\")\n",
        "\n",
        "# Select based on configuration\n",
        "if TRAINING_SIZE == 500:\n",
        "    selected_train, selected_val = train_500, val_500\n",
        "elif TRAINING_SIZE == 1000:\n",
        "    selected_train, selected_val = train_1000, val_1000\n",
        "else:\n",
        "    selected_train, selected_val = train_2000, val_2000  # any other value falls back to 2000\n",
        "\n",
        "print(f\"\\n👉 Using {len(selected_train)} train + {len(selected_val)} val\")\n",
        "show_balance(selected_train, f\"{TRAINING_SIZE}-example Training Set\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Step 3: Prepare JSONL Files for Fine-Tuning\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# JSONL conversion functions\n",
        "\n",
        "def messages_for(item, strategy='baseline'):\n",
        "    \"\"\"Build the chat-format training example for one item.\n",
        "\n",
        "    Uses the system prompt from PROMPT_STRATEGIES[strategy]; the assistant\n",
        "    turn contains the ground-truth price so the model learns the reply format.\n",
        "    \"\"\"\n",
        "    system_message = PROMPT_STRATEGIES[strategy]['system']\n",
        "    # Strip the inference-time suffixes so the training prompt is clean\n",
        "    user_prompt = item.test_prompt().replace(\" to the nearest dollar\", \"\").replace(\"\\n\\nPrice is $\", \"\")\n",
        "    \n",
        "    return [\n",
        "        {\"role\": \"system\", \"content\": system_message},\n",
        "        {\"role\": \"user\", \"content\": user_prompt},\n",
        "        {\"role\": \"assistant\", \"content\": f\"Price is ${item.price:.2f}\"}\n",
        "    ]\n",
        "\n",
        "def make_jsonl(items, strategy='baseline'):\n",
        "    \"\"\"Convert items to JSONL: one {\\\"messages\\\": [...]} object per line.\"\"\"\n",
        "    # json.dumps the whole object rather than hand-concatenating JSON fragments\n",
        "    lines = [json.dumps({\"messages\": messages_for(item, strategy)}) for item in items]\n",
        "    return \"\\n\".join(lines)\n",
        "\n",
        "def write_jsonl(items, filename, strategy='baseline'):\n",
        "    \"\"\"Write items to `filename` in the JSONL format OpenAI fine-tuning expects.\"\"\"\n",
        "    with open(filename, \"w\") as f:\n",
        "        f.write(make_jsonl(items, strategy))\n",
        "    # Bug fix: previously printed the literal placeholder '(unknown)' instead of the filename\n",
        "    print(f\"Written {len(items)} items to {filename}\")\n",
        "\n",
        "# Test\n",
        "print(\"Example message:\")\n",
        "print(json.dumps(messages_for(selected_train[0], PROMPT_STRATEGY), indent=2)[:200] + \"...\")\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Write JSONL files\n",
        "\n",
        "# Train and validation files must use the same prompt strategy to stay consistent\n",
        "print(f\"Using prompt strategy: '{PROMPT_STRATEGY}'\\n\")\n",
        "write_jsonl(selected_train, \"fine_tune_train.jsonl\", PROMPT_STRATEGY)\n",
        "write_jsonl(selected_val, \"fine_tune_validation.jsonl\", PROMPT_STRATEGY)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Step 4: Upload Files to OpenAI\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Upload training file\n",
        "\n",
        "# purpose=\"fine-tune\" marks the upload for use by fine-tuning jobs\n",
        "with open(\"fine_tune_train.jsonl\", \"rb\") as f:\n",
        "    train_file = openai.files.create(file=f, purpose=\"fine-tune\")\n",
        "\n",
        "print(f\"Training file uploaded: {train_file.id}\")\n",
        "train_file\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Upload validation file\n",
        "\n",
        "# purpose=\"fine-tune\" marks the upload for use by fine-tuning jobs\n",
        "with open(\"fine_tune_validation.jsonl\", \"rb\") as f:\n",
        "    validation_file = openai.files.create(file=f, purpose=\"fine-tune\")\n",
        "\n",
        "print(f\"Validation file uploaded: {validation_file.id}\")\n",
        "validation_file\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Step 5: Create Fine-Tuning Job\n",
        "\n",
        "**Optional**: Set up Weights & Biases at https://wandb.ai for training monitoring\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Create fine-tuning job\n",
        "\n",
        "# NOTE(review): the W&B integration assumes a wandb account is linked to your\n",
        "# OpenAI organization -- drop the `integrations` argument below if it is not.\n",
        "wandb_integration = {\"type\": \"wandb\", \"wandb\": {\"project\": \"product-pricer-improved\"}}\n",
        "\n",
        "print(f\"Starting fine-tuning:\")\n",
        "print(f\"  Model: gpt-4o-mini-2024-07-18\")\n",
        "print(f\"  Training: {len(selected_train)} examples\")\n",
        "print(f\"  Validation: {len(selected_val)} examples\")\n",
        "print(f\"  Epochs: {N_EPOCHS}\\n\")\n",
        "\n",
        "fine_tune_job = openai.fine_tuning.jobs.create(\n",
        "    training_file=train_file.id,\n",
        "    validation_file=validation_file.id,\n",
        "    model=\"gpt-4o-mini-2024-07-18\",\n",
        "    seed=42,  # reproducible training runs\n",
        "    hyperparameters={\"n_epochs\": N_EPOCHS},\n",
        "    integrations=[wandb_integration],\n",
        "    suffix=\"pricer-improved\"  # appears in the fine-tuned model's name\n",
        ")\n",
        "\n",
        "job_id = fine_tune_job.id\n",
        "print(f\"Job created: {job_id}\")\n",
        "print(f\"   Status: {fine_tune_job.status}\")\n",
        "\n",
        "# Persist the run configuration so later sessions can recover the job details\n",
        "config = {\n",
        "    \"job_id\": job_id,\n",
        "    \"training_size\": TRAINING_SIZE,\n",
        "    \"prompt_strategy\": PROMPT_STRATEGY,\n",
        "    \"n_epochs\": N_EPOCHS\n",
        "}\n",
        "\n",
        "with open(\"training_config.json\", \"w\") as f:\n",
        "    json.dump(config, f, indent=2)\n",
        "with open(\"job_id.txt\", \"w\") as f:\n",
        "    f.write(job_id)\n",
        "\n",
        "print(\"\\n Config saved to training_config.json\")\n",
        "fine_tune_job\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Check job status (run this cell to monitor progress)\n",
        "\n",
        "status = openai.fine_tuning.jobs.retrieve(job_id)\n",
        "print(f\"Job Status: {status.status}\\n\")\n",
        "\n",
        "# Show recent events (reversed so they read oldest-first)\n",
        "events = openai.fine_tuning.jobs.list_events(fine_tuning_job_id=job_id, limit=5)\n",
        "print(\"Recent events:\")\n",
        "for event in events.data[::-1]:\n",
        "    print(f\"  {event.message}\")\n",
        "\n",
        "status\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Get fine-tuned model name and final status\n",
        "\n",
        "job_status = openai.fine_tuning.jobs.retrieve(job_id)\n",
        "# fine_tuned_model remains None until the job has completed successfully\n",
        "fine_tuned_model_name = job_status.fine_tuned_model\n",
        "\n",
        "if fine_tuned_model_name:\n",
        "    print(f\" Model ready: {fine_tuned_model_name}\")\n",
        "    if job_status.trained_tokens:\n",
        "        print(f\"   Trained tokens: {job_status.trained_tokens:,}\")\n",
        "else:\n",
        "    print(f\"Still training... Status: {job_status.status}\")\n",
        "    print(\" Run this cell again in a few minutes\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Step 6: Evaluate the Fine-Tuned Model\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Prediction function\n",
        "\n",
        "def get_price(s):\n",
        "    \"\"\"Extract the first numeric value from a response string; 0 if none found.\"\"\"\n",
        "    s = s.replace('$', '').replace(',', '')\n",
        "    # Matches a decimal number (optional sign) or, failing that, a bare integer\n",
        "    match = re.search(r\"[-+]?\\d*\\.\\d+|\\d+\", s)\n",
        "    return float(match.group()) if match else 0\n",
        "\n",
        "def gpt_fine_tuned_improved(item):\n",
        "    \"\"\"Predict an item's price with the fine-tuned model.\"\"\"\n",
        "    # Must mirror the training prompts exactly: same strategy, same suffix stripping\n",
        "    system_message = PROMPT_STRATEGIES[PROMPT_STRATEGY]['system']\n",
        "    user_prompt = item.test_prompt().replace(\" to the nearest dollar\", \"\").replace(\"\\n\\nPrice is $\", \"\")\n",
        "    \n",
        "    messages = [\n",
        "        {\"role\": \"system\", \"content\": system_message},\n",
        "        {\"role\": \"user\", \"content\": user_prompt},\n",
        "        {\"role\": \"assistant\", \"content\": \"Price is $\"}  # prefill steers the reply format\n",
        "    ]\n",
        "    \n",
        "    # temperature=0 + fixed seed for near-deterministic output; 10 tokens is ample for a price\n",
        "    response = openai.chat.completions.create(\n",
        "        model=fine_tuned_model_name,\n",
        "        messages=messages,\n",
        "        seed=42,\n",
        "        max_tokens=10,\n",
        "        temperature=0\n",
        "    )\n",
        "    \n",
        "    return get_price(response.choices[0].message.content)\n",
        "\n",
        "# Test on one item\n",
        "sample = test[0]\n",
        "pred = gpt_fine_tuned_improved(sample)\n",
        "print(f\"Product: {sample.title[:60]}...\")\n",
        "print(f\"Predicted: ${pred:.2f}\")\n",
        "print(f\"Actual: ${sample.price:.2f}\")\n",
        "print(f\"Error: ${abs(pred - sample.price):.2f}\")\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# Run full evaluation on 250 test items\n",
        "\n",
        "# NOTE(review): the 250-item count appears to come from Tester's own default,\n",
        "# since the full test list is passed -- confirm in testing.py\n",
        "print(\"Running comprehensive evaluation...\\n\")\n",
        "print(\"This will take a few minutes.\\n\")\n",
        "print(\"=\"*80 + \"\\n\")\n",
        "\n",
        "Tester.test(gpt_fine_tuned_improved, test)\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "llm-engineering",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.12"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2
}
