#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time, torch
from model import PiscesModel, PiscesConfig

# Load configuration from JSON file
cfg = PiscesConfig.from_json("configs/0.5B.json")

# Smart device detection: prefer CUDA, fall back to CPU when no GPU is present
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"✅\tUsing device: {device}")

# Initialize the model with the loaded configuration, move it to the selected
# device, and set it to evaluation mode (disables dropout etc. for inference)
model = PiscesModel(cfg).to(device).eval()

# Generate random token ids as a dummy (1, 8192) input batch; create the
# tensor directly on the target device to avoid an extra host->device copy
tok = torch.randint(0, cfg.vocab_size, (1, 8192), device=device)

# CUDA kernels launch asynchronously, so we must synchronize before starting
# the timer and after the forward pass to measure the full execution time.
# Guard the calls so the script also runs on CPU-only machines, where an
# unconditional torch.cuda.synchronize() would raise a RuntimeError.
if device.type == "cuda":
    torch.cuda.synchronize()

# perf_counter is a monotonic, high-resolution clock — the right choice for
# benchmarking (time.time() is wall-clock and can jump under NTP adjustment)
t0 = time.perf_counter()
# Disable gradient calculation to speed up inference
with torch.no_grad():
    # Perform a forward pass through the model
    _ = model(tok)
# Ensure all queued CUDA work from the forward pass has actually finished
if device.type == "cuda":
    torch.cuda.synchronize()

# Print the time taken for the forward pass of 8192 tokens
print("\t8192 tokens forward:", time.perf_counter() - t0, "s")