import os
import traceback
from typing import List, Tuple

import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence
from tensorflow.python.keras.models import load_model
from tqdm import tqdm

from src.extractor import extract_dot_text


def inference_file(input_file: str, model: tf.keras.Model, batch_size: int,
                   features: int) -> List[Tuple[int, int]]:
    """
    Performs the inference over a binary file with the given model. The data
    from the binary file will be extracted from the .text section, and split in
    chunks of `features` length, pre-padded with zeroes if the length is not
    sufficient. `batch_size` chunks will be fed to the model and the
    predictions collected.
    :param input_file: Path to the file from which the data will be extracted.
    :param model: The model used for inference.
    :param batch_size: Number of batches to be fed to the model.
    :param features: Number of features for each sample.
    :return: A list of tuples, where each tuple contains the sample length
    and the prediction.
    """
    data = extract_dot_text(input_file)
    if data is None:
        return []
    # Split into fixed-size chunks by stride indexing: the original rebound
    # `data = data[features:]` each iteration, copying the remaining buffer
    # every time (O(n^2) on large .text sections).
    chunks = [data[start:start + features]
              for start in range(0, len(data), features)]
    result = []
    # Feed the model `batch_size` chunks at a time.
    for start in range(0, len(chunks), batch_size):
        batch = [np.asarray(chunk) for chunk in chunks[start:start + batch_size]]
        # Record the un-padded length of each sample before padding.
        lengths = [sample.shape[0] for sample in batch]
        padded = sequence.pad_sequences(batch, maxlen=features,
                                        padding="pre", truncating="pre")
        prediction = model.predict(padded, verbose=0, batch_size=batch_size)
        predicted_class = np.argmax(prediction, axis=1)
        # Pair each original sample length with its predicted class.
        result.extend(zip(lengths, predicted_class))
    return result


def run_inference(input_files: List[str], model_path: str, output: str,
                  bs: int, features: int):
    """
    Runs inference over a list of binary files with a saved Keras model and
    optionally writes the results to a CSV-like output file.
    :param input_files: Paths of the binary files to process.
    :param model_path: Path of the saved Keras model to load.
    :param output: Path of the results file; when falsy, nothing is written.
    :param bs: Batch size used for prediction.
    :param features: Number of features (bytes) for each sample.
    :return: A list of (file, predicted class, confidence) tuples.
    """
    print("[DEBUG] Starting inference")

    # Load the model
    print(f"[DEBUG] Loading model from {model_path}")
    model = load_model(model_path)
    print("[DEBUG] Model loaded successfully")

    # Process each file
    results = []
    for file_idx, file in enumerate(input_files):
        print(f"[DEBUG] Processing file {file_idx + 1}/{len(input_files)}: {file}")

        try:
            # Extract features
            print(f"[DEBUG] Extracting features from {file}")
            X = extract_features(file, features)
            print(f"[DEBUG] Features extracted: shape={X.shape if X is not None else 'None'}")

            if X is None or len(X) == 0:
                print(f"[DEBUG] No features extracted from {file}")
                continue

            # Make prediction
            print(f"[DEBUG] Making prediction with batch size {bs}")
            y_pred = model.predict(X, batch_size=bs, verbose=1)
            print(f"[DEBUG] Prediction complete: {y_pred}")

            # Process results. The original reused `i` here, clobbering the
            # outer file index; the inner index was never needed at all.
            print(f"[DEBUG] Processing results")
            for pred in y_pred:
                prediction = np.argmax(pred)
                confidence = pred[prediction]
                results.append((file, prediction, confidence))
                print(f"[DEBUG] Result: file={file}, prediction={prediction}, confidence={confidence:.4f}")

        except Exception as e:
            # Per-file boundary handler: log the failure and keep going.
            print(f"[DEBUG] Error processing {file}: {str(e)}")
            traceback.print_exc()

    # Write results only when an output path was actually given (the
    # original logged "Writing results" even when it wrote nothing).
    if output:
        print(f"[DEBUG] Writing results to {output}")
        with open(output, "w") as f:
            f.write("file, prediction, confidence\n")
            for file, prediction, confidence in results:
                f.write(f"{file}, {prediction}, {confidence}\n")

    print("[DEBUG] Inference complete")
    return results
    
def extract_features(binary_path: str, features: int):
    """
    Extracts the .text section of a binary and splits it into fixed-length
    samples, each pre-padded with zeroes to `features` values.
    :param binary_path: Path to the binary file to process.
    :param features: Number of features (bytes) for each sample.
    :return: A 2-D numpy array of shape (n_samples, features), or None when
    the file is missing, yields no data, or extraction fails.
    """
    print(f"[DEBUG] extract_features: Starting for {binary_path}")

    # Check if file exists
    if not os.path.isfile(binary_path):
        print(f"[DEBUG] extract_features: File not found: {binary_path}")
        return None

    try:
        # Use the existing extract_dot_text function instead of calling radare2 directly
        print(f"[DEBUG] extract_features: Extracting .text section")
        data = extract_dot_text(binary_path)

        if data is None or len(data) == 0:
            print(f"[DEBUG] extract_features: No data extracted from {binary_path}")
            return None

        print(f"[DEBUG] extract_features: Extracted {len(data)} bytes")

        # Split into chunks of `features` bytes. Stride-based slicing avoids
        # the O(n^2) cost of repeatedly re-binding `data = data[features:]`.
        print(f"[DEBUG] extract_features: Processing features")
        buffer = [data[start:start + features]
                  for start in range(0, len(data), features)]

        # Pad sequences. The chunk list is handed to pad_sequences directly:
        # the original wrapped it in np.array() first, which raises
        # ValueError on NumPy >= 1.24 whenever the last chunk is shorter
        # than `features` (ragged sequence), crashing the padding path.
        X = sequence.pad_sequences(buffer, maxlen=features,
                                   padding="pre", truncating="pre")

        print(f"[DEBUG] extract_features: Completed with shape {X.shape}")
        return X
    except Exception as e:
        print(f"[DEBUG] extract_features: Error: {str(e)}")
        traceback.print_exc()
        return None
        
