#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn
import torch.nn.functional as F

class PiscesReasoner(nn.Module):
    """
    Pisces Reasoner Module.

    Features:
    1. Controllable Chain-of-Thought (CoT) generation.
    2. Self-reflection mechanism for error correction.
    3. Dynamic thinking budget based on predicted difficulty.
    """
    def __init__(self, cfg):
        """
        Initialize the PiscesReasoner module.

        Args:
            cfg: Configuration object exposing ``hidden_size`` and
                ``vocab_size`` attributes.
        """
        super().__init__()
        self.cfg = cfg
        self.hidden_size = cfg.hidden_size
        self.vocab_size = cfg.vocab_size

        # Heads for the three reasoning sub-tasks.
        # Projects hidden states onto the vocabulary to score thinking-chain tokens.
        self.thinking_head = nn.Linear(self.hidden_size, self.vocab_size, bias=False)
        # Predicts difficulty: 3 classes (easy, medium, hard).
        self.difficulty_head = nn.Linear(self.hidden_size, 3)
        # Self-reflection: 2 classes; trained with label 1 = correct, 0 = incorrect
        # (see the `correct` argument of forward()).
        self.reflection_head = nn.Linear(self.hidden_size, 2)

        # Special token IDs; must be set externally after the tokenizer has
        # been extended with the thinking delimiter tokens.
        self.start_thinking_id = None
        self.end_thinking_id = None

    def resize_vocab(self, new_vocab_size):
        """
        Resize the thinking_head to match a new vocabulary size.

        Existing rows are preserved (up to the smaller of the old and new
        sizes); any additional rows keep nn.Linear's default initialization.

        Args:
            new_vocab_size (int): The new size of the vocabulary.
        """
        old_head = self.thinking_head
        # Create the replacement on the same device/dtype as the old head so
        # the module stays consistent with the rest of the model.
        new_head = nn.Linear(
            self.hidden_size,
            new_vocab_size,
            bias=False,
            device=old_head.weight.device,
            dtype=old_head.weight.dtype
        )

        num_to_copy = min(old_head.out_features, new_vocab_size)
        new_head.weight.data[:num_to_copy, :] = old_head.weight.data[:num_to_copy, :]

        self.thinking_head = new_head
        self.vocab_size = new_vocab_size

    def forward(self, hidden_states, input_ids=None, labels=None, correct=None):
        """
        Forward pass of the PiscesReasoner module.

        Args:
            hidden_states (torch.Tensor): [batch, seq_len, hidden_size] from the base model.
            input_ids (torch.Tensor, optional): [batch, seq_len]. Required when
                `labels` and `correct` are given (used to locate the thinking block).
            labels (torch.Tensor, optional): [batch, seq_len] token targets.
            correct (torch.Tensor, optional): [batch] (1 for correct reasoning,
                0 for incorrect).

        Returns:
            dict: thinking_logits [batch, seq_len, vocab], difficulty_logits
            [batch, 3], reflection_logits [batch, 2], and loss (None unless
            both `labels` and `correct` are provided).
        """
        if self.start_thinking_id is None or self.end_thinking_id is None:
            # Not yet configured for reasoning: return a zero loss that still
            # supports backward() so training loops need no special-casing.
            return {"loss": torch.tensor(0.0, device=hidden_states.device, requires_grad=True)}

        # 1. Predict difficulty (e.g., for dynamic thinking budget) from the
        #    first token's hidden state (assumes a [CLS]-style summary token
        #    at position 0 — TODO confirm against the base model).
        difficulty_logits = self.difficulty_head(hidden_states[:, 0])

        # 2. Per-token vocabulary logits for the thinking chain.
        thinking_logits = self.thinking_head(hidden_states)

        # 3. Self-reflection (self-correction) from the last token's hidden state.
        reflection_logits = self.reflection_head(hidden_states[:, -1])

        loss = None
        if labels is not None and correct is not None:
            # Restrict the CoT loss to tokens inside the thinking block:
            # from <|start_thinking|> up to and including <|end_thinking|>.
            started = (input_ids == self.start_thinking_id).cumsum(dim=1) > 0
            not_ended = (input_ids == self.end_thinking_id).cumsum(dim=1) == 0
            cot_mask = started & not_ended
            # `not_ended` turns False at the end token itself; add it back so
            # the end-of-thinking token is also supervised.
            cot_mask |= (input_ids == self.end_thinking_id)

            if cot_mask.any():
                thinking_loss = F.cross_entropy(thinking_logits[cot_mask], labels[cot_mask])
            else:
                # No thinking tokens in this batch: contribute a zero loss.
                thinking_loss = torch.tensor(0.0, device=hidden_states.device, requires_grad=True)

            # Supervise the reflection head with the explicit correctness label.
            reflection_loss = F.cross_entropy(reflection_logits, correct)

            # Weighted total: CoT loss plus down-weighted reflection loss.
            loss = thinking_loss + 0.5 * reflection_loss

        return {
            "thinking_logits": thinking_logits,
            "difficulty_logits": difficulty_logits,
            "reflection_logits": reflection_logits,
            "loss": loss
        }


class TreeSearchReasoner:
    """
    Lightweight tree search for inference using self-consistency.
    Generates multiple reasoning paths and selects the best one by voting.
    """
    def __init__(self, model, tokenizer, num_samples=5, max_length=512, rejection_threshold=0.8, temperature_range=(0.5, 1.0)):
        """
        Initialize the TreeSearchReasoner.

        Args:
            model: The reasoning model to use. Must expose `.device`,
                `.get_hidden_states(ids)`, `.generate(...)`, and a `.reasoner`
                with `difficulty_head` / `reflection_head`.
            tokenizer: The tokenizer for text encoding and decoding.
            num_samples (int, optional): Number of samples to keep. Defaults to 5.
            max_length (int, optional): Maximum length of generated sequences. Defaults to 512.
            rejection_threshold (float, optional): Minimum correctness confidence
                for a candidate to be accepted. Defaults to 0.8.
            temperature_range (tuple, optional): (min, max) sampling temperature.
                Defaults to (0.5, 1.0).
        """
        self.model = model
        self.tokenizer = tokenizer
        self.num_samples = num_samples
        self.max_length = max_length
        self.rejection_threshold = rejection_threshold
        self.temperature_range = temperature_range

    @torch.no_grad()
    def generate(self, prompt):
        """
        Generate multiple reasoning paths and select the best one using
        rejection sampling plus majority vote.

        Args:
            prompt (str): The input prompt for reasoning.

        Returns:
            str: The selected reasoning result ("" only if no candidates exist).
        """
        # Encode the prompt once; it is reused for both the difficulty
        # prediction and every sampling pass (loop-invariant).
        inputs = self.tokenizer.encode(prompt, return_tensors="pt").to(self.model.device)

        # Predict difficulty to adapt the sampling temperature.
        prompt_hidden = self.model.get_hidden_states(inputs)
        difficulty_logits = self.model.reasoner.difficulty_head(prompt_hidden[:, 0])
        difficulty = torch.argmax(difficulty_logits).item()  # 0: easy, 1: medium, 2: hard

        # Adaptive temperature based on difficulty (harder = more exploration).
        min_temp, max_temp = self.temperature_range
        temperature = min_temp + (max_temp - min_temp) * (difficulty / 2)

        candidates = []
        for _ in range(self.num_samples * 2):  # Generate extra for rejection sampling
            outputs = self.model.generate(
                inputs,
                max_length=self.max_length,
                do_sample=True,
                temperature=temperature,
                pad_token_id=self.tokenizer.pad_token_id
            )
            candidates.append(self.tokenizer.decode(outputs[0], skip_special_tokens=True))

        # Score each candidate with the reflection head.
        scored_candidates = []
        for candidate in candidates:
            candidate_inputs = self.tokenizer.encode(candidate, return_tensors="pt").to(self.model.device)
            candidate_hidden = self.model.get_hidden_states(candidate_inputs)
            reflection_logits = self.model.reasoner.reflection_head(candidate_hidden[:, -1])
            # Index 1 is P(correct): PiscesReasoner trains the reflection head
            # with label 1 = correct reasoning, 0 = incorrect. (Index 0 would
            # be the probability of being INCORRECT.)
            confidence = F.softmax(reflection_logits, dim=-1)[0, 1].item()
            scored_candidates.append((candidate, confidence))

        # Rejection sampling: keep only high-confidence candidates.
        accepted = [c for c, conf in scored_candidates if conf >= self.rejection_threshold]
        if not accepted:
            # Fallback if everything was rejected: take the highest-confidence
            # candidates rather than an arbitrary prefix of the unsorted list.
            ranked = sorted(scored_candidates, key=lambda sc: sc[1], reverse=True)
            accepted = [c for c, _ in ranked[:self.num_samples]]

        # Majority vote over the accepted candidates.
        return max(set(accepted), key=accepted.count) if accepted else ""