"""
Originality detection system with paragraph-level similarity checking.
"""
import logging
from typing import List, Dict, Any, Tuple, Optional
from dataclasses import dataclass

from src.algorithms.simhash import SimHash, TextSegmenter
from src.models.fingerprint import ContentFingerprint, SimilarContent
from src.repositories.fingerprint_repository import FingerprintRepository
from src.repositories.whitelist_repository import WhitelistRepository
from src.models.whitelist import WhitelistType

logger = logging.getLogger(__name__)


@dataclass
class SegmentSimilarity:
    """Similarity information for a single segment of the analyzed text."""
    # Raw text of the segment as extracted from the source document.
    segment_text: str
    segment_position: Tuple[int, int]  # (start, end) character positions
    # SimHash fingerprint of the segment, as returned by SimHash.generate.
    fingerprint: str
    # Repository matches for this segment: dicts with "content_id",
    # "similarity", "text_preview" and "source" keys.
    similar_segments: List[Dict[str, Any]]
    # Highest similarity among the matches (0.0 when there are none);
    # presumably a 0-100 percentage — confirm against SimHash's scale.
    max_similarity: float


@dataclass
class DetectionResult:
    """Complete originality detection result."""
    overall_originality: float  # 0-100, higher is more original
    # Number of non-empty segments that were analyzed.
    total_segments: int
    # Count of segments whose max similarity exceeded 30%.
    similar_segments_count: int
    # Per-segment similarity details, in document order.
    segment_results: List[SegmentSimilarity]
    # Most similar whole documents found in the repository (at most 5).
    similar_contents: List[SimilarContent]
    # Human-readable one-line summary of the result.
    detection_summary: str

class OriginalityDetector:
    """Detector for content originality using SimHash and segment analysis.

    Fingerprints the full text as well as its individual segments
    (paragraphs, sentences, or sliding windows), looks up similar
    fingerprints in the repository, filters out whitelisted sources, and
    aggregates everything into a DetectionResult.
    """

    def __init__(
        self,
        repository: FingerprintRepository,
        whitelist_repository: Optional[WhitelistRepository] = None,
        similarity_threshold: int = 10,
        segment_type: str = "paragraph",
        window_size: int = 200,
        step_size: int = 100
    ):
        """
        Initialize originality detector.

        Args:
            repository: FingerprintRepository instance
            whitelist_repository: WhitelistRepository instance for exclusions
            similarity_threshold: Hamming distance threshold for similarity
            segment_type: Type of segmentation (paragraph/sentence/sliding)
            window_size: Window size in characters for "sliding" segmentation
            step_size: Step size in characters for "sliding" segmentation
        """
        self.repository = repository
        self.whitelist_repository = whitelist_repository
        self.simhash = SimHash()
        self.similarity_threshold = similarity_threshold
        self.segment_type = segment_type
        # Sliding-window parameters. These were previously hard-coded inside
        # _segment_text, which made detect_with_sliding_window's arguments
        # silently ineffective.
        self.window_size = window_size
        self.step_size = step_size

    def _segment_text(self, text: str) -> List[Tuple[str, Tuple[int, int]]]:
        """
        Segment text based on the configured segmentation type.

        Args:
            text: Text to segment

        Returns:
            List of (segment_text, (start_pos, end_pos)) tuples; positions
            are character offsets into the original text.
        """
        if self.segment_type == "paragraph":
            segments = TextSegmenter.segment_by_paragraph(text)
        elif self.segment_type == "sentence":
            segments = TextSegmenter.segment_by_sentence(text)
        elif self.segment_type == "sliding":
            segments = TextSegmenter.sliding_window(
                text, window_size=self.window_size, step=self.step_size
            )
        else:
            # Unknown type: treat the whole text as a single segment.
            segments = [text]

        # Sliding windows overlap, so the next segment may start *before* the
        # previous one ends; advancing the search position to the previous
        # end would make text.find miss every overlapping window.
        advance_to_end = self.segment_type != "sliding"

        # Locate each segment in the original text so callers can map
        # results back to character positions.
        segments_with_positions = []
        current_pos = 0
        for segment in segments:
            segment_start = text.find(segment, current_pos)
            if segment_start == -1:
                # Segmenter may have normalized whitespace; fall back to the
                # current scan position rather than failing.
                segment_start = current_pos
            segment_end = segment_start + len(segment)
            segments_with_positions.append((segment, (segment_start, segment_end)))
            current_pos = segment_end if advance_to_end else segment_start + 1

        return segments_with_positions

    async def _filter_whitelisted(self, matches: List[Any]) -> List[Any]:
        """
        Remove similarity matches whose source is whitelisted.

        Args:
            matches: Items as returned by FingerprintRepository.find_similar.
                Tuples of (ContentFingerprint, similarity) are checked against
                the whitelist; any other item is passed through unchanged.

        Returns:
            Matches with whitelisted sources removed. Returned unchanged when
            no whitelist repository is configured or matches is empty.
        """
        if not self.whitelist_repository or not matches:
            return matches

        filtered = []
        for item in matches:
            if isinstance(item, tuple):
                content, _similarity = item
                source = getattr(content, 'source', None)
                if source:
                    match = await self.whitelist_repository.match(
                        source,
                        WhitelistType.SOURCE
                    )
                    if match.matched:
                        # Whitelisted source: do not count as plagiarism.
                        continue
            filtered.append(item)
        return filtered

    @staticmethod
    def _summarize(originality: float) -> str:
        """Return a human-readable summary for an originality score (0-100)."""
        if originality >= 90:
            return "Content is highly original with minimal similarity to existing content."
        if originality >= 70:
            return "Content shows good originality with some similar segments found."
        if originality >= 50:
            return "Content has moderate originality. Several similar segments detected."
        return "Content shows low originality. Many similar segments found in database."

    async def detect_originality(
        self,
        text: str,
        content_id: Optional[str] = None,
        save_fingerprint: bool = True,
        check_sources: Optional[List[str]] = None
    ) -> DetectionResult:
        """
        Perform complete originality detection on text.

        Args:
            text: Text to check for originality
            content_id: Optional content ID for saving fingerprints
            save_fingerprint: Whether to save fingerprints to the database
            check_sources: Reserved for future use; currently ignored.

        Returns:
            DetectionResult with complete analysis
        """
        if not text or not text.strip():
            # Nothing to compare: trivially original.
            return DetectionResult(
                overall_originality=100.0,
                total_segments=0,
                similar_segments_count=0,
                segment_results=[],
                similar_contents=[],
                detection_summary="Empty text, considered 100% original"
            )

        # Document-level check: fingerprint the entire text and look up
        # similar documents, excluding whitelisted sources.
        full_fingerprint = self.simhash.generate(text)
        similar_full = await self.repository.find_similar(
            full_fingerprint,
            threshold=self.similarity_threshold
        )
        similar_full = await self._filter_whitelisted(similar_full)

        # Segment-level check for fine-grained similarity locations.
        segment_results = []
        for segment_text, position in self._segment_text(text):
            if not segment_text.strip():
                continue

            segment_fp = self.simhash.generate(segment_text)

            # NOTE(review): segment lookups and saves always use
            # content_type "paragraph", even for sentence/sliding
            # segmentation — confirm this matches how segment fingerprints
            # are stored in the repository.
            similar_segments = await self.repository.find_similar(
                segment_fp,
                threshold=self.similarity_threshold,
                content_type="paragraph"
            )
            similar_segments = await self._filter_whitelisted(similar_segments)

            # Collect match details and track the strongest match.
            max_similarity = 0.0
            similar_segment_data = []
            for similar_content, similarity in similar_segments:
                max_similarity = max(max_similarity, similarity)
                similar_segment_data.append({
                    "content_id": similar_content.content_id,
                    "similarity": similarity,
                    "text_preview": similar_content.text_preview,
                    "source": similar_content.source
                })

            segment_results.append(SegmentSimilarity(
                segment_text=segment_text,
                segment_position=position,
                fingerprint=segment_fp,
                similar_segments=similar_segment_data,
                max_similarity=max_similarity
            ))

            if save_fingerprint and content_id:
                # Segment IDs are derived from the start offset so they are
                # unique within the document.
                await self.repository.save_fingerprint(ContentFingerprint(
                    content_id=f"{content_id}_seg_{position[0]}",
                    content_type="paragraph",
                    fingerprint=segment_fp,
                    text_preview=segment_text[:200],
                    word_count=len(segment_text.split())
                ))

        # Overall originality: 100 minus the mean of per-segment maximum
        # similarities (similarity is assumed to be a 0-100 percentage).
        if segment_results:
            avg_similarity = sum(s.max_similarity for s in segment_results) / len(segment_results)
            overall_originality = 100.0 - avg_similarity
        else:
            overall_originality = 100.0

        # Segments above 30% similarity are flagged as significant.
        similar_segments_count = sum(
            1 for s in segment_results
            if s.max_similarity > 30.0
        )

        # Keep only the top 5 similar documents, in repository order.
        similar_contents = [
            SimilarContent(
                content_id=content.content_id,
                similarity=similarity,
                fingerprint=content.fingerprint,
                segments=[],
                source=content.source
            )
            for content, similarity in similar_full[:5]
        ]

        if save_fingerprint and content_id:
            await self.repository.save_fingerprint(ContentFingerprint(
                content_id=content_id,
                content_type="article",
                fingerprint=full_fingerprint,
                text_preview=text[:200],
                word_count=len(text.split())
            ))

        return DetectionResult(
            overall_originality=round(overall_originality, 2),
            total_segments=len(segment_results),
            similar_segments_count=similar_segments_count,
            segment_results=segment_results,
            similar_contents=similar_contents,
            detection_summary=self._summarize(overall_originality)
        )

    async def detect_with_sliding_window(
        self,
        text: str,
        window_size: int = 200,
        step_size: int = 100
    ) -> DetectionResult:
        """
        Detect originality using a sliding-window segmentation.

        Args:
            text: Text to analyze
            window_size: Size of sliding window in characters
            step_size: Step size for sliding window

        Returns:
            DetectionResult based on sliding-window segments

        NOTE(review): this temporarily mutates detector state, so concurrent
        detections on the same instance may interleave badly — confirm the
        detector is not shared across tasks.
        """
        # Bug fix: previously window_size/step_size were silently ignored
        # (_segment_text hard-coded 200/100) and a precomputed segment list
        # was built and then discarded.
        saved = (self.segment_type, self.window_size, self.step_size)
        self.segment_type = "sliding"
        self.window_size = window_size
        self.step_size = step_size
        try:
            return await self.detect_originality(text, save_fingerprint=False)
        finally:
            self.segment_type, self.window_size, self.step_size = saved

    def calculate_segment_similarity(
        self,
        segment1: str,
        segment2: str
    ) -> float:
        """
        Calculate similarity between two text segments.

        Args:
            segment1: First text segment
            segment2: Second text segment

        Returns:
            Similarity percentage (0-100), per SimHash.calculate_similarity
        """
        fp1 = self.simhash.generate(segment1)
        fp2 = self.simhash.generate(segment2)
        return self.simhash.calculate_similarity(fp1, fp2)

    def mark_similar_regions(
        self,
        text: str,
        detection_result: DetectionResult,
        similarity_threshold: float = 30.0
    ) -> str:
        """
        Mark similar regions in text with inline annotations.

        Segments whose max_similarity strictly exceeds the threshold are
        wrapped in "[SIMILAR xx.x%]...[/SIMILAR]" markers.

        Args:
            text: Original text
            detection_result: Detection result with segment information
            similarity_threshold: Threshold for marking as similar

        Returns:
            Text with similar regions marked
        """
        flagged = [
            s for s in detection_result.segment_results
            if s.max_similarity > similarity_threshold
        ]
        # Process in document order so output positions line up.
        flagged.sort(key=lambda s: s.segment_position[0])

        pieces = []
        last_pos = 0
        for segment in flagged:
            start, end = segment.segment_position
            # Emit any unmarked text before this segment.
            if start > last_pos:
                pieces.append(text[last_pos:start])
            pieces.append(f"[SIMILAR {segment.max_similarity:.1f}%]")
            pieces.append(text[start:end])
            pieces.append("[/SIMILAR]")
            last_pos = end

        # Emit the remaining unmarked tail.
        if last_pos < len(text):
            pieces.append(text[last_pos:])

        return "".join(pieces)