"""
File Parser Module for xlcig Notebook Visualization Platform
Supports .ipynb, .csv, .xlsx file formats
"""

import nbformat
import pandas as pd
import json
import os
from typing import Dict, Any, Optional


class FileParser:
    """Parses uploaded files (.ipynb, .csv, .xlsx, .xls) into pandas DataFrames.

    Every loader returns a dict mapping a display name (variable name,
    file stem, or sheet name) to a DataFrame, so the caller can offer the
    user a choice of tables to visualize.
    """

    def __init__(self):
        # Extensions accepted by load_file(); exposed so callers can
        # validate uploads before handing them to this class.
        self.supported_formats = ['.ipynb', '.csv', '.xlsx', '.xls']

    def load_file(self, file_path: str) -> Dict[str, pd.DataFrame]:
        """Load *file_path* and return a dictionary of DataFrames.

        Args:
            file_path: Path to the uploaded file.

        Returns:
            Dictionary with sheet/table names as keys and DataFrames as values.

        Raises:
            FileNotFoundError: If *file_path* does not exist.
            ValueError: If the extension is unsupported or parsing fails.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        file_ext = os.path.splitext(file_path)[1].lower()

        if file_ext == '.ipynb':
            return self._load_notebook(file_path)
        if file_ext == '.csv':
            return self._load_csv(file_path)
        if file_ext in ('.xlsx', '.xls'):
            return self._load_excel(file_path)
        raise ValueError(f"Unsupported file format: {file_ext}")

    def _load_notebook(self, file_path: str) -> Dict[str, pd.DataFrame]:
        """Execute a Jupyter notebook and harvest the DataFrames it creates.

        Falls back to parsing HTML tables from cell outputs, and finally to
        re-reading CSV/Excel files referenced in the code, when execution
        yields no DataFrames.
        """
        import importlib

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                nb = nbformat.read(f, as_version=4)

            # Concatenate all code cells into one script.
            code = "\n".join(
                cell.source for cell in nb.cells if cell.cell_type == 'code'
            )

            # Scope for executing the notebook code; pandas is always
            # available under both of its common aliases.
            scope = {
                'pd': pd, 'pandas': pd,
                '__builtins__': __builtins__
            }

            # Optional libraries the notebook is likely to use, mapped to
            # the alias names it will reference them by.
            common_imports = {
                'numpy': ['numpy', 'np'],
                'matplotlib.pyplot': ['plt'],
                'seaborn': ['sns'],
                'sklearn': ['sklearn'],
                'scipy': ['scipy'],
                'plotly': ['plotly'],
                'jieba': ['jieba'],
                'chardet': ['chardet'],
                'requests': ['requests'],
                'json': ['json'],
                'os': ['os'],
                'sys': ['sys'],
                're': ['re'],
                'datetime': ['datetime'],
                'time': ['time'],
                'warnings': ['warnings']
            }

            for module_name, aliases in common_imports.items():
                try:
                    # import_module resolves dotted paths to the *leaf*
                    # module.  __import__ returns the top-level package, so
                    # the previous code bound 'plt' to matplotlib itself
                    # rather than matplotlib.pyplot.
                    module = importlib.import_module(module_name)
                except ImportError:
                    # Silently skip unavailable modules.
                    continue
                for alias in aliases:
                    scope[alias] = module
                if '.' in module_name:
                    # Also expose the bare submodule name (e.g. 'pyplot').
                    scope[module_name.split('.')[-1]] = module

            # SECURITY: exec() runs arbitrary uploaded code with the full
            # privileges of this process.  Only feed trusted notebooks to
            # this parser, or run it inside a sandbox.
            try:
                exec(code, scope, scope)
            except Exception as e:
                print(f"Warning: Error executing notebook code: {e}")
                # Continue anyway: DataFrames may still exist in the scope,
                # in cell outputs, or in files referenced by the code.

            # Collect every non-empty DataFrame the code left behind,
            # skipping private names and the modules injected above.
            excluded_names = {
                'pd', 'pandas', 'numpy', 'np', 'plt', 'sns', 'sklearn',
                'scipy', 'plotly', 'jieba', 'chardet', 'requests', 'json',
                'os', 'sys', 're', 'datetime', 'time', 'warnings'
            }
            dataframes = {
                name: obj.copy()
                for name, obj in scope.items()
                if isinstance(obj, pd.DataFrame)
                and not obj.empty
                and not name.startswith('_')
                and name not in excluded_names
            }

            # Fallback 1: HTML tables rendered in the notebook's outputs.
            if not dataframes:
                dataframes = self._extract_dataframes_from_outputs(nb)

            # Fallback 2: data files the notebook code reads via pandas.
            if not dataframes:
                dataframes = self._try_load_referenced_files(
                    code, os.path.dirname(file_path)
                )

            return dataframes if dataframes else {"No DataFrames": pd.DataFrame()}

        except Exception as e:
            raise ValueError(f"Error loading notebook: {str(e)}") from e

    def _try_load_referenced_files(self, code: str, base_dir: str) -> Dict[str, pd.DataFrame]:
        """Load CSV/Excel files referenced by pandas read calls in *code*.

        Scans for ``pd.read_csv``/``pd.read_excel`` calls whose first
        argument is a string literal; relative paths resolve against
        *base_dir*.  Best-effort: unreadable files are skipped.
        """
        import re

        readers = [
            ('CSV', pd.read_csv,
             [r'pd\.read_csv\([\'"]([^\'"]+)[\'"]',
              r'pandas\.read_csv\([\'"]([^\'"]+)[\'"]']),
            ('Excel', pd.read_excel,
             [r'pd\.read_excel\([\'"]([^\'"]+)[\'"]',
              r'pandas\.read_excel\([\'"]([^\'"]+)[\'"]']),
        ]

        dataframes = {}
        for label, reader, patterns in readers:
            for pattern in patterns:
                for match in re.findall(pattern, code):
                    try:
                        path = match if os.path.isabs(match) \
                            else os.path.join(base_dir, match)
                        if os.path.exists(path):
                            dataframes[f"{label}_{os.path.basename(match)}"] = reader(path)
                    except Exception:
                        # A malformed referenced file is not fatal.
                        continue
        return dataframes

    def _load_csv(self, file_path: str) -> Dict[str, pd.DataFrame]:
        """Load a CSV file, auto-detecting its text encoding."""
        try:
            # Detect encoding from a sample when chardet is installed.
            encoding = 'utf-8'  # default
            try:
                import chardet
                with open(file_path, 'rb') as f:
                    raw_data = f.read(10000)  # first 10KB is enough to detect
                result = chardet.detect(raw_data)
                if result['encoding'] and result['confidence'] > 0.7:
                    encoding = result['encoding']
            except ImportError:
                pass

            # Try the detected encoding first, then common fallbacks
            # (Chinese encodings and Latin-1 variants).
            df = None
            for enc in [encoding, 'utf-8', 'gbk', 'gb2312', 'iso-8859-1', 'cp1252']:
                try:
                    df = pd.read_csv(file_path, encoding=enc)
                    print(f"Successfully loaded CSV with encoding: {enc}")
                    break
                except Exception:
                    # Wrong encoding or parse error -- try the next candidate.
                    continue

            if df is None:
                # Last resort: decode with replacement characters.  Note
                # that pd.read_csv has no ``errors`` keyword (the previous
                # call raised TypeError), so decode through a text handle.
                with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
                    df = pd.read_csv(f)
                print("Loaded CSV with error handling (some characters may be lost)")

            # Drop rows/columns that are entirely empty.
            df = df.dropna(how='all').dropna(axis=1, how='all')

            # Key the result by the file's base name.
            filename = os.path.splitext(os.path.basename(file_path))[0]
            return {filename: df}

        except Exception as e:
            raise ValueError(f"Error loading CSV file: {str(e)}") from e

    def _load_excel(self, file_path: str) -> Dict[str, pd.DataFrame]:
        """Load every non-empty sheet of an Excel workbook."""
        try:
            # Let pandas choose the engine from the extension: openpyxl
            # for .xlsx, xlrd for legacy .xls.  Forcing engine='openpyxl'
            # (as before) broke the .xls files listed in supported_formats.
            excel_data = pd.read_excel(file_path, sheet_name=None)

            # Filter out empty sheets.
            filtered_data = {
                sheet_name: df
                for sheet_name, df in excel_data.items()
                if not df.empty
            }
            return filtered_data if filtered_data else {"Empty File": pd.DataFrame()}

        except Exception as e:
            raise ValueError(f"Error loading Excel file: {str(e)}") from e

    def _extract_dataframes_from_outputs(self, nb) -> Dict[str, pd.DataFrame]:
        """Recover DataFrames from HTML tables in notebook cell outputs."""
        from io import StringIO

        dataframes = {}
        for i, cell in enumerate(nb.cells):
            if cell.cell_type != 'code' or 'outputs' not in cell:
                continue
            for output in cell.outputs:
                # DataFrame reprs usually arrive as execute_result, not
                # just display_data; the previous code missed them.
                if output.output_type not in ('display_data', 'execute_result'):
                    continue
                html_content = output.get('data', {}).get('text/html')
                if not html_content or '<table' not in html_content.lower():
                    continue
                try:
                    # StringIO wrapper: read_html on a literal string is
                    # deprecated in pandas >= 2.1.
                    dataframes[f"Table_{i}"] = pd.read_html(StringIO(html_content))[0]
                except Exception:
                    continue
        return dataframes

    def get_column_info(self, df: pd.DataFrame) -> Dict[str, Any]:
        """Summarize *df*: columns grouped by dtype, shape, memory, nulls."""
        return {
            'columns': list(df.columns),
            'numeric_columns': list(df.select_dtypes(include=['number']).columns),
            'categorical_columns': list(df.select_dtypes(include=['object', 'category']).columns),
            'datetime_columns': list(df.select_dtypes(include=['datetime']).columns),
            'shape': df.shape,
            'info': {
                'memory_usage': df.memory_usage(deep=True).sum(),
                'null_counts': df.isnull().sum().to_dict()
            }
        }

    def preview_data(self, df: pd.DataFrame, n_rows: int = 5) -> Dict[str, Any]:
        """Return head/tail/random-sample rows plus describe() stats.

        Args:
            df: DataFrame to preview.
            n_rows: Maximum number of rows in each preview section.
        """
        return {
            'head': df.head(n_rows).to_dict('records'),
            'tail': df.tail(n_rows).to_dict('records'),
            # sample() raises on an empty frame, hence the length guard.
            'sample': df.sample(min(n_rows, len(df))).to_dict('records') if len(df) > 0 else [],
            'describe': df.describe().to_dict() if not df.empty else {}
        }