"""
xlcig Notebook Enterprise Web Application
Flask-based professional data visualization platform
"""

from flask import Flask, render_template, request, jsonify, send_file, send_from_directory
import pandas as pd
import json
import os
import tempfile
import traceback
from typing import Dict, List, Optional, Tuple, Any
import uuid
from werkzeug.utils import secure_filename
import base64
import io
import numpy as np
from datetime import datetime

from file_parser import FileParser
from chart_generator import ChartGenerator

# Flask application object; config is read by the upload/export handlers below.
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024  # 100MB max file size
app.config['UPLOAD_FOLDER'] = tempfile.gettempdir()  # scratch dir for uploads before parsing

class DataAnalyzer:
    """Enterprise data analysis engine.

    Keeps parsed dataframes per session in memory (``session_data``) and
    delegates file parsing to ``FileParser`` and ECharts config generation
    to ``ChartGenerator``.
    """

    def __init__(self):
        self.file_parser = FileParser()
        self.chart_generator = ChartGenerator()
        # session_id -> {'dataframes': {...}, 'file_info': [...], 'charts': [...]}
        self.session_data = {}

    def process_files(self, files) -> Dict[str, Any]:
        """Parse uploaded files and register them under a new session.

        Args:
            files: iterable of werkzeug ``FileStorage`` objects.

        Returns:
            On success: dict with ``success``, ``session_id``, ``file_count``,
            ``total_rows`` and ``file_info``. On failure: dict with
            ``success=False``, ``error`` and ``traceback``.
        """
        session_id = str(uuid.uuid4())

        try:
            all_dataframes = {}
            file_info = []

            for file in files:
                if not (file and file.filename):
                    continue

                # Persist the upload to a temp path so FileParser can read it.
                filename = secure_filename(file.filename)
                temp_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
                file.save(temp_path)

                try:
                    file_dataframes = self.file_parser.load_file(temp_path)

                    base_name = os.path.splitext(filename)[0]
                    for df_name, df in file_dataframes.items():
                        if df.empty:
                            continue
                        # Disambiguate multi-sheet files as "<file>_<sheet>".
                        key = f"{base_name}_{df_name}" if df_name != base_name else base_name
                        all_dataframes[key] = df
                        file_info.append(self._describe_dataframe(key, df))
                        # Charts are generated later, when the user clicks analyze.
                except Exception as e:
                    # Skip unparseable files instead of failing the whole batch.
                    # (Fixed: the message previously omitted the filename.)
                    print(f"Error processing {filename}: {e}")
                    continue
                finally:
                    # Always remove the temp copy, whether parsing succeeded or not.
                    if os.path.exists(temp_path):
                        os.remove(temp_path)

            self.session_data[session_id] = {
                'dataframes': all_dataframes,
                'file_info': file_info,
                'charts': []  # populated by the analyze endpoint
            }

            return {
                'success': True,
                'session_id': session_id,
                'file_count': len(file_info),
                'total_rows': sum(info['rows'] for info in file_info),
                'file_info': file_info
                # No charts returned on upload
            }

        except Exception as e:
            return {
                'success': False,
                'error': str(e),
                'traceback': traceback.format_exc()
            }

    @staticmethod
    def _describe_dataframe(name: str, df: pd.DataFrame) -> Dict[str, Any]:
        """Build the summary record shown in the UI for one dataframe."""
        return {
            'name': name,
            'rows': df.shape[0],
            'columns': df.shape[1],
            'size': f"{df.memory_usage(deep=True).sum() / 1024:.1f} KB",
            'numeric_cols': len(df.select_dtypes(include=['number']).columns),
            'categorical_cols': len(df.select_dtypes(include=['object', 'category']).columns)
        }

    def generate_dataset_charts(self, df: pd.DataFrame, dataset_name: str) -> List[Dict]:
        """Generate a default set of ECharts configs for a dataset.

        Chart types are chosen from the mix of numeric and categorical
        columns; charts whose config generation reports an error are
        silently skipped. Returns an empty list on any unexpected failure.
        """
        try:
            column_info = self.file_parser.get_column_info(df)
            numeric_cols = column_info['numeric_columns']
            categorical_cols = column_info['categorical_columns']

            # Clean and format dataset name for titles
            clean_name = self.format_dataset_name(dataset_name)

            chart_configs = []

            def try_add(chart_type: str, x_col: Optional[str], y_col: Optional[str], title: str) -> None:
                # Generate one config and append it unless the generator errored.
                config = self.chart_generator.generate_echarts_config(
                    df, chart_type, x_col, y_col, title
                )
                if not config.get('error'):
                    chart_configs.append({
                        'id': f'{dataset_name}_{chart_type}',
                        'title': title,
                        'type': chart_type,
                        'config': config,
                        'dataset': dataset_name,
                        'x_col': x_col,
                        'y_col': y_col
                    })

            # 1. Bar chart for categorical vs numeric
            if categorical_cols and numeric_cols:
                try_add('bar', categorical_cols[0], numeric_cols[0],
                        f"{clean_name} - {categorical_cols[0]} Distribution")

            # 2. Line chart for trends
            if len(numeric_cols) >= 1:
                try_add('line', None, numeric_cols[0],
                        f"{clean_name} - {numeric_cols[0]} Trend")

            # 3. Pie chart for categorical distribution
            if categorical_cols:
                try_add('pie', categorical_cols[0],
                        numeric_cols[0] if numeric_cols else None,
                        f"{clean_name} - {categorical_cols[0]} Composition")

            # 4. Scatter plot for correlation
            if len(numeric_cols) >= 2:
                try_add('scatter', numeric_cols[0], numeric_cols[1],
                        f"{clean_name} - {numeric_cols[0]} vs {numeric_cols[1]}")

            # 5. Histogram for distribution analysis
            if numeric_cols:
                try_add('histogram', numeric_cols[0], None,
                        f"{clean_name} - {numeric_cols[0]} Distribution")

            return chart_configs

        except Exception as e:
            print(f"Error generating charts for {dataset_name}: {e}")
            return []

    def format_dataset_name(self, dataset_name: str) -> str:
        """Format a dataset key into a professional chart-title prefix.

        Strips sheet suffixes and file extensions, expands known
        sample/test prefixes, and title-cases the rest. Falls back to
        'Dataset' for an empty result.
        """
        # Remove file extensions and common prefixes
        name = dataset_name.replace('_Sheet1', '').replace('.xlsx', '').replace('.csv', '')

        # Handle common patterns
        if 'sample_data' in name.lower():
            name = name.replace('sample_data', 'Sample Dataset').replace('_', ' ')
        elif 'test_data' in name.lower():
            name = name.replace('test_data', 'Test Dataset').replace('_', ' ')
        else:
            # Convert underscores to spaces and title case
            name = name.replace('_', ' ').title()

        # Clean up multiple spaces
        name = ' '.join(name.split())

        return name or 'Dataset'

    def get_available_chart_types(self, df: pd.DataFrame) -> List[Dict]:
        """List every chart type the generator supports, flagging the ones
        the generator recommends for this dataframe. Returns [] on error."""
        try:
            recommendations = self.chart_generator.get_chart_recommendations(df)
            chart_types = []

            for chart_type in self.chart_generator.chart_types:
                chart_types.append({
                    'type': chart_type,
                    'name': chart_type.title(),
                    'recommended': chart_type in recommendations['suitable_charts']
                })

            return chart_types

        except Exception as e:
            print(f"Error getting chart types: {e}")
            return []

    def generate_custom_chart(self, session_id: str, dataset_name: str,
                            chart_type: str, x_col: Optional[str], y_col: Optional[str]) -> Dict:
        """Generate one user-configured chart for a stored dataset.

        Returns ``{'error': True, 'message': ...}`` for unknown sessions,
        unknown datasets, or generator failures; otherwise a chart record
        with ``success=True`` and the ECharts ``config``.
        """
        try:
            if session_id not in self.session_data:
                return {'error': True, 'message': 'Session not found'}

            dataframes = self.session_data[session_id]['dataframes']
            if dataset_name not in dataframes:
                return {'error': True, 'message': 'Dataset not found'}

            df = dataframes[dataset_name]
            title = f"{dataset_name} - {chart_type.title()}"

            config = self.chart_generator.generate_echarts_config(
                df, chart_type, x_col, y_col, title
            )

            # Propagate generator-level errors unchanged.
            if config.get('error'):
                return config

            return {
                'success': True,
                'id': f'{dataset_name}_{chart_type}_{x_col}_{y_col}',
                'title': title,
                'type': chart_type,
                'config': config,
                'dataset': dataset_name,
                'x_col': x_col,
                'y_col': y_col
            }

        except Exception as e:
            return {
                'error': True,
                'message': str(e)
            }

# Module-level analyzer shared by all requests; session state lives in
# analyzer.session_data (in-memory only, lost on process restart).
analyzer = DataAnalyzer()

@app.route('/')
def index():
    """Serve the main dashboard page."""
    page = render_template('index.html')
    return page

@app.route('/api/upload', methods=['POST'])
def upload_files():
    """Accept uploaded files, run initial processing, and return a session summary."""
    try:
        if 'files' not in request.files:
            return jsonify({'success': False, 'error': 'No files provided'})

        uploads = request.files.getlist('files')
        # Reject the request when nothing was sent or every part has an empty name.
        if not uploads or not any(f.filename != '' for f in uploads):
            return jsonify({'success': False, 'error': 'No files selected'})

        return jsonify(analyzer.process_files(uploads))

    except Exception as exc:
        return jsonify({
            'success': False,
            'error': str(exc),
            'traceback': traceback.format_exc()
        })

@app.route('/api/analyze/<session_id>', methods=['POST'])
def analyze_data(session_id):
    """Build chart configurations for every dataset stored in a session."""
    try:
        session = analyzer.session_data.get(session_id)
        if session is None:
            return jsonify({'success': False, 'error': 'Session not found'})

        # Collect default charts for each dataframe in the session.
        charts = []
        for name, frame in session['dataframes'].items():
            charts += analyzer.generate_dataset_charts(frame, name)

        # Cache the generated charts back onto the session record.
        session['charts'] = charts

        return jsonify({
            'success': True,
            'session_id': session_id,
            'file_info': session['file_info'],
            'charts': charts
        })

    except Exception as exc:
        return jsonify({
            'success': False,
            'error': str(exc),
            'traceback': traceback.format_exc()
        })

@app.route('/api/session/<session_id>')
def get_session_data(session_id):
    """Return the stored file info and charts for an existing session."""
    session = analyzer.session_data.get(session_id)
    if session is None:
        return jsonify({'success': False, 'error': 'Session not found'})
    return jsonify({
        'success': True,
        'file_info': session['file_info'],
        'charts': session['charts']
    })

@app.route('/api/chart-types/<session_id>/<dataset_name>')
def get_chart_types(session_id, dataset_name):
    """List available chart types, columns, and shape info for one dataset."""
    try:
        session = analyzer.session_data.get(session_id)
        if session is None:
            return jsonify({'success': False, 'error': 'Session not found'})

        frames = session['dataframes']
        if dataset_name not in frames:
            return jsonify({'success': False, 'error': 'Dataset not found'})

        df = frames[dataset_name]

        # Column breakdown drives which chart types are recommended.
        info = analyzer.file_parser.get_column_info(df)
        numeric = info['numeric_columns']
        categorical = info['categorical_columns']
        n_num, n_cat = len(numeric), len(categorical)

        # Basic chart types are always offered.
        options = [
            {'type': 'line', 'name': 'Line Chart', 'recommended': n_num >= 1},
            {'type': 'bar', 'name': 'Bar Chart', 'recommended': n_cat >= 1 and n_num >= 1},
            {'type': 'pie', 'name': 'Pie Chart', 'recommended': n_cat >= 1},
            {'type': 'histogram', 'name': 'Histogram', 'recommended': n_num >= 1},
        ]

        # Advanced chart types depend on how many numeric columns exist.
        if n_num >= 2:
            options.append({'type': 'scatter', 'name': 'Scatter Plot', 'recommended': True})
            options.append({'type': 'heatmap', 'name': 'Heatmap', 'recommended': n_num >= 3})
        if n_num >= 3:
            options.append({'type': 'radar', 'name': 'Radar Chart', 'recommended': n_num <= 8})
        options.append({'type': 'funnel', 'name': 'Funnel Chart', 'recommended': n_cat >= 1})

        return jsonify({
            'success': True,
            'chart_types': options,
            'columns': {
                'all': list(df.columns),
                'numeric': numeric,
                'categorical': categorical
            },
            'data_info': {
                'rows': len(df),
                'columns': len(df.columns),
                'numeric_count': n_num,
                'categorical_count': n_cat
            }
        })

    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)})

@app.route('/api/generate-chart', methods=['POST'])
def generate_chart():
    """Generate a user-configured chart from the posted JSON parameters."""
    try:
        payload = request.get_json()
        result = analyzer.generate_custom_chart(
            payload.get('session_id'),
            payload.get('dataset'),
            payload.get('chart_type'),
            payload.get('x_col'),
            payload.get('y_col'),
        )
        return jsonify(result)

    except Exception as exc:
        return jsonify({
            'success': False,
            'error': str(exc)
        })

@app.route('/api/dashboard/<session_id>')
def get_dashboard_config(session_id):
    """Assemble preset dashboard layout configurations from a session's charts."""
    try:
        session = analyzer.session_data.get(session_id)
        if session is None:
            return jsonify({'success': False, 'error': 'Session not found'})

        charts = session['charts']
        # Executive view shows only high-level categorical charts.
        executive_charts = [c for c in charts if c['type'] in ['pie', 'bar']][:4]

        dashboard_config = {
            'datasets': list(session['dataframes'].keys()),
            'charts': charts,
            'layouts': [
                {
                    'name': 'Overview Dashboard',
                    'description': 'Comprehensive overview of all datasets',
                    'grid': '2x3',
                    'charts': charts[:6],  # first six charts
                },
                {
                    'name': 'Detailed Analysis',
                    'description': 'Detailed analysis with large charts',
                    'grid': '1x2',
                    'charts': charts[:2],  # first two charts, shown larger
                },
                {
                    'name': 'Executive Summary',
                    'description': 'High-level metrics for executives',
                    'grid': '2x2',
                    'charts': executive_charts,
                },
            ],
        }

        return jsonify({'success': True, 'dashboard': dashboard_config})

    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)})

@app.route('/api/data_cleaning/<session_id>', methods=['POST'])
def clean_data(session_id):
    """Apply one data-cleaning operation to a dataset in a session.

    Supported operations: remove_nulls, fill_nulls, remove_duplicates,
    normalize (min-max), outlier_removal (1.5*IQR). The cleaned frame
    replaces the session's stored copy; the response includes a summary
    message, updated file info, and a 10-row preview.
    """
    try:
        if session_id not in analyzer.session_data:
            return jsonify({'success': False, 'error': 'Session not found'})

        data = request.get_json()
        dataset_name = data.get('dataset')
        operation = data.get('operation')

        session_data = analyzer.session_data[session_id]
        if dataset_name not in session_data['dataframes']:
            return jsonify({'success': False, 'error': 'Dataset not found'})

        # Work on a copy so a failed operation never corrupts the stored frame.
        df = session_data['dataframes'][dataset_name].copy()

        if operation == 'remove_nulls':
            # Remove rows with null values
            original_rows = len(df)
            df = df.dropna()
            removed_rows = original_rows - len(df)
            message = f"Removed {removed_rows} rows with null values"

        elif operation == 'fill_nulls':
            # Fill null values with mean (numeric) or mode (everything else).
            for col in df.columns:
                if pd.api.types.is_numeric_dtype(df[col]):
                    # Assign back instead of fillna(inplace=True) on a column
                    # selection: that chained form is deprecated and becomes a
                    # silent no-op under pandas copy-on-write.
                    df[col] = df[col].fillna(df[col].mean())
                else:
                    mode = df[col].mode()
                    df[col] = df[col].fillna(mode.iloc[0] if not mode.empty else 'Unknown')
            message = "Filled null values with statistical defaults"

        elif operation == 'remove_duplicates':
            # Remove duplicate rows
            original_rows = len(df)
            df = df.drop_duplicates()
            removed_rows = original_rows - len(df)
            message = f"Removed {removed_rows} duplicate rows"

        elif operation == 'normalize':
            # Min-max normalize numeric columns into [0, 1].
            numeric_cols = df.select_dtypes(include=[np.number]).columns
            col_min = df[numeric_cols].min()
            # Constant columns have range 0; substitute 1 so they map to 0
            # instead of producing NaN from 0/0.
            col_range = (df[numeric_cols].max() - col_min).replace(0, 1)
            df[numeric_cols] = (df[numeric_cols] - col_min) / col_range
            message = f"Normalized {len(numeric_cols)} numeric columns"

        elif operation == 'outlier_removal':
            # Trim rows outside 1.5*IQR per numeric column. Columns are
            # processed sequentially, so later bounds reflect earlier removals.
            numeric_cols = df.select_dtypes(include=[np.number]).columns
            original_rows = len(df)

            for col in numeric_cols:
                q1 = df[col].quantile(0.25)
                q3 = df[col].quantile(0.75)
                iqr = q3 - q1
                lower_bound = q1 - 1.5 * iqr
                upper_bound = q3 + 1.5 * iqr
                df = df[(df[col] >= lower_bound) & (df[col] <= upper_bound)]

            removed_rows = original_rows - len(df)
            message = f"Removed {removed_rows} outlier rows from {len(numeric_cols)} numeric columns"

        else:
            return jsonify({'success': False, 'error': 'Unknown operation'})

        # Commit the cleaned frame back to the session.
        session_data['dataframes'][dataset_name] = df

        updated_info = {
            'name': dataset_name,
            'rows': df.shape[0],
            'columns': df.shape[1],
            'size': f"{df.memory_usage(deep=True).sum() / 1024:.1f} KB",
            'numeric_cols': len(df.select_dtypes(include=['number']).columns),
            'categorical_cols': len(df.select_dtypes(include=['object', 'category']).columns)
        }

        return jsonify({
            'success': True,
            'message': message,
            'updated_info': updated_info,
            'preview': df.head(10).to_dict('records')
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})

@app.route('/api/statistical_analysis/<session_id>', methods=['POST'])
def statistical_analysis(session_id):
    """Generate a statistical analysis report for one dataset.

    Returns basic info, column-type counts, numeric describe() output,
    top-10 categorical value counts, a correlation matrix, and per-column
    missing-data stats. numpy integer scalars (from sum()/value_counts())
    are cast to plain ints because Flask's jsonify cannot serialize them.
    """
    try:
        if session_id not in analyzer.session_data:
            return jsonify({'success': False, 'error': 'Session not found'})

        data = request.get_json()
        dataset_name = data.get('dataset')

        session_data = analyzer.session_data[session_id]
        if dataset_name not in session_data['dataframes']:
            return jsonify({'success': False, 'error': 'Dataset not found'})

        df = session_data['dataframes'][dataset_name]

        # Basic statistics
        stats = {
            'basic_info': {
                'total_rows': len(df),
                'total_columns': len(df.columns),
                'memory_usage': f"{df.memory_usage(deep=True).sum() / 1024:.1f} KB",
                # int(): these sums are numpy int64, which jsonify rejects.
                'null_values': int(df.isnull().sum().sum()),
                'duplicate_rows': int(df.duplicated().sum())
            },
            'column_types': {
                'numeric': len(df.select_dtypes(include=[np.number]).columns),
                'categorical': len(df.select_dtypes(include=['object', 'category']).columns),
                'datetime': len(df.select_dtypes(include=['datetime']).columns)
            },
            'numeric_summary': {},
            'categorical_summary': {},
            'correlation_matrix': {},
            'missing_data': {}
        }

        # Numeric columns analysis (describe() yields floats, JSON-safe).
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) > 0:
            numeric_desc = df[numeric_cols].describe()
            stats['numeric_summary'] = numeric_desc.to_dict()

            # Correlation matrix needs at least two numeric columns.
            if len(numeric_cols) > 1:
                corr_matrix = df[numeric_cols].corr()
                stats['correlation_matrix'] = corr_matrix.to_dict()

        # Categorical columns analysis: top 10 values per column.
        categorical_cols = df.select_dtypes(include=['object', 'category']).columns
        for col in categorical_cols:
            value_counts = df[col].value_counts().head(10)
            stats['categorical_summary'][col] = {
                'unique_values': int(df[col].nunique()),
                # Cast counts to plain ints for JSON serialization.
                'most_frequent': {k: int(v) for k, v in value_counts.items()}
            }

        # Missing data analysis: only columns that actually have nulls.
        missing_data = df.isnull().sum()
        stats['missing_data'] = {
            col: {'count': int(missing_data[col]), 'percentage': f"{missing_data[col]/len(df)*100:.1f}%"}
            for col in df.columns if missing_data[col] > 0
        }

        return jsonify({
            'success': True,
            'statistics': stats
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})

@app.route('/api/export_data/<session_id>', methods=['POST'])
def export_data(session_id):
    """Export one dataset as CSV, Excel, or JSON.

    Serializes entirely into an in-memory buffer and streams it with
    send_file. (The previous implementation created NamedTemporaryFile
    objects with delete=False and never removed them, leaking a file per
    export; it also duplicated the module-level tempfile/os imports and
    used the raw user-supplied dataset name in the download filename.)
    """
    try:
        if session_id not in analyzer.session_data:
            return jsonify({'success': False, 'error': 'Session not found'})

        data = request.get_json()
        dataset_name = data.get('dataset')
        export_format = data.get('format', 'csv')

        session_data = analyzer.session_data[session_id]
        if dataset_name not in session_data['dataframes']:
            return jsonify({'success': False, 'error': 'Dataset not found'})

        df = session_data['dataframes'][dataset_name]

        # Sanitize the user-supplied name before using it as a filename.
        safe_name = secure_filename(dataset_name) or 'dataset'

        if export_format == 'csv':
            payload = df.to_csv(index=False).encode('utf-8')
            filename = f"{safe_name}.csv"
            mimetype = 'text/csv'

        elif export_format == 'excel':
            # to_excel needs a binary handle; BytesIO keeps it off disk.
            excel_buffer = io.BytesIO()
            df.to_excel(excel_buffer, index=False)
            payload = excel_buffer.getvalue()
            filename = f"{safe_name}.xlsx"
            mimetype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'

        elif export_format == 'json':
            payload = df.to_json(orient='records', indent=2).encode('utf-8')
            filename = f"{safe_name}.json"
            mimetype = 'application/json'

        else:
            return jsonify({'success': False, 'error': 'Unsupported format'})

        # Stream the bytes directly; nothing is left behind on disk.
        return send_file(
            io.BytesIO(payload),
            as_attachment=True,
            download_name=filename,
            mimetype=mimetype
        )

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})

@app.route('/api/advanced_charts/<session_id>', methods=['POST'])
def generate_advanced_charts(session_id):
    """Generate one of the advanced chart types for a dataset."""
    try:
        session = analyzer.session_data.get(session_id)
        if session is None:
            return jsonify({'success': False, 'error': 'Session not found'})

        payload = request.get_json()
        dataset_name = payload.get('dataset')
        chart_type = payload.get('chart_type')
        _options = payload.get('options', {})  # reserved for per-chart options

        frames = session['dataframes']
        if dataset_name not in frames:
            return jsonify({'success': False, 'error': 'Dataset not found'})

        df = frames[dataset_name]
        clean_name = analyzer.format_dataset_name(dataset_name)
        # All advanced types key off the numeric columns.
        numeric_cols = df.select_dtypes(include=[np.number]).columns

        if chart_type == 'correlation_matrix':
            # Enhanced correlation heatmap over all numeric columns.
            if len(numeric_cols) < 2:
                return jsonify({'success': False, 'error': 'Need at least 2 numeric columns'})
            title = f"{clean_name} - Correlation Matrix"
            config = analyzer.chart_generator.generate_echarts_config(
                df, 'heatmap', None, None, title
            )

        elif chart_type == 'box_plot':
            # Box plot of the first numeric column's distribution.
            if len(numeric_cols) == 0:
                return jsonify({'success': False, 'error': 'No numeric columns found'})
            title = f"{clean_name} - Distribution Box Plot"
            config = analyzer.chart_generator.generate_echarts_config(
                df, 'box', numeric_cols[0], None, title
            )

        elif chart_type == 'multi_line':
            # Multi-line trend comparison over up to five numeric columns.
            if len(numeric_cols) < 2:
                return jsonify({'success': False, 'error': 'Need at least 2 numeric columns'})
            title = f"{clean_name} - Multi-Variable Trends"
            config = analyzer.chart_generator._create_multi_line_echarts(df, numeric_cols[:5], title)

        else:
            return jsonify({'success': False, 'error': 'Unsupported advanced chart type'})

        if config.get('error'):
            return jsonify({'success': False, 'error': config.get('message', 'Chart generation failed')})

        return jsonify({
            'success': True,
            'chart': {
                'id': f'{dataset_name}_{chart_type}',
                'title': title,
                'type': chart_type,
                'config': config,
                'dataset': dataset_name
            }
        })

    except Exception as exc:
        return jsonify({'success': False, 'error': str(exc)})

# Static file routes for SEO and PWA
@app.route('/favicon.png')
def favicon():
    """Serve the site favicon, checking known locations in order."""
    try:
        for candidate in ('favicon.png', 'static/favicon.png'):
            if os.path.exists(candidate):
                return send_file(candidate)
        # No favicon found anywhere we looked.
        return '', 404
    except Exception as exc:
        print(f"Error serving favicon: {exc}")
        return '', 404

@app.route('/manifest.json')
def manifest():
    """Generate the PWA web-app manifest used for mobile install support."""
    origin = request.url_root.rstrip('/')

    def _png(path, sizes, **extra):
        # One PNG entry (icon or screenshot), with src absolute to this origin.
        entry = {"src": f"{origin}{path}", "sizes": sizes, "type": "image/png"}
        entry.update(extra)
        return entry

    # Shortcut entries reuse the same small favicon reference.
    shortcut_icons = [{"src": f"{origin}/static/img/favicon.png", "sizes": "96x96"}]

    return jsonify({
        "name": "xlcig Notebook - 免费在线数据可视化平台",
        "short_name": "xlcig分析",
        "description": "免费在线数据可视化工具，无需下载安装，一键生成专业图表",
        "start_url": "/",
        "display": "standalone",
        "background_color": "#ffffff",
        "theme_color": "#4A90E2",
        "orientation": "portrait-primary",
        "scope": "/",
        "lang": "zh-CN",
        "dir": "ltr",
        "categories": ["business", "productivity", "utilities"],
        "icons": [
            _png("/static/img/favicon.png", "16x16"),
            _png("/static/img/favicon.png", "32x32"),
            _png("/static/img/logo.png", "180x180", purpose="any maskable"),
            _png("/static/img/logo.png", "512x512", purpose="any maskable"),
        ],
        "screenshots": [
            _png("/static/img/screenshot-wide.png", "1280x720", form_factor="wide"),
            _png("/static/img/screenshot-narrow.png", "390x844", form_factor="narrow"),
        ],
        "shortcuts": [
            {
                "name": "上传数据",
                "short_name": "上传",
                "description": "直接上传CSV或Excel文件开始分析",
                "url": "/?action=upload",
                "icons": shortcut_icons,
            },
            {
                "name": "历史数据",
                "short_name": "历史",
                "description": "查看和管理历史数据集",
                "url": "/?action=history",
                "icons": shortcut_icons,
            },
        ],
    })

@app.route('/.well-known/security.txt')
def security():
    """Serve a plain-text security.txt for responsible vulnerability disclosure."""
    fields = (
        'Contact: mailto:security@your-domain.com',
        'Expires: 2025-12-31T23:59:59.000Z',
        'Encryption: https://your-domain.com/pgp-key.txt',
        'Preferred-Languages: zh-CN, en',
        'Canonical: https://your-domain.com/.well-known/security.txt',
        'Policy: https://your-domain.com/security-policy',
        'Acknowledgments: https://your-domain.com/security-thanks',
    )
    # Trailing newline keeps the body identical to the original literal.
    return app.response_class('\n'.join(fields) + '\n', mimetype='text/plain')

@app.route('/robots.txt')
def robots():
    """Generate robots.txt for SEO."""
    origin = request.url_root.rstrip('/')

    # Global rules, sitemap pointer, and blocked paths.
    lines = [
        'User-agent: *',
        'Allow: /',
        'Allow: /static/',
        'Allow: /?lang=zh',
        'Allow: /?lang=en',
        '',
        '# 重要页面',
        f'Sitemap: {origin}/sitemap.xml',
        '',
        '# 禁止访问的路径',
        'Disallow: /api/',
        'Disallow: /uploads/',
        'Disallow: /temp/',
        'Disallow: /*.json$',
        'Disallow: /admin/',
        'Disallow: /private/',
        '',
        '# 爬虫延迟 (秒)',
        'Crawl-delay: 1',
        '',
        '# 特定搜索引擎',
    ]

    # Per-crawler stanzas: (agent name, crawl delay in seconds).
    for agent, delay in (
        ('Baiduspider', 2),
        ('Sogou', 2),
        ('360Spider', 2),
        ('Googlebot', 1),
        ('Bingbot', 1),
    ):
        lines += [f'User-agent: {agent}', 'Allow: /', f'Crawl-delay: {delay}', '']

    return app.response_class('\n'.join(lines), mimetype='text/plain')

@app.route('/sitemap.xml')
def sitemap():
    """Generate sitemap.xml for SEO.

    Lists the root page plus both language variants, each with hreflang
    alternate links, all stamped with today's date as lastmod.
    """
    base_url = request.url_root.rstrip('/')
    # `datetime` is already imported at module level; the old function-local
    # re-import was redundant and has been removed.
    current_date = datetime.now().strftime('%Y-%m-%d')
    
    sitemap_content = f'''<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
        xmlns:xhtml="http://www.w3.org/1999/xhtml">
    <url>
        <loc>{base_url}/</loc>
        <lastmod>{current_date}</lastmod>
        <changefreq>daily</changefreq>
        <priority>1.0</priority>
        <xhtml:link rel="alternate" hreflang="zh-CN" href="{base_url}/?lang=zh"/>
        <xhtml:link rel="alternate" hreflang="en" href="{base_url}/?lang=en"/>
        <xhtml:link rel="alternate" hreflang="x-default" href="{base_url}/"/>
    </url>
    <url>
        <loc>{base_url}/?lang=zh</loc>
        <lastmod>{current_date}</lastmod>
        <changefreq>daily</changefreq>
        <priority>0.9</priority>
        <xhtml:link rel="alternate" hreflang="en" href="{base_url}/?lang=en"/>
        <xhtml:link rel="alternate" hreflang="zh-CN" href="{base_url}/?lang=zh"/>
    </url>
    <url>
        <loc>{base_url}/?lang=en</loc>
        <lastmod>{current_date}</lastmod>
        <changefreq>daily</changefreq>
        <priority>0.9</priority>
        <xhtml:link rel="alternate" hreflang="zh-CN" href="{base_url}/?lang=zh"/>
        <xhtml:link rel="alternate" hreflang="en" href="{base_url}/?lang=en"/>
    </url>
</urlset>'''
    
    return app.response_class(sitemap_content, mimetype='application/xml')

if __name__ == '__main__':
    # Debug mode stays on by default (unchanged local-dev behavior), but can
    # now be disabled via FLASK_DEBUG=0 for deployment — Werkzeug's debugger
    # allows arbitrary code execution when exposed on 0.0.0.0.
    debug_mode = os.environ.get('FLASK_DEBUG', '1') == '1'
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', '8080')), debug=debug_mode)