"""
Schwab Market Data API Client

Simplified client for making API calls to fetch historical price data.
"""

import os
import time
import logging
import pytz
import requests
import pandas as pd
from datetime import datetime, timedelta
from typing import Optional, Dict, List, Tuple
from dataclasses import dataclass

# Configure logging
# Module-wide setup: INFO level, timestamped "time - LEVEL - message" lines.
# Note: basicConfig is a no-op if the root logger is already configured by
# an importing application.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger used by every function/class below.
logger = logging.getLogger(__name__)

@dataclass
class APIConfig:
    """Configuration for the Schwab Market Data API."""
    # Base URL for the market-data endpoint group (no trailing slash).
    server: str = "https://api.schwabapi.com/marketdata/v1"
    # Per-request timeout in seconds, passed straight to requests.get().
    timeout: int = 30
    # Seconds slept after every request (success or failure) to stay
    # under the API's rate limit.
    rate_limit_delay: float = 1.0

class PeriodOptimizer:
    """Selects the largest request chunk the API accepts for a date range."""

    # Supported day-period sizes, ordered largest-first so the first match
    # is the biggest chunk that still fits.
    VALID_PERIODS = [10, 5, 4, 3, 2, 1]

    @staticmethod
    def get_optimal_period(days_remaining: int) -> int:
        """
        Determine the optimal period size for fetching data.
        Uses decreasing chunks: 10, 5, 4, 3, 2, 1 days.

        Args:
            days_remaining: Number of days left to fetch

        Returns:
            Optimal period size in days (0 when nothing remains)
        """
        return next(
            (size for size in PeriodOptimizer.VALID_PERIODS if size <= days_remaining),
            0,
        )

class SchwabMarketDataClient:
    """Client for fetching market data from Schwab API."""
    
    def __init__(self, api_config: APIConfig, access_token: str):
        self.api_config = api_config
        self.access_token = access_token
    
    def get_price_history(self, symbol: str, start_date: Optional[str], 
                         end_date: Optional[str], time_interval: int,
                         need_extended_hours: bool = False, only_last_row: bool = False) -> Optional[List[Dict]]:
        """
        Get historical price data with optimal period chunking.
        
        Args:
            symbol: Stock symbol
            start_date: Start date in YYYY-MM-DD format or Unix timestamp (ms) (default: 9:30am today)
            end_date: End date in YYYY-MM-DD format or Unix timestamp (ms) (default: right now)
            time_interval: Time interval in minutes (1, 5, 10, 15, 30)
            need_extended_hours: Whether to include extended hours data (default: False)
            only_last_row: If True, return only the last candle (default: False)
            
        Returns:
            List of candle dictionaries or None if failed
        """
        if not self._validate_inputs(symbol, time_interval):
            return None
        
        start_dt, end_dt = self._parse_dates(start_date, end_date)
        logger.info(f"🔄 Starting data fetch for {symbol} from {start_dt.strftime('%Y-%m-%d %H:%M:%S')} to {end_dt.strftime('%Y-%m-%d %H:%M:%S')}")
        
        all_candles = self._fetch_all_periods(symbol, start_dt, end_dt, time_interval, need_extended_hours)
        
        if not all_candles:
            logger.warning(f"⚠️  No data retrieved for {symbol}_{time_interval}m")
            return None
        
        # If only_last_row is True, return only the last candle
        if only_last_row:
            # Sort by timestamp and get the last one
            sorted_candles = sorted(all_candles, key=lambda x: x.get('datetime', 0))
            last_candle = sorted_candles[-1] if sorted_candles else None
            if last_candle:
                logger.info(f"✅ Returning only last row (timestamp: {last_candle.get('datetime', 'N/A')})")
                return [last_candle]
            else:
                return None
        
        logger.info(f"✅ Retrieved {len(all_candles)} total candles")
        return all_candles
    
    def _validate_inputs(self, symbol: str, time_interval: int) -> bool:
        """Validate input parameters."""
        valid_intervals = [1, 5, 10, 15, 30]
        
        if time_interval not in valid_intervals:
            logger.error(f"❌ Invalid time interval: {time_interval}. Valid values: {valid_intervals}")
            return False
        
        if not symbol:
            logger.error("❌ Invalid symbol provided")
            return False
        
        if not self.access_token:
            logger.error("❌ Access token is not set")
            return False
        
        return True
    
    def _parse_dates(self, start_date: Optional[str], end_date: Optional[str]) -> Tuple[datetime, datetime]:
        """Parse and localize start and end dates with defaults.
        
        Accepts either YYYY-MM-DD format strings or Unix timestamps in milliseconds.
        """
        timezone = pytz.timezone('America/New_York')
        now = datetime.now(timezone)
        
        def _parse_date_input(date_input: Optional[str], default: datetime) -> datetime:
            """Parse date input, handling both YYYY-MM-DD strings and timestamps."""
            if not date_input:
                return default
            
            # Try to parse as timestamp (milliseconds)
            try:
                timestamp_ms = int(date_input)
                # If timestamp is less than 13 digits, it might be in seconds
                if timestamp_ms < 10000000000:
                    timestamp_ms = timestamp_ms * 1000
                dt = datetime.fromtimestamp(timestamp_ms / 1000, tz=pytz.UTC)
                return dt.astimezone(timezone)
            except (ValueError, OverflowError):
                # Not a timestamp, try YYYY-MM-DD format
                try:
                    dt = datetime.strptime(date_input, "%Y-%m-%d")
                    if not dt.tzinfo:
                        dt = timezone.localize(dt)
                    return dt
                except ValueError:
                    raise ValueError(f"Invalid date format: {date_input}. Use YYYY-MM-DD or Unix timestamp (ms)")
        
        # Default end_date: right now
        if end_date:
            end_dt = _parse_date_input(end_date, now)
            # If it was a date string (not timestamp), set to end of day
            if end_date and not end_date.isdigit():
                end_dt = end_dt.replace(hour=23, minute=59, second=59)
        else:
            end_dt = now
        
        # Default start_date: 9:30am today
        if start_date:
            start_dt = _parse_date_input(start_date, now)
            # If it was a date string (not timestamp), set to market open (9:30am)
            if start_date and not start_date.isdigit():
                start_dt = start_dt.replace(hour=9, minute=30, second=0)
        else:
            start_dt = now.replace(hour=9, minute=30, second=0)
            if start_dt > now:
                # If 9:30am today hasn't happened yet, use 9:30am yesterday
                start_dt = (now - timedelta(days=1)).replace(hour=9, minute=30, second=0)
        
        return start_dt, end_dt
    
    def _fetch_all_periods(self, symbol: str, start_dt: datetime, end_dt: datetime, 
                          time_interval: int, need_extended_hours: bool = False) -> List[Dict]:
        """Fetch data for all periods using optimal chunking."""
        all_candles = []
        current_start_dt = start_dt
        
        while current_start_dt <= end_dt:
            days_remaining = (end_dt - current_start_dt).days + 1
            period = PeriodOptimizer.get_optimal_period(days_remaining)
            
            if period == 0:
                logger.info(f"✅ Completed fetching data for {symbol}")
                break
            
            period_end_dt = current_start_dt + timedelta(days=period - 1)
            if period_end_dt > end_dt:
                period_end_dt = end_dt
            
            candles = self._fetch_period(symbol, current_start_dt, period_end_dt, period, time_interval, need_extended_hours)
            if candles is None:
                return []
            
            all_candles.extend(candles)
            current_start_dt = period_end_dt + timedelta(days=1)
        
        return all_candles
    
    def _fetch_period(self, symbol: str, start_dt: datetime, end_dt: datetime, 
                     period: int, time_interval: int, need_extended_hours: bool = False) -> Optional[List[Dict]]:
        """Fetch data for a single period."""
        start_time_ms = int(start_dt.timestamp() * 1000)
        end_time_ms = int(end_dt.timestamp() * 1000)
        
        params = {
            'symbol': symbol,
            'periodType': 'day',
            'period': period,
            'frequencyType': 'minute',
            'frequency': time_interval,
            'startDate': start_time_ms,
            'endDate': end_time_ms,
            'needExtendedHoursData': 'true' if need_extended_hours else 'false',
            'needPreviousClose': 'true'
        }
        
        headers = {'Authorization': f'Bearer {self.access_token}'}
        url = f"{self.api_config.server}/pricehistory"
        
        logger.info(f"📡 Fetching {period} days for {symbol} ({time_interval}m) from {start_dt.strftime('%Y-%m-%d')} to {end_dt.strftime('%Y-%m-%d')}")
        
        try:
            response = requests.get(url, headers=headers, params=params, timeout=self.api_config.timeout)
            time.sleep(self.api_config.rate_limit_delay)
            
            if response.status_code == 200:
                data = response.json()
                
                if 'candles' in data and data['candles']:
                    candles = data['candles']
                    logger.info(f"✅ Retrieved {len(candles)} candles for this period")
                    return candles
                else:
                    logger.info("📊 No candle data found in API response for this period")
                    return []
            else:
                logger.error(f"❌ API request failed: {response.status_code}")
                if response.text:
                    logger.error(f"Response: {response.text[:200]}...")
                return None
                
        except requests.exceptions.RequestException as e:
            logger.error(f"❌ Network error fetching price history: {e}")
            return None
        except Exception as e:
            logger.error(f"❌ Unexpected error fetching price history: {e}")
            return None

def load_access_token(token_file: str = "schwab_access_token.txt") -> str:
    """Load access token from local file.

    Args:
        token_file: Path to the file holding the bearer token

    Returns:
        The token string with surrounding whitespace stripped

    Raises:
        FileNotFoundError: If the token file does not exist
        ValueError: If the token file is empty
        Exception: For any other read error (original cause chained)
    """
    try:
        with open(token_file, 'r') as f:
            token = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(f"Access token file {token_file} not found")
    except Exception as e:
        raise Exception(f"Error reading access token file: {e}") from e
    # Validate outside the try block so this ValueError is not swallowed by
    # the broad handler and re-wrapped as a generic Exception (previous bug).
    if not token:
        raise ValueError(f"Access token file {token_file} is empty")
    return token

def load_symbols(symbols_file: str = "symbols.txt") -> List[str]:
    """Load symbols from comma-separated text file.

    Args:
        symbols_file: Path to a file like "AAPL, MSFT, SPY"

    Returns:
        List of non-empty, whitespace-stripped symbols

    Raises:
        FileNotFoundError: If the symbols file does not exist
        ValueError: If the file contains no symbols
        Exception: For any other read error (original cause chained)
    """
    try:
        with open(symbols_file, 'r') as f:
            content = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(f"Symbols file {symbols_file} not found")
    except Exception as e:
        raise Exception(f"Error reading symbols file: {e}") from e
    symbols = [s.strip() for s in content.split(',') if s.strip()]
    # One emptiness check covers both a blank file and one containing only
    # commas/whitespace. Raised outside the try so the ValueError is not
    # swallowed and re-wrapped as a generic Exception (previous bug).
    if not symbols:
        raise ValueError(f"Symbols file {symbols_file} is empty")
    return symbols

def load_timeframes(timeframes_file: str = "timeframes.txt") -> List[int]:
    """Load timeframes from comma-separated text file.

    Entries may be plain integers ("5") or minute-suffixed ("5m").

    Args:
        timeframes_file: Path to a file like "1m, 5, 15m"

    Returns:
        List of timeframe values in minutes

    Raises:
        FileNotFoundError: If the timeframes file does not exist
        ValueError: If the file is empty or an entry is not a valid integer
        Exception: For any other read error (original cause chained)
    """
    try:
        with open(timeframes_file, 'r') as f:
            content = f.read().strip()
    except FileNotFoundError:
        raise FileNotFoundError(f"Timeframes file {timeframes_file} not found")
    except Exception as e:
        raise Exception(f"Error reading timeframes file: {e}") from e
    timeframes = []
    for entry in content.split(','):
        entry = entry.strip()
        if not entry:
            continue
        # Handle formats like "5m" or "5"
        digits = entry[:-1] if entry.endswith('m') else entry
        try:
            timeframes.append(int(digits))
        except ValueError:
            # Surface a precise error for a bad entry instead of letting the
            # broad handler re-wrap it as a generic Exception (previous bug).
            raise ValueError(f"Invalid timeframe entry '{entry}' in {timeframes_file}") from None
    if not timeframes:
        raise ValueError(f"Timeframes file {timeframes_file} is empty")
    return timeframes

def save_candles_to_csv(candles: List[Dict], output_path: str):
    """
    Save candles data to CSV file. Appends to existing file if it exists, merging data.

    Args:
        candles: List of candle dictionaries from API
        output_path: Full path to output CSV file
    """
    if not candles:
        logger.warning(f"⚠️  No candles to save to {output_path}")
        return

    eastern = pytz.timezone('America/New_York')

    def _to_row(candle: Dict) -> Dict:
        """Flatten one API candle into a CSV row with an Eastern-time label."""
        ts_ms = candle.get('datetime', 0)
        as_utc = datetime.fromtimestamp(ts_ms / 1000, tz=pytz.UTC)
        return {
            'timestamp': ts_ms,
            'datetime': as_utc.astimezone(eastern).strftime('%Y-%m-%d %H:%M:%S %Z'),
            'open': candle.get('open', 0),
            'high': candle.get('high', 0),
            'low': candle.get('low', 0),
            'close': candle.get('close', 0),
            'volume': candle.get('volume', 0)
        }

    new_df = pd.DataFrame([_to_row(c) for c in candles])
    new_df = new_df.sort_values('timestamp').drop_duplicates(subset=['timestamp'])

    # Ensure the destination directory exists before writing.
    parent = os.path.dirname(output_path)
    if parent:
        os.makedirs(parent, exist_ok=True)

    if not os.path.exists(output_path):
        # Fresh file: write the new rows directly.
        new_df.to_csv(output_path, index=False)
        logger.info(f"💾 Data saved to {output_path}")
        return

    # Existing file: merge, preferring the freshly fetched row on
    # timestamp collisions, then rewrite sorted.
    existing_df = pd.read_csv(output_path)
    logger.info(f"📁 Found existing file with {len(existing_df)} records")

    combined_df = (
        pd.concat([existing_df, new_df], ignore_index=True)
        .drop_duplicates(subset=['timestamp'], keep='last')
        .sort_values('timestamp')
    )

    logger.info(f"🔄 Combined data: {len(existing_df)} existing + {len(new_df)} new = {len(combined_df)} total records")

    combined_df.to_csv(output_path, index=False)
    logger.info(f"💾 Data appended to {output_path}")

def get_last_timestamp_from_csv(file_path: str) -> Optional[int]:
    """
    Get the last timestamp from an existing CSV file.

    Args:
        file_path: Path to CSV file

    Returns:
        Last timestamp in milliseconds, or None if file doesn't exist or is empty
    """
    if not os.path.exists(file_path):
        return None

    try:
        frame = pd.read_csv(file_path)
        if 'timestamp' not in frame.columns or frame.empty:
            return None
        # Rows may not be sorted, so take the maximum rather than the
        # literal last row.
        return int(frame['timestamp'].max())
    except Exception as e:
        logger.warning(f"⚠️  Error reading {file_path}: {e}")
        return None

def get_default_output_path(timeframe: int, symbol: str) -> str:
    """
    Generate default output path: data/equity/{timeframe}/{symbol}.csv

    Args:
        timeframe: Time interval in minutes
        symbol: Stock symbol

    Returns:
        Output file path
    """
    return "data/equity/{0}m/{1}.csv".format(timeframe, symbol)

def main(start_date: Optional[str] = None, end_date: Optional[str] = None, 
         output_path_template: Optional[str] = None, need_extended_hours: bool = False,
         only_last_row: bool = False):
    """
    Main function to fetch market data.

    Reads the token, symbol list, and timeframe list from their default
    local files, then fetches and persists candles for every
    symbol x timeframe combination.
    
    Args:
        start_date: Start date in YYYY-MM-DD format or Unix timestamp (ms) (default: 9:30am today)
        end_date: End date in YYYY-MM-DD format or Unix timestamp (ms) (default: right now)
        output_path_template: Path template for output files. Use {timeframe} and {symbol} as placeholders.
                             Default: data/equity/{timeframe}m/{symbol}.csv
        need_extended_hours: Whether to include extended hours data (default: False)
        only_last_row: If True, fetch and save only the last row of data (default: False)

    Raises:
        FileNotFoundError/ValueError/Exception: Propagated from the token,
        symbols, and timeframes loaders.
    """
    # Load access token
    access_token = load_access_token()
    logger.info("✅ Loaded access token")
    
    # Load symbols and timeframes
    symbols = load_symbols()
    timeframes = load_timeframes()
    logger.info(f"✅ Loaded {len(symbols)} symbols and {len(timeframes)} timeframes")
    
    # Configuration
    api_config = APIConfig()
    
    # Create client
    client = SchwabMarketDataClient(api_config, access_token)
    
    # Fetch data for each symbol and timeframe combination
    for symbol in symbols:
        for time_interval in timeframes:
            logger.info(f"🔄 Processing {symbol} with {time_interval}m timeframe")
            
            # Determine output path
            if output_path_template:
                output_path = output_path_template.format(timeframe=time_interval, symbol=symbol)
            else:
                output_path = get_default_output_path(time_interval, symbol)
            
            # Check if file exists - if so, use last row's timestamp as start_date
            # (incremental update: resume from the newest record on disk;
            # the overlapping candle is deduplicated on save with keep='last').
            actual_start_date = start_date
            if os.path.exists(output_path):
                last_timestamp = get_last_timestamp_from_csv(output_path)
                if last_timestamp:
                    actual_start_date = str(last_timestamp)
                    logger.info(f"📁 File exists, using last timestamp as start date: {last_timestamp}")
                else:
                    logger.warning(f"⚠️  File exists but couldn't read last timestamp, using provided start_date")
            
            # Always use current time as end_date if file exists (to get latest data)
            actual_end_date = end_date if not os.path.exists(output_path) else None
            
            candles = client.get_price_history(symbol, actual_start_date, actual_end_date, time_interval, need_extended_hours, only_last_row)
            
            if candles:
                logger.info(f"✅ Retrieved {len(candles)} candles for {symbol} ({time_interval}m)")
                
                # Save to CSV (will append if file exists)
                save_candles_to_csv(candles, output_path)
            else:
                logger.warning(f"⚠️  No data retrieved for {symbol} ({time_interval}m)")

if __name__ == "__main__":
    import sys

    # Positional CLI arguments (all optional):
    #   1: start_date  2: end_date  3: output_path_template
    #   4: extended-hours flag      5: only-last-row flag
    argv = sys.argv[1:]

    # Flags accept "true", "1", "yes", "y" (case insensitive).
    truthy = ('true', '1', 'yes', 'y')

    start_date = argv[0] if len(argv) >= 1 else None
    end_date = argv[1] if len(argv) >= 2 else None
    output_path_template = argv[2] if len(argv) >= 3 else None
    need_extended_hours = len(argv) >= 4 and argv[3].lower() in truthy
    only_last_row = len(argv) >= 5 and argv[4].lower() in truthy

    main(start_date=start_date, end_date=end_date, output_path_template=output_path_template, 
         need_extended_hours=need_extended_hours, only_last_row=only_last_row)

