# Standard library
import logging
import os
import time
from datetime import datetime

# Third-party
import akshare as ak
import pandas as pd
import psycopg2
import psycopg2.extras

# Configure logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

def get_db_connection():
    """Establish connection to PostgreSQL database"""
    try:
        conn = psycopg2.connect(
            host="localhost",
            database="stock_db",
            user="leizhen",
            password="751982leizhen",
            port="5432"
        )
        return conn
    except Exception as e:
        logger.error(f"Database connection failed: {e}")
        raise

def get_column_types(df):
    """Generate PostgreSQL column types based on DataFrame dtypes"""
    type_mapping = {
        'object': 'TEXT',
        'int64': 'BIGINT',
        'float64': 'DOUBLE PRECISION',
        'bool': 'BOOLEAN',
        'datetime64[ns]': 'TIMESTAMP'
    }
    return {col: type_mapping.get(str(df[col].dtype), 'TEXT') for col in df.columns}

def create_table_if_not_exists(conn, df):
    """Create table with dynamic schema based on DataFrame columns"""
    # Log actual column names for debugging
    logger.info(f"DataFrame columns: {list(df.columns)}")
    
    # Find suitable primary key columns for balance sheet data
    pk_columns = []
    for col in ['SECUCODE', 'REPORT_DATE', 'symbol']:
        if col in df.columns:
            pk_columns.append(f'"{col}"')
            if len(pk_columns) >= 2:
                break
    
    if len(pk_columns) < 2:
        pk_columns = [f'"{col}"' for col in df.columns[:2]]
        logger.warning(f"Using first two columns as primary key: {pk_columns}")
    
    column_types = get_column_types(df)
    columns_sql = ",\n".join(
        [f'"{col}" {dtype}' for col, dtype in column_types.items()]
    )
    
    # First try to create table if not exists
    create_table_sql = f"""
    CREATE TABLE IF NOT EXISTS stock_balance_sheet_by_report_em (
        {columns_sql},
        PRIMARY KEY ({', '.join(pk_columns)})
    );
    """
    
    try:
        with conn.cursor() as cursor:
            cursor.execute(create_table_sql)
            
            # Check for missing columns if table already exists
            cursor.execute("""
                SELECT column_name 
                FROM information_schema.columns 
                WHERE table_name = 'stock_balance_sheet_by_report_em'
            """)
            existing_columns = {row[0] for row in cursor.fetchall()}
            
            # Add any missing columns
            for col, dtype in column_types.items():
                if col not in existing_columns:
                    alter_sql = f"""
                    ALTER TABLE stock_balance_sheet_by_report_em
                    ADD COLUMN "{col}" {dtype}
                    """
                    cursor.execute(alter_sql)
                    logger.info(f"Added missing column: {col}")
            
            conn.commit()
            logger.info("Table schema verified and updated")
    except Exception as e:
        logger.error(f"Error creating table: {e}")
        conn.rollback()
        raise

def upsert_data(conn, df):
    """Upsert data into the table with enhanced data handling"""
    # Find primary key columns (same logic as create_table_if_not_exists)
    pk_columns = []
    for col in ['SECUCODE', 'REPORT_DATE', 'symbol']:
        if col in df.columns:
            pk_columns.append(f'"{col}"')
            if len(pk_columns) >= 2:
                break
    if len(pk_columns) < 2:
        pk_columns = [f'"{col}"' for col in df.columns[:2]]
    
    columns = [f'"{col}"' for col in df.columns]
    columns_sql = ", ".join(columns)
    values_sql = ", ".join(["%s"] * len(df.columns))
    update_set = ", ".join([f'"{col}" = EXCLUDED."{col}"' for col in df.columns])
    
    upsert_sql = f"""
    INSERT INTO stock_balance_sheet_by_report_em (
        {columns_sql}
    ) VALUES ({values_sql})
    ON CONFLICT ({', '.join(pk_columns)}) DO UPDATE SET
        {update_set};
    """
    
    try:
        with conn.cursor() as cursor:
            # Enhanced data conversion and validation
            data = []
            for idx, record in enumerate(df.to_dict('records')):
                try:
                    converted = {}
                    for col, val in record.items():
                        if pd.isna(val):
                            converted[col] = None
                        elif hasattr(val, 'item'):  # Convert numpy types
                            converted[col] = val.item()
                        elif isinstance(val, (list, dict)):
                            converted[col] = str(val)  # Serialize complex types
                        else:
                            converted[col] = val
                    
                    # Validate truly required fields (symbol and report date)
                    required_fields = ['symbol', 'REPORT_DATE']  # Only these are mandatory
                    missing_fields = [f for f in required_fields if not converted.get(f)]
                    if missing_fields:
                        logger.warning(f"Skipping record {idx} missing required fields: {missing_fields}")
                        continue
                        
                    data.append(tuple(converted.values()))
                except Exception as e:
                    logger.error(f"Error converting record {idx}: {e}")
                    continue
            
            if not data:
                logger.warning("No valid data to upsert")
                return
            
            # Execute in smaller batches for reliability
            batch_size = 100
            total_upserted = 0
            for i in range(0, len(data), batch_size):
                batch = data[i:i + batch_size]
                try:
                    psycopg2.extras.execute_batch(cursor, upsert_sql, batch)
                    total_upserted += len(batch)
                except Exception as e:
                    logger.error(f"Error upserting batch {i//batch_size}: {e}")
                    conn.rollback()
                    # Retry failed batch individually
                    for j, record in enumerate(batch):
                        try:
                            cursor.execute(upsert_sql, record)
                            total_upserted += 1
                        except Exception as e:
                            logger.error(f"Error upserting record {i+j}: {e}")
                            continue
            
            # Get actual inserted and updated counts
            cursor.execute("""
                SELECT 
                    SUM(CASE WHEN xmax = 0 THEN 1 ELSE 0 END) AS inserted,
                    SUM(CASE WHEN xmax <> 0 THEN 1 ELSE 0 END) AS updated
                FROM stock_balance_sheet_by_report_em
                WHERE ctid IN (
                    SELECT ctid 
                    FROM stock_balance_sheet_by_report_em 
                    ORDER BY ctid DESC 
                    LIMIT %s
                )
            """, (total_upserted,))
            counts = cursor.fetchone()
            inserted = counts[0] or 0
            updated = counts[1] or 0
            conn.commit()
            logger.info(f"Inserted {inserted} new records, updated {updated} existing records")
    except Exception as e:
        logger.error(f"Error upserting data: {e}")
        conn.rollback()
        raise

def get_all_stock_symbols(conn):
    """Get all stock symbols from stocks table"""
    try:
        with conn.cursor() as cursor:
            cursor.execute("SELECT DISTINCT symbol FROM stocks")
            return [row[0] for row in cursor.fetchall()]
    except Exception as e:
        logger.error(f"Error fetching stock symbols: {e}")
        raise

def process_stock(conn, symbol):
    """Process financial report for a single stock"""
    try:
        logger.info(f"Processing {symbol}...")
        # Add market prefix if missing
        full_symbol = f"SH{symbol}" if len(symbol) == 6 else symbol
        full_symbol = f"SZ{symbol}" if len(symbol) == 6 and symbol.startswith(('0', '3')) else full_symbol
        
        # Get data with error handling
        result = ak.stock_balance_sheet_by_report_em(symbol=full_symbol)
        if result is None:
            logger.warning(f"No data returned for {full_symbol}")
            return 0
            
        # Log raw API response for debugging
        logger.debug(f"Raw API response for {full_symbol}: {str(result)[:200]}...")
            
        # Handle different response formats
        if isinstance(result, pd.DataFrame):
            df = result.copy()
        elif isinstance(result, dict):
            if "data" in result:
                df = pd.DataFrame(result["data"])
            else:
                df = pd.DataFrame([result])
        else:
            df = pd.DataFrame(result)
        
        if df.empty:
            logger.warning(f"Empty dataframe for {full_symbol}")
            return 0
            
        # Log dataframe info for debugging
        logger.debug(f"DataFrame info for {full_symbol}:")
        logger.debug(f"Columns: {df.columns.tolist()}")
        logger.debug(f"First row: {df.iloc[0].to_dict()}")
            
        # Transform columns to match tp.xlsx format
        df = df.rename(columns={
            'SECUCODE': 'SECUCODE',
            'SECURITY_CODE': 'SECURITY_CODE', 
            'SECURITY_NAME_ABBR': 'SECURITY_NAME_ABBR',
            'REPORT_DATE': 'REPORT_DATE'
        })
        
        # Add missing required columns
        if 'symbol' not in df.columns:
            df['symbol'] = symbol
            
        # Ensure we have all columns from tp.xlsx
        required_cols = ['SECUCODE', 'SECURITY_CODE', 'SECURITY_NAME_ABBR', 'ORG_CODE', 
                        'ORG_TYPE', 'REPORT_DATE', 'REPORT_TYPE', 'OPINION_TYPE']
        for col in required_cols:
            if col not in df.columns:
                df[col] = None
                
        # Ensure table exists with correct schema
        create_table_if_not_exists(conn, df)
        
        # Upsert data
        upsert_data(conn, df)
        return len(df)
        
    except Exception as e:
        logger.error(f"Error processing {symbol}: {e}")
        return 0

def main():
    try:
        # Connect to database
        conn = get_db_connection()
        
        # Get all stock symbols
        symbols = get_all_stock_symbols(conn)
        if not symbols:
            raise ValueError("No stock symbols found in database")
            
        logger.info(f"Found {len(symbols)} stocks to process")
        
        # Process in batches of 10 with delays
        batch_size = 10
        total_processed = 0
        for batch_start in range(0, len(symbols), batch_size):
            batch = symbols[batch_start:batch_start + batch_size]
            
            # Reconnect for each batch
            conn = get_db_connection()
            
            for i, symbol in enumerate(batch, 1):
                try:
                    count = process_stock(conn, symbol)
                    total_processed += count
                    logger.info(f"Progress: {batch_start + i}/{len(symbols)} stocks, {total_processed} records")
                except Exception as e:
                    logger.error(f"Failed to process {symbol}: {e}")
                    continue
                    
            # Commit after each batch
            conn.commit()
            conn.close()
            
            # Add delay between batches
            time.sleep(5)
                
        logger.info(f"Completed processing. Total records updated: {total_processed}")
        
    except Exception as e:
        logger.error(f"Error in main execution: {e}")
    finally:
        if 'conn' in locals():
            conn.close()

if __name__ == "__main__":
    main()
