import argparse
import json
import logging
import os
import re
from datetime import datetime

import pandas as pd

# Module-wide logging: timestamped, level-tagged messages via the root handler.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
# Per-module logger used by all functions below.
logger = logging.getLogger(__name__)

def analyze_six_data(application_path, allowance_path, filter_fields="", exclude_fields="", df=False):
    """
    Analyze travel expense data from Excel files.

    Args:
        application_path (str): Path to the business travel application Excel file
        allowance_path (str): Path to the travel allowance Excel file
        filter_fields (str): Keywords to keep in "出差目的名称" (space-separated,
            OR relationship between keywords)
        exclude_fields (str): Keywords to drop from "出差目的名称" (space-separated,
            OR relationship between keywords)
        df (bool): If True, return the filtered pandas DataFrame instead of a
            list of dictionaries

    Returns:
        list | pandas.DataFrame: Records whose combined payment amount is at
        least 160, or an empty list if any step fails.
    """
    try:
        logger.info(f"Loading data from application file: {application_path}")
        logger.info(f"Loading data from allowance file: {allowance_path}")

        # Read both Excel files
        df_six_1 = pd.read_excel(application_path)
        df_six_2 = pd.read_excel(allowance_path)

        # Blank out missing cells so string concatenation below never sees NaN.
        df_six_1 = df_six_1.fillna("")

        # Keep rows whose trip purpose contains ANY filter keyword.
        # re.escape prevents user keywords with regex metacharacters
        # (e.g. "(", "+") from breaking or mis-matching the pattern.
        if filter_fields:
            keywords = [re.escape(keyword.strip()) for keyword in filter_fields.split()]
            filter_mask = df_six_1["出差目的名称"].str.contains('|'.join(keywords), na=False)
            df_six_1 = df_six_1[filter_mask]

        # Drop rows whose trip purpose contains ANY exclude keyword.
        if exclude_fields:
            keywords = [re.escape(keyword.strip()) for keyword in exclude_fields.split()]
            exclude_mask = ~df_six_1["出差目的名称"].str.contains('|'.join(keywords), na=False)
            df_six_1 = df_six_1[exclude_mask]

        # Build a join key: record id + employee id (both assumed string-like
        # here since NaNs were replaced — TODO confirm column dtypes upstream).
        df_six_1["标识"] = df_six_1["唯一标识"] + df_six_1["人员编号"]
        df_six_2["标识"] = df_six_2["唯一标识"] + df_six_2["员工"]

        # Pull the allowance amount onto each application row.
        df_six_1 = df_six_1.merge(df_six_2[["标识", "金额"]], how="left", on="标识")

        # Coerce non-numeric placeholders (the "" from fillna) to 0.
        # Accept int as well as float — the previous float-only check silently
        # zeroed integer amounts. bool is excluded since it subclasses int.
        df_six_1["收款金额"] = df_six_1["收款金额"].apply(
            lambda x: x if isinstance(x, (int, float)) and not isinstance(x, bool) else 0
        )
        df_six_1["收款金额"] = df_six_1["收款金额"].fillna(0) + df_six_1["金额"].fillna(0)

        # Clean up intermediate columns
        df_six_1.drop(["标识", "金额"], axis=1, inplace=True)

        # Only combined amounts of at least 160 are reported.
        df_six_1 = df_six_1[df_six_1["收款金额"] >= 160]
        df_six_1.reset_index(drop=True, inplace=True)

        if df:
            return df_six_1
        # One dict per row — idiomatic replacement for T.to_dict().values().
        return df_six_1.to_dict(orient="records")

    except Exception as e:
        # logger.exception records the traceback, not just the message.
        logger.exception(f"Error analyzing data: {str(e)}")
        return []

def save_results(data, output_format='excel', output_path=None):
    """
    Save analysis results to file.

    Args:
        data (list): Analysis results (list of row dicts); nothing is written
            when empty
        output_format (str): Output format ('excel'; any other value is skipped
            with an informational log message)
        output_path (str): Output file path; defaults to a fixed name under
            data/alldata/
    """
    if not data:
        logger.warning("No data to save")
        return

    if output_path is None:
        # Generate default output filename in alldata directory
        output_path = os.path.join('data', 'alldata', '交通补助不合理分析结果(无自带车).xlsx')

    try:
        # dirname is "" for a bare filename, and os.makedirs("") raises
        # FileNotFoundError — only create a directory when there is one.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        if output_format == 'excel':
            df = pd.DataFrame(data)
            df.to_excel(output_path, index=False)
            logger.info(f"Results saved to Excel file: {output_path}")
        else:
            logger.info("JSON output format is disabled")
    except Exception as e:
        # logger.exception records the traceback, not just the message.
        logger.exception(f"Error saving results: {str(e)}")

def main(once=False):
    """
    CLI entry point: parse options, verify input files, run the analysis.

    Args:
        once (bool): When True, return the raw analysis results to the caller
            instead of printing a summary and saving them to a file.
    """
    # Command-line options for filtering and output control.
    parser = argparse.ArgumentParser(description='分析交通补助报销数据')
    parser.add_argument('--filter', help='出差目的名称过滤条件', default='')
    parser.add_argument('--exclude', help='出差目的名称排除条件', default='')
    parser.add_argument('--output-format', choices=['excel'], default='excel',
                      help='输出文件格式 (仅支持excel)')
    parser.add_argument('--output-path', help='输出文件路径')
    args = parser.parse_args()

    # Fixed input locations under the local data/ directory.
    application_path = os.path.join('data', '商旅申请单据.xlsx')
    allowance_path = os.path.join('data', '差旅费出差补助.xlsx')

    # Bail out early if either input workbook is missing.
    required = (
        (application_path, "商旅申请单据文件不存在"),
        (allowance_path, "差旅费出差补助文件不存在"),
    )
    for path, label in required:
        if not os.path.exists(path):
            logger.error(f"{label}: {path}")
            return

    logger.info("开始数据分析...")
    results = analyze_six_data(application_path, allowance_path, args.filter, args.exclude)
    if once:
        return results

    logger.info(f"分析完成，共找到 {len(results)} 条记录")
    save_results(results, args.output_format, args.output_path)

# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main() 