#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""This module contains functions to save solving results of water supply models.
"""

from loguru import logger
import argparse
import json

import numpy as np
import xarray as xr
import pandas as pd

from optmodel.log_config import timer
from optmodel.utils import cartesian_product


def create_data_array(
    data : dict, dims : list, unit : str, model : object
) -> xr.DataArray:
    """Create an xarray DataArray with specified data, dimensions, coordinates
    and units.

    Parameters
    ----------
    data : dict
        Mapping from index keys (tuples, or scalars for 1-D data) to model
        variables; each variable is resolved via ``model.get_value``.
    dims : list
        The list of dimensions of the data; each dim name must be an
        attribute of ``model`` holding that dimension's coordinate values.
    unit : str
        The unit of the data, stored in the DataArray's ``attrs``.
    model : object
        The model object providing the coordinates and ``get_value``.

    Returns
    -------
    xr.DataArray
        A DataArray with the specified data, dimensions, coordinates and units.
    """
    coords = {dim: getattr(model, dim) for dim in dims}
    index_tuple = cartesian_product(*coords.values())
    if len(dims) == 1:
        # 1-D variables are keyed by scalars, not by 1-tuples.
        index_tuple = [i[0] for i in index_tuple]
    # Resolve each model variable to its solved value; entries that cannot
    # be resolved are skipped and later default to 0.0.
    data_values = {}
    for key, var in data.items():
        try:
            data_values[key] = model.get_value(var)
        except Exception as e:
            # Use the module logger (consistent with the rest of this file)
            # instead of printing to stdout.
            logger.warning(f"Failed to get value for {key}: {e}")

    # Use a fresh name so the `data` parameter is not shadowed.
    values = np.array(
        [data_values.get(tuple_, 0.0) for tuple_ in index_tuple]
    ).reshape([len(coord) for coord in coords.values()])
    return xr.DataArray(data=values,
                        dims=dims,
                        coords=coords,
                        attrs={'unit': unit})


@timer
def extract_water_supply_results(model : object) -> xr.Dataset:
    """Extracts results for water supply models.

    Parameters
    ----------
    model : object
        Model object solved already.

    Returns
    -------
    xr.Dataset
        A Dataset containing DataArrays for each attribute of the model.
    """
    def _scalar_value(attr_name: str) -> float:
        # Best-effort extraction of a scalar objective term: fall back to
        # 0.0 with a warning so one missing attribute does not abort saving.
        try:
            return model.get_value(getattr(model, attr_name))
        except Exception as e:
            logger.warning(f"Failed to get {attr_name} value: {e}")
            return 0.0

    data_vars = {}

    # Reservoir inflow (1e6 m**3)
    data_vars['inflow'] = create_data_array(
        model.inflow, ['reservoir', 'time'], '10**6m**3', model
    )

    # Reservoir outflow (1e6 m**3)
    data_vars['outflow'] = create_data_array(
        model.outflow, ['reservoir', 'time'], '10**6m**3', model
    )

    # Water supply flow per user (1e6 m**3)
    data_vars['supplyflow'] = create_data_array(
        model.supplyflow, ['reservoir', 'water_user', 'time'], '10**6m**3', model
    )

    # Reservoir storage (1e6 m**3); note the period-boundary axis `time_p`.
    data_vars['storage'] = create_data_array(
        model.storage, ['reservoir', 'time_p'], '10**6m**3', model
    )

    # Spill flow (1e6 m**3)
    data_vars['spillflow'] = create_data_array(
        model.spillflow, ['reservoir', 'time'], '10**6m**3', model
    )

    # Imported water from external sources (1e6 m**3)
    data_vars['importflow'] = create_data_array(
        model.importflow, ['external_water_sources', 'reservoir', 'time'],
        '10**6m**3', model
    )

    # Scalar totals: total spill (1e6 m**3), end-of-flood-season storage
    # (ratio), and end-of-dispatch-term total storage.
    data_vars['spillflow_total'] = xr.DataArray(
        data=_scalar_value('spillflow_total')
    )
    data_vars['end_storage_flood_season_total'] = xr.DataArray(
        data=_scalar_value('end_storage_flood_season_total')
    )
    data_vars['end_storage_dispatch_term_total'] = xr.DataArray(
        data=_scalar_value('end_storage_dispatch_term_total')
    )

    return xr.Dataset(data_vars)


@timer
def save_to_excel(
    ds : xr.Dataset,
    output_filename : str
) -> None:
    """Save the results to an Excel file.

    Variables with more rows than ``max_rows`` are split across numbered
    sheets (``<key>_1``, ``<key>_2``, ...).

    Parameters
    ----------
    ds : xr.Dataset
        Dataset containing the results.
    output_filename : str
        The name of the output file (without the ``.xlsx`` extension).
    """
    # Excel caps a worksheet at 1,048,576 rows; stay safely below that.
    max_rows = 1_000_000
    # pylint: disable=abstract-class-instantiated
    with pd.ExcelWriter(
        f'{output_filename}.xlsx', engine='xlsxwriter'
    ) as writer:
        for key in ds.data_vars:
            if len(ds[key].shape) == 0:
                # Scalar variable: wrap the single value in a one-row frame.
                df = pd.DataFrame([ds[key].values.max()], columns=[key])
            else:
                df = ds[key].to_dataframe()
            n_rows = len(df)
            if n_rows <= max_rows:
                df.to_excel(writer, sheet_name=key, merge_cells=False)
            else:
                # Compute the sheet count once (was previously computed twice).
                n_sheets = int(np.ceil(n_rows / max_rows))
                logger.info(
                    "{} has {} rows, split into {} sheets",
                    key, n_rows, n_sheets
                )
                for i in range(n_sheets):
                    start_row = i * max_rows
                    end_row = min((i + 1) * max_rows, n_rows)
                    df_part = df.iloc[start_row:end_row]
                    df_part.to_excel(
                        writer, sheet_name=f'{key}_{i+1}', merge_cells=False
                    )


def save_to_json_direct(model : object, output_filename : str) -> None:
    """Save model results directly to ``<output_filename>.json`` without
    creating an intermediate xarray Dataset.

    Parameters
    ----------
    model : object
        The model object to extract results from.
    output_filename : str
        The name of the output file (without the ``.json`` extension).
    """
    # Initialize the main data structure.
    data_dict = {
        'variables': {},
        'metadata': {
            'description': 'Water supply model results',
            'units': '10**6m**3',
            'variable_info': {}
        }
    }

    def _to_float(value):
        # Convert numpy/solver scalars into plain floats for JSON output.
        return float(value) if hasattr(value, '__float__') else value

    def _record(key, value, dims):
        # Build one JSON record mapping each dim name to its key component.
        rec = {dim: str(part) for dim, part in zip(dims, key)}
        rec['value'] = _to_float(value)
        return rec

    def extract_model_data(model_var, dims):
        """Extract data from a model variable and organize it by dimensions."""
        data_list = []

        if len(dims) == 1:
            # Scalar / 1-D data (e.g., spillflow_total).
            if hasattr(model_var, 'get_values'):
                values = model_var.get_values()
            elif isinstance(model_var, (int, float)):
                # A constant placeholder (e.g. 0.0) — do not call get_value,
                # which would fail on a non-variable argument.
                values = float(model_var)
            else:
                try:
                    values = model.get_value(model_var)
                except Exception as e:
                    logger.warning(f"Failed to get value for 1D variable: {e}")
                    values = 0.0

            if isinstance(values, dict):
                for key, value in values.items():
                    data_list.append({
                        dims[0]: str(key),
                        'value': _to_float(value)
                    })
            else:
                data_list.append({'value': _to_float(values)})
            return data_list

        # N-dimensional data: one unified path replaces the previously
        # duplicated 2-D and 3-D branches. Keys whose tuple length does not
        # match the declared dims are skipped, as before.
        try:
            if hasattr(model_var, 'items'):
                # tupledict-like container: resolve each entry individually.
                for key, var in model_var.items():
                    try:
                        value = model.get_value(var)
                    except Exception as e:
                        logger.warning(f"Failed to get value for key {key}: {e}")
                        continue
                    if isinstance(key, tuple) and len(key) == len(dims):
                        data_list.append(_record(key, value, dims))
            else:
                values = model.get_value(model_var)
                for key, value in values.items():
                    if isinstance(key, tuple) and len(key) == len(dims):
                        data_list.append(_record(key, value, dims))
        except Exception as e:
            logger.warning(f"Failed to extract {len(dims)}D data: {e}")

        return data_list

    # Extract each variable directly from the model.
    variables_config = {
        'inflow': (model.inflow, ['reservoir', 'time']),
        'outflow': (model.outflow, ['reservoir', 'time']),
        'supplyflow': (model.supplyflow, ['reservoir', 'water_user', 'time']),
        'storage': (model.storage, ['reservoir', 'time_p']),
        'spillflow': (model.spillflow, ['reservoir', 'time']),
        'importflow': (model.importflow, ['external_water_sources', 'reservoir', 'time'])
    }

    # Scalar totals may be absent on some model variants: fall back to a
    # 0.0 placeholder with a warning instead of aborting.
    for scalar_name in (
        'spillflow_total',
        'end_storage_flood_season_total',
        'end_storage_dispatch_term_total',
    ):
        try:
            variables_config[scalar_name] = (getattr(model, scalar_name), ['total'])
        except AttributeError as e:
            logger.warning(f"{scalar_name} not available: {e}")
            variables_config[scalar_name] = (0.0, ['total'])

    for var_name, (model_var, dims) in variables_config.items():
        try:
            # Store variable metadata.
            data_dict['metadata']['variable_info'][var_name] = {
                'dims': dims,
                'description': f'{var_name} in 10**6m**3'
            }

            # Extract and store variable data.
            data_dict['variables'][var_name] = extract_model_data(model_var, dims)

        except Exception as e:
            logger.warning(f"Failed to extract {var_name}: {e}")
            data_dict['variables'][var_name] = []

    # Save to JSON file.
    with open(f'{output_filename}.json', 'w', encoding='utf-8') as f:
        json.dump(data_dict, f, indent=2, ensure_ascii=False)


def save_to_json(ds : xr.Dataset, output_filename : str) -> None:
    """Serialize the dataset to ``<output_filename>_1.json`` as flat lists
    of coordinate/value records per variable, for human readability.

    Parameters
    ----------
    ds : xr.Dataset
        The dataset to save.
    output_filename : str
        The name of the output file.
    """
    def _py(obj):
        # Unwrap numpy scalars into native Python values for JSON encoding.
        return obj.item() if hasattr(obj, 'item') else obj

    # Variables live at the top level; per-variable dims/attrs go into
    # the metadata section.
    data_dict = {
        'variables': {},
        'metadata': {
            'global_attrs': dict(ds.attrs),
            'variable_info': {}
        }
    }

    for name, arr in ds.data_vars.items():
        data_dict['metadata']['variable_info'][name] = {
            'dims': list(arr.dims),
            'attrs': dict(arr.attrs)
        }

        records = []
        n_dims = len(arr.dims)

        if n_dims == 1:
            # One record per coordinate along the single axis.
            axis = arr.dims[0]
            for coord, value in zip(arr.coords[axis].values, arr.values):
                records.append({axis: _py(coord), 'value': _py(value)})

        elif n_dims == 2:
            # One record per coordinate pair, indexing values positionally.
            axis_a, axis_b = arr.dims
            coords_a = arr.coords[axis_a].values
            coords_b = arr.coords[axis_b].values
            for i, ca in enumerate(coords_a):
                for j, cb in enumerate(coords_b):
                    records.append({
                        axis_a: _py(ca),
                        axis_b: _py(cb),
                        'value': _py(arr.values[i, j])
                    })

        else:
            # Higher dimensions: enumerate the full coordinate product and
            # look each value up by label.
            all_coords = {dim: arr.coords[dim].values for dim in arr.dims}
            for combo in cartesian_product(*all_coords.values()):
                record = {
                    dim: _py(coord_val)
                    for dim, coord_val in zip(arr.dims, combo)
                }
                record['value'] = _py(arr.sel(**record).values)
                records.append(record)

        data_dict['variables'][name] = records

    # Save to JSON file.
    with open(f'{output_filename}_1.json', 'w', encoding='utf-8') as f:
        json.dump(data_dict, f, indent=2, ensure_ascii=False)


def save_result(model : object) -> None:
    """Extracts results from the provided water supply model and saves
    them to Excel and JSON files in the configured output folder.

    Parameters
    ----------
    model : object
        The model object to extract results from and save; output location
        and filename are read from ``model.param``.
    """
    import os

    # Read configuration once instead of repeating getattr(...).get(...).
    params = getattr(model, 'param', {})
    args = params.get('command_line_args', None)
    output_folder = params.get('output_folder', 'output')
    output_filename = params.get('output_filename', 'water_supply_results')

    # Ensure the output folder exists.
    os.makedirs(output_folder, exist_ok=True)

    # Build the full output path (without extension).
    full_output_path = os.path.join(output_folder, output_filename)

    if args:
        full_output_path = update_output_filename(full_output_path, args)

    ds = extract_water_supply_results(model)
    # ds.to_netcdf(f'{full_output_path}.nc')
    # logger.info(f"Results are written to {full_output_path}.nc")

    save_to_excel(ds, full_output_path)
    logger.info(f"Results are written to {full_output_path}.xlsx")

    # save_to_json(ds, full_output_path)
    # logger.info(f"Results are written to {full_output_path}_1.json")

    # save_to_json_direct writes `<path>.json` (not `_1.json`), so log the
    # actual filename it produces.
    save_to_json_direct(model, full_output_path)
    logger.info(f"Results are written to {full_output_path}.json")

def update_output_filename(
    output_filename : str, args : argparse.Namespace
) -> str:
    """Append non-None argument key/value pairs to the output filename.

    Parameters
    ----------
    output_filename : str
        The name of the output file.
    args : argparse.Namespace
        Arguments parsed by argparse.

    Returns
    -------
    str
        The updated output filename (``<name>_<key>_<value>_...``), or the
        original name when there is nothing to append.
    """
    # Guard clause: no args object, or nothing namespace-like, means the
    # filename stays untouched.
    if not (args and hasattr(args, '__dict__')):
        return output_filename

    parts = [
        f'{key}_{value}'
        for key, value in vars(args).items()
        if value is not None
    ]
    if not parts:
        return output_filename
    return f"{output_filename}_{'_'.join(parts)}"
