import pandas as pd
import re

def clean_duplicate_groups(file_path):
    """
    Remove duplicate groups of columns whose non-empty values are identical.

    Column groups are identified by a shared numeric suffix (e.g. Name1,
    CAS1, SMILES1 form group 1). Two groups are considered duplicates when,
    for every column both groups share, the values agree on all rows where
    both values are present. When a duplicate is found, the later
    (higher-numbered) group is dropped.

    Parameters
    ----------
    file_path : str | os.PathLike | pandas.DataFrame
        Path to an Excel file to read, or an already-loaded DataFrame
        (accepted in addition to a path; existing path-based callers are
        unaffected).

    Returns
    -------
    pandas.DataFrame
        The table with duplicate column groups removed. Returned unchanged
        if no numbered columns exist.
    """
    # Accept a pre-loaded DataFrame directly; otherwise read from Excel.
    if isinstance(file_path, pd.DataFrame):
        df = file_path
    else:
        df = pd.read_excel(file_path)

    # Map each group number to the exact columns carrying that suffix.
    # Recording the real column names here (instead of re-deriving them
    # later with str.endswith) avoids the suffix-ambiguity bug where e.g.
    # "Name12" ends with "2" and would be dropped along with group 2.
    group_cols = {}
    for col in df.columns:
        match = re.search(r'(\d+)$', str(col))
        if match:
            group_cols.setdefault(int(match.group(1)), []).append(col)

    group_numbers = sorted(group_cols)
    if not group_numbers:
        return df

    # Base column names with the numeric suffix stripped.
    base_cols = set()
    for col in df.columns:
        base_cols.add(re.sub(r'\d+$', '', str(col)))

    # Groups confirmed as duplicates of an earlier group.
    groups_to_drop = set()

    # Compare each group against every earlier group that still survives.
    for i, current_group in enumerate(group_numbers[1:], 1):
        current_cols = [f"{base}{current_group}" for base in base_cols]

        for prev_group in group_numbers[:i]:
            if prev_group in groups_to_drop:
                continue

            prev_cols = [f"{base}{prev_group}" for base in base_cols]

            # Groups match when every shared column agrees on rows where
            # both values are present (NaN on either side is ignored).
            is_identical = True
            for curr_col, prev_col in zip(current_cols, prev_cols):
                if curr_col not in df.columns or prev_col not in df.columns:
                    continue

                mask = (~df[curr_col].isna()) & (~df[prev_col].isna())
                if not df.loc[mask, curr_col].equals(df.loc[mask, prev_col]):
                    is_identical = False
                    break

            if is_identical:
                groups_to_drop.add(current_group)
                break

    # Drop exactly the columns belonging to the duplicate groups.
    cols_to_drop = []
    for group in groups_to_drop:
        cols_to_drop.extend(group_cols[group])

    df_cleaned = df.drop(columns=cols_to_drop)

    return df_cleaned

# Example usage
if __name__ == "__main__":
    # Input workbook to de-duplicate; adjust the path as needed.
    source_file = "Matched_DB.xlsx"
    cleaned = clean_duplicate_groups(source_file)

    # Write the de-duplicated table to a fresh workbook.
    destination = "cleaned_output.xlsx"
    cleaned.to_excel(destination, index=False)
    print(f"Cleaned file saved to: {destination}")