import os
import re
import shutil
import xml.etree.ElementTree as ET
import pandas as pd
from pandas import DataFrame
from .config import *
from .logger import *
from .decorator import *
import glob
import platform

# Only import win32com on Windows
if platform.system() == 'Windows':
    import win32com.client as win32


@log_function_call
def remove_empty_date_folders():
    """Remove date folders that contain no screenshots or Excel downloads.

    Returns a (success, message) tuple; stops at the first removal failure.
    """
    empties = []
    for folder in get_list_date_folders():
        # A folder is "empty" when it holds no download artifacts, even if
        # other files (e.g. logs) are present.
        has_artifacts = any(
            entry.endswith(('.png', '.xlsx', '.xls'))
            for entry in os.listdir(folder)
        )
        if not has_artifacts:
            logger.info(f"Removing empty date folder: {folder}")
            empties.append(folder)
    for folder in empties:
        try:
            shutil.rmtree(folder)
        except Exception as e:
            logger.error(f"Error removing empty date folder: [{folder}]: {e}")
            return False, f"Error removing empty date folder: [{folder}]: {e}"
    return True, f"Empty date folders removed successfully : [{', '.join(empties)}]"

def get_list_date_folders():
    """Get all folders in the current working directory named YYYY-MM-DD.

    Returns the folder names sorted newest-first (reverse lexicographic,
    which equals reverse chronological for zero-padded ISO dates).
    """
    # TODO depends on datetime strftime pattern
    # fullmatch (not match) so names that merely START with a date,
    # e.g. "2024-01-01-old", are not treated as date folders.
    date_folders = [
        d
        for d in os.listdir()
        if os.path.isdir(d) and re.fullmatch(r"\d{4}-\d{2}-\d{2}", d)
    ]
    date_folders.sort(reverse=True)
    return date_folders


def get_list_downloaded_websites(folder_path: str) -> list[str]:
    """Get the list of downloaded websites from a given folder path.

    A site name is appended once per matching file, so the result may
    contain duplicates when a site produced several files.
    """
    downloaded_sites = []
    # Hoisted out of the loop: the site-name list is invariant for the call,
    # but the original re-fetched it for every file in the folder.
    website_names = get_website_names()
    for file in os.listdir(folder_path):
        # Internal selenium artifacts are not site downloads.
        if file.startswith("besins-selenium"):
            continue
        site_name = next(
            (website for website in website_names if file.startswith(website)),
            None,
        )
        if site_name is not None:
            downloaded_sites.append(site_name)
    return downloaded_sites


@log_function_call
def get_date_folder_analysis_as_dataframe(folder_path: str) -> DataFrame:
    """Extracts data from files in a given folder path to analyze the download process.

    Builds one row per website found in the folder, with columns:
    SiteName, Screenshots (list of .png names), Login ("✅"/"❌" heuristic),
    Downloaded (list of .xls/.xlsx names), Errors/Warnings/Info/Debug
    (log-level counts), URL, TaskID, Browser, plus a lowercase "url" column
    added at the end. Returns an empty DataFrame when the folder is missing
    or holds no screenshots/Excel files.
    """
    if not os.path.exists(folder_path):
        return pd.DataFrame()
    
    # Only used as an emptiness probe: bail out early if the folder has no
    # download artifacts at all. The per-file loop below re-lists the folder.
    relevant_files = glob.glob(os.path.join(folder_path, "*.png")) + \
                         glob.glob(os.path.join(folder_path, "*.xlsx")) + \
                         glob.glob(os.path.join(folder_path, "*.xls"))
   
    if not relevant_files:
        return pd.DataFrame()

    files = os.listdir(folder_path)
    # One entry per site, keyed by site name; created lazily on first file.
    sites_data = {}

    for file in files:
        # Internal selenium artifacts are not attributed to any site.
        if file.startswith("besins-selenium"):
            continue
        # Attribute the file to the first site whose name prefixes it.
        site_name = None
        for website in get_website_names():
            if file.startswith(website):
                site_name = website
                break
        if site_name is None:
            continue
        if site_name not in sites_data:
            sites_data[site_name] = {
                "SiteName": site_name,
                "Screenshots": [],
                "Login": "❌",  # TODO
                "Downloaded": [],
                "Errors": 0,
                "Warnings": 0,
                "Info": 0,
                "Debug": 0,
                "URL": map_site_to_url()[site_name],
                "TaskID": map_site_to_task_id()[site_name],
                "Browser": "Unknown",
            }

        if file.endswith(".png"):
            sites_data[site_name]["Screenshots"].append(file)
            if "login" in file.lower() and "_02_" in file:
                sites_data[site_name]["Login"] = "✅"  # Success if there's a second login screenshot. TODO : improve this
        elif file.endswith((".xls", ".xlsx")):
            sites_data[site_name]["Downloaded"].append(file)
        elif file.endswith(".log"):
            # NOTE(review): plain assignment — if a site has several .log
            # files, only the last one processed determines the counts.
            sites_data[site_name]["Errors"] = count_errors(
                os.path.join(folder_path, file)
            )
            sites_data[site_name]["Warnings"] = count_warnings(
                os.path.join(folder_path, file)
            )
            sites_data[site_name]["Info"] = count_info(
                os.path.join(folder_path, file)
            )
            sites_data[site_name]["Debug"] = count_debug(
                os.path.join(folder_path, file)
            )

    df = pd.DataFrame(list(sites_data.values()))

    # NOTE(review): "TaskID" is re-mapped here with the same values already
    # set per-row above, and a second lowercase "url" column is added
    # alongside "URL" — looks redundant, but downstream consumers may rely
    # on the exact column names; confirm before removing.
    site_task_mapping = map_site_to_task_id()
    site_url_mapping = map_site_to_url()
    df["TaskID"] = df["SiteName"].map(site_task_mapping)
    df["url"] = df["SiteName"].map(site_url_mapping)
    return df


@log_function_call
def get_latest_downloaded_files(browser_downloads_dir: str, num_files: int) -> list[str]:
    """Return up to num_files paths from the directory, newest first by mtime."""
    candidates = (
        os.path.join(browser_downloads_dir, name)
        for name in os.listdir(browser_downloads_dir)
    )
    # Subdirectories are ignored; only regular files count as downloads.
    regular_files = [path for path in candidates if os.path.isfile(path)]
    newest_first = sorted(regular_files, key=os.path.getmtime, reverse=True)
    return newest_first[:num_files]


@log_function_call
def get_most_recent_downloaded_file(browser_downloads_dir: str) -> str:
    """Return the single most recently modified file in the directory."""
    newest = get_latest_downloaded_files(browser_downloads_dir, num_files=1)
    # Raises IndexError when the directory contains no regular files.
    return newest[0]


@log_function_call
def process_downloaded_file(download_path: str, destination_path: str):
    """Convert or copy a downloaded file to destination_path based on its type.

    .xls  -> converted to .xlsx
    .xlsx -> copied as-is
    .xml (by extension, or sniffed content for misleading extensions) -> .xlsx
    Anything else is logged as unsupported and skipped.
    """
    _, file_extension = os.path.splitext(download_path)  # TODO
    extension = file_extension.lower()

    try:
        if extension == ".xls":
            convert_xls_to_xlsx(download_path, destination_path)
        elif extension == ".xlsx":
            shutil.copy(download_path, destination_path)
        elif extension == ".xml" or is_xml_file(download_path):
            # Some sites serve SpreadsheetML with a non-.xml extension, so
            # fall back to sniffing the file content.
            convert_xml_to_xlsx(download_path, destination_path)
        else:
            # Previously this warning only fired when an exception occurred,
            # so genuinely unsupported files passed with no log at all.
            logger.warning(f"Unsupported file type: '{download_path}'. No action taken.")
    except Exception as e:
        logger.error(f"Error processing file '{download_path}': {e}")


@log_function_call
def convert_xls_to_xlsx(xls_path: str, xlsx_path: str):
    """Convert an XLS file to an XLSX file, preserving all sheets.

    Errors are logged rather than raised, matching the module's best-effort style.
    """
    try:
        # sheet_name=None loads every sheet into a {name: DataFrame} dict.
        sheets = pd.read_excel(xls_path, sheet_name=None, engine="xlrd")
        with pd.ExcelWriter(xlsx_path, engine="openpyxl") as writer:
            for sheet_name, frame in sheets.items():
                frame.to_excel(writer, sheet_name=sheet_name, index=False)
    except Exception as e:
        logger.error(f"Error converting '{xls_path}' to '{xlsx_path}': {e}")

@log_function_call
def convert_xls_to_xlsx_way_2(xls_path: str, xlsx_path: str):
    """Convert XLS to XLSX via Excel COM automation (Windows only).

    Raises OSError on non-Windows platforms. Unlike the original, the
    workbook and Excel process are released even when Open/SaveAs fails,
    so no orphaned EXCEL.EXE instances are left behind.

    NOTE(review): Excel COM generally requires absolute paths — confirm
    callers pass them.
    """
    if platform.system() != 'Windows':
        logger.error("convert_xls_to_xlsx_way_2 is only supported on Windows systems.")
        raise OSError("This function requires Windows with pywin32 installed")

    excel = win32.gencache.EnsureDispatch('Excel.Application')
    try:
        workbook = excel.Workbooks.Open(xls_path)
        try:
            # FileFormat=51 is xlOpenXMLWorkbook (.xlsx without macros).
            workbook.SaveAs(xlsx_path, FileFormat=51)
        finally:
            workbook.Close()
    finally:
        excel.Application.Quit()

@log_function_call
def convert_xml_to_xlsx(xml_path: str, xlsx_path: str):
    """Convert a SpreadsheetML XML file to an XLSX file.

    The first <ss:Row> supplies the headers; remaining rows become data.
    Errors are logged rather than raised, matching the module's best-effort style.
    """
    try:
        with open(xml_path, "r", encoding="utf-8") as xml_file:
            # lstrip: tolerate leading whitespace/BOM junk before the XML prolog.
            xml_content = xml_file.read().lstrip()
        root = ET.fromstring(xml_content)
        namespace = {"ss": "urn:schemas-microsoft-com:office:spreadsheet"}

        def _cell_text(cell):
            # Empty cells may omit the <ss:Data> child entirely; the original
            # crashed with AttributeError on those and aborted the conversion.
            data = cell.find("ss:Data", namespace)
            return data.text if data is not None else None

        rows = root.findall(".//ss:Row", namespace)
        headers = (
            [_cell_text(cell) for cell in rows[0].findall("ss:Cell", namespace)]
            if rows
            else []
        )
        data_rows = [
            [_cell_text(cell) for cell in row.findall("ss:Cell", namespace)]
            for row in rows[1:]
        ]
        df = pd.DataFrame(data_rows, columns=headers)
        df.to_excel(xlsx_path, index=False)
    except Exception as e:
        logger.error(f"Error converting 'xml' to 'xlsx': {e}")



@log_function_call
def is_xml_file(file_path: str) -> bool:
    """Check if a file is an XML file.

    Decides from the first non-blank line: True iff it starts with "<?xml".
    Unreadable files are logged and reported as not-XML.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            for raw_line in handle:
                stripped = raw_line.strip()
                if not stripped:
                    continue
                return stripped.startswith("<?xml")
        # File was empty or all-whitespace.
        return False
    except Exception as e:
        logger.error(f"Error reading file '{file_path}': {e}")
        return False
