import os
import cv2
import random
import numpy as np
import pandas as pd
from PIL import Image
import pickle
import ctypes

from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split

import tensorflow as tf
from tqdm import tqdm
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Flatten)
from tensorflow.keras.layers import (Dense, Lambda, Dropout, Activation, Reshape)
from tensorflow.keras.utils import Sequence
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from tensorflow.keras.layers import (Conv2D, MaxPooling2D)
from tensorflow.keras.layers import BatchNormalization
from sklearn.metrics import f1_score, precision_score, recall_score
from tensorflow.keras.metrics import CategoricalAccuracy, Precision, Recall
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.regularizers import l2

import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt

import gc
import re
import plotly.graph_objects as go
from io import BytesIO
import base64

STYLES_CSV_PATH = "/kaggle/input/fashion-product-images-dataset/fashion-dataset/styles.csv"
IMAGES_PATH = "/kaggle/input/fashion-product-images-dataset/fashion-dataset/images/"

# Load the styles metadata; malformed CSV rows are skipped rather than raising.
df = pd.read_csv(STYLES_CSV_PATH, on_bad_lines='skip')
print(f"Total Rows: {df.shape[0]}\nTotal Columns: {df.shape[1]}")

# Derive each product's image path from its numeric id.
df['image_path'] = df['id'].apply(lambda x: IMAGES_PATH + str(x) +'.jpg')
df.head()  # notebook-style display; no effect when run as a plain script

# Fill missing values with the literal 'Unknown' and force every non-id column to str.
df = df.apply(lambda col: col.fillna('Unknown').astype(str) if col.name not in ['id'] else col)
# NOTE(review): redundant — the line above already cast every non-id column to str.
df = df.astype({col: str for col in df.columns if col != 'id'})

df.isna().sum()  # notebook-style display; no effect when run as a plain script
# Keep only rows whose image file actually exists on disk.
df = df[df['id'].isin([int(i.split('.')[0]) for i in os.listdir(IMAGES_PATH)])]


def extract_brand(name):
    """Heuristically extract the brand from a product display name.

    The brand is assumed to be everything before the first gender/age keyword
    (``men``, ``women``, ``kid`` ...) or before an apostrophe ("Levis Women's
    Jeans" -> "Levis").  With no keyword present, the first word is used.

    Args:
        name: product display name (any capitalisation).

    Returns:
        The extracted brand, title-cased.
    """
    keywords = ['men', 'man', 'woman', 'women', 'boy', 'girl', 'kid', 'unisex']

    name_lower = name.lower()

    if "'" in name_lower:
        before_quote = name_lower.split("'", 1)[0].strip()
        words_before_quote = before_quote.split(' ')

        # Drop a trailing gender/age keyword ("levis women" -> "levis").
        if words_before_quote[-1] in keywords:
            brand = ' '.join(words_before_quote[:-1])
        else:
            brand = before_quote

        brand = brand.strip()
    else:
        words = name_lower.split(' ')
        brand = name_lower
        for i, word in enumerate(words):
            if word in keywords:
                # Join the words BEFORE the keyword token.  (The original used
                # name_lower.split(word), which matched the keyword as a raw
                # substring — "Supermen Men Shirt" wrongly became "Super".)
                brand = ' '.join(words[:i]).strip()
                break
        if brand == name_lower:
            # No keyword found: fall back to the first word.
            brand = name_lower.split(' ')[0]

    # Capitalize the first letter of every word in the output.
    return brand.title()

# Heuristically derive a brand column from the display names.
df['brand'] = df['productDisplayName'].apply(extract_brand)


def refine_brands(df):
    """Collapse composite brand strings onto shorter brands already in the column.

    For every row's brand, the full brand list is scanned in order and the
    first *other* entry that occurs as a whole-word substring replaces it.
    The list is rewritten in place, so replacements made for earlier rows are
    visible when later rows are processed.  Mutates ``df['brand']`` directly.
    """
    all_brands = df['brand'].tolist()
    total = len(all_brands)

    for pos in range(total):
        padded = f' {all_brands[pos]} '
        for other_pos in range(total):
            if other_pos == pos:
                continue
            if f' {all_brands[other_pos]} ' in padded:
                all_brands[pos] = all_brands[other_pos]
                break

    df['brand'] = all_brands

# Canonicalise brand strings in place (composites collapse onto shorter brands).
refine_brands(df)

brand_counts = df['brand'].value_counts()

# Print brands in descending frequency order (value_counts sorts for us).
for brand, count in brand_counts.items():
    print(f"{brand}: {count}")

# The categorical attributes the model will predict — one output head each.
attributes = [
    "gender",
    "masterCategory",
    "subCategory",
    "articleType",
    "baseColour",
    "season",
    "usage",
    "brand"
]

import warnings
warnings.filterwarnings('ignore')  # silence library deprecation noise globally


def show_samples(samples):
    """Render up to 20 sample rows as a 5x4 grid of product images.

    Each subplot is titled with every attribute of the row except the image
    path; unreadable images are reported and their subplot left empty.
    """
    plt.figure(figsize=(20, 15))
    for pos, (_, record) in enumerate(samples.iterrows()):
        path = record['image_path']
        plt.subplot(5, 4, pos + 1)
        try:
            plt.imshow(Image.open(path))

            caption_parts = []
            for field in record.index:
                if field == 'image_path':
                    continue
                label = 'Name' if field == 'productDisplayName' else field
                caption_parts.append(f"{label}: {record[field]}\n")

            plt.title(''.join(caption_parts), fontsize=8, loc='center')
            plt.axis('off')
        except Exception as e:
            print(f"Error loading image: {path} - {e}")
    plt.tight_layout()
    plt.subplots_adjust(bottom=0, right=0.8, top=1.3)
    plt.show()


# Visual sanity check on 20 random products.
show_samples(df.sample(20))


import plotly.express as px

# Loop through each attribute and create a bar chart (bars sorted by count).
for attribute in attributes:
    fig = px.bar(df.groupby(attribute).count().reset_index(),
                 x=attribute, y='id', title=f'Count per {attribute.capitalize()} Category', color=attribute)
    fig.update_layout(barmode='stack', xaxis={'categoryorder':'total descending'})
    fig.show()


def resize_image(image, max_size=(400, 400)):
    """Shrink *image* in place so it fits within *max_size*, preserving aspect ratio.

    Returns the same PIL Image object to allow call-chaining.
    """
    # Image.ANTIALIAS was deprecated in Pillow 9.1 and removed in Pillow 10;
    # Image.LANCZOS is the same resampling filter under its current name.
    image.thumbnail(max_size, Image.LANCZOS)
    return image


def display_random_samples(df, attribute):
    """Show one random product image per distinct value of *attribute*.

    Images are embedded into a single plotly figure as base64 data URIs,
    laid out left-to-right along the x-axis with a caption underneath each.
    """
    classes = df[attribute].unique()
    fig = go.Figure()

    for idx, cls in enumerate(classes):
        subset = df[df[attribute] == cls]

        random_image_path = subset['image_path'].sample(n=1).values[0]

        try:
            img = Image.open(random_image_path)
            img = resize_image(img)

            # Re-encode the (downsized) image as a PNG data URI so plotly can
            # embed it directly in the figure without touching the filesystem.
            buffer = BytesIO()
            img.save(buffer, format="PNG")
            img_str = base64.b64encode(buffer.getvalue()).decode()

            fig.add_layout_image(
                dict(
                    source=f'data:image/png;base64,{img_str}',
                    x=idx,  # Position image on the x-axis
                    y=0.5,
                    xref="x",
                    yref="y",
                    xanchor="center",
                    yanchor="middle",
                    sizex=0.8,
                    sizey=0.8,
                    layer="above"
                )
            )

            # Class-name caption placed just below the image.
            fig.add_annotation(
                text=f'{attribute.capitalize()}: {cls}',
                x=idx,
                y=-0.2,
                xref="x",
                yref="y",
                showarrow=False,
                font=dict(size=10),
                align="center"
            )
        except Exception as e:
            print(f'Error loading image: {e}')

    # Hide both axes; figure width grows with the class count so images
    # (sizex=0.8 in axis units) do not overlap.
    fig.update_layout(
        title=f'Random Samples of Each {attribute.capitalize()}',
        xaxis=dict(showticklabels=False, range=[-0.5, len(classes) - 0.5]),
        yaxis=dict(showticklabels=False, range=[-0.3, 1]),
        showlegend=False,
        height=400,
        width=200 * len(classes),
        margin=dict(t=50, b=50, l=20, r=20),
        xaxis_visible=False,
        yaxis_visible=False
    )

    fig.show()

# One figure per attribute: a random example image for every class.
for attribute in attributes:
    display_random_samples(df, attribute)


def check_attribute_classes(df, attr1, attr2):
    """Map each value of *attr1* to the list of *attr2* values it co-occurs with.

    Returns a DataFrame with one row per distinct attr1 value and a list-typed
    attr2 column (values in first-appearance order within each group).
    """
    grouped = df.groupby(attr1)[attr2].unique().reset_index()
    grouped[attr2] = grouped[attr2].map(list)
    return grouped


def display_all_attribute_classes(df, attributes):
    """Print the co-occurring value lists for every unordered attribute pair, both directions."""
    for i, attr1 in enumerate(attributes):
        for attr2 in attributes[i + 1:]:
            # First attr1 -> attr2, then the reverse mapping.
            for first, second in ((attr1, attr2), (attr2, attr1)):
                pairing = check_attribute_classes(df, first, second)
                print(f"Possible {second} values for each {first}:")
                print(pairing)
                print("\n")

# Example lookup: subCategory values under each masterCategory.
check_attribute_classes(df, 'masterCategory', 'subCategory')  # notebook display only

from mlxtend.frequent_patterns import apriori, association_rules

# Mine association rules between attribute values (id/image_path excluded).
df_temp = df.drop(columns=['id', 'image_path'])
one_hot_encoded_data = pd.get_dummies(df_temp)

# Itemsets must cover >= 1% of rows; rules must reach >= 50% confidence.
frequent_itemsets = apriori(one_hot_encoded_data, min_support=0.01, use_colnames=True)
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.5)
rules = rules.sort_values(by='lift', ascending=False)


print(rules)

from sklearn.preprocessing import LabelBinarizer

# Shared encoder registry, lazily populated on first use per column.
encoders = {}               # column name -> fitted LabelBinarizer
unknown_class_indices = {}  # column name -> index of the 'Unknown' class, or None


def one_hot_encode(column_name, data):
    """One-hot encode *data* using a LabelBinarizer fitted on df[column_name].

    The encoder is fitted once on the full column (so every class is known)
    and cached in the module-level ``encoders`` dict; the position of the
    'Unknown' class, if any, is recorded in ``unknown_class_indices``.
    """
    encoder = encoders.get(column_name)
    if encoder is None:
        encoder = LabelBinarizer()
        encoder.fit(df[column_name])
        encoders[column_name] = encoder

        classes = encoder.classes_
        unknown_idx = None
        if 'Unknown' in classes:
            unknown_idx = np.where(classes == 'Unknown')[0][0]
        unknown_class_indices[column_name] = unknown_idx

    return encoder.transform(data)


def one_hot_decode(column_name, encoded_data):
    """Invert one_hot_encode, fitting the column's encoder first if necessary."""
    if column_name not in encoders:
        one_hot_encode(column_name, df[column_name])
    return encoders[column_name].inverse_transform(encoded_data)

from sklearn.model_selection import train_test_split

# Inputs are (id, image_path); targets are every remaining attribute column.
# productDisplayName and year are dropped — they are not predicted.
X_train, X_test, Y_train, Y_test = train_test_split(df[['id', 'image_path']], df.drop(columns=['image_path', 'productDisplayName', 'year']), test_size=0.2, random_state=42)

print(f"X_train shape: {X_train.shape}")
print(f"X_test shape: {X_test.shape}")
print(f"Y_train shape: {Y_train.shape}")
print(f"Y_test shape: {Y_test.shape}")


# (height, width, channels) fed to the CNN backbone.
IMAGE_DIMS = (180, 180, 3) # (224, 224, 3) generally scores better but costs more memory/compute


def plot_images(image_list, save=False, file_name='plot.png'):
    """Display the given images side by side in a single row.

    Optionally saves the figure to *file_name* before showing it; does
    nothing but print a notice when the list is empty.
    """
    if len(image_list) == 0:
        print("No valid images to display.")
        return

    fig, axes = plt.subplots(1, len(image_list), figsize=(20, 5))

    # A single subplot comes back as a bare Axes; normalise to a list.
    axes = [axes] if len(image_list) == 1 else axes

    for axis, picture in zip(axes, image_list):
        axis.imshow(picture)
        axis.axis('off')

    if save:
        plt.savefig(file_name)
    plt.show()


from albumentations import Compose, RandomResizedCrop, Flip, Rotate, RandomBrightnessContrast, RandomScale, Resize, HorizontalFlip

def augment_image(image):
    """Apply the random training-time augmentation pipeline to one RGB image array.

    The albumentations ``Compose`` object is built lazily on first call and
    cached on the function itself — the original rebuilt the whole pipeline
    for every image, which is pure overhead in the data-loading loop.
    """
    pipeline = getattr(augment_image, '_pipeline', None)
    if pipeline is None:
        pipeline = Compose([
            RandomScale(scale_limit=0.3, p=1.0),
            # Crop/scale back to the model's fixed input resolution.
            RandomResizedCrop(height=IMAGE_DIMS[0], width=IMAGE_DIMS[1], scale=(0.5, 1.0)),
            HorizontalFlip(p=0.75),
            Rotate(limit=15),
            RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1)
        ])
        augment_image._pipeline = pipeline
    return pipeline(image=image)['image']


def load_images(imagePaths, labels, image_dims=IMAGE_DIMS, preprocess=False, augment=False, plot=True):
    """Load all images into memory as one numpy array, keeping labels aligned.

    Missing or unreadable files are skipped together with their label row.
    With ``augment=True`` an augmented copy of each image is appended right
    after the original, sharing its label (so the output is up to 2x the input).

    Args:
        imagePaths: iterable of file paths, same length/order as *labels* rows.
        labels: pandas DataFrame of target columns, one row per path.
        image_dims: (height, width, channels) target size.
        preprocess: apply the backbone's preprocess_input to each image.
        augment: additionally append one augmented variant per image.
        plot: show the first few loaded images as a sanity check.

    Returns:
        (images ndarray, labels DataFrame) with matching first dimensions.

    Raises:
        ValueError: if *labels* is not a pandas DataFrame.
    """
    print(f"Loading Images in Size: {image_dims}")
    if isinstance(labels, pd.DataFrame):
        label_columns = labels.columns
    else:
        raise ValueError("Labels should be provided as a pandas DataFrame.")

    image_data = []
    label_data = []

    # The original wrapped this in enumerate() but never used the index.
    for imagePath, label in tqdm(zip(imagePaths, labels.values), total=len(imagePaths),
                                 desc="Loading images"):
        if not os.path.exists(imagePath):
            print(f"File does not exist: {imagePath}")
            continue
        image = cv2.imread(imagePath)
        if image is None:
            print(f"Error reading image at path: {imagePath}")
            continue
        # NOTE: cv2.resize expects (width, height); dims are square here so order is moot.
        image = cv2.resize(image, (image_dims[0], image_dims[1]))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if preprocess:
            image = preprocess_input(image)

        if image.shape[0] != image_dims[0] or image.shape[1] != image_dims[1]:
            print(f"Image at path {imagePath} has incorrect dimensions: {image.shape[:2]}")
            continue

        image_data.append(image)
        label_data.append(label)

        if augment:
            augmented_image = augment_image(image)
            if preprocess:
                augmented_image = preprocess_input(augmented_image)

            if augmented_image.shape[0] != image_dims[0] or augmented_image.shape[1] != image_dims[1]:
                print(f"Augmented image has incorrect dimensions: {augmented_image.shape[:2]}")
                continue

            image_data.append(augmented_image)
            label_data.append(label)

    if plot and image_data:
        plot_images(image_data[:6])

    return np.array(image_data), pd.DataFrame(label_data, columns=label_columns)


gc.collect()
# Linux/glibc-specific: ask the allocator to return freed pages to the OS.
libc = ctypes.CDLL("libc.so.6") # clearing cache
libc.malloc_trim(0)

# Materialise both splits in RAM; the training set is doubled by augmentation.
X_train_images, Y_train_images = load_images(X_train['image_path'], Y_train, augment=True)
X_test_images, Y_test_images = load_images(X_test['image_path'], Y_test, plot=False)


class DataGenerator(Sequence):
    """Keras Sequence yielding (image batch, {attribute: one-hot labels}) pairs.

    Fix over the original: when an image file is missing or unreadable it is
    dropped from the batch *together with its label row* — previously only the
    image was dropped, silently misaligning images and labels in that batch.
    """

    def __init__(self, image_paths, labels, batch_size=32, image_dims=IMAGE_DIMS, preprocess=True, shuffle=True):
        # NOTE(review): image_paths is indexed positionally below; a pandas
        # Series with a non-default index would be looked up by label instead.
        # Pass a plain list (or .values) to be safe — confirm with callers.
        self.image_paths = image_paths
        self.labels = labels
        self.batch_size = batch_size
        self.image_dims = image_dims
        self.preprocess = preprocess
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Partial trailing batch is intentionally dropped.
        return int(np.floor(len(self.image_paths) / self.batch_size))

    def __getitem__(self, index):
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        batch_image_paths = [self.image_paths[k] for k in indexes]
        batch_labels = self.labels.iloc[indexes]

        images, kept = self.__load_images(batch_image_paths)
        # Keep only the label rows whose image actually loaded.
        batch_labels = batch_labels.iloc[kept]
        labels = self.__load_labels(batch_labels)

        return tf.convert_to_tensor(images), {col: tf.convert_to_tensor(labels[col]) for col in batch_labels.columns}

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs.
        self.indexes = np.arange(len(self.image_paths))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __load_images(self, image_paths):
        """Return (loaded images, positions within *image_paths* that loaded OK)."""
        image_data = []
        kept_positions = []
        for pos, imagePath in enumerate(image_paths):
            if not os.path.exists(imagePath):
                print(f"File does not exist: {imagePath}")
                continue
            image = cv2.imread(imagePath)
            if image is None:
                print(f"Error reading image at path: {imagePath}")
                continue
            image = cv2.resize(image, (self.image_dims[0], self.image_dims[1]))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if self.preprocess:
                image = preprocess_input(image)
            image_data.append(image)
            kept_positions.append(pos)
        return image_data, kept_positions

    def __load_labels(self, batch_labels):
        """One-hot encode every label column of the batch."""
        return {col: one_hot_encode(col, batch_labels[col]) for col in batch_labels.columns}

# Training hyper-parameters.
LEARNING_RATE = 1e-5
MOMENTUM = 0.9           # SGD momentum
EPOCHS = 50
BATCH_SIZE = 32
BASE_MODEL = "RESNET50"  # backbone selector — see the if/elif chain below
L2_REG = 0.01            # L2 weight-decay coefficient used by every dense layer


def get_output_sizes():
    """Map each encoded attribute name to its class count (= softmax head width)."""
    return {name: len(encoder.classes_) for name, encoder in encoders.items()}


def build_model(base_model, image_dims, l2_reg):
    """Attach a shared dense trunk and one softmax head per attribute to a frozen backbone.

    Args:
        base_model: pretrained keras feature extractor; its weights are frozen here.
        image_dims: input (height, width, channels).
        l2_reg: L2 regularization factor applied to every dense layer.

    Returns:
        An uncompiled multi-output keras Model (one output per encoded attribute).
    """
    base_model.trainable = False

    output_sizes = get_output_sizes()

    inputs = Input(shape=image_dims)
    x = base_model(inputs, training=False)
    x = Flatten()(x)

    # Shared fully-connected trunk, tapering 1024 -> 128.
    for units in (1024, 512, 256, 128):
        x = Dense(units, activation='relu', kernel_regularizer=l2(l2_reg))(x)

    # One L2-regularized softmax head per attribute, named after the attribute.
    outputs = [
        Dense(size, activation='softmax', name=attribute, kernel_regularizer=l2(l2_reg))(x)
        for attribute, size in output_sizes.items()
    ]

    return Model(inputs=inputs, outputs=outputs)


warnings.filterwarnings('ignore')
# Fit one encoder per attribute up front so unknown_class_indices is fully
# populated before the per-head loss functions are built below.
for attr in attributes:
    one_hot_encode(attr, df[attr])

from tensorflow.keras import backend as K
from tensorflow.keras.losses import categorical_crossentropy


def masked_categorical_crossentropy(y_true, y_pred, unknown_class_index=None):
    """Categorical cross-entropy that zeroes out samples labelled 'Unknown'.

    Samples whose true class equals *unknown_class_index* contribute no loss.
    When no index is given this reduces to plain categorical cross-entropy
    (returned per-sample, un-averaged, matching keras convention).
    """
    loss = categorical_crossentropy(y_true, y_pred)
    if unknown_class_index is None:
        return loss
    keep = K.cast(K.not_equal(K.argmax(y_true, axis=-1), unknown_class_index), K.floatx())
    # NOTE(review): the mean runs over ALL samples, masked ones included, so
    # batches heavy in 'Unknown' labels yield a smaller average — confirm intended.
    return K.mean(loss * keep)


from tensorflow.keras.applications import VGG16, VGG19, ResNet50, EfficientNetB0, InceptionV3
from tensorflow.keras.applications.vgg16 import preprocess_input as vgg_preprocess
from tensorflow.keras.applications.resnet import preprocess_input as resnet_preprocess
from tensorflow.keras.applications.efficientnet import preprocess_input as efficientnet_preprocess
from tensorflow.keras.applications.inception_v3 import preprocess_input as inception_preprocess

# Pick the backbone and its MATCHING preprocessing function (each family
# normalises pixels differently, so these must stay paired).
if BASE_MODEL == "VGG16":
    preprocess_input = vgg_preprocess
    base_model = VGG16(weights='imagenet', include_top=False, input_shape=IMAGE_DIMS)
elif BASE_MODEL == "RESNET50":
    preprocess_input = resnet_preprocess
    base_model = ResNet50(weights='imagenet', include_top=False, input_shape=IMAGE_DIMS)
elif BASE_MODEL == "EFFICIENTNETB0":
    preprocess_input = efficientnet_preprocess
    base_model = EfficientNetB0(weights='imagenet', include_top=False, input_shape=IMAGE_DIMS)
elif BASE_MODEL == "INCEPTIONV3":
    preprocess_input = inception_preprocess
    base_model = InceptionV3(weights='imagenet', include_top=False, input_shape=IMAGE_DIMS)
else:
    # NOTE(review): the message offers 'VGG19' but no VGG19 branch exists above.
    raise ValueError("Unsupported BASE_MODEL. Choose 'VGG16', 'VGG19', 'RESNET50', 'EFFICIENTNETB0', or 'INCEPTIONV3'.")


model = build_model(base_model, IMAGE_DIMS, l2_reg=L2_REG)

opt = SGD(learning_rate=LEARNING_RATE, momentum=MOMENTUM)

# One masked loss per output head.  The attr=attr default pins the loop
# variable per lambda, avoiding Python's late-binding closure pitfall.
losses = {attr: lambda y_true, y_pred, attr=attr: masked_categorical_crossentropy(y_true, y_pred, unknown_class_indices[attr]) for attr in attributes}
metrics = {attr: [CategoricalAccuracy(name='accuracy'), Precision(name='precision'), Recall(name='recall')] for attr in attributes}

model.compile(
    optimizer=opt,
    loss=losses,
    metrics=metrics
)

from tensorflow.keras.utils import plot_model
plot_model(model)  # renders the architecture diagram (needs pydot + graphviz)


def combine_outputs(Y, attributes):
    """Build the per-head target dict keras expects: attribute name -> one-hot matrix."""
    encoded = {}
    for attr in attributes:
        encoded[attr] = one_hot_encode(attr, Y[attr])
    return encoded

# Train on the in-memory arrays; targets and validation targets are dicts
# keyed by output-head (attribute) name, matching the model's named outputs.
model_fit_history = model.fit(
    X_train_images,
    combine_outputs(Y_train_images, attributes),
    validation_data=(
        X_test_images,
        combine_outputs(Y_test_images, attributes)
    ),
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    verbose=1,
)

history = model_fit_history.history  # per-epoch loss/metric curves for plotting