import os
import pandas as pd
from torchvision.io import read_image
from PIL import Image, ImageDraw
from torch.utils.data import Dataset, WeightedRandomSampler, DataLoader
from torchvision import transforms, models
from tqdm.auto import tqdm
import torch
import numpy as np
import cv2
import warnings
import matplotlib.pyplot as plt
import random
from torch import nn, optim
from torch.nn import BCEWithLogitsLoss
import torch.optim as optim

data_parent_dir = "./data"

class MultiLabelDataset(Dataset):
    """Multi-label fruit image dataset built from YOLO-style annotation files.

    Each ``.txt`` file under ``<img_root>/<sub_root>/<img_labels>`` holds one
    row per bounding box (``label c1 c2 c3 c4``); the set of class ids found
    for an image becomes a multi-hot vector over the 6 fruit classes for the
    matching ``.jpg`` under ``<img_dir>``. A balanced subset of up to
    ``num_images`` images per class is sampled and loaded into memory.
    """

    def __init__(self,
                 img_root=f'{data_parent_dir}/Fruits-detection',
                 sub_root='train',
                 img_dir='images',
                 img_labels='labels',
                 train=True,
                 num_images=50):  # max number of images sampled per class
        self.img_root = img_root
        self.sub_root = sub_root
        self.img_dir = img_dir
        self.img_labels = img_labels
        self.train = train
        self.num_images = num_images

        # Fixed id -> name mapping for the 6 fruit classes.
        self.class_names = {0: 'apple', 1: 'banana', 2: 'grape', 3: 'orange',
                            4: 'pineapple', 5: 'watermelon'}

        # Parse every annotation file into one DataFrame (one row per box).
        dfs = []
        self.labels_path = os.path.join(self.img_root, self.sub_root, self.img_labels)
        for file in os.listdir(self.labels_path):
            if file.endswith(".txt"):
                file_path = os.path.join(self.labels_path, file)
                df = pd.read_csv(file_path, delimiter=" ",
                                 names=["label", "coordinate_1", "coordinate_2",
                                        "coordinate_3", "coordinate_4"])
                df["filename"] = os.path.splitext(file)[0] + '.jpg'
                df["class_name"] = df["label"].map(self.class_names)
                dfs.append(df)

        self.final_df = pd.concat(dfs, ignore_index=True)

        # Row indices grouped by class name, used for balanced sampling.
        self.class_indices = {class_name: self.final_df[self.final_df['class_name'] == class_name].index.tolist()
                              for class_name in self.class_names.values()}

        # Randomly sample up to num_images rows per class.
        selected_indices = []
        for class_name, indices in self.class_indices.items():
            selected_indices.extend(random.sample(indices, min(len(indices), self.num_images)))

        # BUG FIX: the same filename can be sampled more than once (an image
        # contains several boxes, possibly of several classes). Previously the
        # label dict was keyed by filename (deduplicated) while the image list
        # was built from the raw row indices (not deduplicated), so images and
        # labels could differ in length and drift out of alignment.
        # Deduplicate filenames once, preserving sampling order.
        selected_filenames = list(dict.fromkeys(
            self.final_df.loc[idx, 'filename'] for idx in selected_indices))

        # Load images and build the aligned multi-hot label rows in lockstep.
        self.imgs_path = os.path.join(self.img_root, self.sub_root, self.img_dir)
        self.images = []
        self.images_path = []
        label_rows = []
        for filename in tqdm(selected_filenames):
            image_path = os.path.join(self.imgs_path, filename)
            image = cv2.imread(image_path)
            if image is None:
                # BUG FIX: cv2.imread returns None for unreadable/missing
                # files; skip them so images and labels stay aligned.
                warnings.warn(f"Could not read image, skipping: {image_path}")
                continue
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # All class ids that appear anywhere in this image -> multi-hot.
            labels = set(self.final_df.loc[self.final_df['filename'] == filename, 'label'].tolist())
            multi_hot = [0] * len(self.class_names)
            for label in labels:
                multi_hot[label] = 1

            self.images_path.append(image_path)
            self.images.append(image)
            # Keep the historical per-item shape (1, num_classes) so existing
            # consumers that expect labels shaped [1, 6] continue to work.
            label_rows.append(torch.tensor([multi_hot]))

        # (N, 1, num_classes) tensor aligned 1:1 with self.images.
        self.multi_label_labels_tensor = torch.stack(label_rows)

        # Train-time augmentation vs. deterministic eval preprocessing.
        if self.train:
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ])
        else:
            self.transform = transforms.Compose([
                # BUG FIX: Resize(224) only fixes the shorter side, producing
                # variable-size tensors that cannot be batch-collated; force
                # an exact 224x224 output instead.
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            ])

    def get_image_path(self, idx):
        """Return the on-disk path of the idx-th image."""
        return self.images_path[idx]

    def get_labels(self, idx):
        """Return the (1, num_classes) multi-hot label tensor for idx."""
        return self.multi_label_labels_tensor[idx]

    def __len__(self):
        """Number of successfully loaded images."""
        return len(self.images)

    def __getitem__(self, idx):
        """Return (transformed_image_tensor, multi_hot_label_tensor)."""
        image = self.images[idx]
        label = self.multi_label_labels_tensor[idx]

        # Convert ndarray -> PIL before applying torchvision transforms.
        transformed_image = self.transform(Image.fromarray(image))

        return transformed_image, label

def train():
    """Fine-tune a pre-trained ResNet-50 for 6-class multi-label fruit
    classification, validating after every epoch and checkpointing the model
    with the best validation loss to ``model/train/best_model_epoch_ok.pth``.
    """
    train_dataset = MultiLabelDataset(num_images=2200)
    val_dataset = MultiLabelDataset(sub_root='valid', num_images=314, train=False)

    # Mini-batch loader for training: the training transform always produces
    # 224x224 tensors, so batches collate cleanly.
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

    print(len(train_dataset))
    print(len(val_dataset))

    ### Initialize pre-trained model ###
    pretrained_model = models.resnet50(pretrained=True)

    # Show the classification head we are about to replace.
    last_layer_name, last_layer = list(pretrained_model.named_children())[-1]
    print("Last Layer Name:", last_layer_name)
    print("Last Layer:", last_layer)

    ### Modify the last layer of the pretrained model for multi-label classification ###
    num_classes = 6
    pretrained_model.fc = nn.Linear(pretrained_model.fc.in_features, num_classes)

    # Freeze the whole network first...
    for param in pretrained_model.parameters():
        param.requires_grad = False
    # ...then unfreeze the layers selected for fine-tuning.
    for param in pretrained_model.layer3.parameters():
        param.requires_grad = True
    for param in pretrained_model.layer4.parameters():
        param.requires_grad = True
    # BUG FIX: the freshly created fc head was frozen by the freeze-all loop
    # above and was never unfrozen, so the classifier could not learn at all.
    for param in pretrained_model.fc.parameters():
        param.requires_grad = True

    last_layer_name, last_layer = list(pretrained_model.named_children())[-1]
    print("New last layer name:", last_layer_name)
    print("New last layer:", last_layer)

    ### Fine tune the model ###
    print('Fine tune the model')
    # Multi-label loss: independent per-class sigmoid + binary cross-entropy.
    criterion = BCEWithLogitsLoss()

    # Frozen parameters receive no gradients, so SGD leaves them untouched.
    optimizer = optim.SGD(pretrained_model.parameters(), lr=0.001, momentum=0.9)

    num_epochs = 50
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pretrained_model.to(device)

    # Checkpoint path; create the parent directory once, up front.
    save_path = 'model/train/best_model_epoch_ok.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)

    train_losses = []            # per-epoch average training loss, for the plot
    best_val_loss = float('inf')

    for epoch in range(num_epochs):
        # --- training pass (mini-batched) ---
        pretrained_model.train()
        training_loss = 0.0

        for images, labels in train_loader:
            images = images.to(device)
            # Dataset items carry labels shaped (1, 6); flatten the batch to
            # (B, 6) so the target matches the model output.
            labels = labels.float().view(images.size(0), -1).to(device)

            optimizer.zero_grad()
            outputs = pretrained_model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # loss.item() is a per-batch mean; weight by batch size so the
            # epoch figure is a true per-sample average.
            training_loss += loss.item() * images.size(0)

        train_loss = training_loss / len(train_dataset)
        train_losses.append(train_loss)

        # --- validation pass (one sample at a time) ---
        pretrained_model.eval()
        val_loss = 0.0
        correct_preds = 0
        total_labels = 0
        with torch.no_grad():
            for image, labels in val_dataset:
                image = image.unsqueeze(0).to(device)
                labels = labels.float().view(1, -1).to(device)

                outputs = pretrained_model(image)
                val_loss += criterion(outputs, labels).item()

                # BUG FIX: outputs are raw logits; apply sigmoid before
                # thresholding at 0.5 (rounding raw logits is meaningless).
                predicted = (torch.sigmoid(outputs) > 0.5).float()
                correct_preds += (predicted == labels).sum().item()
                # BUG FIX: accumulate the denominator over the whole set;
                # previously only the LAST sample's label count was used,
                # which could push the reported accuracy past 100%.
                total_labels += labels.numel()

        val_loss /= len(val_dataset)

        # Per-label ("Hamming") accuracy across all validation samples.
        val_acc = correct_preds / total_labels * 100

        print(
            f"Epoch [{epoch + 1}/{num_epochs}] Train Loss: {train_loss:.4f}  Validation Loss: {val_loss:.4f}, Validation Accuracy: {val_acc:.2f}%")

        # Keep only the checkpoint with the best validation loss so far.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(pretrained_model.state_dict(), save_path)

    print('Finished Training')

    # Plot the evolution of the training loss.
    plt.plot(train_losses, label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Evolution of Training Loss')
    plt.legend()
    plt.show()

def predict_image(image_path, model, transform, threshold=0.5):
    """Classify a single image file with the multi-label model.

    Returns a tuple ``(detected_classes, image)``: the names of every class
    whose sigmoid probability exceeds *threshold*, and the loaded PIL image.
    """
    class_names = ['apple', 'banana', 'grape', 'orange', 'pineapple', 'watermelon']

    # Load the image and preprocess it into a single-item batch tensor.
    image = Image.open(image_path).convert('RGB')
    input_tensor = transform(image).unsqueeze(0)

    # Inference only: no gradients; logits -> independent class probabilities.
    with torch.no_grad():
        probabilities = torch.sigmoid(model(input_tensor))

    # Threshold each class probability to a 0/1 flag, then collect the names.
    flags = (probabilities > threshold).squeeze().int().numpy()
    detected_classes = [name for name, flag in zip(class_names, flags) if flag == 1]

    return detected_classes, image

def plot_prediction(image, classes):
    """Display *image* with the detected class names as the figure title."""
    plt.imshow(image)
    plt.title("Predicted: " + ", ".join(classes))
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    train()

    # Rebuild the exact architecture that was trained (must match precisely).
    num_classes = 6
    model = models.resnet50(pretrained=False)
    model.fc = nn.Linear(model.fc.in_features, num_classes)

    # Load the best checkpoint saved by train(); map to CPU for portability.
    model_path = "./model/train/best_model_epoch_ok.pth"
    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))

    # Switch to inference mode.
    model.eval()

    # Deterministic preprocessing matching the validation pipeline.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Classify one test image and visualize the prediction.
    image_path = f'{data_parent_dir}/Fruits-detection/test/images/0d6db81d71062441_jpg.rf.b3d78c026d096825eb142e5971ed502c.jpg'
    detected_classes, original_image = predict_image(image_path, model, transform)
    print(f"detected_classes:{detected_classes}")
    plot_prediction(original_image, detected_classes)
