import os

import torch
from torch import nn
from torchvision import transforms as T
from torchvision.models import resnet50, resnet18
from torch.utils.data import Dataset, DataLoader
from PIL import Image

import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from argparse import ArgumentParser


def _str2bool(value):
    """Convert a command-line string to a bool.

    argparse's ``type=bool`` is a classic trap: ``bool("False")`` is True
    because every non-empty string is truthy.  This converter accepts the
    common spellings and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    # argparse turns ValueError from a type callable into a usage error.
    raise ValueError(f"expected a boolean value, got {value!r}")


def make_parser():
    """Build the CLI parser for the image-classifier training script.

    Returns:
        ArgumentParser with training hyperparameters; all options have
        defaults so ``parse_args([])`` yields a fully populated namespace.
    """
    parser = ArgumentParser(description="Image classifier")
    # BUGFIX: boolean flags previously used type=bool, which parsed
    # "--flag False" as True.  _str2bool keeps the same "--flag VALUE"
    # interface but parses the value correctly.
    parser.add_argument("--use_transform_arch", default=False, type=_str2bool)
    parser.add_argument("--num_workers", default=0, type=int)
    parser.add_argument("--learning_rate", default=1e-3, type=float)
    parser.add_argument("--use_clip_norm", default=False, type=_str2bool)
    parser.add_argument("--max_norm", default=1., type=float)
    parser.add_argument("--use_fgm_training", default=False, type=_str2bool)
    parser.add_argument("--fgm_emb_str", default="conv1.", type=str)
    parser.add_argument("--gradient_accumulate_steps", default=1, type=int)
    parser.add_argument("--num_epochs", default=6, type=int)
    return parser

# Parse an empty argv so this also runs under notebooks / interactive use.
# NOTE(review): this ignores any real command-line arguments — confirm intended.
args = make_parser().parse_args([])

total_df = pd.read_csv("./ReversoContextClass/train.csv")
# Stable label <-> index mapping; sorted so the index assignment is
# deterministic across runs regardless of row order in the CSV.
idx2label = sorted(list(set(total_df["label"])))
label2idx = {label: idx for idx, label in enumerate(idx2label)}
total_df["label_idx"] = total_df["label"].apply(lambda x: label2idx[x])

# BUGFIX: shuffle=True without random_state made the fold assignment differ
# on every run, so train/val membership was not reproducible.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
x = total_df["image_path"].values
y = total_df["label_idx"].values

# Materialize all five (train_idx, val_idx) pairs, keyed by fold number.
fold_dict = dict(enumerate(skf.split(x, y)))

# Train on fold 0 only.
train_idx, val_idx = fold_dict[0]

train_df = total_df.iloc[train_idx]
val_df = total_df.iloc[val_idx]

class MyDataset(Dataset):
    """Image-classification dataset backed by a pandas DataFrame.

    Each row must provide an "image_path" (relative to ``parent_path``) and a
    precomputed integer "label_idx".
    """

    def __init__(self, df, transform, parent_path="./ReversoContextClass/images/"):
        super().__init__()
        self.df = df                    # rows: image_path + label_idx
        self.transform = transform      # per-item transform (PIL image -> tensor)
        self.parent_path = parent_path  # directory prefixed to every image_path

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        # BUGFIX: the fold splits are built from total_df["image_path"], so the
        # path column is "image_path"; row["name"] would raise KeyError here.
        image_path = row["image_path"]
        label = row["label_idx"]

        # Force RGB so grayscale/RGBA files become 3-channel before Normalize.
        data = Image.open(os.path.join(self.parent_path, image_path)).convert("RGB")
        data = self.transform(data)

        return data, torch.tensor(label, dtype=torch.long)

# Per-channel statistics used for input normalization
# (presumably precomputed on this dataset's training images — confirm).
image_mean = [0.6604482, 0.64209795, 0.6145322]
image_std = [0.3236943, 0.32059464, 0.33661628]
imgsz = 224


def _build_transform():
    """Resize -> tensor -> normalize pipeline shared by train and val.

    No augmentation is applied to the training split, so both splits use the
    exact same preprocessing.
    """
    return T.Compose([
        T.Resize((imgsz, imgsz)),
        T.ToTensor(),
        T.Normalize(mean=image_mean, std=image_std),
    ])


train_transform = _build_transform()
val_transform = _build_transform()

train_ds = MyDataset(train_df, train_transform)
val_ds = MyDataset(val_df, val_transform)

# ResNet-18 backbone; weights come from a local checkpoint, so skip the
# torchvision pretrained download.
model = resnet18(pretrained=False)
# strict=False tolerates head/key mismatches in the checkpoint, but the
# original silently discarded that information — report what was skipped
# so a wrong checkpoint doesn't go unnoticed.
_load_result = model.load_state_dict(
    torch.load("./semi_weakly_supervised_resnet18-118f1556.pth", map_location="cpu"),
    strict=False,
)
if _load_result.missing_keys or _load_result.unexpected_keys:
    print(f"state_dict load: missing={_load_result.missing_keys}, "
          f"unexpected={_load_result.unexpected_keys}")
# Replace the 1000-class ImageNet head with one sized for this label set.
model.fc = nn.Linear(model.fc.in_features, len(idx2label))

from fireoil import Trainer
from sklearn.metrics import precision_recall_fscore_support, accuracy_score


class MyTrainer(Trainer):
    """fireoil Trainer specialization for the image classifier."""

    def compute_loss(self, model, batch, device, loss_fn=None, return_preds=False):
        """Forward a (images, labels) batch and compute the loss.

        Returns a dict with "loss" and, when ``return_preds`` is True, the
        raw logits under "preds".
        """
        x, y = batch
        x, y = x.to(device), y.to(device)
        y_hat = model(x)
        loss = loss_fn(y_hat, y)

        return {"loss": loss, "preds": y_hat} if return_preds else {"loss": loss}

    def compute_metrics(self, preds, batch):
        """Compute accuracy and macro-averaged precision/recall/F-score.

        "metric" carries the accuracy — presumably the value the training
        loop uses for model selection (confirm against fireoil.Trainer).
        """
        _, y = batch  # images are not needed for metric computation
        y_hat = preds.detach().cpu().argmax(dim=-1).numpy()
        y = y.detach().cpu().numpy()

        # zero_division=0 keeps the previous numeric behavior (undefined
        # precision/recall count as 0.0) but silences the per-batch
        # UndefinedMetricWarning when a class is absent from a batch.
        p, r, fb, _ = precision_recall_fscore_support(
            y, y_hat, average="macro", zero_division=0
        )
        accuracy = accuracy_score(y, y_hat)

        return {"metric": accuracy, "precision": p, "recall": r, "fbeta": fb}


# Standard multi-class classification objective; expects raw logits.
loss_fn = nn.CrossEntropyLoss()

# NOTE(review): trains on fold 0 only, and args came from parse_args([]),
# so every hyperparameter is the parser default — confirm both are intended.
trainer = MyTrainer(args, model, train_ds, val_ds, loss_fn)
trainer.train()