import os
from cProfile import label
from pyexpat import features

import numpy as np
from numpy import dtype
from sympy import expand_mul
from torch.utils.data import Dataset, DataLoader
import cv2
from transformers import BertTokenizer, BertModel
import torchvision.transforms as transforms
import torch
import torch.nn as nn
import torchvision
from sklearn.metrics import f1_score

# Training hyperparameters.
batch_size = 32
learning_rate = 1e-4
max_epochs = 2
# Use the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Shared BERT tokenizer (used by the collate function) and the per-sample
# image preprocessing pipeline (used by the dataset).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', cache_dir="./cache")
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize((224, 224)),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet channel means
                         std=[0.229, 0.224, 0.225])   # ImageNet channel stds
])

# Root directory of the MVSA-Single dataset.
data_root = "data/MVSA_Single"

def read_data():
    """Read the MVSA-Single annotation file and keep agreeing examples.

    Each annotation line has the form ``<id>\\t<text_label>,<image_label>``.
    Only examples whose text and image sentiment labels agree are kept,
    so every retained example has a single unambiguous gold label.

    Returns:
        list[dict]: dicts with keys 'id' (str) and 'label' (str).
    """
    examples = []
    label_path = os.path.join(data_root, "labelResultAll.txt")
    with open(label_path, "r", encoding="utf-8") as f:
        # The first line is a header, so skip it.
        for line in f.readlines()[1:]:
            line = line.strip("\n")
            if not line:
                # Guard against a trailing blank line at EOF.
                continue
            # Avoid shadowing the builtin `id`.
            example_id, labels = line.split("\t")
            text_label, image_label = labels.split(",")
            # Discard examples where the two modalities disagree.
            if text_label == image_label:
                examples.append({
                    'id': example_id,
                    'label': text_label,
                })
    return examples

def split_data(examples, train_ratio=0.8):
    """Randomly partition examples into train and validation splits.

    Args:
        examples: list of example dicts.
        train_ratio: fraction of examples placed in the training split.

    Returns:
        (train_examples, valid_examples): two numpy object arrays whose
        concatenation is a random permutation of `examples`.
    """
    # np.random.permutation(n) is arange(n) + shuffle, so RNG state
    # advances exactly as an explicit shuffle would.
    order = np.random.permutation(len(examples))
    shuffled = np.array(examples)[order]

    cut = int(len(shuffled) * train_ratio)
    return shuffled[:cut], shuffled[cut:]

def create_label_dict(examples):
    """Build a mapping from label string to a contiguous integer id.

    Ids are assigned in first-seen order, so the mapping depends on the
    order of `examples`.

    Args:
        examples: iterable of dicts with a 'label' key.

    Returns:
        dict: {label_str: label_id} with ids 0..n_labels-1.
    """
    label_dict = {}  # {label_str: label_id}
    for example in examples:
        label = example['label']
        # `in` instead of the non-idiomatic `__contains__` dunder call.
        if label not in label_dict:
            label_dict[label] = len(label_dict)

    return label_dict

class MVSA_Dataset(Dataset):
    """MVSA-Single dataset yielding (image tensor, raw text, label id) triples.

    Example id `X` maps to `<data_root>/data/X.jpg` and `<data_root>/data/X.txt`.
    Tokenization is deferred to the collate function so padding happens per batch.
    """

    def __init__(self, examples, transform, label_dict):
        super(MVSA_Dataset, self).__init__()
        self.examples = examples      # list/array of {'id': str, 'label': str}
        self.transform = transform    # torchvision transform applied to the image
        self.label_dict = label_dict  # {label_str: label_id}

    def __getitem__(self, index):
        example = self.examples[index]
        image_path = os.path.join(data_root, 'data', example['id'] + ".jpg")
        text_path = os.path.join(data_root, 'data', example['id'] + ".txt")

        # IMREAD_COLOR forces a 3-channel result (IMREAD_UNCHANGED could return
        # grayscale or BGRA images, which would break the 3-channel Normalize).
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if img is None:
            # cv2.imread silently returns None on a missing/corrupt file.
            raise FileNotFoundError("cannot read image: {}".format(image_path))
        # cv2 loads BGR; the ImageNet normalization constants assume RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_tensor = self.transform(img)

        # The tweet text is stored on the first line of the .txt file;
        # readline() tolerates an empty file (returns "").
        with open(text_path, "r", encoding='utf-8') as f:
            text = f.readline().strip("\n")

        label_id = self.label_dict[example["label"]]

        # img_tensor: tensor, text: str, label_id: int
        return img_tensor, text, label_id

    def __len__(self):
        return len(self.examples)

def my_collate_fn(batch):
    """Collate (img_tensor, text, label_id) triples into batched tensors.

    Args:
        batch: list of (img_tensor, text, label_id) from MVSA_Dataset.

    Returns:
        img_tensor:   [B, C, H, W] float tensor.
        text_tensor:  tokenizer output with input_ids / attention_mask /
                      token_type_ids, each [B, 512].
        label_tensor: [B] long tensor of label ids.
    """
    # Transpose the list of triples into three parallel sequences.
    imgs, texts, labels = zip(*batch)

    img_tensor = torch.stack(list(imgs), dim=0)  # [B, C, H, W]
    text_tensor = tokenizer.batch_encode_plus(
        batch_text_or_text_pairs=list(texts),
        truncation=True,
        padding='max_length',
        max_length=512,
        return_tensors='pt',
    )
    label_tensor = torch.tensor(list(labels), dtype=torch.long)

    return img_tensor, text_tensor, label_tensor

def test_dataset(examples, label_dict):
    """Smoke test: build a dataset + dataloader and pull one batch."""
    loader = DataLoader(
        MVSA_Dataset(examples, transform, label_dict),
        batch_size=4,
        shuffle=False,
        collate_fn=my_collate_fn,
    )
    # Materialize a single batch, then stop.
    for _ in loader:
        break

class MVSASentimentAnalysisModel(nn.Module):
    """Late-fusion sentiment classifier.

    A frozen ResNet-50 produces a 2048-d image feature and a frozen BERT
    produces a 768-d text feature; the concatenation is classified by a
    small trainable MLP head into 3 sentiment classes.
    """

    def __init__(self):
        # Must run before any submodule is assigned, otherwise nn.Module's
        # attribute machinery is uninitialized and assignment raises.
        super(MVSASentimentAnalysisModel, self).__init__()

        resnet = torchvision.models.resnet50(pretrained=True)
        # Drop the final fc layer; the remaining stack ends at global
        # average pooling and outputs [B, 2048, 1, 1].
        self.image_feature_extractor = nn.Sequential(*list(resnet.children())[:-1])
        self.text_feature_extractor = BertModel.from_pretrained('bert-base-uncased', cache_dir='./cache')

        # Freeze both backbones: iterate .parameters() (iterating the module
        # itself yields submodules, and BertModel is not iterable at all) and
        # set the requires_grad *attribute* (assigning to `requires_grad_`
        # would just shadow the method and freeze nothing).
        for p in self.image_feature_extractor.parameters():
            p.requires_grad = False
        for p in self.text_feature_extractor.parameters():
            p.requires_grad = False

        # Trainable fusion head: 2048 (image) + 768 (text) -> 3 classes.
        self.fc1 = nn.Linear(768 + 2048, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 3)

    def forward(self, img_tensor, text_tensor):
        # img_tensor: [B, C, H, W]
        # text_tensor: input_ids / attention_mask / token_type_ids, each [B, 512]
        img_feature = self.image_feature_extractor(img_tensor)  # [B, 2048, 1, 1]
        # flatten (not squeeze) so a batch of size 1 keeps its batch dim.
        img_feature = torch.flatten(img_feature, start_dim=1)   # [B, 2048]

        bert_output = self.text_feature_extractor(
            text_tensor['input_ids'],
            text_tensor['attention_mask'],
            text_tensor['token_type_ids']
        )
        text_feature = bert_output['pooler_output']  # [B, 768]

        feature = torch.cat([img_feature, text_feature], dim=1)  # [B, 2048+768]
        # torch.relu applies the activation; `nn.ReLU(x)` merely constructs a
        # module. An activation between each pair of linear layers keeps them
        # from collapsing into a single linear map.
        feature = torch.relu(self.fc1(feature))
        feature = torch.relu(self.fc2(feature))
        logits = self.fc3(feature)  # [B, 3]

        return logits

def train(dataloader, model):
    """Train `model` for `max_epochs` epochs over `dataloader`.

    Args:
        dataloader: yields (imgs, texts, labels) batches (see my_collate_fn).
        model: nn.Module mapping (imgs, texts) -> [B, 3] logits.
    """
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Move the model to the same device the batches are moved to, and
    # enable training-mode behavior (dropout, batchnorm running stats).
    model.to(device)
    model.train()

    for epoch in range(max_epochs):
        for step, batch in enumerate(dataloader):
            imgs, texts, labels = batch
            imgs = imgs.to(device)
            texts = texts.to(device)
            labels = labels.to(device)

            pred_logits = model(imgs, texts)  # [B, 3]
            loss = criterion(pred_logits, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 10 == 0:
                print("Epoch: {:d}, Step: {:d}, Loss: {:.4f}".format(epoch, step, loss.item()))

def valid(dataloader, model):
    """Evaluate `model` on `dataloader` and return the macro-averaged F1.

    Args:
        dataloader: yields (imgs, texts, labels) batches (see my_collate_fn).
        model: nn.Module mapping (imgs, texts) -> [B, 3] logits.

    Returns:
        float: macro F1 over all batches.
    """
    golden_labels = []
    predicted_labels = []

    # eval() disables dropout / freezes batchnorm stats; no_grad() skips
    # autograd graph construction, cutting memory use during validation.
    model.eval()
    with torch.no_grad():
        for step, batch in enumerate(dataloader):
            imgs, texts, labels = batch
            imgs = imgs.to(device)
            texts = texts.to(device)
            labels = labels.to(device)

            pred_logits = model(imgs, texts)
            _, pred_idx = torch.max(pred_logits, dim=1)

            golden_labels.extend(labels.cpu().numpy().tolist())
            predicted_labels.extend(pred_idx.cpu().numpy().tolist())

    f1 = f1_score(golden_labels, predicted_labels, average="macro")
    return f1

if __name__ == '__main__':
    # Load annotations, build the label vocabulary, and create the
    # train/validation split. Note: train()/valid() are defined above
    # but not invoked here yet.
    examples = read_data()
    label_dict = create_label_dict(examples)
    # test_dataset(examples, label_dict)
    train_examples, valid_examples = split_data(examples)
    print('finish')
