from PIL import Image
import requests
import torch
from torch import nn
from torch.optim import Adam
import pandas as pd
import torchvision
from torchvision import transforms
from data import FakeNewsDataset
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics import accuracy_score, confusion_matrix
from types import MethodType

from model import CLIP
from tqdm import tqdm
from typing import Any, Optional, Tuple, Union
from ignite.metrics import Accuracy,Precision,Recall
import os
from tqdm import tqdm
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"

"""
相关参数
"""
epochs = 0
MAX_LEN = 500
root_dir = "/root/autodl-tmp/fakeddit/"


def train():
    """Train the CLIP-based fake-news classifier and evaluate it.

    Reads the Fakeddit train/test TSV splits, resumes from ``best.pt`` when
    present, trains for ``epochs`` epochs (evaluating on the test split after
    each epoch), then saves the final model to ``best.pt``.
    """
    # Fix: removed an unused download of a COCO sample image here — it made a
    # network request on every run and the result was never used.
    df_train = pd.read_csv("/root/autodl-tmp/fakeddit/train.tsv", sep='\t', header=0, index_col='index')
    df_test = pd.read_csv("/root/autodl-tmp/fakeddit/test.tsv", sep='\t', header=0, index_col='index')

    # Resize to the CLIP input size; normalization is deliberately disabled
    # (kept commented for reference).
    image_transform = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize(size=(224, 224)),
            torchvision.transforms.ToTensor(),
            # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]
    )

    # Training set is capped at 100 samples — TODO confirm against FakeNewsDataset's signature.
    transformed_dataset_train = FakeNewsDataset(df_train, root_dir, image_transform, 100)
    transformed_dataset_val = FakeNewsDataset(df_test, root_dir, image_transform)

    train_dataloader = DataLoader(transformed_dataset_train, batch_size=32,
                                  shuffle=True, num_workers=4)
    val_dataloader = DataLoader(transformed_dataset_val, batch_size=64,
                                shuffle=True, num_workers=0)

    criterion = nn.CrossEntropyLoss()

    model = CLIP()
    # Resume from the previous checkpoint when one exists.
    if os.path.exists("best.pt"):
        checkpoint = torch.load("best.pt")
        model = checkpoint['model']
        print("load model success")

    model = model.cuda()
    optimizer = Adam(model.parameters(), lr=5e-5)

    for epoch_i in tqdm(range(epochs)):
        model.train()
        # Fix: accuracy counter is now per-epoch and actually reported
        # (previously accumulated across epochs and never used).
        total_acc_train = 0
        for step, batch in enumerate(train_dataloader):
            labels = batch["label"].cuda()
            logits = model(batch)
            batch_loss = criterion(logits, labels)

            total_acc_train += (logits.argmax(dim=1) == labels).sum().item()

            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
        print("epoch", epoch_i, "train correct:", total_acc_train)
        test(model, val_dataloader)

    torch.save({"model": model}, "best.pt")
    test(model, val_dataloader)

def test(model, val_dataloader):
    """Evaluate ``model`` on ``val_dataloader`` and print metrics.

    Prints ignite precision/recall/accuracy, the raw count of correct
    predictions, and the confusion matrix over the whole validation set.
    """
    model.eval()  # fix: was called twice
    m_precision = Precision()
    m_recall = Recall()
    m_accuracy = Accuracy()
    total_acc_val = 0
    # Fix: collect all predictions/labels so the confusion matrix covers the
    # full validation set (previously it was recomputed per batch and only the
    # last batch's matrix was printed; it also raised NameError on an empty loader).
    all_labels = []
    all_preds = []

    for batch in tqdm(val_dataloader):
        labels = batch["label"].cuda()

        with torch.no_grad():
            logits = model(batch)
        preds = logits.argmax(dim=1)

        total_acc_val += (preds == labels).sum().item()
        all_labels.extend(labels.cpu().numpy().tolist())
        all_preds.extend(preds.cpu().numpy().tolist())

        m_precision.update((preds, labels))
        m_recall.update((preds, labels))
        m_accuracy.update((preds, labels))

    print("m_precision", m_precision.compute())
    print("m_recall", m_recall.compute())
    print("m_accuracy", m_accuracy.compute())
    print("acc:", total_acc_val)
    if all_labels:
        print(confusion_matrix(all_labels, all_preds).ravel())

if __name__=="__main__":
    train()
