import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import os

from loader import load_flow
from Models import Transformer, Encoder, Decoder


# Fix the RNG seed so results are reproducible across runs: torch controls
# weight init / DataLoader shuffling, numpy covers any numpy-side randomness.
SEED = 156
torch.manual_seed(SEED)
np.random.seed(SEED)

class flowDataset(Dataset):
    """Dataset yielding (label, flow, filename) tuples for traffic classification.

    The index file maps site names to trace filenames, one site per line, in
    the format::

        site<N>: ['<fname>', '<fname>', ...]

    Labels are 0-based (``site1`` -> 0).  Flows are loaded lazily in
    ``__getitem__`` via ``load_flow`` and truncated / zero-padded to exactly
    ``truncate_num`` elements.
    """

    def __init__(self, dataset_fname, load_time=0, load_length=1, load_direction=1, no_neg=True, truncate_num=100):
        # (label, filename) pairs; actual flows are loaded on demand.
        self.trainset = []
        # Flags forwarded verbatim to load_flow().
        self.load_time = load_time
        self.load_length = load_length
        self.load_direction = load_direction
        self.no_neg = no_neg
        # Fixed sequence length every flow is truncated or zero-padded to.
        self.truncate_num = truncate_num
        # Largest 0-based label seen (useful for sizing the classifier head).
        self.max_label = 0
        with open(dataset_fname, 'r') as f:
            for line in f:
                site, fname_list = line.split(": ")
                # "site17" -> 16 (labels are 0-based).
                site = int(site[4:]) - 1
                self.max_label = max(self.max_label, site)
                # Strip the surrounding "['" and "']" then split the quoted names.
                fname_list = fname_list.strip()[2:-2].split("', '")
                for fname in fname_list:
                    self.trainset.append((site, fname))

    def __getitem__(self, index):
        """Return (label, float32 tensor of length truncate_num, filename)."""
        site, fname = self.trainset[index]
        flow = load_flow(fname, load_time=self.load_time, load_length=self.load_length, load_direction=self.load_direction, no_neg=self.no_neg)
        flow = np.array(flow)
        if len(flow) > self.truncate_num:
            flow = flow[:self.truncate_num]
        elif len(flow) < self.truncate_num:
            # Right-pad short flows with zeros up to the fixed length.
            flow = np.pad(flow, (0, self.truncate_num - len(flow)), 'constant')
        flow = torch.from_numpy(flow.astype(np.float32))
        return site, flow, fname

    def __len__(self):
        return len(self.trainset)


# --- Build datasets and loaders (module-level script state used below) ---
print("building dataset...")  # fixed typo: "dateset" -> "dataset"
# Every flow is truncated / zero-padded to this many elements; also reused
# below as the `src` length of the model's final linear layer.
truncate_num = 1000
train_dataset = flowDataset('../dataset_CW/train.fname', load_time=0, load_length=1, load_direction=1, no_neg=True, truncate_num=truncate_num)
test_dataset = flowDataset('../dataset_CW/test.fname', load_time=0, load_length=1, load_direction=1, no_neg=True, truncate_num=truncate_num)

batch_size = 16
print("batch size:", batch_size)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)


# print("loading training data...")
# for labels, flows, fnames in tqdm(train_dataloader):
#     print(labels)
#     print(type(flows))
#     print(flows.shape[0])
#     print(flows[0])
#     print(fnames[0])
#     break



class myModel(nn.Module):
    """Sequence classifier: Transformer encoder followed by two projections.

    The encoder output (batch, src, d_model) is squeezed to one scalar per
    position by ``linear1``, then ``linear2`` maps the resulting length-``src``
    vector to ``n_classes`` logits.  Attribute names (``encoder``, ``linear1``,
    ``linear2``) are part of the checkpoint format and must not change.
    """

    def __init__(self, vocab_size, src, d_model, N, heads, dropout, n_classes):
        super().__init__()
        self.encoder = Encoder(vocab_size, d_model, N, heads, dropout)
        self.linear1 = nn.Linear(d_model, 1)
        self.linear2 = nn.Linear(src, n_classes)

    def forward(self, x):
        # Encode with no attention mask, collapse the model dimension to a
        # scalar per position, then classify the per-position scores.
        hidden = self.encoder(x, None)
        per_position = self.linear1(hidden).squeeze(-1)
        return self.linear2(per_position)


print("cuda available:", torch.cuda.is_available())

# NOTE(review): vocab_size=2921 and n_classes=95 are dataset-specific magic
# numbers — presumably the max token id and the number of monitored sites in
# the CW dataset; confirm against the loader before reusing elsewhere.
# src=truncate_num ties the classifier head to the fixed flow length above.
model = myModel(vocab_size=2921, src=truncate_num, d_model=4, N=1, heads=1, dropout=0.1, n_classes=95)

# Script behavior switches.
train_model = True      # run the training loop below
resume_train = False    # load model.pth before training

evaluate_model = True   # run final evaluation and write evaluate.result

'''
    Train
'''
if train_model:
    learning_rate = 5e-2
    num_epoches = 100
    if resume_train:
        print("resume from model.pth")
        model.load_state_dict(torch.load('model.pth'))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    model.cuda()
    # Best correct-count over an epoch so far.  Since the dataset size is
    # constant across epochs, comparing raw counts is equivalent to comparing
    # training accuracies.
    best_running_acc = 0
    for epoch in range(num_epoches):
        # ---- training pass ----
        model.train()
        running_loss = 0.0   # loss summed over samples
        running_acc = 0.0    # running count of correct predictions
        trained_num = 0      # samples seen so far this epoch
        pbar = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
        pbar.set_description(f'epoch{epoch + 1}')
        for i, (label, x, _) in pbar:
            # The encoder embeds token ids, so inputs must be integral.
            x = x.long()
            x = x.cuda()

            label = label.cuda()

            out = model(x)

            loss = criterion(out, label)

            running_loss += loss.item() * label.size(0)
            pred = torch.argmax(out, dim=-1)
            running_acc += (pred == label).sum().item()

            trained_num += label.size(0)
            pbar.set_postfix({
                'r_acc': f'{running_acc / trained_num:.2f}',
                'r_loss': f'{running_loss / trained_num:.2f}',
            })

            # Back propagation
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Checkpoint whenever the epoch's training accuracy improves.
        if running_acc > best_running_acc:
            best_running_acc = running_acc
            torch.save(model.state_dict(), 'model.pth')
        '''
            Validate
        '''
        with torch.no_grad():
            model.eval()
            num_correct = 0
            valid_num = 0

            for label, x, fname in test_dataloader:
                x = x.long()
                x = x.cuda()

                label = label.cuda()

                out = model(x)
                pred = torch.argmax(out, dim=-1)
                num_correct += (pred == label).sum().item()
                valid_num += label.size(0)

            print(f"epoch{epoch + 1} acc: {num_correct}/{valid_num}, {num_correct / valid_num:.3f}")

    # Save the final weights.  NOTE(review): this overwrites the
    # best-training-accuracy checkpoint saved above — consider a distinct
    # filename if the best checkpoint should be kept.
    torch.save(model.state_dict(), 'model.pth')

'''
    Evaluate
'''
if evaluate_model:
    if not train_model:
        print("load model from model.pth")
        model.load_state_dict(torch.load('model.pth'))
    model.eval()
    model.cuda()

    num_correct = 0
    test_num = 0
    # The context managers guarantee the results file is closed even if an
    # exception is raised mid-evaluation (the original leaked the handle on
    # error), and no_grad() skips building the autograd graph for inference.
    with open('evaluate.result', 'w') as fout, torch.no_grad():
        for label, x, fname in tqdm(test_dataloader):
            x = x.long()
            x = x.cuda()

            label = label.cuda()

            out = model(x)
            pred = torch.argmax(out, dim=-1)
            num_correct += (pred == label).sum().item()
            test_num += label.size(0)
            # One "<true label>, <predicted label>, <filename>" line per sample.
            for i in range(label.size(0)):
                fout.write(f"{label[i].item()}, {pred[i].item()}, {fname[i]}\n")

        print(f"acc: {num_correct}/{test_num}, {num_correct / test_num:.2f}")
        fout.write(f"acc: {num_correct}/{test_num}, {num_correct / test_num:.2f}\n")