from collections import Counter
import random
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import jieba

# Special vocabulary tokens (indices 0 and 1 in the vocab built below).
PAD = "@@PAD@@"
UNK = "@@UNK@@"

MAX_SEQ_LEN = 20  # -1 for no truncation
UNK_THRESHOLD = 5  # tokens with frequency <= this are dropped from the vocab
BATCH_SIZE = 128
LEARNING_RATE = 1e-3  # Adam learning rate
EMBEDDING_DIM = 128
HIDDEN_DIM = 256  # GRU hidden size per direction
N_RNN_LAYERS = 2

def seed_everything(seed=1):
    """Seed Python's and PyTorch's RNGs (CPU and all CUDA devices) for reproducibility."""
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force cuDNN to pick deterministic kernels (may be slower).
    torch.backends.cudnn.deterministic = True

def split_data(train_data, num_split=2000):
    """Shuffle *train_data* in place, then split off the last *num_split* examples.

    Returns a (remaining, held_out) pair of lists.
    """
    random.shuffle(train_data)
    held_out = train_data[-num_split:]
    remaining = train_data[:-num_split]
    return remaining, held_out

# NOTE(review): this load is repeated verbatim further down in the file;
# one of the two copies should be removed.
import pandas as pd

train_file = r"D:\DeskS\NLP\cn_nlp\data\train.txt"
df = pd.read_csv(train_file, sep="\t", header=None, names=["id", "category", "sentence"])

# Build the example dicts the preprocessing pipeline expects:
# the "category" column becomes each example's label.
rows = [
    {"sentence": record["sentence"], "label": record["category"]}
    for _, record in df.iterrows()
]


def tokenize(data, max_seq_len=None, tokenizer=None):
    """Segment each example's "sentence" into a "text" token list, in place.

    Args:
        data: iterable of dicts with a "sentence" key; each dict gains a
            "text" key holding the token list.
        max_seq_len: maximum number of tokens kept; None falls back to
            MAX_SEQ_LEN, and any negative value means no truncation.
            BUG FIX: the old code sliced with [:max_seq_len], so -1
            ("no truncation" per the constant's comment) silently dropped
            the last token instead.
        tokenizer: callable mapping a string to an iterable of tokens;
            defaults to jieba.cut (Chinese word segmentation).
    """
    if max_seq_len is None:
        max_seq_len = MAX_SEQ_LEN
    if tokenizer is None:
        tokenizer = jieba.cut
    for example in data:
        tokens = list(tokenizer(example["sentence"]))
        example["text"] = tokens if max_seq_len < 0 else tokens[:max_seq_len]

def create_vocab(data, unk_threshold=UNK_THRESHOLD):
    """Build a token -> index mapping from the examples' "text" fields.

    Tokens appearing unk_threshold times or fewer are excluded (they will be
    mapped to UNK later). PAD and UNK always get indices 0 and 1.

    BUG FIX: the old code iterated over a plain set, so index assignment
    depended on Python's hash randomization and the vocabulary was not
    reproducible across runs even with fixed seeds. Iterating the counter in
    descending-frequency order (ties broken by first occurrence) makes the
    mapping deterministic.
    """
    counter = Counter(token for example in data for token in example["text"])
    token_to_idx = {PAD: 0, UNK: 1}
    for token, count in counter.most_common():
        if count > unk_threshold:
            token_to_idx[token] = len(token_to_idx)
    print(f"Vocab size: {len(token_to_idx)}")  # includes the 2 special tokens
    print(f"Most common token: {counter.most_common(10)}")
    return token_to_idx


def apply_vocab(data, token_to_idx):
    """Convert each example's "text" from tokens to vocab indices, in place.

    Tokens missing from the vocabulary fall back to the UNK index.
    """
    for example in data:
        indices = []
        for token in example["text"]:
            indices.append(token_to_idx.get(token, token_to_idx[UNK]))
        example["text"] = indices


def apply_label_map(data, label_to_idx):
    """Replace each example's string label with its integer class id, in place."""
    for example in data:
        current = example["label"]
        example["label"] = label_to_idx[current]


# print(tokenize(rows))
# token_to_idx = create_vocab(rows)
# print(token_to_idx )
# print(rows[0])

class SentimentDataset(Dataset):
    """Length-sorted dataset of (token-index list, label) pairs.

    Sorting by sequence length groups similarly-sized examples, which keeps
    the per-batch padding small when batches are drawn from nearby indices.
    """

    def __init__(self, data, pad_idx):
        ordered = sorted(data, key=lambda example: len(example["text"]))
        self.texts = [example["text"] for example in ordered]
        self.labels = [example["label"] for example in ordered]
        self.pad_idx = pad_idx

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, index):
        return [self.texts[index], self.labels[index]]

    def collate_fn(self, batch):
        """Pad a list of (text, label) pairs into two stacked long tensors."""
        texts, labels = zip(*batch)
        text_tensors = [torch.tensor(t, dtype=torch.long) for t in texts]
        # Right-pad every 1-d text tensor to the longest in the batch.
        max_len = max(t.size(0) for t in text_tensors)
        padded = [
            F.pad(t, (0, max_len - t.size(0)), value=self.pad_idx)
            for t in text_tensors
        ]
        label_tensor = torch.stack(
            [torch.tensor(label, dtype=torch.long) for label in labels], dim=0
        )
        return [torch.stack(padded, dim=0), label_tensor]


class SequenceClassifier(nn.Module):
    """Bidirectional multi-layer GRU text classifier.

    The final hidden states of every GRU layer and direction are concatenated
    into one vector per example and passed through a linear output layer.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, n_labels, n_rnn_layers, pad_idx):
        super().__init__()

        self.pad_idx = pad_idx

        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.rnn = nn.GRU(
            embedding_dim, hidden_dim, num_layers=n_rnn_layers, batch_first=True, bidirectional=True
        )
        # We take the final hidden state at all GRU layers as the sequence representation.
        # 2 because bidirectional.
        layered_hidden_dim = hidden_dim * n_rnn_layers * 2
        self.output = nn.Linear(layered_hidden_dim, n_labels)

    def forward(self, text):
        """Classify a padded batch of token-id sequences.

        Args:
            text: (batch, seq) long tensor, padded with pad_idx.
        Returns:
            (batch, n_labels) tensor of unnormalized class scores.
        """
        # BUG FIX: the original forward stopped after the embedding lookup and
        # implicitly returned None, so every training/eval step crashed.
        lens = (text != self.pad_idx).sum(dim=1)
        embedded = self.embedding(text)
        # Pack so the GRU skips padded positions; lengths must live on CPU.
        packed = nn.utils.rnn.pack_padded_sequence(
            embedded, lens.cpu(), batch_first=True, enforce_sorted=False
        )
        _, hidden = self.rnn(packed)  # (n_rnn_layers * 2, batch, hidden_dim)
        # Concatenate every layer's/direction's final state per example.
        hidden = hidden.transpose(0, 1).reshape(text.shape[0], -1)
        return self.output(hidden)


def count_parameters(model):
    """Number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for parameter in model.parameters():
        if parameter.requires_grad:
            total += parameter.numel()
    return total

def train(model, dataloader, optimizer, device):
    """Run one optimization epoch over *dataloader*.

    Args:
        model: classifier producing (batch, n_labels) scores.
        dataloader: yields (texts, labels) tensor batches.
        optimizer: optimizer over model.parameters().
        device: device the model lives on; batches are moved there.
    """
    # BUG FIX: evaluate() puts the model in eval mode and nothing switched it
    # back, so all epochs after the first ran with eval-mode semantics.
    model.train()
    for texts, labels in tqdm(dataloader):
        texts, labels = texts.to(device), labels.to(device)
        output = model(texts)
        loss = F.cross_entropy(output, labels)
        # Clears the same gradients as model.zero_grad(), but pairing
        # zero_grad with the optimizer is the conventional idiom.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

def evaluate(model, dataloader, device):
    """Print and return the accuracy of *model* over *dataloader*."""
    count = correct = 0.0
    model.eval()
    with torch.no_grad():
        for texts, labels in tqdm(dataloader):
            # BUG FIX: batches were never moved to *device*, which crashes
            # with a device mismatch whenever the model is on the GPU.
            texts, labels = texts.to(device), labels.to(device)
            # shape: (batch_size, n_labels)
            output = model(texts)
            predicted = output.argmax(dim=1)
            count += len(predicted)
            correct += (predicted == labels).sum().item()
    accuracy = correct / count
    print(f"Accuracy:{accuracy}")
    return accuracy


# NOTE(review): duplicates the load performed near the top of the file;
# only one copy should be kept.
import pandas as pd

train_file = r"D:\DeskS\NLP\cn_nlp\data\train.txt"
df = pd.read_csv(train_file, sep="\t", header=None, names=["id", "category", "sentence"])

# One example dict per row; "category" is the example's label.
rows = [
    {"sentence": sentence, "label": category}
    for sentence, category in zip(df["sentence"], df["category"])
]

# BUG FIX: seed_everything was defined but never called, so the shuffled
# splits (and everything downstream) differed on every run.
seed_everything()

# Hold out 10% for test, then 10% of the remainder for dev.
train_data, test_data = split_data(rows, int(0.1 * len(rows)))
train_data, dev_data = split_data(train_data, int(0.1 * len(train_data)))

# Segment every split into token lists (truncated to MAX_SEQ_LEN).
for data in (train_data, dev_data, test_data):
    tokenize(data)

# Build the vocabulary from the training split only, to avoid leakage.
token_to_idx = create_vocab(train_data)

# Category names in class-id order; ids are assigned by list position.
_label_names = [
    "Addictive Behavior",
    "Age",
    "Allergy Intolerance",
    "Compliance with Protocol",
    "Consent",
    "Diagnostic",
    "Disease",
    "Enrollment in other studies",
    "Laboratory Examinations",
    "Life Expectancy",
    "Organ or Tissue Status",
    "Pharmaceutical Substance or Drug",
    "Risk Assessment",
    "Smoking Status",
    "Therapy or Surgery",
]
label_to_idx = {name: idx for idx, name in enumerate(_label_names)}

# Convert tokens to vocab indices and labels to class ids for every split.
for data in (train_data, dev_data, test_data):
    apply_vocab(data, token_to_idx)
    apply_label_map(data, label_to_idx)

pad_idx = token_to_idx[PAD]
train_dataset = SentimentDataset(train_data, pad_idx)
dev_dataset = SentimentDataset(dev_data, pad_idx)
test_dataset = SentimentDataset(test_data, pad_idx)

def _make_loader(dataset, shuffle=False):
    """DataLoader with the shared batch size and the dataset's own collator."""
    return DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=dataset.collate_fn
    )


# Only the training loader shuffles; dev/test keep the length-sorted order.
train_dataloader = _make_loader(train_dataset, shuffle=True)
dev_dataloader = _make_loader(dev_dataset)
test_dataloader = _make_loader(test_dataset)

# Instantiate the classifier over the full vocabulary (specials included).
model = SequenceClassifier(
    len(token_to_idx),
    EMBEDDING_DIM,
    HIDDEN_DIM,
    len(label_to_idx),
    N_RNN_LAYERS,
    pad_idx,
)
print(f"Model has {count_parameters(model)} parameters.")

optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# Prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


N_EPOCHS = 20

# Untrained-model accuracy gives a random-guessing baseline to beat.
# BUG FIX: removed leftover debug prints ("1", "2", "kkkk") that cluttered
# the training log; also dropped needless f-strings with no placeholders.
print("Random baseline")
evaluate(model, dev_dataloader, device)

for epoch in range(N_EPOCHS):
    print(f"Epoch {epoch + 1}")  # 0-based -> 1-based
    train(model, train_dataloader, optimizer, device)
    evaluate(model, dev_dataloader, device)

print("Test set performance")
evaluate(model, test_dataloader, device)
