import os
# Pin the process to GPU 2; must be set before torch/CUDA is first initialised.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader, TensorDataset
import numpy as np
import pandas as pd  # NOTE(review): duplicate of the pandas import above — safe to drop
import random
import re
from transformers import BertTokenizer
from sklearn.metrics import f1_score, confusion_matrix, accuracy_score,\
                        classification_report, precision_recall_fscore_support

# ---------------------------------------------------------------------------
# Load the utterance table and split it into train/valid/test by TV-series id
# so that whole series never straddle two splits.
# ---------------------------------------------------------------------------
data_text = pd.read_csv('../MMMTD_features/data_with_personality.csv')

# The first two characters of `vvid` encode the TV-series number
# (assumes `vvid` is a string column — TODO confirm against the CSV).
data_text['tv'] = data_text['vvid'].str[:2].astype(int)

tv = data_text['tv']
train_index = tv <= 26
valid_index = (tv == 32) | (tv >= 39)
test_index = tv.between(27, 31) | tv.between(33, 37)

data_text_train = data_text[train_index]
data_text_valid = data_text[valid_index]
data_text_test = data_text[test_index]

# Utterance text ('说话内容') and base-emotion label ('Emotion_base') per split.
x_train = data_text_train['说话内容'].tolist()
train_label = data_text_train['Emotion_base'].tolist()

# NOTE(review): the validation split is built here but never consumed below —
# validation() actually evaluates on the test loader.
ut_text_valid = data_text_valid['说话内容'].tolist()
ut_label_valid = data_text_valid['Emotion_base'].tolist()

x_test = data_text_test['说话内容'].tolist()
test_label = data_text_test['Emotion_base'].tolist()

# An alternative split would be train_test_split(..., stratify=labels) to keep
# the label distribution identical across splits; the series-based split above
# is used instead so speakers/series do not leak between splits.

# Tokenise with the Chinese BERT vocabulary; pad within the batch and
# truncate anything longer than 120 tokens.
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

def _encode(texts):
    # Shared encoding settings for every split.
    return tokenizer(texts, truncation=True, padding=True, max_length=120)

train_encoding = _encode(x_train)
test_encoding = _encode(x_test)

# Wraps pre-tokenised encodings + labels as a torch Dataset yielding dicts.
class NewsDataset(Dataset):
    """Dataset over tokenizer output; each item is a dict of tensors.

    `encodings` is the dict returned by the tokenizer (input_ids,
    attention_mask, token_type_ids, ...); `labels` is a sequence of
    int-convertible class ids.
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # One sample: every encoding field as a tensor, plus the integer
        # class label under the 'labels' key (the name the model expects).
        sample = {name: torch.tensor(values[idx])
                  for name, values in self.encodings.items()}
        sample['labels'] = torch.tensor(int(self.labels[idx]))
        return sample

# Materialise the train/test splits as torch Datasets.
train_dataset = NewsDataset(train_encoding, train_label)
test_dataset = NewsDataset(test_encoding, test_label)

# print(train_dataset[1])
# # attention_mask marks real tokens (1) vs padding (0)
# # input_ids includes special tokens such as [CLS] / [SEP]
# # token_type_ids distinguishes the first sentence from the second
# Accuracy helper: fraction of rows whose argmax prediction matches the label.
def flat_accuracy(preds, labels):
    """Mean accuracy of `preds` (logits, shape [n, n_classes]) vs `labels`."""
    predicted = np.argmax(preds, axis=1).flatten()
    actual = labels.flatten()
    return np.mean(predicted == actual)

from transformers import BertForSequenceClassification, AdamW, get_linear_schedule_with_warmup

# 8-way emotion classifier on top of Chinese BERT.
model = BertForSequenceClassification.from_pretrained('bert-base-chinese', num_labels=8)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Batch loaders. Evaluation order is fixed (shuffle=False) so the per-batch
# averaged accuracy is reproducible across runs.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False)

# Optimiser + linear warmup/decay schedule.
# NOTE(review): transformers.AdamW is deprecated in favour of torch.optim.AdamW;
# kept as-is because swapping it would slightly change optimiser behaviour.
EPOCHS = 5  # must match the epoch loop at the bottom of the file
optim = AdamW(model.parameters(), lr=2e-5)
# Bug fix: the schedule must span ALL epochs. The previous value
# (len(train_loader) * 1) made the learning rate decay to 0 after the first
# epoch, so epochs 2-5 effectively did not train.
total_steps = len(train_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(optim,
                      num_warmup_steps = 100, # Default value in run_glue.py
                      num_training_steps = total_steps)

# One full pass over the training set: forward, backward, clip, step.
def train():
    """Train `model` for one epoch; reads module globals (loader, optim, epoch)."""
    model.train()
    running_loss = 0.0
    n_batches = len(train_loader)

    for step, batch in enumerate(train_loader, start=1):
        optim.zero_grad()

        # Move the batch onto the target device.
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        # Forward pass; supplying `labels` makes the model return the loss first.
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs[0]
        running_loss += loss.item()

        # Backward pass with gradient clipping, then weight and LR updates.
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optim.step()
        scheduler.step()

        if step % 100 == 0:
            print("epoch: %d, iter_num: %d, loss: %.4f, %.2f%%" % (epoch, step, loss.item(), step/n_batches*100))

    print("Epoch: %d, Average training loss: %.4f"%(epoch, running_loss/len(train_loader)))
    
def validation():
    """Evaluate `model` on test_dataloader: loss, accuracy, per-class report.

    NOTE(review): despite the name, this runs on the TEST loader, not the
    validation split built at the top of the file.
    """
    model.eval()
    all_labels = []
    all_preds = []
    accuracy_sum = 0.0
    loss_sum = 0.0

    for batch in test_dataloader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        with torch.no_grad():
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)

        loss_sum += outputs[0].item()
        logits = outputs[1].detach().cpu().numpy()
        label_ids = labels.cpu().numpy()

        all_preds.extend(np.argmax(logits, axis=1))
        all_labels.extend(label_ids)
        # Batches are weighted equally here, so a smaller final batch counts
        # the same as a full one in the averaged accuracy.
        accuracy_sum += flat_accuracy(logits, label_ids)

    n_batches = len(test_dataloader)
    print("Accuracy: %.4f" % (accuracy_sum / n_batches))
    print("Average testing loss: %.4f"%(loss_sum/n_batches))
    print(classification_report(all_labels, all_preds, digits=4))
    print("-------------------------------")


# Main loop: 5 epochs of train + evaluate. `epoch` is intentionally a
# module-level variable — train() reads it as a global for its log lines.
for epoch in range(5):
    print("------------Epoch: %d ----------------" % epoch)
    train()
    validation()