import os
import torch
import torch.nn.functional as F
from scipy.stats import spearmanr
from model import SimCSE
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from utils.log import MyLog
from tqdm import tqdm
import time

# Local cache directory for the pretrained Chinese RoBERTa weights.
Bert_path = '/data0/jianyu10/PTM/huggingface_model_cache/chinese-roberta-wwm-ext'
# Bert_path = 'D:/PTM/chinse-roberta-wwm-ext'

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=Bert_path)

# BUG FIX: the condition was inverted ('cpu' if cuda available else 'cuda'),
# which picked the CPU on GPU machines and crashed on CPU-only machines.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class CDataset(Dataset):
    """Sentence-pair dataset for STS-style evaluation.

    Each line of the input file must contain (at least) three
    ``sep``-separated fields; the last three are taken as sentence 1,
    sentence 2 and the label.
    """

    def __init__(self, path, sep='\t'):
        super(CDataset, self).__init__()
        self.data = []  # list of {'sen1': str, 'sen2': str, 'label': str}
        self.load(path, sep)

    def load(self, path, sep):
        """Read ``path`` line by line and populate ``self.data``."""
        start = time.time()
        print(f'loading data from {path}')
        with open(path, 'r', encoding='utf-8') as f:
            for raw in tqdm(f):
                fields = raw.strip().split(sep)
                # BUG FIX: the old `if sample:` check was always truthy
                # (even a blank line yields {'sen1': ''}), so malformed
                # lines were kept and later crashed in __getitem__ on
                # int(None).  Require all three fields instead.
                if len(fields) < 3:
                    continue
                self.data.append(dict(zip(['sen1', 'sen2', 'label'], fields[-3:])))
        end = time.time()
        print(f'load finished, cost:{int(end - start)} s')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        """Tokenize both sentences, pad them to a common length, and return
        a dict with a (2, L) ``input_ids`` tensor on DEVICE and an int label."""
        line = self.data[item]
        sen1 = tokenizer.encode_plus(line.get('sen1')).get('input_ids')
        sen2 = tokenizer.encode_plus(line.get('sen2')).get('input_ids')
        sen_len = max(len(sen1), len(sen2))
        # Right-pad the shorter sentence so both rows have equal length.
        sen1 = sen1 + [tokenizer.pad_token_id] * (sen_len - len(sen1))
        sen2 = sen2 + [tokenizer.pad_token_id] * (sen_len - len(sen2))
        return {'input_ids': torch.tensor([sen1, sen2], device=DEVICE),
                'label': int(line.get('label'))}


class Valid:
    """Evaluate a trained SimCSE checkpoint on several Chinese STS dev sets,
    reporting Spearman correlation between cosine similarity and gold labels."""

    def __init__(self, config):
        self.config = config
        self.log = MyLog(config.log_dir, __file__, pre='valid_result').getlog()
        self.model = SimCSE(self.config).to(self.config.device)
        # Final placement is DEVICE (cuda when available, else cpu).
        self.model.to(DEVICE)
        path = self.config.save_path + '/checkpoint.pth.tar'
        # BUG FIX: map_location lets a GPU-saved checkpoint load on a
        # CPU-only host; without it torch.load raises on deserialization.
        checkpoint = torch.load(path, map_location=DEVICE)
        self.model.load_state_dict(checkpoint['state_dict'])
        self.log.info(f"model load from {path}")
        self.model.eval()
        # dev-set name -> field separator used by that corpus file.
        self.datafiles = {'ATEC': '\t', 'BQ_corpus': ',', 'LCQMC': '\t', 'STS-B': '||'}
        self.dataloader = self.dataLoad()

    def dataLoad(self):
        """Build one DataLoader per dev set, picking the last file whose
        name contains 'dev' in each valid_data/<name>/ directory."""
        result = {}
        for file, sep in self.datafiles.items():
            opath = f'valid_data/{file}/'
            path = opath + [i for i in os.listdir(opath) if 'dev' in i][-1]
            result[file] = DataLoader(CDataset(path, sep), shuffle=True)
        return result

    def valid(self):
        """Run inference on every dev set and log Spearman's rho."""
        for name, loader in self.dataloader.items():
            sims = []
            labels = []
            with torch.no_grad():
                for idx, inputs in tqdm(enumerate(loader, start=1), total=len(loader), desc=f'{name}:'):
                    input_ids = inputs.get('input_ids')
                    label = inputs.get('label')
                    # Batch size is 1, so view(2, -1) recovers the two sentences.
                    outputs = self.model(input_ids.view(2, -1))
                    sim = F.cosine_similarity(outputs[0], outputs[1], dim=-1)
                    sims.append(sim.item())
                    labels.append(int(label))
            spearman_score = spearmanr(labels, sims).correlation
            self.log.info(f"{name}-- Spearman's ρ: {spearman_score:.3f}")


# class testdata(Dataset):
#     def __init__(self, path, sep='\t'):
#         super(testdata, self).__init__()
#         self.data = []
#         self.load()
#
#     def load(self):
#         data = open('data/ldevdata.csv', 'r', encoding='utf-8').readlines()
#         for i in tqdm(data):
#             line = i.strip.split(',')
#             a = 'idx,classes,ori,pos,neg,online_sim,online_dis,sim,dis,label'
#             self.data.append({'idx': line[0], 'ori': line[2], 'pos': line[3], 'neg': line[4]})
#
#     def __len__(self):
#         return len(self.data)
#
#     def __getitem__(self, item):
#         line = self.data[item]
#         sen1 = tokenizer.encode_plus(line.get('ori')).get('input_ids')
#         line['ori'] = sen1
#         sen2 = tokenizer.encode_plus(line.get('pos')).get('input_ids')
#         line['pos'] = sen2
#         sen3 = tokenizer.encode_plus(line.get('neg')).get('input_ids')
#         line['neg'] = sen3
#         return line
#
#
class Valid1:
    """Score (original, positive, negative) sentence triples from
    data/ldevdata.csv with a trained SimCSE model and append the cosine
    similarities to data/resdata.txt."""

    def __init__(self, config):
        self.config = config
        self.log = MyLog(config.log_dir, __file__, pre='valid_result').getlog()
        self.model = SimCSE(self.config).to(self.config.device)
        # Final placement is DEVICE (cuda when available, else cpu).
        self.model.to(DEVICE)
        path = self.config.save_path + '/checkpoint.pth.tar'
        # BUG FIX: map_location lets a GPU-saved checkpoint load on a
        # CPU-only host; without it torch.load raises on deserialization.
        checkpoint = torch.load(path, map_location=DEVICE)
        self.model.load_state_dict(checkpoint['state_dict'])
        self.log.info(f"model load from {path}")
        self.model.eval()

    def valid(self):
        """For each CSV row, embed the ori/pos/neg sentences, print and
        append sim(ori, pos) and sim(ori, neg) to the result file."""
        with open('data/ldevdata.csv', 'r', encoding='utf-8') as f:
            data = f.readlines()
        # BUG FIX: the result file was opened and never closed (leak);
        # the 'with' block guarantees it is flushed and closed.
        with open('data/resdata.txt', 'a+', encoding='utf-8') as respath, torch.no_grad():
            for i in tqdm(data):
                line = i.strip().split(',')
                resline = line[:8]
                ori, pos, neg = line[2], line[3], line[4]
                oriidx = tokenizer.encode_plus(ori, return_tensors='pt').get('input_ids')
                posidx = tokenizer.encode_plus(pos, return_tensors='pt').get('input_ids')
                negidx = tokenizer.encode_plus(neg, return_tensors='pt').get('input_ids')
                oriten = self.model(oriidx)
                posten = self.model(posidx)
                negten = self.model(negidx)
                sim = F.cosine_similarity(oriten, posten, dim=-1)
                unsim = F.cosine_similarity(oriten, negten, dim=-1)
                print(f'{ori}---{pos}---{sim.item()}')
                print(f'{ori}---{neg}---{unsim.item()}')
                # BUG FIX: a ',' was missing between the copied fields and
                # sim (unsim had one), fusing field 8 with the sim value.
                respath.write(','.join(resline) + ',' + str(round(sim.item(), 6))
                              + ',' + str(round(unsim.item(), 6)) + '\n')


if __name__ == '__main__':
    from config import Config

    # Run triple-similarity validation with the project configuration.
    validator = Valid1(Config())
    validator.valid()
