# encoding=utf-8
import os

import torch
import torch.nn.functional as F
import torch.nn as nn
from scipy.stats import spearmanr
from model import PromptBert
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from utils.log import MyLog
from tqdm import tqdm
import time
import os

# os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# Guard: torch.cuda.set_device raises on CPU-only machines.
if torch.cuda.is_available():
    torch.cuda.set_device(3)
Bert_path = '/data0/jianyu10/PTM/huggingface_model_cache/chinese-roberta-wwm-ext'
# Bert_path = 'D:/PTM/chinse-roberta-wwm-ext'

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=Bert_path)
# BUG FIX: the condition was inverted ('cpu' if cuda available else 'cuda'),
# silently running on CPU on GPU boxes and crashing on CPU-only ones.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Valid1:
    """Evaluate a trained PromptBert checkpoint on title-pair similarity data.

    For each pair of titles the model embeds a prompt-templated version of the
    title together with a same-length '[PAD]' placeholder (template-bias
    baseline), and the cosine similarity of the two embeddings is appended to
    the input record in the result file.
    """

    def __init__(self, config):
        """Load the checkpoint onto DEVICE and put the model in eval mode.

        Args:
            config: project config object; must provide ``log_dir`` plus
                whatever PromptBert reads from it — assumed from usage,
                TODO confirm against config.py.
        """
        self.config = config
        self.log = MyLog(config.log_dir, __file__, pre='valid_result').getlog()
        self.model = PromptBert(self.config)
        self.model.to(DEVICE)
        # path = config.save_path + '/new_1_0.06797.pt'
        path = 'UseModel/train_loss_best.pth.tar'
        checkpoint = torch.load(path)  # fixed typo: was 'chechpoint'
        self.model.load_state_dict(checkpoint['state_dict'])
        self.log.info(f"model load from {path}")
        self.model.eval()

    def gettmp(self, title):
        """Return (prompted title, prompted placeholder) for a raw title.

        The placeholder replaces every character of the title with one
        '[PAD]' token inside the same template, so the model can subtract
        the template's own contribution.
        """
        temp1 = '{}，它的意思是[MASK]。'
        pad = '[PAD]' * len(title)
        return temp1.format(title), temp1.format(pad)

    def _encode(self, text):
        """Tokenize *text* and return its input_ids tensor moved to DEVICE."""
        return tokenizer.encode_plus(text, return_tensors='pt').get('input_ids').to(DEVICE)

    def _score(self, title_a, title_b):
        """Embed both titles and return (prompted_a, prompted_b, cos_sim tensor).

        Inputs are now moved to DEVICE here — the original valid()/test()
        forgot the .to(DEVICE) that valid1() had, crashing on GPU.
        """
        a, a_tmp = self.gettmp(title_a)
        b, b_tmp = self.gettmp(title_b)
        emb_a = self.model(self._encode(a), self._encode(a_tmp))
        emb_b = self.model(self._encode(b), self._encode(b_tmp))
        return a, b, F.cosine_similarity(emb_a, emb_b, dim=-1)

    def valid(self):
        """Score comma-separated labeled pairs (columns 2 and 3 are the titles)
        and append each record plus its similarity to data/title_pairs.txt.
        """
        # with-blocks fix the original leak: neither file handle was closed.
        with open('data/title_pairs_labeled.txt', 'r', encoding='utf-8') as fin:
            data = fin.readlines()
        with open('data/title_pairs.txt', 'a+', encoding='utf-8') as respath, torch.no_grad():
            for line in tqdm(data):
                resline = line.strip().split(',')
                ori, pos, sim = self._score(resline[2], resline[3])
                print(f'{ori}---{pos}---{sim.item()}')
                respath.write(
                    ','.join(resline) + ',' + str(round(sim.item(), 6)) + '\n')

    def test(self, title, title1):
        """Print and return the cosine similarity of two raw titles."""
        with torch.no_grad():
            ori, pos, sim = self._score(title, title1)
            print(f'{ori}---{pos}---{sim.item()}')
            return sim.item()

    def valid1(self):
        """Score tab-separated bad-case pairs (columns 0 and 1 are the titles,
        last column is the label) and append results to testdata/badcase_res.txt.
        """
        with open('testdata/badcase1.txt', 'r', encoding='utf-8') as fin:
            data = fin.readlines()
        with open('testdata/badcase_res.txt', 'a+', encoding='utf-8') as respath, torch.no_grad():
            for line in tqdm(data):
                resline = line.strip().split('\t')
                label = resline[-1]
                ori, pos, sim = self._score(resline[0], resline[1])
                print(f'{ori}---{pos}---{sim.item()}---{label}')
                respath.write(
                    '\t'.join(resline) + '\t' + str(round(sim.item(), 6)) + '\n')


if __name__ == '__main__':
    from config import Config

    # Run the bad-case evaluation with the project configuration.
    cfg = Config()
    validator = Valid1(cfg)
    validator.valid1()
