import torch
import torch.nn.functional as F
from scipy.stats import spearmanr
import numpy as np
import random

# Probe: dispersion of |y_pred[i] - y_pred[(i+6) % N]| over even indices i.
# arange must be floating point — torch.std raises RuntimeError on int64 input.
y_pred = torch.arange(20, dtype=torch.float32).view(-1, 1)
print(y_pred)
# pos = torch.cat(
#     [torch.cat([y_pred[i], y_pred[i + 1]], dim=-1).unsqueeze(0) for i in
#      range(0, y_pred.shape[0]-1, 2)], dim=0)
# print(pos)
# neg = torch.cat(
#     [torch.cat([y_pred[i], y_pred[(i + 6)%y_pred.shape[0]]], dim=-1).unsqueeze(0) for i in
#      range(0, y_pred.shape[0]-1, 2)], dim=0)
# print(neg)
# torch.std returns a 0-dim tensor, and torch.cat rejects zero-dimensional
# tensors, so each per-pair result is reshaped to (1, 1) before concatenation.
# NOTE(review): each slice holds exactly one element, so the default unbiased
# std is NaN for every entry — presumably this probe was checking exactly that.
a = torch.cat(
    [torch.std(torch.abs(y_pred[i] - y_pred[(i + 6) % y_pred.shape[0]])).reshape(1, 1)
     for i in range(0, y_pred.shape[0] - 1, 2)],
    dim=0)
print(a)
# y_true = torch.arange(20)
# print(y_true)
# # use_row = torch.where((y_true + 1) % 3 != 0)[0]
# # print(use_row)
# y_true = (y_true - y_true % 2 * 2) + 1
# print(y_true)
# print(torch.cat([torch.tensor(0.1).unsqueeze(0), torch.tensor(0.2).unsqueeze(0)], dim=0))
# a = 1/torch.abs(torch.mean(torch.cat([torch.std(torch.randn((1,200)))-torch.tensor([1]) for i in range(10)]))).item()-1
# print(a/100)
# print(spearmanr(list(range(10)), [random.random() for i in range(10)]).correlation)
# print(__file__)
# from transformers import BertTokenizer
#
# Bert_path = 'D:/PTM/chinse-roberta-wwm-ext'
#
# tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=Bert_path)
# a = tokenizer.encode_plus('今天天气',padding='max_length',max_length=10)
# print(a)
# ip = a.get('input_ids')
# for i in ip:
#     print(tokenizer.convert_ids_to_tokens(i))

# print(random.random() > 1)
# print(f'{random.random():.0%}')
# a = torch.arange(0, 11).view(-1, 1)
# print(a)
# batch_len = int(a.size(0) / 3) * 3
# ori = a[0:batch_len - 2:3]
# sim = a[1:batch_len - 1:3]
# neg = a[2:batch_len:3]
# print(ori)
# print(sim)
# print(neg)
# a = np.random.randn(10)
# b = list(range(10))
# print(spearmanr(b,a).correlation)
# y_true = torch.arange(12)
# print(y_true)
# use_row = torch.where((y_true + 1) % 3 != 0)[0]
# y_true = (use_row - use_row % 3 * 2) + 1
# print(y_true)
# with open('data/train.txt', 'r', encoding='utf-8') as f:
#     for idx, i in enumerate(f):
#         print(idx,i)
# import sys
# from loguru import logger as log
#
# format = "{time:YYYY-MM-DD HH:mm:ss} {level} {file}:{line} {message}"
#
# # Change the default log format
# log.configure(handlers=[dict(sink=sys.stderr, format=format)])
# log.add('test.log', rotation='20MB', format=format)
# log.info('test')
# import random
# a = '1'
# b =a
# while a==b:
#     print(id(a),id(b))
#
#     b = random.choice(['1','1','1','2','2'])
#     print(id(a),id(b))
#     print(a,b)
# print(a,b)
# y_pred = torch.arange(0, 10.).view(-1)
# lenloss = torch.sqrt(torch.sum(torch.norm(y_pred, dim=-1) - 1.8 * torch.ones(y_pred.size(0))))
# print((torch.zeros(3).numpy()+[torch])/100)
# Scratch counters, all starting at zero.
a = b = c = 0
# label = torch.tensor([1,0]*int(y_pred.size(0) / 3))
# pos = torch.cat([torch.cat([y_pred[i], y_pred[i + 1], torch.abs(y_pred[i]-y_pred[i + 1])], dim=-1).unsqueeze(0) for i in range(0, int(y_pred.size(0) / 3) * 3 - 2, 3)],dim=0)
# neg = torch.cat([torch.cat([y_pred[i], y_pred[i + 1], torch.abs(y_pred[i]-y_pred[i + 1])], dim=-1).unsqueeze(0) for i in range(1, int(y_pred.size(0) / 3) * 3 - 1, 3)],dim=0)
# print(label)
# print(pos)
# print(neg)
# print(a.unsqueeze(1))
# print(a.unsqueeze(1).shape)
# b = torch.arange(1.,769.)
# ta = torch.ones(768)
# tb = torch.arange(1.,769.)
# print(torch.norm(ta)*torch.norm(tb))
# print(torch.dot(ta,tb))
# print(torch.dot(ta,tb)/(torch.norm(ta)*torch.norm(tb)))
# print(F.cosine_similarity(a.unsqueeze(0),a.unsqueeze(1),dim=-1).size())
# y_true = torch.arange(12)
# print(y_true)
# use_row = torch.where((y_true + 1) % 3 != 0)[0]
# print(use_row)
# y_true = (use_row - use_row % 3 * 2) + 1
# print(y_true)
