import numpy as np
import pandas as pd
import torch
import transformers as ppb # pytorch transformers
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertConfig, BertForMaskedLM, BertForNextSentencePrediction
from transformers import BertModel
 
# Model identifier and local checkpoint directory.
# NOTE(review): 'distilbert_base_uncased' is not the canonical hub id
# (the hub uses hyphens: 'distilbert-base-uncased'), so resolving it via
# from_pretrained(model_name) only works if a directory of that exact name
# happens to exist in the CWD. The checkpoint demonstrably lives at
# MODEL_PATH (see the model load below), so the tokenizer and config are
# loaded from there as well.
# NOTE(review): the checkpoint directory is named "distilbert" but is
# loaded with Bert* classes — confirm the saved weights/config are actually
# BERT-compatible; otherwise switch to DistilBertTokenizer/DistilBertModel.
model_name = 'distilbert_base_uncased'
MODEL_PATH = 'D:/Desktop/信息检索/Bert模型代码实践/distilbert_base_uncased/'

# a. Load the tokenizer from the local checkpoint directory
#    (bug fix: previously loaded from model_name, which is not a valid hub id).
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
# b. Load the model configuration from the same directory.
model_config = BertConfig.from_pretrained(MODEL_PATH)
# Ask the model to also return all hidden states and attention maps.
model_config.output_hidden_states = True
model_config.output_attentions = True
# Load the pretrained weights with the modified configuration.
bert_model = BertModel.from_pretrained(MODEL_PATH, config=model_config)

# Load the SST-2 training data (tab-separated; column 0 = sentence,
# column 1 = label, no header row).
df = pd.read_csv('./SST2/train.tsv', delimiter='\t', header=None)
print(df.head())  # bug fix: a bare df.head() discards its result
print(len(df))
df = df.sample(2000)  # randomly sample 2000 rows to keep encoding affordable

# Tokenize each sentence into a list of token ids, adding the special
# [CLS]/[SEP] tokens so the first position can serve as a sentence summary.
tokenized = df[0].apply(lambda x: tokenizer.encode(x, add_special_tokens=True))

# Right-pad every id list with zeros up to the longest sequence in the batch.
# (max(..., default=0) keeps this safe for an empty frame; the original
# manual max-tracking loop is replaced by the builtin.)
max_len = max((len(ids) for ids in tokenized.values), default=0)
padded = np.array([ids + [0] * (max_len - len(ids)) for ids in tokenized.values])
print(padded.shape)  # bug fix: padded is already an ndarray — no re-wrap needed

# Attention mask: 1 for real tokens, 0 for padding positions.
attention_mask = np.where(padded != 0, 1, 0)

input_ids = torch.tensor(padded)
attention_mask = torch.tensor(attention_mask)
# Inference only — skip autograd bookkeeping.
with torch.no_grad():
    last_hidden_states = bert_model(input_ids, attention_mask=attention_mask)

# Use the final-layer hidden state of the first ([CLS]) token of every
# sequence as a fixed-size sentence embedding.
# NOTE(review): assumes last_hidden_states[0] is (batch, seq_len, hidden) —
# true for BertModel's first output; confirm if the model class changes.
features = last_hidden_states[0][:,0,:].numpy()

# Logistic-regression classification on the extracted [CLS] features.
# A grid search over C in [0.01 .. 0.7] (10 random splits each, same loop
# shape as below) was run previously; C = 0.5 scored best and is used here.
labels = df[1]

# Evaluate over 10 independent random train/test splits and print each
# held-out accuracy, to see the variance across splits.
for _ in range(10):
    train_features, test_features, train_labels, test_labels = train_test_split(features, labels)
    # L1-penalized logistic regression; liblinear is the solver that
    # supports the l1 penalty for this problem size.
    clf = LogisticRegression(penalty="l1", C=0.5, solver="liblinear")
    clf.fit(train_features, train_labels)
    accuracy = clf.score(test_features, test_labels)
    print(accuracy)