import os
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
import numpy as np
import torch
from transformers import BertModel, BertTokenizer
from bottle import get, run,Bottle,request

# Lazily-initialized module-level cache: loading BERT weights is expensive
# (network download + deserialization), so do it once per process instead
# of on every HTTP request.
_bert_model = None
_bert_tokenizer = None


def _load_bert():
    """Load (at most once) and return the pretrained BERT model and tokenizer."""
    global _bert_model, _bert_tokenizer
    if _bert_model is None:
        # bert-base-uncased: case-insensitive English BERT.
        _bert_model = BertModel.from_pretrained('bert-base-uncased')
        _bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    return _bert_model, _bert_tokenizer


def transForm(sentence):
    """Encode *sentence* with BERT and return its [CLS] embedding.

    Bug fix: the original overwrote the ``sentence`` parameter with a
    hard-coded string and then encoded an unrelated hard-coded
    ``description``, so every call returned the same embedding no matter
    what the caller passed. The actual argument is now encoded.

    Args:
        sentence: text to embed.

    Returns:
        A list containing one list of floats: the last-layer hidden state
        of the first ([CLS]) token — BERT's sentence-level representation.
    """
    model, tokenizer = _load_bert()

    bianma_APIs = []

    # Tokenize the input. add_special_tokens=True inserts [CLS]/[SEP];
    # truncation="longest_first" trims sequences that exceed the model's
    # maximum input length.
    input_ids = tokenizer.encode(sentence, add_special_tokens=True,
                                 truncation="longest_first")
    # Shape (1, seq_len): a single-sentence batch for the model.
    input_ids = torch.tensor([input_ids])

    # Inference only — no gradients needed.
    with torch.no_grad():
        # [0] selects the last-layer hidden states: (batch, seq, hidden).
        last_hidden_states = model(input_ids)[0]
        # Hidden vector of the first ([CLS]) token of the first (only) sentence.
        bianma_APIs.append(last_hidden_states[0][0].tolist())

    return bianma_APIs

app = Bottle()

@app.route('/search')
def sentenceSearch():
    """Embed the ``?sentence=`` query parameter and return it as JSON."""
    query_text = request.query.sentence
    embedding = transForm(query_text)
    return {"data": embedding}

@app.get('/sentenceCtrans')
def sentenceCtrans():
    """Smoke-test endpoint: return the BERT embedding of 'hello' as JSON.

    Bug fixes: the original used bottle's module-level ``@get`` decorator,
    which registers the route on bottle's *default* application — not on
    ``app``, the instance actually served by ``app.run()`` — so this
    endpoint was unreachable. It also returned a bare list, which Bottle
    rejects as a response type; the result is wrapped in a dict so it is
    serialized as JSON, consistent with /search.
    """
    return dict(data=transForm('hello'))


if __name__ == '__main__':
    # Start the server only when executed as a script, so the module can
    # be imported (e.g. for testing) without binding port 80.
    app.run(host='127.0.0.1', port=80)
