import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import requests
import tiktoken

# Download the dataset once, then read it from the local cache.
if not os.path.exists('sales_textbook.txt'):
    url = "https://huggingface.co/datasets/goendalf666/sales-textbook_for_convincing_and_selling/resolve/main/sales_textbook.txt?download=true"
    response = requests.get(url, timeout=60)
    # Fail loudly on HTTP errors instead of silently caching an error page
    # as the dataset file (the original wrote response.content unchecked).
    response.raise_for_status()
    with open('sales_textbook.txt', 'wb') as f:
        f.write(response.content)
with open('sales_textbook.txt', 'r', encoding="utf-8") as f:
    text = f.read()

# Tokenize the whole corpus with the cl100k_base BPE vocabulary.
encoding = tiktoken.get_encoding("cl100k_base")
tokenized_text = encoding.encode(text)


# Convert the token-id list to a 1-D LongTensor so it can be sliced and stacked below.
tokenized_text = torch.tensor(tokenized_text, dtype=torch.long)
# .max() returns the largest token id seen in the corpus;
# .item() converts that single-element tensor to a Python scalar.
# Used below to size the embedding table and the output projection.
max_token_value = tokenized_text.max().item()

batch_size = 32  # number of sequences per batch
context_length = 64  # tokens per training sequence (context window)
# model dimensions
d_model = 512  # embedding size per token
number_heads = 16  # attention heads; d_model is split evenly across them

# split train and valid (first 90% train, last 10% validation)
train_index = int(len(tokenized_text) * .9)
train_data = tokenized_text[:train_index]
valid_data = tokenized_text[train_index:]

data = train_data
# Sample batch_size random start offsets; `high` guarantees that a full
# context_length window plus the one-token-shifted target still fits in `data`.
indexes = torch.randint(low=0, high=len(data) - context_length, size=(batch_size,))
# print(indexes)
# Stack the sub-sequences into one (batch_size, context_length) tensor.
x_batch = torch.stack([data[index:index + context_length] for index in indexes])
print(x_batch.shape)
# Targets are the inputs shifted right by one token (next-token prediction).
y_batch = torch.stack([data[index + 1:index + context_length + 1] for index in indexes])

# define input embedding table
# Rows 0..max_token_value must all be addressable, hence max_token_value + 1.
input_embedding_lookup_table = nn.Embedding(max_token_value + 1, d_model)
# Look up a d_model-sized vector per token id:
# (batch_size, context_length) -> (batch_size, context_length, d_model)
x_batch_embedding = input_embedding_lookup_table(x_batch)
print(x_batch_embedding.shape)
y_batch_embedding = input_embedding_lookup_table(y_batch)

# get positional encoding (fixed sinusoidal table, one row per position)
position_encoding_lookup_table = torch.zeros(context_length, d_model)
position = torch.arange(0, context_length, dtype=torch.float).unsqueeze(1)
# apply the sine & cosine
# div_term is 1 / 10000^(2i/d_model) for every even dimension index 2i,
# computed via exp/log for numerical stability.
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
position_encoding_lookup_table[:, 0::2] = torch.sin(position * div_term)
position_encoding_lookup_table[:, 1::2] = torch.cos(position * div_term)
# add batch to the first dimension: (context_length, d_model) -> (batch_size, context_length, d_model)
position_encoding_lookup_table = position_encoding_lookup_table.unsqueeze(0).expand(batch_size, -1, -1)

# Inject position information by adding the encoding to the token embeddings.
x = x_batch_embedding + position_encoding_lookup_table
# NOTE(review): adding positional encodings to the *target* embeddings is
# unusual — for a training loss, targets are normally the raw token ids
# in y_batch. `y` is unused below; confirm intent.
y = y_batch_embedding + position_encoding_lookup_table

print(x.shape)
print(y.shape)
# get q k v
# Linear projections for queries, keys, and values (bias-free).
w_q = nn.Linear(d_model, d_model, bias=False)
w_k = nn.Linear(d_model, d_model, bias=False)
w_v = nn.Linear(d_model, d_model, bias=False)

q = w_q(x)
k = w_k(x)
v = w_v(x)

# Split d_model into number_heads heads and move the head axis forward:
# (batch, context, d_model) -> (batch, heads, context, head_dim)
Q = q.view(batch_size, context_length, number_heads, d_model // number_heads).permute(0, 2, 1, 3)
K = k.view(batch_size, context_length, number_heads, d_model // number_heads).permute(0, 2, 1, 3)
V = v.view(batch_size, context_length, number_heads, d_model // number_heads).permute(0, 2, 1, 3)

# Raw attention logits: (batch, heads, context, context),
# scaled by sqrt(head_dim) to keep their magnitude stable.
output = Q @ K.transpose(-2, -1) / math.sqrt(d_model // number_heads)

# apply mask: upper-triangular (strictly above the diagonal) positions are
# future tokens; setting them to -inf makes attention causal.
mask = torch.triu(torch.ones(context_length, context_length), diagonal=1).bool()
output = output.masked_fill(mask, float('-inf'))

# Normalize logits into attention weights along the key axis.
attention_score = F.softmax(output, dim=-1)

# Weighted sum of values: (batch, heads, context, head_dim)
A = attention_score @ V
print(A.shape)

# Merge the heads back together: (batch, context, d_model)
A = A.permute(0, 2, 1, 3).reshape(batch_size, context_length, d_model)

# Output projection of the multi-head attention sub-layer.
w_o = nn.Linear(d_model, d_model, bias=False)
output = w_o(A)
print(output.shape)

# Residual connection around the attention sub-layer.
output = output + x

layer_norm = nn.LayerNorm(d_model)
# BUG FIX: layer_norm was constructed but never applied, leaving the
# normalization a no-op. Apply it to the residual sum (post-LN ordering).
output = layer_norm(output)

# apply final linear layer: project d_model features to vocabulary-size logits
output = nn.Linear(d_model, max_token_value + 1)(output)

# Softmax turns the logits into a probability distribution over the vocabulary.
# (For log-probabilities, F.log_softmax would be the numerically stable choice.)
log_items = F.softmax(output, dim=-1)
print(log_items)
