import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import requests
import tiktoken

# Hyperparameters for the toy transformer demo.
batch_size = 4        # number of sequences per training batch
context_length = 16   # tokens per sequence (attention window length)
d_model = 64          # embedding / model width
num_heads = 4         # attention heads; per-head width is d_model // num_heads

# Download the training corpus once and cache it on disk.
if not os.path.exists('sales_textbook.txt'):
    url = 'https://huggingface.co/datasets/goendalf666/sales-textbook_for_convincing_and_selling/raw/main/sales_textbook.txt'
    # Bounded timeout so the script cannot hang forever on a dead connection.
    response = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors instead of silently caching an error page.
    response.raise_for_status()
    with open('sales_textbook.txt', 'wb') as f:
        f.write(response.content)

# Read as UTF-8 explicitly so decoding does not depend on the platform locale.
with open('sales_textbook.txt', 'r', encoding='utf-8') as f:
    text = f.read()

# Tokenize the whole corpus into a 1-D tensor of token ids.
encoding = tiktoken.get_encoding("o200k_base")
tokenized_text = encoding.encode(text)
tokenized_text = torch.tensor(tokenized_text, dtype=torch.long)
# print(tokenized_text)
# Vocabulary bound: the largest token id that actually occurs in the corpus.
max_token_value = tokenized_text.max().item()
# Token-embedding table: one randomly initialized d_model-dim vector per token id.
input_embedding_lookup_table = nn.Embedding(max_token_value + 1, d_model)
# BUG FIX: the label previously said "max_token_value" but the value printed
# is the embedding weight matrix — label now matches what is printed.
print(f"embedding weights: {input_embedding_lookup_table.weight.data}")


# Split the token stream 90/10 into train and validation segments.
# (Renamed from the misspelled `train_idex`; the name is only used locally.)
train_index = int(len(tokenized_text) * 0.9)
train_data = tokenized_text[:train_index]
valid_data = tokenized_text[train_index:]

data = train_data
# Sample batch_size random window start positions; `high` excludes starts that
# would run past the end of the data once context_length (+1 for y) is added.
idxs = torch.randint(low=0, high=len(data) - context_length, size=(batch_size,))
# y is x shifted one token to the right (next-token prediction targets).
x_batch = torch.stack([data[idx:idx + context_length] for idx in idxs])
y_batch = torch.stack([data[idx + 1:idx + context_length + 1] for idx in idxs])
print(x_batch)

import pandas as pd  # kept: only useful for interactive DataFrame previews

# Decode the first sampled window back to text as a sanity check.
# (A bare `pd.DataFrame(x_batch[0].numpy())` expression used to sit here; its
# value was discarded — it only renders in a notebook — so it was removed.)
print(encoding.decode(x_batch[0].numpy()))

# Look up (batch, context_length, d_model) embedding tensors for inputs/targets.
x_batch_embedding = input_embedding_lookup_table(x_batch)
y_batch_embedding = input_embedding_lookup_table(y_batch)

import math

# Sinusoidal positional encoding: even channels get sin, odd channels get cos,
# with geometrically spaced frequencies across the d_model dimension.
positions = torch.arange(0, context_length, dtype=torch.float).unsqueeze(1)
freq_scale = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
position_encoding_lookup_table = torch.zeros(context_length, d_model)
position_encoding_lookup_table[:, 0::2] = torch.sin(positions * freq_scale)
position_encoding_lookup_table[:, 1::2] = torch.cos(positions * freq_scale)
# Prepend a broadcast batch dimension so the table adds directly onto the
# (batch, context, d_model) embedding tensors.
position_encoding_lookup_table = position_encoding_lookup_table.unsqueeze(0).expand(batch_size, -1, -1)
print(f"位置信息: {position_encoding_lookup_table}")

# Add positional information to the token embeddings.
x = x_batch_embedding + position_encoding_lookup_table
y = y_batch_embedding + position_encoding_lookup_table
# NOTE(review): this label says "位置信息" (position info) but the value printed
# is the token embedding tensor — left unchanged, but worth confirming intent.
print(f"位置信息2: {x_batch_embedding}")

# Learnable query/key/value projections (all d_model -> d_model).
Wq = nn.Linear(d_model, d_model)
Wk = nn.Linear(d_model, d_model)
Wv = nn.Linear(d_model, d_model)

Q = Wq(x)
K = Wk(x)
V = Wv(x)

# BUG FIX: the original printed Q's shape but dumped the full K and V tensors;
# print shapes for all three consistently.
print(f"训练权重矩阵: Q = {Q.shape}, K = {K.shape}, V = {V.shape}")

# Split d_model into num_heads independent heads:
# (B, T, C) -> (B, T, H, C/H) -> permute -> (B, H, T, C/H)
head_dim = d_model // num_heads
Q = Q.reshape(batch_size, context_length, num_heads, head_dim).permute(0, 2, 1, 3)
K = K.reshape(batch_size, context_length, num_heads, head_dim).permute(0, 2, 1, 3)
V = V.reshape(batch_size, context_length, num_heads, head_dim).permute(0, 2, 1, 3)

# Scaled dot-product attention scores, shape (B, H, T, T).
output = Q @ K.transpose(-2, -1) / math.sqrt(head_dim)

# Causal mask: True above the diagonal, i.e. positions must not attend to
# later positions; those scores become -inf so softmax zeroes them out.
mask = torch.triu(torch.ones(context_length, context_length), diagonal=1).bool()
output = output.masked_fill(mask, float('-inf'))
print(f"遮罩: mask = {mask},output={output}")

# Softmax over the key dimension turns masked scores into attention weights.
attention_score = F.softmax(output, dim=-1)
print(f"概率归元:softmax = {attention_score}")

# Weighted sum of values per head, then merge the heads back together:
# (B, H, T, head_dim) -> permute -> (B, T, H, head_dim) -> reshape -> (B, T, C)
A = (attention_score @ V).permute(0, 2, 1, 3).reshape(batch_size, context_length, d_model)

# Output projection mixes information across the concatenated heads.
Wo = nn.Linear(d_model, d_model)
output = Wo(A)

print(f"最终:concatenate = {output}")

# Residual connection around the attention sublayer.
output = output + x

# Layer normalization; the FFN below consumes the normalized activations
# while the residual adds onto the un-normalized ones.
Layer_norm = nn.LayerNorm(d_model)
normalized = Layer_norm(output)

# Position-wise feed-forward network: expand 4x, ReLU, project back down.
ffn = nn.Sequential(
    nn.Linear(d_model, 4 * d_model),
    nn.ReLU(),
    nn.Linear(4 * d_model, d_model),
)

# Second residual connection, this time around the FFN.
output = output + ffn(normalized)

print(f"前馈网络:output = {output.shape}")

# Final layer normalization before projecting onto the vocabulary.
output = Layer_norm(output)

# Project d_model activations to per-token vocabulary scores.
# (A fresh, untrained Linear — fine for this demo.)
vocab_projection = nn.Linear(d_model, max_token_value + 1)
output = vocab_projection(output)

# Softmax turns the scores into a probability distribution over the vocabulary.
logits = F.softmax(output, dim=-1)
print(f"大辞典概率集:logits = {logits[0,0].sum()}")

# Greedy decode: most probable token for batch 0, position 0.
predicted_index = torch.argmax(logits[0, 0]).item()
print(f"初步预测:result = {encoding.decode([predicted_index])}")