
import torch, types, os, gc, math, json,time,random,pyrwkv_tokenizer
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
from tqdm import tqdm
from collections import defaultdict
import torch.utils.checkpoint as checkpoint
torch.set_num_threads(1)

# Hyper-parameters of the RWKV v7 network defined below.
args = types.SimpleNamespace(
    n_layer=24,
    n_embd=1024,
    head_size=64,
    vocab_size=65536,  # alternative GPT-2 style vocab would be 50304
)

# World tokenizer used for every encode/decode in this script.
tokenizer = pyrwkv_tokenizer.RWKVTokenizer()

class RWKV_Tmix_x070(nn.Module):
    """RWKV v7 time-mixing ("attention") block.

    Unlike a stateless training-time implementation, this variant keeps its
    recurrent state on the module so it can be driven either with a full
    sequence (T > 1) or one token at a time (T == 1):

      - self.time_record: last token of the previous call, shape (B, 1, C),
        used to compute the token-shift delta ``xx``.
      - self.state: per-head WKV matrix state, shape (B, H, N, N), updated
        step by step inside wkv().

    Both are cleared by RWKV.reset().
    """
    def __init__(self, args, layer_id):
        # args must provide n_embd, head_size and dim_att (RWKV.__init__ sets
        # dim_att = n_embd, so n_head = n_embd // head_size).
        super().__init__()
        self.layer_id = layer_id
        self.head_size = args.head_size
        self.n_head = args.dim_att // self.head_size
        assert args.dim_att % self.n_head == 0

        H = self.n_head
        N = self.head_size
        C = args.n_embd
        self.n_embd= args.n_embd

        # Bottleneck widths of the low-rank (LoRA-style) projections below.
        D_DECAY_LORA = 64
        D_AAA_LORA = 64
        D_MV_LORA = 32
        D_GATE_LORA = 128

        # Token-shift mixing coefficients: how much of the shift delta is
        # blended into each branch input (r/w/k/v/a/g).
        # NOTE(review): every parameter here is allocated with torch.empty and
        # never initialized in code — values are expected to come entirely
        # from load_state_dict; confirm the checkpoint provides all of them.
        self.x_r = nn.Parameter(torch.empty(1,1,C))
        self.x_w = nn.Parameter(torch.empty(1,1,C))
        self.x_k = nn.Parameter(torch.empty(1,1,C))
        self.x_v = nn.Parameter(torch.empty(1,1,C))
        self.x_a = nn.Parameter(torch.empty(1,1,C))
        self.x_g = nn.Parameter(torch.empty(1,1,C))

        # Per-channel decay: w = w0 + tanh(xw @ w1) @ w2 (low-rank offset).
        self.w0 = nn.Parameter(torch.empty(1,1,C))
        self.w1 = nn.Parameter(torch.empty(C, D_DECAY_LORA))
        self.w2 = nn.Parameter(torch.empty(D_DECAY_LORA, C))

        # "In-context learning rate": a = sigmoid(a0 + (xa @ a1) @ a2).
        self.a0 = nn.Parameter(torch.empty(1,1,C))
        self.a1 = nn.Parameter(torch.empty(C, D_AAA_LORA))
        self.a2 = nn.Parameter(torch.empty(D_AAA_LORA, C))

        # Value-residual mixing towards the first layer's v; layer 0 is the
        # source of v_first and therefore has no v0/v1/v2.
        if self.layer_id != 0:
            self.v0 = nn.Parameter(torch.empty(1,1,C))
            self.v1 = nn.Parameter(torch.empty(C, D_MV_LORA))
            self.v2 = nn.Parameter(torch.empty(D_MV_LORA, C))

        # Output gate: g = sigmoid(xg @ g1) @ g2.
        self.g1 = nn.Parameter(torch.empty(C, D_GATE_LORA))
        self.g2 = nn.Parameter(torch.empty(D_GATE_LORA, C))

        self.k_k = nn.Parameter(torch.empty(1,1,C))  # key scale before L2-normalize
        self.k_a = nn.Parameter(torch.empty(1,1,C))  # key modulation by `a`
        self.r_k = nn.Parameter(torch.empty(H,N))    # per-head weight of the r*k*v bonus term

        self.time_record = None  # recurrent token-shift state, see class docstring
        self.receptance = nn.Linear(C, C, bias=False)
        self.key = nn.Linear(C, C, bias=False)
        self.value = nn.Linear(C, C, bias=False)
        self.output = nn.Linear(C, C, bias=False)
        self.ln_x = nn.GroupNorm(H, C, eps=64e-5) # !!! notice eps value !!! (much larger than the 1e-5 default)
        self.head_size = args.head_size  # redundant re-assignment; same value as above
        self.state = None  # recurrent WKV matrix state, see class docstring

    def forward(self, x, v_first):
        """One time-mixing pass.

        x: (B, T, C) input; v_first: the layer-0 value tensor (entering layer
        0 it may be None and is overwritten there). Returns (output, v_first)
        so the caller can thread v_first through all layers.
        """
        B, T, C = x.size()
        H = self.n_head
        if self.time_record is None:
            # NOTE(review): created on the default device/dtype — assumes CPU
            # float32 inputs; confirm before moving the model to GPU/half.
            self.time_record = torch.zeros(B,1,C)
        if T == 1:
            # Single-token step: token shift uses the remembered last token.
            xx = self.time_record - x
            self.time_record = x
        else:
            # Sequence step: shift right by one, seeding position 0 with the
            # last token remembered from the previous call.
            xx = torch.cat((self.time_record, x[:, :-1, :]), dim=1)
            xx = xx - x
            self.time_record = x[:, -1:, :]

        # Branch inputs: blend the current token with the shift delta.
        xr = x + xx * self.x_r
        xw = x + xx * self.x_w
        xk = x + xx * self.x_k
        xv = x + xx * self.x_v
        xa = x + xx * self.x_a
        xg = x + xx * self.x_g

        r = self.receptance(xr)
        w = -F.softplus(-(self.w0 + torch.tanh(xw @ self.w1) @ self.w2)) - 0.5 # soft-clamp to (-inf, -0.5)
        k = self.key(xk)
        v = self.value(xv)

        if self.layer_id == 0:
            v_first = v # store the v of the first layer
        else:
            v = v + (v_first - v) * torch.sigmoid(self.v0 + (xv @ self.v1) @ self.v2) # add value residual
        a = torch.sigmoid(self.a0 + (xa @ self.a1) @ self.a2) # a is "in-context learning rate"
        g = torch.sigmoid(xg @ self.g1) @ self.g2

        # L2-normalize the scaled key per head before it enters the recurrence.
        kk = k * self.k_k
        kk = F.normalize(kk.view(B,T,H,-1), dim=-1, p=2.0).view(B,T,C)
        k = k * (1 + (a-1) * self.k_a)
        x = self.wkv(r, w, k, v, -kk, kk*a)
        x = self.ln_x(x.view(B * T, C)).view(B, T, C)
        # Bonus term: per-head scalar (r·k weighted by r_k) times v.
        x = x + ((r.view(B,T,H,-1)*k.view(B,T,H,-1)*self.r_k).sum(dim=-1, keepdim=True) * v.view(B,T,H,-1)).view(B,T,C)
        x = self.output(x * g)
        return x, v_first
    
    def wkv(self,r, w, k, v, a, b):
        """Naive per-timestep WKV-7 recurrence (a = -kk, b = kk*a in forward).

        Updates self.state, shape (B, H, N, N); pure-Python loop over T —
        correct but slow, evidently intended for CPU experimentation.
        """
        B, T, C = r.size()
        H = self.n_head
        N = self.head_size
        r = r.view(B, T, H, N)
        k = k.view(B, T, H, N)
        v = v.view(B, T, H, N)
        a = a.view(B, T, H, N)
        b = b.view(B, T, H, N)
        # Effective decay exp(-exp(w)); since forward clamps w <= -0.5 this
        # lies in (0, 1), so the state always decays.
        w = torch.exp(-torch.exp(w.view(B, T, H, N)))
        out = torch.zeros((B, T, H, N))
        if self.state is None: self.state = torch.zeros((B, H, N, N))
        for t in range(T):
            kk = k[:, t, :].view(B, H, 1, N)
            rr = r[:, t, :].view(B, H, N, 1)
            vv = v[:, t, :].view(B, H, N, 1)
            aa = a[:, t, :].view(B, H, N, 1)
            bb = b[:, t, :].view(B, H, 1, N)
            # S <- S*diag(w) + S (a b) + v k^T   (per head; last dim is the key dim)
            self.state = self.state * w[: , t, :, None, :] + self.state @ aa @ bb + vv @ kk
            out[:, t, :] = (self.state @ rr).view(B, H, N)
        return out.view(B, T, C)
    
class RWKV_CMix_x070(nn.Module):
    """RWKV v7 channel-mixing (feed-forward) block.

    Carries a one-token shift buffer (self.time_record) across forward()
    calls so it can run incrementally; RWKV.reset() clears it by setting it
    back to None.
    """

    def __init__(self, args):
        super().__init__()
        self.n_embd = args.n_embd
        self.time_record = None  # last token of the previous call, (B, 1, C)
        self.x_k = nn.Parameter(torch.empty(1, 1, args.n_embd))
        self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False)
        self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False)

    def forward(self, x):
        batch, seq_len, channels = x.shape
        if self.time_record is None:
            self.time_record = torch.zeros(batch, 1, channels)
        if seq_len == 1:
            # Single-token step: delta against the remembered last token.
            shift_delta = self.time_record - x
            self.time_record = x
        else:
            # Sequence step: right-shift by one, seeded with the remembered
            # last token, then take the difference.
            shifted = torch.cat((self.time_record, x[:, :-1, :]), dim=1)
            shift_delta = shifted - x
            self.time_record = x[:, -1:, :]
        mixed = x + shift_delta * self.x_k
        hidden = torch.relu(self.key(mixed)) ** 2  # squared-ReLU activation
        return self.value(hidden)
    
class Block(nn.Module):
    """One RWKV layer: pre-LN time-mix then pre-LN channel-mix, each added
    back residually. Layer 0 additionally normalizes the raw embedding via
    ln0 (the original comment notes this could be fused into emb)."""

    def __init__(self, args, layer_id):
        super().__init__()
        self.layer_id = layer_id
        if layer_id == 0:
            # Only the first layer owns the embedding LayerNorm.
            self.ln0 = nn.LayerNorm(args.n_embd)
        self.ln1 = nn.LayerNorm(args.n_embd)
        self.ln2 = nn.LayerNorm(args.n_embd)
        self.att = RWKV_Tmix_x070(args, layer_id)
        self.ffn = RWKV_CMix_x070(args)

    def forward(self, x, v_first):
        if self.layer_id == 0:
            x = self.ln0(x)
        att_out, v_first = self.att(self.ln1(x), v_first)
        x = x + att_out
        return x + self.ffn(self.ln2(x)), v_first

class RWKV(nn.Module):
    """Full RWKV v7 model: embedding -> n_layer Blocks -> final LN -> LM head.

    The embedding, every time-mix block, the final LayerNorm and the head are
    frozen (requires_grad False); only the remaining parts are trainable.
    Recurrent state lives inside the blocks and is cleared with reset().
    """

    def __init__(self, args):
        super().__init__()
        # Derived sizes consumed by the sub-modules.
        args.dim_att = args.n_embd
        args.dim_ffn = args.n_embd * 4
        self.n_embd = args.n_embd
        self.emb = nn.Embedding(args.vocab_size, args.n_embd)
        self.emb.requires_grad_(False)
        self.blocks = nn.ModuleList(Block(args, i) for i in range(args.n_layer))
        for blk in self.blocks:
            blk.att.requires_grad_(False)  # freeze every time-mix block
        self.ln_out = nn.LayerNorm(args.n_embd)
        self.ln_out.requires_grad_(False)
        self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False)
        self.head.requires_grad_(False)

    def forward(self, x):
        tokens = self.输入预处理(x)
        hidden = self.emb(tokens)
        v_first = None  # filled in by layer 0, threaded through the rest
        for blk in self.blocks:
            hidden, v_first = blk(hidden, v_first)
        return self.head(self.ln_out(hidden))

    def reset(self):
        """Clear all recurrent state so the next forward starts fresh."""
        for blk in self.blocks:
            blk.att.state = None
            blk.att.time_record = None
            blk.ffn.time_record = None

    def 输入预处理(self, 输入):
        """Normalize str / int / list / tensor input into a (B, T) token tensor."""
        if isinstance(输入, torch.Tensor):
            if 输入.dim() == 2:
                pass  # already batched
            elif 输入.dim() == 1:
                输入 = 输入.unsqueeze(0)
            else:
                raise ValueError(f"输入的维度不正确: {输入.dim()}\n输入数据: {输入}")
        elif isinstance(输入, str):
            输入 = torch.tensor(tokenizer.encode(输入)).unsqueeze(0)
        elif isinstance(输入, int):
            输入 = torch.tensor([输入]).unsqueeze(0)
        elif isinstance(输入, list):
            输入 = torch.tensor(输入).unsqueeze(0)
        return 输入

########################################################################################################
def sample_logits(logits, temperature: float = 1.0, top_p: float = 0.7, top_k: int = 0):
    """Sample one token id from a 1-D logits tensor.

    Applies, in order: optional top-k truncation, nucleus (top-p) filtering,
    and a temperature reshaping of the probabilities, then draws a single
    categorical sample. Returns a plain Python int.
    """
    if logits.dtype != torch.float32:
        logits = logits.float()  # sorting/cumsum below assume float32

    probs = F.softmax(logits, dim=-1)
    ranked_probs, ranked_ids = torch.sort(probs, descending=True)

    # Top-k: keep only the k most likely tokens.
    if 0 < top_k < ranked_probs.size(-1):
        ranked_probs[top_k:] = 0.0
        probs = torch.zeros_like(probs).scatter(-1, ranked_ids[:top_k], ranked_probs[:top_k])

    # Nucleus filtering: keep the smallest prefix whose cumulative mass
    # reaches top_p (the token that crosses the threshold is included).
    if 0 < top_p < 1.0:
        mass = torch.cumsum(ranked_probs, dim=-1)
        threshold = ranked_probs[torch.searchsorted(mass, top_p, right=True)]
        probs = torch.where(probs >= threshold, probs, torch.zeros_like(probs))
        if torch.sum(probs) == 0:
            # Everything got filtered out — fall back to a uniform draw.
            probs = torch.ones_like(probs) / probs.size(-1)
        else:
            probs = probs / torch.sum(probs, dim=-1, keepdim=True)

    if temperature != 1.0:
        # Temperature applied to probabilities (not logits): p^(1/T), renormalized.
        probs = torch.pow(probs, 1.0 / temperature)
        probs = probs / torch.sum(probs, dim=-1, keepdim=True)

    return torch.distributions.Categorical(probs).sample().item()

def 预测(model, prompt, 预测长度=1000):
    """Autoregressive generation with a simple decaying repetition penalty.

    Streams decoded tokens to stdout as they are produced and returns the
    generated text (the prompt is printed but not included in the return).

    model:    an RWKV instance (must expose eval(), reset() and a stateful
              forward accepting str/int input)
    prompt:   anything RWKV.输入预处理 accepts (str / int / list / tensor)
    预测长度:  number of tokens to generate
    """
    with torch.no_grad():
        model.eval()
        model.reset()
        print(prompt, end="", flush=True)
        out = model(prompt)[:, -1, :][0]
        token_counts = defaultdict(float)
        完整文本 = ""
        for _ in range(预测长度):
            # Penalize previously emitted tokens; each count decays by 1%
            # per step so old repetitions fade out.
            for token in token_counts:
                penalty = 0.5 + token_counts[token] * 0.5
                out[token] -= penalty
                token_counts[token] *= 0.99
            token = sample_logits(out)
            token_counts[token] += 1
            out = model(token)[:, -1, :][0]
            try:
                预测文本 = tokenizer.decode([token])
            except Exception:
                # Some token ids decode to invalid UTF-8 fragments; skip them
                # rather than abort. (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                continue
            完整文本 += 预测文本
            print(预测文本, end="", flush=True)
        print("\n", flush=True)
        return 完整文本


# ---------------------------------------------------------------------------
# Script body: build the model, load weights, warm up on a few samples, run a
# demo generation. (Removed: a leftover `breakpoint()` that halted execution
# here, a dead commented-out checkpoint-loading block, and a no-op
# module-level `global` statement.)
# ---------------------------------------------------------------------------
model = RWKV(args)

# Load whatever weights match the architecture; strict=False tolerates
# missing/extra keys in the checkpoint.
model.load_state_dict(torch.load("基座pro.pth", map_location="cpu", weights_only=True), strict=False)

# Rotating checkpoint slots used by 周期训练 (rwkv0.pth / rwkv1.pth / rwkv2.pth)
# so an interrupted torch.save never destroys the only copy.
保存文件序列 = [0, 1, 2]
保存文件编号 = 0
保存文件序号 = 保存文件序列[保存文件编号]

# Warm up the recurrent state on the first few training samples, then run a
# quick qualitative generation check.
with open("测试.jsonl", 'r', encoding='utf-8') as f:
    全部训练数据 = [json.loads(line) for line in f][:10]
prompt = ""
for 训练文本 in 全部训练数据:
    prompt += "User: " + 训练文本['prompt'] + "\n\nAssistant: " + 训练文本['response'] + "\n\n"
model.reset()
with torch.no_grad():
    model.eval()
    model(prompt)
prompt = "User: 你好，你是谁？\n\nAssistant: "
预测(model, prompt, 预测长度=10000)

def 周期训练(模型, 训练文件, 总训练轮数=9999999999999, 批量=5, 初始学习率=1e-4):
    """Simple fine-tuning loop over a JSONL file of {prompt, response} pairs.

    Every `批量` samples the weights are saved into one of the rotating
    checkpoint slots rwkv{n}.pth (n drawn from the module-level 保存文件序列),
    then a short sample generation is printed for inspection.

    模型:       RWKV instance to train (its frozen parts keep requires_grad=False)
    训练文件:    path to a JSONL file, one {"prompt":…, "response":…} per line
    总训练轮数:  passes over the data (default: effectively forever)
    批量:       save/preview every this many samples
    初始学习率:  AdamW learning rate
    """
    with open(训练文件, 'r', encoding='utf-8') as f:
        全部训练数据 = [json.loads(line) for line in f]

    优化器 = optim.AdamW(模型.parameters(), lr=初始学习率)
    损失函数 = nn.CrossEntropyLoss()
    保存文件编号 = 0
    保存文件序号 = 保存文件序列[保存文件编号]
    for 轮数 in range(总训练轮数):
        周期 = 0
        for 训练文本 in 全部训练数据:
            周期 += 1
            模型.reset()  # clear recurrent state between samples
            训练数据 = "User: " + 训练文本['prompt'] + "\n\nAssistant: " + 训练文本['response'][:10]
            模型.train()
            训练数据 = tokenizer.encode(训练数据)
            训练数据 = torch.tensor(训练数据)
            # Next-token prediction: input/target shifted by one position.
            输入 = 训练数据[:-1].unsqueeze(0)
            目标 = 训练数据[1:].unsqueeze(0)
            输出 = 模型(输入)
            损失 = 损失函数(输出.view(-1, 输出.size(-1)), 目标.view(-1))
            损失.backward()
            优化器.step()
            优化器.zero_grad()

            print(f"轮数：{轮数+1}, 周期：{周期},损失: {损失:.2f},数据量：{len(训练数据)}")
            if 周期 % 批量 == 0:
                # Advance the rotating checkpoint slot, wrapping at the end.
                if 保存文件序号 == 保存文件序列[-1]:
                    保存文件序号 = 保存文件序列[0]
                    保存文件编号 = 0
                else:
                    保存文件编号 += 1
                    保存文件序号 = 保存文件序列[保存文件编号]
                torch.save(模型.state_dict(), f"rwkv{保存文件序号}.pth")
                文本 = "User: " + 训练文本['prompt'] + "\n\nAssistant: "
                # BUG FIX: the preview prompt was previously wrapped in
                # "User: …\n\nAssistant: " a second time, producing
                # "User: User: …" — pass 文本 directly instead.
                预测(模型, 文本, 预测长度=100)


# Kick off fine-tuning on the same JSONL file used for the warm-up above.
周期训练(model,"测试.jsonl")



