import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from glob import glob
from tqdm import tqdm
from model import SamOut

import polars as pl
from collections import Counter


def train():
    """Pretrain SamOut on the SkyPile token-id shards.

    Builds the vocabulary (special tokens first so their ids are stable),
    restores weights from ``pretrain_768.pth``, then streams the shard
    files two at a time: each chunk is loaded, shuffled, trained on, and
    the checkpoint plus running epoch loss are written out.

    Side effects: overwrites ``pretrain_768.pth`` after every chunk and
    appends the chunk's mean loss to the ``loss916`` pickle.
    """
    voc = pd.read_pickle("total_voc_new.pkl")["voc"]
    # Special tokens are prepended so indices 0-7 are fixed across runs
    # (index 3 = "<|pad|>" is relied on by the loss's ignore_index below).
    voc = ["<|sos|>", "<|user|>", "<|agent|>", "<|pad|>", "<|history|>", "<|unk|>", "<|end|>", "<|next|>"] + sorted(
        set(voc))

    net = SamOut(len(voc), 1024 + 512, 64, 16)
    # Total trainable-parameter count. numel() is correct for any tensor
    # rank; the previous shape[0]*shape[1] form undercounted >2-D params.
    print(sum(int(p.numel()) for p in net.parameters()))

    net.load_state_dict(torch.load("pretrain_768.pth"))
    net.to("cuda")

    opt = torch.optim.Adam(params=net.parameters(), lr=0.00003)
    # ignore_index=3 masks "<|pad|>" positions out of the loss.
    loss_func0 = torch.nn.CrossEntropyLoss(ignore_index=3)

    bar = tqdm(range(10))
    steps = 0
    epoch_loss = []
    batch_size = 25

    for epoch in bar:
        paths = glob("F:/skypile_train_token_id/*.pkl")
        # Skip the first third of the shards — presumably consumed by an
        # earlier run; TODO confirm.
        paths = paths[len(paths) // 3:]
        for ii in range(0, len(paths), 2):
            # BUG FIX: reset the buffer for every two-file chunk and train
            # once per chunk (matching train_single). Previously data_set
            # accumulated across all files, so each new file re-trained on
            # everything loaded so far and memory grew without bound.
            data_set = []
            for one_path in paths[ii:ii + 2]:
                data_set += pd.read_pickle(one_path, compression="zip")
            np.random.shuffle(data_set)

            loss_list = []
            for i in range(0, len(data_set), batch_size):
                j = i + batch_size
                # Build the batch tensor once; slice it for inputs/targets.
                batch = torch.Tensor(data_set[i:j])
                # Teacher forcing: predict token t+1 from tokens <= t.
                out0, _ = net(batch[:, :-1].int().to("cuda"))
                loss = loss_func0(out0.reshape([-1, out0.shape[-1]]),
                                  batch[:, 1:].reshape([-1]).long().to("cuda"))

                loss_list.append(loss.item())
                bar.set_description(
                    "epoch___{}____loss___{:.6f}____steps___{}".format(epoch, np.mean(loss_list), steps))
                opt.zero_grad()
                loss.backward()
                opt.step()
                steps += batch_size

            # Checkpoint after every chunk so progress survives interruption.
            torch.save(net.state_dict(), "pretrain_768.pth")
            epoch_loss.append(np.mean(loss_list))
            pd.to_pickle(epoch_loss, "loss916")




def gen_sft_single_data_align():
    """Tokenise the single-turn SFT CSV into pickle shards.

    Each (history, question, answer) row becomes
    ``["<|user|>", *q_tokens, "<|agent|>", *a_tokens]``. Shards of just
    over one million samples are written to the sft directory, named by
    the running row index at flush time.
    """
    from jieba import lcut
    pre_data = pl.read_csv("F:/ChatTTS/nlp_data_set/sft_data_single.csv")
    pre_data = pre_data.to_numpy().tolist()
    data_set = []
    index_id = 0
    # h (the history column) is intentionally unused: this is single-turn data.
    for h, q, a in tqdm(pre_data):
        index_id += 1
        one = ["<|user|>"] + lcut(q) + ["<|agent|>"] + lcut(a)
        data_set.append(one)

        if len(data_set) > 1000000:
            pd.to_pickle(data_set, "D:/SamOutV2/SamOutV2/sft/sft_data_single_{}.pkl".format(index_id))
            data_set = []
    # BUG FIX: only write the remainder when it is non-empty. Previously,
    # if the buffer flushed on the very last row, this unconditional dump
    # overwrote that shard (same index_id filename) with an empty list.
    if data_set:
        pd.to_pickle(data_set, "D:/SamOutV2/SamOutV2/sft/sft_data_single_{}.pkl".format(index_id))


def train_single():
    """Fine-tune the pretrained SamOut checkpoint on single-turn SFT shards.

    Loads the vocabulary and ``pretrain_768.pth`` weights, then iterates
    the SFT shard files two at a time; after each chunk the model is saved
    to ``pretrain_sft_single_1024.pth`` and the chunk's mean loss is
    appended to the ``loss916`` pickle.
    """
    vocab = pd.read_pickle("total_voc_new.pkl")["voc"]
    # Special tokens come first so their ids (0-7) are stable; id 3 is
    # "<|pad|>", which the loss below ignores.
    vocab = ["<|sos|>", "<|user|>", "<|agent|>", "<|pad|>", "<|history|>", "<|unk|>", "<|end|>", "<|next|>"] + sorted(
        set(vocab))

    model = SamOut(len(vocab), 1024 + 512, 64, 16)

    model.load_state_dict(torch.load("pretrain_768.pth"))
    model.to("cuda")

    optimizer = torch.optim.Adam(params=model.parameters(), lr=0.00003)
    criterion = torch.nn.CrossEntropyLoss(ignore_index=3)

    progress = tqdm(range(1))
    step_count = 0
    epoch_losses = []

    for epoch in progress:
        shard_paths = glob("D:/SamOutV2/SamOutV2/sft_id/*.pkl")
        np.random.shuffle(shard_paths)
        for chunk_start in range(0, len(shard_paths), 2):
            samples = []
            for shard in shard_paths[chunk_start:chunk_start + 2]:
                samples += pd.read_pickle(shard, compression="zip")

            np.random.shuffle(samples)

            batch_losses = []
            for start in range(0, len(samples), 25):
                raw_batch = np.array(samples[start:start + 25])
                # NaN slots become id 5, which is "<|unk|>" in the vocab
                # built above.
                raw_batch[np.isnan(raw_batch)] = 5

                # Next-token objective: inputs are tokens[:-1], targets tokens[1:].
                logits, _ = model(torch.Tensor(raw_batch)[:, :-1].int().to("cuda"))

                loss = criterion(logits.reshape([-1, logits.shape[-1]]),
                                 torch.Tensor(raw_batch)[:, 1:].reshape([-1]).long().to("cuda"))

                batch_losses.append(loss.item())
                progress.set_description(
                    "epoch___{}____loss___{:.6f}____steps___{}".format(epoch, np.mean(batch_losses), step_count))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                step_count += 25

            torch.save(model.state_dict(), "pretrain_sft_single_1024.pth")
            # eval_model()
            epoch_losses.append(np.mean(batch_losses))
            pd.to_pickle(epoch_losses, "loss916")


if __name__ == '__main__':


    # Pipeline stages — uncomment the one to run. By default only the
    # single-turn SFT fine-tuning stage executes.
    # train()
    # gen_sft_single_data_align()
    train_single()
