from transformers import AutoModelForCausalLM, AutoTokenizer
import time
import chainlit as cl
from fastapi import FastAPI
from chainlit.utils import mount_chainlit
from chainlit.types import ThreadDict
from openai import AsyncOpenAI
from mcp import ClientSession
from typing import Dict, Optional
from fastapi import Request, Response
from chainlit.input_widget import Select, Switch, Slider
import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from datasets import load_dataset, Audio

from transformers import AutoTokenizer
from datasets import load_dataset
import json
import os
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers
from transformers import AutoModelForCausalLM

directory_path = "./tokenizer"
 
@cl.set_starters
async def set_starters():
    """Build the starter prompts shown on the initial chat screen."""
    icon_path = "/public/idea.svg"
    commands = ["tokenizer", "encode_decode", "trainer"]
    return [
        cl.Starter(label=command, message=command, icon=icon_path)
        for command in commands
    ]

def 加载数据集():
    """Load the rotten_tomatoes train split and rename its label column.

    Returns:
        The train split as a ``datasets.Dataset`` with the ``label`` column
        renamed to ``labels`` (the name HF ``Trainer`` workflows expect).
    """
    print('start 加载数据集')
    dataset = load_dataset("cornell-movie-review-data/rotten_tomatoes")
    dataset = dataset["train"]
    # rename_column is NOT in-place: it returns a new Dataset, so the result
    # must be captured (the original discarded it). The dataset's columns are
    # "text" and "label" — there is no "act" column, which would have raised.
    dataset = dataset.rename_column("label", "labels")

    print(dataset)
    # There is no "prompt" column either; show the last review's text instead.
    print(dataset[-1]["text"])
    return dataset
    

def tokenizerFun(text: str):
    """Tokenize *text* with a locally cached tokenizer.

    Loads the tokenizer from ``directory_path`` when a cached copy exists;
    otherwise downloads ``bert-base-uncased`` and caches it there for
    subsequent calls.

    Args:
        text: The input string to tokenize.

    Returns:
        The tokenizer's encoded output as PyTorch tensors.
    """
    if os.path.exists(directory_path):
        # Cache hit: just load it. The original also re-saved the tokenizer
        # back to the same directory here, which was a pointless write.
        tokenizer = AutoTokenizer.from_pretrained(directory_path)
    else:
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        tokenizer.save_pretrained(directory_path)

    return tokenizer(text, return_tensors="pt")

def encode_decode(text: str):
    """Round-trip *text* through the cached tokenizer: encode, then decode.

    Falls back to downloading ``bert-base-uncased`` (and caching it under
    ``directory_path``) when no local tokenizer exists yet — the original
    crashed in that case, unlike ``tokenizerFun``.

    Args:
        text: The input string to round-trip.

    Returns:
        The text reconstructed by decoding the token ids.
    """
    if os.path.exists(directory_path):
        tokenizer = AutoTokenizer.from_pretrained(directory_path)
    else:
        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        tokenizer.save_pretrained(directory_path)

    token_ids = tokenizer.encode(text)
    print(token_ids)
    return tokenizer.decode(token_ids)

def trainerFunc():
    """Train a small Unigram tokenizer on a few sentences and round-trip one.

    Builds a byte-level Unigram tokenizer, trains it on seven Zen-of-Python
    lines, then encodes and decodes the first sentence.

    Returns:
        The decoded form of the first training sentence.
    """
    tokenizer = Tokenizer(models.Unigram())
    tokenizer.normalizer = normalizers.NFKC()
    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
    # Use the ByteLevel decoder to match the ByteLevel pre-tokenizer; the
    # original WordPiece decoder would leave byte-level markers in the output.
    tokenizer.decoder = decoders.ByteLevel()
    trainer = trainers.UnigramTrainer(
        vocab_size=20000,
        initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        special_tokens=["<PAD>", "<BOS>", "<EOS>"],
    )
    # The original list was missing commas, so implicit string concatenation
    # collapsed all seven sentences into a single list element.
    data = [
        "Beautiful is better than ugly.",
        "Explicit is better than implicit.",
        "Simple is better than complex.",
        "Complex is better than complicated.",
        "Flat is better than nested.",
        "Sparse is better than dense.",
        "Readability counts.",
    ]
    tokenizer.train_from_iterator(data, trainer=trainer)

    encoding = tokenizer.encode(data[0])
    print(encoding)
    # Tokenizer.encode returns an Encoding object; decode() needs its id list
    # (passing the Encoding itself, as the original did, raises a TypeError).
    return tokenizer.decode(encoding.ids)

@cl.on_message
async def on_message(message: cl.Message):
    """Dispatch an incoming chat command to the matching tokenizer demo.

    Recognized commands are ``tokenizer``, ``encode_decode`` and ``trainer``
    (matching the starter prompts); each replies with its result. Any other
    input is ignored. A fixed demo sentence is used as the tokenizer input.
    """
    demo_text = "how are you ?"
    command = message.content

    if command == 'tokenizer':
        output = tokenizerFun(demo_text)
        # The original computed this result and never replied; send it so
        # the user actually sees the encoded tensors.
        await cl.Message(content=str(output)).send()
    elif command == 'encode_decode':
        await cl.Message(content=encode_decode(demo_text)).send()
    elif command == 'trainer':
        await cl.Message(content=trainerFunc()).send()


