from transformers import AutoModelForCausalLM, AutoTokenizer, PaliGemmaProcessor
import time
import chainlit as cl
from fastapi import FastAPI
from chainlit.utils import mount_chainlit
from chainlit.types import ThreadDict
from openai import AsyncOpenAI
from mcp import ClientSession
from typing import Dict, Optional
from fastapi import Request, Response
from chainlit.input_widget import Select, Switch, Slider
import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from datasets import load_dataset, Audio

from transformers import AutoTokenizer
from datasets import load_dataset
import json
import os
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers
from transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
# NOTE(review): appears unused anywhere in this file — confirm before removing.
directory_path = "./tokenizer"
from PIL import Image
import requests
from transformers import AutoImageProcessor
import torch
from transformers.video_utils import load_video
from transformers import AutoVideoProcessor
from transformers import AutoBackbone
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration,PaliGemmaForConditionalGeneration
from PIL import Image
import requests

def get_current_temperature(location: str, unit: str) -> float:
    """
    Get the current temperature at a location.
    
    Args:
        location: The location to get the temperature for, in the format "City, Country"
        unit: The unit to return the temperature in. (choices: ["celsius", "fahrenheit"])
    """
    # Placeholder stub: a real implementation would query a weather service.
    # The docstring above is kept verbatim — transformers parses it to build
    # the tool's JSON schema, so its wording is part of the runtime contract.
    placeholder_temperature = 22.0
    return placeholder_temperature

def get_current_wind_speed(location: str) -> float:
    """
    Get the current wind speed in km/h at a given location.
    
    Args:
        location: The location to get the wind speed for, in the format "City, Country"
    """
    # Placeholder stub: a real implementation would query a weather service.
    # Docstring kept verbatim — it is parsed into the tool's JSON schema.
    placeholder_wind_speed = 6.0
    return placeholder_wind_speed

# Tool callables for LLM tool-calling; presumably passed to a chat template's
# `tools=` argument — not used anywhere in the visible part of this file.
tools = [get_current_temperature, get_current_wind_speed]

 
@cl.set_starters
async def set_starters():
    """Build the starter shortcuts shown on the initial chat screen.

    Each starter sends a fixed message that `on_message` dispatches on.
    """
    # (label shown in the UI, message sent when clicked)
    starter_specs = [
        ("tokenizer", "tokenizer"),
        ("imageProcessor", "imageProcessor"),
        ("videoProcessor", "videoProcessor"),
        ("特征图", "backbone"),
        ("多模态", "multimodal"),
        ("工具使用", "tools"),
    ]
    return [
        cl.Starter(label=label, message=message, icon="/public/idea.svg")
        for label, message in starter_specs
    ]



def tokenizerTest() -> str:
    """Tokenize a sample sentence with the Gemma-2 tokenizer.

    Returns:
        A printable string representation of the tokenizer output
        (input_ids / attention_mask tensors).
    """
    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
    output = tokenizer("We are very happy to show you the 🤗 Transformers library", return_tensors="pt")
    print(output)
    # Fix: the original returned None, yet on_message sends this function's
    # return value as the chat message content. Return a string instead.
    return str(output)

def imageProcessorTest():
    """Preprocess a sample image with the ViT image processor.

    Returns:
        The processed batch (pixel_values tensor) instead of discarding it,
        so callers can inspect or reuse the result.

    Raises:
        requests.HTTPError: if the sample image download fails.
    """
    image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/image_processor_example.png"
    response = requests.get(url, stream=True)
    # Fail fast with a clear HTTP error rather than letting PIL choke on an
    # error page body.
    response.raise_for_status()
    image = Image.open(response.raw).convert("RGB")
    inputs = image_processor(image, return_tensors="pt")
    print(inputs)
    return inputs

def videoProcessorTest():
    """Preprocess a local video file with the LLaVA-OneVision video processor.

    Reads "video.mp4" from the working directory; raises if it is missing.

    Returns:
        The processed video batch instead of discarding it.
    """
    processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
    # NOTE(review): newer transformers versions return (frames, metadata) from
    # load_video — confirm against the installed version before relying on this.
    video = load_video("video.mp4")
    # torch.compile the processor for faster repeated preprocessing.
    processor = torch.compile(processor)
    processed_video = processor(video, return_tensors="pt")
    print(processed_video)
    # Fix: the original computed the result and threw it away.
    return processed_video

def backboneTest():
    """Run a Swin-Tiny backbone on a sample COCO image and return its outputs.

    Uses out_indices=(1,) so only the first stage's feature map is produced.
    """
    checkpoint = "microsoft/swin-tiny-patch4-window7-224"
    model = AutoBackbone.from_pretrained(checkpoint, out_indices=(1,))
    processor = AutoImageProcessor.from_pretrained(checkpoint)

    sample_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    sample_image = Image.open(requests.get(sample_url, stream=True).raw)

    batch = processor(sample_image, return_tensors="pt")
    outputs = model(**batch)
    print(outputs)
    return outputs

def multimodalTest() -> str:
    """Run PaliGemma visual question answering on a sample image.

    Asks "Where is the cat standing?" about a sample photo and greedily
    decodes the model's answer.

    Returns:
        The decoded answer string (newly-generated tokens only).
    """
    model_id = "google/paligemma-3b-pt-224"
    # Fix: the original loaded a second, redundant PaliGemmaProcessor after
    # already having an AutoProcessor for the same checkpoint; one suffices.
    processor = AutoProcessor.from_pretrained(model_id)
    model = PaliGemmaForConditionalGeneration.from_pretrained(
        model_id, torch_dtype=torch.bfloat16, device_map="auto"
    ).eval()

    prompt = "answer en Where is the cat standing?"
    url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
    image = Image.open(requests.get(url, stream=True).raw)

    # Fix: with device_map="auto" the model may live on GPU, so the inputs
    # must be moved to the model's device (the original left them on CPU).
    model_inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)
    input_len = model_inputs["input_ids"].shape[-1]

    with torch.inference_mode():
        generation = model.generate(**model_inputs, max_new_tokens=100, do_sample=False)
        # Strip the prompt tokens; keep only the newly generated ones.
        generation = generation[0][input_len:]
        decoded = processor.decode(generation, skip_special_tokens=True)
    print(decoded)
    # Fix: the original printed the answer but returned None.
    return decoded
        



@cl.on_message
async def on_message(message: cl.Message):
    """Dispatch an incoming chat message to the matching demo function.

    Recognized message contents map 1:1 to the demo functions defined above;
    unrecognized messages are ignored. Only the tokenizer demo's return value
    is echoed back; all other demos reply with a fixed 'ok'.
    """
    # Dispatch table replaces the original repetitive if-chain.
    handlers = {
        "tokenizer": tokenizerTest,
        "imageProcessor": imageProcessorTest,
        "backbone": backboneTest,
        "videoProcessor": videoProcessorTest,
        "multimodal": multimodalTest,
    }
    handler = handlers.get(message.content)
    if handler is None:
        return

    output = handler()

    # Fix: the original tokenizer branch sent `content=output` even when the
    # demo returned None, producing a message with None content. Only echo the
    # output when it is actually a string; otherwise fall back to 'ok'.
    if message.content == "tokenizer" and isinstance(output, str):
        content = output
    else:
        content = "ok"
    await cl.Message(content=content).send()
    


