from transformers import AutoModelForCausalLM, AutoTokenizer
import time
import chainlit as cl
from fastapi import FastAPI
from chainlit.utils import mount_chainlit
from chainlit.types import ThreadDict
from openai import AsyncOpenAI
from mcp import ClientSession
from typing import Dict, Optional
from fastapi import Request, Response
from chainlit.input_widget import Select, Switch, Slider
import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from datasets import load_dataset, Audio

from transformers import AutoTokenizer
from datasets import load_dataset
import json
import os
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers
from transformers import pipeline
from PIL import Image
import requests


 
@cl.set_starters
async def set_starters():
    """Build the starter buttons shown on the welcome screen.

    Each starter's ``message`` is the exact string that ``on_message``
    dispatches on, so those strings must stay in sync with the handler
    (including the historical ``auestionAnswering`` spelling).
    """
    return [
        cl.Starter(
            label="音频分类",
            message="audio-classification",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="语音识别",
            message="automaticSpeechRecognition",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="深度估计",
            message="depthEstimation",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="文档问答机器人",
            message="documentQuestionAnswering",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            # Label typo fixed: 铁证 ("ironclad evidence") -> 特征 ("feature").
            label="特征提取",
            message="featureExtraction",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="填充蒙版",
            message="fillMasker",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="图像分类",
            message="imageClassification",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            # Label typo fixed: 铁证 -> 特征.
            label="图像特征提取",
            message="imageFeatureExtraction",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="图像分割",
            message="imageSegmentation",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="图像文本到文本",
            message="imageTextToText",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="图像到图像",
            message="imageToImage",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="图像到文本",
            message="imageToText",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="蒙版生成",
            message="maskGeneration",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="对象检测",
            message="objectDetection",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="问题回答",
            # Kept as-is: must match on_message and auestionAnsweringTest().
            message="auestionAnswering",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="表格问题回答",
            message="tableQuestionAnswering",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="文本分类",
            message="textClassification",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="文本生成",
            message="textGeneration",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="文本转音频",
            message="textToAudio",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="ner",
            message="tokenClassification",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="看图回答问题",
            message="visualQuestionAnswering",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="零样本分类",
            message="zeroShotClassification",
            icon="/public/idea.svg",
        ),
        cl.Starter(
            label="零样本对象检测",
            message="zeroShotObjectDetection",
            icon="/public/idea.svg",
        ),
    ]

def audioClassificationTest():
    """Classify a sample audio clip by keyword; audio decoding needs ffmpeg."""
    clf = pipeline(task="audio-classification", model="superb/wav2vec2-base-superb-ks")
    return clf("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
def automaticSpeechRecognitionTest():
    """Transcribe a sample audio clip with Whisper; audio decoding needs ffmpeg.

    Reference: https://blog.csdn.net/omonday1234/article/details/147353919
    """
    asr = pipeline(model="openai/whisper-base")
    return asr("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")

def  depthEstimationTest():
    """Estimate per-pixel depth for a sample COCO image and print the result."""
    estimator = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
    result = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result)
    return result

def  documentQuestionAnsweringTest():
    """Answer a question about a sample invoice image (document QA).

    Reference: https://blog.csdn.net/u014034795/article/details/118400156
    """
    doc_qa = pipeline(model="impira/layoutlm-document-qa")
    return doc_qa(
        image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
        question="What is the invoice number?",
    )

def featureExtractionTest():
    """Extract BERT hidden-state features for a short sentence and print them."""
    fe = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
    features = fe("This is a simple test.", return_tensors=True)
    print(features)
    return features

def fillMaskerTest():
    """Fill the [MASK] token of a short sentence with BERT's top candidates."""
    masker = pipeline(model="google-bert/bert-base-uncased")
    return masker("This is a simple [MASK].")

def imageClassificationTest():
    """Classify a sample parrot image with BEiT and print the labels."""
    clf = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k", use_fast=False)
    labels = clf("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    print(labels)
    return labels

def imageFeatureExtractionTest():
    """Extract ViT image features for a sample parrot image and print them."""
    fe = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
    features = fe(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        return_tensors=True,
    )
    print(features)
    return features

def imageSegmentationTest():
    """Run panoptic segmentation on a sample image; return the segment count."""
    seg = pipeline(model="facebook/detr-resnet-50-panoptic")
    result = seg("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    return len(result)


def imageTextToTextTest():
    """Continue a caption prompt for a sample image (image-text-to-text)."""
    captioner = pipeline(task="image-text-to-text", model="Salesforce/blip-image-captioning-base")
    return captioner(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        text="A photo of",
    )

def imageToImageTest():
    """Upscale a 64x64-downsized sample image 2x and return the new size."""
    upscale = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
    response = requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True)
    small = Image.open(response.raw).resize((64, 64))
    result = upscale(small)
    print(result.size)
    return result.size

def imageToTextTest():
    """Generate a caption for a sample parrot image."""
    captioner = pipeline(model="ydshieh/vit-gpt2-coco-en")
    return captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")

def maskGenerationTest():
    """Generate SAM segmentation masks for a sample COCO image and print them."""
    generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
    masks = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(masks)
    return masks

def objectDetectionTest():
    """Detect objects in a sample parrot image with DETR."""
    detector = pipeline(model="facebook/detr-resnet-50")
    return detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")

def auestionAnsweringTest():
    """Answer an extractive QA question over a short context.

    NOTE(review): the name looks like a typo of "questionAnswering", but the
    same spelling is used by the message dispatcher, so it is kept unchanged.
    """
    qa = pipeline(model="deepset/roberta-base-squad2")
    return qa(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")


def tableQuestionAnsweringTest():
    """Answer a natural-language query against a small in-memory table (TAPAS)."""
    qa = pipeline(model="google/tapas-base-finetuned-wtq")
    table = {
        "Repository": ["Transformers", "Datasets", "Tokenizers"],
        "Stars": ["36542", "4512", "3934"],
        "Contributors": ["651", "77", "34"],
        "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
    }
    return qa(query="How many stars does the transformers repository have?", table=table)

def textClassificationTest():
    """Run sentiment analysis on a single sentence."""
    clf = pipeline(model="distilbert/distilbert-base-uncased-finetuned-sst-2-english")
    return clf("This movie is disgustingly good !")

def textGenerationTest():
    """Continue a prompt with GPT-2 using greedy (non-sampled) decoding."""
    generator = pipeline(model="openai-community/gpt2")
    return generator("I can't believe you did such a ", do_sample=False)

def textToAudioTest():
    """Synthesize speech with Bark and return the output sampling rate."""
    tts = pipeline(model="suno/bark-small")
    result = tts("Hey it's HuggingFace on the phone!")
    # The waveform itself (result["audio"]) is discarded; only the rate is used.
    return result["sampling_rate"]

def tokenClassificationTest():
    """Run French NER over a sample sentence, merging subword pieces."""
    ner = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
    return ner("Je m'appelle jean-baptiste et je vis à montréal")

def  visualQuestionAnsweringTest():
    """Answer a question about a sample image (visual QA with ViLT)."""
    vqa = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
    return vqa(
        question="What is she wearing ?",
        image="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png",
    )

def zeroShotClassificationTest():
    """Zero-shot classify a support request against candidate labels."""
    zsc = pipeline(model="facebook/bart-large-mnli")
    return zsc(
        "I have a problem with my iphone that needs to be resolved asap!!",
        candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
    )

def zeroShotObjectDetectionTest():
    """Detect free-form candidate labels in a sample image with OWL-ViT."""
    zsd = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
    return zsd(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "couch"],
    )
    

# Reply formatters: each takes a handler's return value and produces the
# message content string (or raw value) exactly as the original branches did.
def _raw(output):
    """Pass the handler's output through unchanged."""
    return output

def _as_json(output):
    """Serialize the handler's output as a JSON string."""
    return json.dumps(output)

def _ok(output):
    """Ignore the output and just acknowledge completion."""
    return 'ok'

def _ok_str(output):
    """Acknowledge completion and append the stringified output."""
    return 'ok:' + str(output)

# Dispatch table: starter message -> (handler, formatter). Keys must match the
# `message` strings emitted by set_starters (including the historical
# 'auestionAnswering' spelling).
_TASK_DISPATCH = {
    'audio-classification': (audioClassificationTest, _raw),
    'automaticSpeechRecognition': (automaticSpeechRecognitionTest, _raw),
    'depthEstimation': (depthEstimationTest, _ok),
    'documentQuestionAnswering': (documentQuestionAnsweringTest, _as_json),
    'featureExtraction': (featureExtractionTest, _ok),
    'fillMasker': (fillMaskerTest, _as_json),
    'imageClassification': (imageClassificationTest, _as_json),
    'imageFeatureExtraction': (imageFeatureExtractionTest, _ok),
    'imageSegmentation': (imageSegmentationTest, _ok_str),
    'imageTextToText': (imageTextToTextTest, _as_json),
    'imageToImage': (imageToImageTest, _as_json),
    'imageToText': (imageToTextTest, _as_json),
    'maskGeneration': (maskGenerationTest, _as_json),
    'objectDetection': (objectDetectionTest, _as_json),
    'auestionAnswering': (auestionAnsweringTest, _as_json),
    'tableQuestionAnswering': (tableQuestionAnsweringTest, _as_json),
    'textClassification': (textClassificationTest, _as_json),
    'textGeneration': (textGenerationTest, _as_json),
    'textToAudio': (textToAudioTest, _as_json),
    'visualQuestionAnswering': (visualQuestionAnsweringTest, _as_json),
    'zeroShotClassification': (zeroShotClassificationTest, _as_json),
    'zeroShotObjectDetection': (zeroShotObjectDetectionTest, _as_json),
}

@cl.on_message
async def on_message(message: cl.Message):
    """Dispatch an incoming chat message to the matching demo task.

    Replaces the original 22-branch if-chain with a table lookup; each entry
    preserves the original branch's exact reply formatting. Unknown messages
    are silently ignored, as before.
    """
    entry = _TASK_DISPATCH.get(message.content)
    if entry is None:
        return
    handler, formatter = entry
    # NOTE(review): handlers are synchronous and may download models /
    # fetch URLs, blocking the event loop — consider cl.make_async.
    output = handler()
    await cl.Message(content=formatter(output)).send()

       
    


