import os
import time

import numpy as np
import soundfile as sf
import torch
from datasets import load_dataset
from diffusers import AutoPipelineForText2Image
from PIL import Image
from rembg import remove
from transformers import AutoTokenizer, SpeechT5Processor, SpeechT5HifiGan, SpeechT5ForTextToSpeech

from ipex_llm import optimize_model
from ipex_llm.transformers import AutoModel


# Text generation
def gentext(prompt):
    """Generate a chat completion for ``prompt`` with GLM-4 on an Intel XPU.

    Loads ZhipuAI/glm-4-9b-chat through ipex-llm with 4-bit weight
    quantization, runs one warmup generation (required before timing on
    the XPU), then times a second generation of up to 32 new tokens and
    prints the prompt, output and elapsed time.

    :param prompt: user message inserted into the GLM-4 chat template.
    :return: decoded model output (special tokens stripped).
    """
    GLM4_PROMPT_FORMAT = "<|user|>\n{prompt}\n<|assistant|>"

    # NOTE(review): 'ZhipuAI/...' looks like a ModelScope model id, but
    # model_hub='huggingface' is passed below -- confirm the hub matches.
    model_path = "ZhipuAI/glm-4-9b-chat"

    model = AutoModel.from_pretrained(model_path,
                                      load_in_4bit=True,  # 4-bit weight-only quantization
                                      optimize_model=True,
                                      trust_remote_code=True,
                                      use_cache=True,
                                      model_hub='huggingface')
    model = model.half().to("xpu")

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path,
                                              trust_remote_code=True)

    # Generate predicted tokens
    with torch.inference_mode():
        prompt = GLM4_PROMPT_FORMAT.format(prompt=prompt)
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')

        # ipex_llm model needs a warmup, then inference time can be accurate
        output = model.generate(input_ids,
                                max_new_tokens=32)
        # Drain the warmup's queued async XPU work before starting the timer;
        # otherwise leftover kernels are billed to the timed run below.
        torch.xpu.synchronize()

        st = time.time()
        output = model.generate(input_ids,
                                max_new_tokens=32)
        torch.xpu.synchronize()
        end = time.time()

        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
        print(f'Inference time: {end-st} s')
        print('-'*20, 'Prompt', '-'*20)
        print(prompt)
        print('-'*20, 'Output', '-'*20)
        print(output_str)
        return output_str

# Image generation
def genimg(prompt, num_inference_steps=20):
    """Render ``prompt`` to an image with SDXL-base on an Intel XPU.

    Loads stabilityai/stable-diffusion-xl-base-1.0 in fp16, applies
    ipex-llm optimization, runs a warmup pass, then times a second pass
    and saves the resulting image as a PNG.

    :param prompt: text description of the desired image.
    :param num_inference_steps: diffusion denoising steps (default 20,
        the previously hard-coded value).
    :return: path of the saved PNG file.
    """
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,
        use_safetensors=True,
    )
    # low_bit=None: keep fp16 weights, apply only ipex-llm graph optimizations
    pipe = optimize_model(pipe, low_bit=None)
    pipe.to("xpu")

    with torch.inference_mode():
        # warmup — first call initializes/compiles XPU kernels
        image = pipe(prompt=prompt, num_inference_steps=num_inference_steps).images[0]

        # start inference
        st = time.time()
        image = pipe(prompt=prompt, num_inference_steps=num_inference_steps).images[0]
        end = time.time()
        print(f'Inference time: {end-st} s')

        # The timestamp makes the filename unique; make sure the output
        # folder exists so image.save does not fail on first use.
        os.makedirs('../results/img', exist_ok=True)
        save_path = f'../results/img/{end}.png'
        image.save(save_path)
        return save_path
         
# Text-to-speech synthesis
def tts(text):
    """Synthesize ``text`` to a 16 kHz WAV file with SpeechT5 on an Intel XPU.

    Loads the SpeechT5 TTS model plus the HiFi-GAN vocoder, applies
    ipex-llm low-bit optimization (keeping the two postnet output heads
    unconverted for audio quality), and voices the text with a fixed
    speaker x-vector from the CMU ARCTIC dataset.

    :param text: text to speak.
    :return: path of the saved WAV file.
    """
    model_path = 'microsoft/speecht5_tts'
    vocoder_path = 'microsoft/speecht5_hifigan'
    dataset_path = 'Matthijs/cmu-arctic-xvectors'

    processor = SpeechT5Processor.from_pretrained(model_path)
    model = SpeechT5ForTextToSpeech.from_pretrained(model_path)
    vocoder = SpeechT5HifiGan.from_pretrained(vocoder_path)

    # With only one line to enable IPEX-LLM optimization on model.
    # Skip optimizing these two modules to get higher audio quality.
    # When running LLMs on Intel iGPUs on Windows, passing
    # cpu_embedding=True to optimize_model keeps the memory-intensive
    # embedding layer on the CPU instead of the iGPU.
    model = optimize_model(model, modules_to_not_convert=["speech_decoder_postnet.feat_out",
                                                          "speech_decoder_postnet.prob_out"])
    model = model.to('xpu')
    vocoder = vocoder.to('xpu')

    inputs = processor(text=text, return_tensors="pt").to('xpu')

    # load xvector containing speaker's voice characteristics from a dataset
    embeddings_dataset = load_dataset(dataset_path, split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0).to('xpu')

    with torch.inference_mode():
        # ipex_llm model needs a warmup, then inference time can be accurate
        speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
        # Drain the warmup's queued async XPU work so it is not billed
        # to the timed run below.
        torch.xpu.synchronize()

        st = time.time()
        speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
        torch.xpu.synchronize()
        end = time.time()
        print(f"Inference time: {end-st} s")

    # Ensure the output folder exists; timestamp keeps the name unique.
    os.makedirs('../results/audio', exist_ok=True)
    save_path = f"../results/audio/{end}.wav"
    sf.write(save_path, speech.to('cpu').numpy(), samplerate=16000)
    return save_path


# Smart background removal (matting)
def removebg(image):
    """Remove the background from an image file and save the cut-out as PNG.

    :param image: path to the input image file.
    :return: path of the saved background-free PNG.
    """
    # Context manager releases the file handle; avoid shadowing builtin input().
    with Image.open(image) as src:
        cutout = remove(src)
    end = time.time()  # timestamp used only to build a unique filename
    # Bug fix: the result is an image, so save it as a PNG under results/img
    # (it was previously written to ../results/audio/<ts>.wav).
    os.makedirs('../results/img', exist_ok=True)
    output_path = f"../results/img/{end}.png"
    cutout.save(output_path)
    return output_path


