import os
import io
import base64
import asyncio
import logging
import torch
import json

from typing import Optional, Union
from wechaty_puppet import FileBox
from wechaty import Wechaty, Contact, MessageType
from wechaty.user import Message, Room

from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Configure root logging once at import time; all module loggers inherit this.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(filename)s <%(funcName)s> %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)

log = logging.getLogger(__name__)

# set model path
# model.json maps model roles to model paths/ids, e.g.
# {"diffusion": "<path>", "translate": "<path>"} — TODO confirm exact schema.
with open('/bot/main/model.json') as f:
    model_dict = json.load(f)

diff_model_path = model_dict["diffusion"]
tran_model_path = model_dict["translate"]

# diffusion model
# Load Stable Diffusion in fp16 and move it to the GPU; swap in the
# DPM-Solver++ multistep scheduler (good quality with fewer steps) and
# enable attention slicing to lower peak VRAM usage per generation.
pipe = StableDiffusionPipeline.from_pretrained(
    diff_model_path, torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()

# translate model
# Seq2seq translation model used to turn chat text into an English prompt.
trans_tokenizer = AutoTokenizer.from_pretrained(tran_model_path)
trans_model = AutoModelForSeq2SeqLM.from_pretrained(tran_model_path)
# NOTE(review): setting tgt_lang on the tokenizer may not by itself force
# English output from model.generate() for all architectures (some need a
# forced_bos_token_id) — verify against the actual model family in use.
trans_tokenizer.tgt_lang = "en"

async def message(msg: Message) -> None:
    """Handle an incoming message.

    Responds only when the message is in a room and the bot itself is
    mentioned: the mention text is translated to an English prompt, a
    Stable Diffusion image is generated from it, and the image is sent
    back into the conversation.

    Args:
        msg: The incoming Wechaty message.
    """
    text = await msg.mention_text()
    log.info('received mention text: %s', text)
    room = msg.room()
    mentioned = await msg.mention_self()
    # Guard clause: ignore direct messages and rooms where we were not mentioned.
    if not (room and mentioned):
        return

    # Translate the chat text into an English prompt for the diffusion model.
    # no_grad avoids retaining autograd state during pure inference.
    encoded_zh = trans_tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        generated_tokens = trans_model.generate(**encoded_zh)
    prompt = trans_tokenizer.batch_decode(
        generated_tokens, skip_special_tokens=True)[0]
    log.info('prompt: %s', prompt)

    # Generate the image, persist it, and reply with the file.
    image = pipe(prompt).images[0]
    image_path = './try.png'
    image.save(image_path)
    file_box = FileBox.from_file(image_path, name=None)
    await msg.say(file_box)
    # Release cached GPU memory between requests to keep VRAM headroom.
    torch.cuda.empty_cache()

# Global bot instance; None until main() creates and starts it.
bot: Optional[Wechaty] = None


async def main() -> None:
    """Create the global Wechaty bot, attach the message handler, and run it."""
    # pylint: disable=W0603
    global bot
    wechaty_bot = Wechaty().on('message', message)
    bot = wechaty_bot
    await wechaty_bot.start()


# Guard the entry point so importing this module does not start the bot.
if __name__ == "__main__":
    asyncio.run(main())
