import pickle
import random
from io import BytesIO
from transformers import BlipProcessor, BlipForConditionalGeneration
from diffusers import StableDiffusionPipeline, DDIMScheduler
from ip_adapter.ip_adapter_faceid import IPAdapterFaceIDPlus
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
from PIL import Image
import torch
import numpy as np
import cv2
import telebot
from rembg import remove
from instafilter import Instafilter
from insightface.app import FaceAnalysis
from insightface.utils import face_align
import styles

################################ CONFIG ##################################

# Telegram bot API token. NOTE(review): empty placeholder — must be filled in
# before running.
BOT_TOKEN = " "

# Inline-keyboard settings menu. Keys are bot commands; callback_data is built
# as "<command>+<button key>" and parsed back in apply_settings().
SETTINGS_MENU = {'/style':  {  'select_text':'Cтиль для генерации:',
                               'buttons':  {'fantasy':'Фэнтези',
                                            'standard':'Стандарт',
                                            'photo': 'Фото'},
                               'reply_text':'Выбран стиль: '
                               },
                 '/batch':  {  'select_text':'Количество картинок:',
                               'buttons':  {'1':'Одна',
                                            '5': 'Пять',
                                            '10':'Десять'},
                               'reply_text':'Установлено количество: '
                               }}
# Pickle file persisting per-user settings across restarts.
USERS_DB_NAME = 'users.db'

# NOTE(review): appears unused — the handlers below read/write the module
# global `loaded_style` instead; candidate for removal.
model_style_loaded = 'None'
# chat_id -> {'batch': str, 'style': str, 'need_to_stop': bool}
users = {}

# Prefer GPU when available; the SD/BLIP pipelines below are float16 and are
# effectively CUDA-only in practice.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

#######################  FUNCTIONS  #######################

def add_user(user_id):
    """Register *user_id* in the module-level ``users`` table with defaults."""
    global users
    users[user_id] = {
        'batch': '5',          # default number of images per generation
        'style': 'photo',      # default generation style
        'need_to_stop': False, # cooperative stop flag checked by the loop
    }
    print(f"[INFO] USER ADDED: {user_id}")

def save_users():
    """Pickle the in-memory user table to USERS_DB_NAME."""
    with open(USERS_DB_NAME, 'wb') as db_file:
        pickle.dump(users, db_file)

def load_users():
    """Load the pickled user table from USERS_DB_NAME into the module global.

    Leaves ``users`` unchanged (empty dict) when the file is missing or
    unreadable.  The wider except clause also covers a truncated or corrupt
    database file, which previously crashed startup with an unhandled
    UnpicklingError/EOFError.
    """
    global users
    try:
        with open(USERS_DB_NAME, 'rb') as db_file:
            # NOTE: pickle is only safe here because the file is produced
            # locally by save_users(); never load an untrusted users.db.
            users = pickle.load(db_file)
    except (OSError, pickle.UnpicklingError, EOFError):
        print('[ERROR]: NO USERS DATABASE FILE FOUND')

# Filter names understood by Instafilter; one is picked at random per image.
# Hoisted to module level so the tuple is not rebuilt on every call.
_INSTAFILTER_NAMES = (
    '1977', 'Aden', 'Amaro', 'Ashby', 'Brannan', 'Brooklyn', 'Charmes', 'Clarendon',
    'Crema', 'Dogpatch', 'Earlybird', 'Gingham', 'Ginza', 'Hefe', 'Helena', 'Hudson',
    'Inkwell', 'Juno', 'Kelvin', 'Lark', 'Lo-Fi', 'Ludwig',
    'Mayfair', 'Melvin', 'Moon', 'Nashville', 'Perpetua', 'Reyes', 'Rise', 'Sierra',
    'Skyline', 'Slumber', 'Stinson', 'Sutro', 'Toaster', 'Valencia', 'Vesper',
    'Walden', 'Willow', 'X-ProII',
)

def image_apply_instafilter(image, filter_strength = 0.5):
    """Apply a randomly chosen Instagram-style filter to *image*.

    The filtered result is blended with the original: ``filter_strength`` of
    0.0 keeps the filter fully, 1.0 keeps the original (see multiply_images).
    Returns a PIL Image.
    """
    base_image = np.array(image)
    # random.choice is the idiomatic replacement for indexing with randrange.
    filter_name = random.choice(_INSTAFILTER_NAMES)
    model = Instafilter(filter_name)
    filtered_image = model(base_image)
    return multiply_images(filtered_image, base_image, filter_strength)

def multiply_images(image1, image2, transparency):
    """Linearly blend two same-shaped uint8 arrays and return a PIL Image.

    result = image1 * (1 - transparency) + image2 * transparency
    i.e. transparency=0 yields image1, transparency=1 yields image2.
    """
    weight = float(transparency)
    blended = (image1.astype(np.float32) * (1.0 - weight)
               + image2.astype(np.float32) * weight)
    return Image.fromarray(blended.astype(np.uint8))

def isolate_on_white(image):
    """Cut the subject out of *image* (rembg) and paste it on a white canvas.

    rembg returns an RGBA image with a transparent background; its alpha
    channel is used as the paste mask. Returns an RGB PIL Image.
    """
    cutout = remove(image)
    alpha_mask = cutout.split()[3]
    canvas = Image.new("RGB", cutout.size, (255, 255, 255))
    canvas.paste(cutout, mask = alpha_mask)
    return canvas

def image_auto_cc(image):
    """Automatic brightness/contrast correction via histogram clipping.

    Clips 0.5% of pixels from each end of the grayscale cumulative histogram
    and linearly stretches the remaining range to [0, 255].
    Takes and returns a PIL Image.

    NOTE(review): the array comes from PIL and is therefore RGB, but the
    conversion uses COLOR_BGR2GRAY, swapping the R/B luminance weights —
    confirm whether that is intentional.
    """
    clip_hist_percent = 1.0
    arr = np.array(image)
    gray = cv2.cvtColor(arr, cv2.COLOR_BGR2GRAY)
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    # Cumulative histogram (replaces the original manual accumulation loop).
    accumulator = hist.cumsum()
    maximum = float(accumulator[-1])
    # Pixel count to clip from each tail.
    clip = maximum * clip_hist_percent / 100.0 / 2.0
    minimum_gray = 0
    while accumulator[minimum_gray] < clip:
        minimum_gray += 1
    maximum_gray = len(accumulator) - 1
    while accumulator[maximum_gray] >= (maximum - clip):
        maximum_gray -= 1
    if maximum_gray <= minimum_gray:
        # Degenerate (near-uniform) image: stretching would divide by zero
        # in the original code; return the input unchanged instead.
        return Image.fromarray(arr)
    alpha = 255 / (maximum_gray - minimum_gray)
    beta = -minimum_gray * alpha
    auto_result = cv2.convertScaleAbs(arr, alpha=alpha, beta=beta)
    return Image.fromarray(auto_result)

def image_detect_gender(image):
    """Caption *image* with BLIP and infer 'female', 'male' or 'failed'.

    'failed' is returned when the caption mentions neither or both of
    'man'/'woman'.  The BLIP processor+model are loaded once and cached on
    the function — the original reloaded the multi-GB checkpoint on every
    incoming photo.
    """
    if not hasattr(image_detect_gender, '_blip'):
        processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
        model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-large",
            torch_dtype = torch.float16,
            ).to(device)
        image_detect_gender._blip = (processor, model)
    processor, model = image_detect_gender._blip
    # Cast the pixel values to float16 to match the model weights: the
    # processor silently ignores a torch_dtype kwarg, so the original fed
    # float32 tensors to a half-precision model.
    inputs = processor(image, return_tensors = "pt").to(device, torch.float16)
    out = model.generate(**inputs)
    image_description = str(processor.decode(out[0], skip_special_tokens = True))
    # Exact word match on the caption; splitting avoids 'man' matching 'woman'.
    matching_words = set(image_description.split(' ')) & {'man', 'woman'}
    if len(matching_words) == 1:
        return 'female' if 'woman' in matching_words else 'male'
    return 'failed'

def make_face_embeds(image):
    """Detect a face in *image* and return (embedding_tensor, insightface_face).

    On failure returns ('detection failed', None).  Returning a 2-tuple in
    both cases fixes the original failure path, whose bare string crashed the
    caller's ``face_embeds, face = make_face_embeds(...)`` unpacking.
    """
    image_array = np.array(image.convert('RGB'))
    # NOTE(review): insightface expects `providers=` (plural); the original
    # `provider=` kwarg was silently ignored and fell back to the defaults.
    app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider'])
    app.prepare(ctx_id = 0, det_size = (640, 640))
    faces = app.get(image_array)
    if not faces:
        return ('detection failed', None)
    # Use the first detected face; shape (1, 512) for the IP-Adapter.
    faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
    return (faceid_embeds, faces[0])

def reface_preload(style):
    """Build and return an IP-Adapter FaceID pipeline for *style*.

    Loads the Stable Diffusion checkpoint configured in styles.STYLES[style],
    enables xformers attention, wraps it with IPAdapterFaceIDPlus, and records
    the loaded style in the module global ``loaded_style``.
    """
    global loaded_style
    scheduler = DDIMScheduler(
        beta_start = 0.00085,
        beta_end = 0.012,
        beta_schedule = "scaled_linear",
        clip_sample = False,
        set_alpha_to_one = False,
        steps_offset = 1,
    )
    # Safety checker disabled deliberately; prompts carry their own
    # NSFW-negative terms (see reface()).
    sd_pipeline = StableDiffusionPipeline.from_pretrained(
        styles.STYLES[style]['model_path'],
        torch_dtype = torch.float16,
        scheduler = scheduler,
        feature_extractor = None,
        safety_checker = None
    )
    sd_pipeline.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
    # Flash attention is incompatible with the VAE here; use the default op.
    sd_pipeline.vae.enable_xformers_memory_efficient_attention(attention_op=None)
    reface_pipeline = IPAdapterFaceIDPlus(
        sd_pipeline,
        "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
        "ip-adapter-faceid-plus_sd15.bin",
        device,
    )
    loaded_style = style
    return reface_pipeline

def reface(reface_pipeline, faceid_embeds, face, gender, image, style):
    """Generate one stylized portrait with the subject's face swapped in.

    Builds gender- and style-specific prompts from the styles module, crops an
    aligned 224x224 face patch for the IP-Adapter image prompt, and returns a
    single PIL image.  Any gender other than 'female' is treated as male,
    matching the original branch structure.
    """
    style_cfg = styles.STYLES[style]
    # Select the gender-specific prompt fragments once instead of duplicating
    # the whole f-string construction per branch.
    if gender == 'female':
        gender_cfg = style_cfg['female']
        gender_positive = styles.FEMALE_POSITIVE_PROMPT
        gender_negative = styles.FEMALE_NEGATIVE_PROMPT
    else:
        gender_cfg = style_cfg['male']
        gender_positive = styles.MALE_POSITIVE_PROMPT
        gender_negative = styles.MALE_NEGATIVE_PROMPT
    random_token = random.choice(gender_cfg['random_tokens'])
    positive_prompt = (f"{gender_cfg['pre_prompt']}, {random_token}, "
                       f"{gender_positive}, {style_cfg['positive']}, "
                       f"{styles.COMMON_POSITIVE_PROMPT}")
    # The common negative prompt is appended without a separator, exactly as
    # in the original concatenation.
    negative_prompt = (f"{styles.NUDE_NEGATIVE_PROMPT}, {style_cfg['negative']}, "
                       f"{gender_negative}{styles.COMMON_NEGATIVE_PROMPT}")
    image_array = np.array(image)
    # Aligned face crop used as the IP-Adapter image prompt.
    face_image = face_align.norm_crop(image_array, landmark = face.kps, image_size = 224)
    images = reface_pipeline.generate(
        prompt = positive_prompt,
        negative_prompt = negative_prompt,
        face_image = face_image,
        faceid_embeds = faceid_embeds,
        s_scale = 1.0,
        shortcut = False,
        # The original generated 4 samples but returned only images[0],
        # wasting ~75% of the GPU time per call.
        num_samples = 1,
        width = 512,
        height = 696,
        guidance_scale = float(style_cfg['guidance_scale']),
        num_inference_steps = 30,
        seed = random.randrange(9999),
        )
    return images[0]

def img2txt(save_path):
    """Return a BLIP-generated caption for the image file at *save_path*.

    The processor+model are loaded once and cached on the function — the
    original reloaded the multi-GB checkpoint on every call.
    """
    if not hasattr(img2txt, '_blip'):
        processor = BlipProcessor.from_pretrained(
            "Salesforce/blip-image-captioning-large"
            )
        model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-large",
            torch_dtype = torch.float16,
            ).to(device)
        img2txt._blip = (processor, model)
    processor, model = img2txt._blip
    raw_image = Image.open(save_path).convert('RGB')
    # Cast pixel values to float16 to match the model (the processor itself
    # ignores a torch_dtype kwarg).
    inputs = processor(raw_image, return_tensors = "pt").to(device, torch.float16)
    out = model.generate(**inputs)
    return str(processor.decode(out[0], skip_special_tokens=True))

# Module-level bot state, initialized at import time.
bot = telebot.TeleBot(BOT_TOKEN)
load_users()
# Busy flag: True while a generation batch is running (single-worker guard).
not_available = False
# Lazily created by reface_preload() on the first photo.
reface_pipeline = None
# Style the current pipeline was loaded for; compared against the user's
# selection to decide whether to reload models.
loaded_style = 'standard'

####################  TELEGRAM BOT  #######################

@bot.message_handler(commands = ['start'])
def start_message(message):
    """Handle /start: greet the user, register them if new, show settings."""
    global users
    chat_id = message.chat.id
    bot.send_message(chat_id,"Привет! Выбери стиль и количество картинок для первой генерации и присылай фото. Поменять настройки можно в меню бота.")
    if chat_id not in users:
        add_user(chat_id)
        print(users)
        save_users()
    users[chat_id]['need_to_stop'] = False
    # One inline keyboard per settings command; the active choice is
    # highlighted with » « markers.
    for command, menu in SETTINGS_MENU.items():
        keyboard = telebot.types.InlineKeyboardMarkup()
        current_choice = users[chat_id][command[1:]]
        for option, label in menu['buttons'].items():
            shown = "» " + label + " «" if option == current_choice else label
            keyboard.add(telebot.types.InlineKeyboardButton(text = shown, callback_data = command + "+" + option))
        bot.send_message(chat_id, f"⚙️ {menu['select_text']}", reply_markup=keyboard)

@bot.message_handler(commands = ['settings'])
def all_settings(message):
    """Handle /settings: re-send the inline settings keyboards."""
    global users
    chat_id = message.chat.id
    # Same keyboard construction as /start, minus greeting/registration.
    for command, menu in SETTINGS_MENU.items():
        keyboard = telebot.types.InlineKeyboardMarkup()
        current_choice = users[chat_id][command[1:]]
        for option, label in menu['buttons'].items():
            shown = "» " + label + " «" if option == current_choice else label
            keyboard.add(telebot.types.InlineKeyboardButton(text = shown, callback_data = command + "+" + option))
        bot.send_message(chat_id, f"⚙️ {menu['select_text']}", reply_markup=keyboard)

@bot.callback_query_handler(func=lambda call:True)
def apply_settings(call):
    """Apply an inline-keyboard choice; callback data is '<command>+<key>'."""
    global users
    chat_id = call.message.chat.id
    command, settings_value = call.data.split('+')
    settings_key = command[1:]  # strip the leading '/' -> users-dict key
    users[chat_id][settings_key] = str(settings_value)
    # Persist immediately: the original only saved on /start and /stop, so a
    # restart silently dropped any settings changed in between.
    save_users()
    reply = SETTINGS_MENU[command]['reply_text'] + SETTINGS_MENU[command]['buttons'][settings_value]
    bot.send_message(chat_id = chat_id, text = reply)


@bot.message_handler(commands = ['stop'])
def command_processor(message):
    """Handle /stop: ask the generation loop to halt after the current image.

    The original declared ``global style`` and ``global need_to_stop`` — names
    that are never used at module level; removed.
    """
    global users
    if message.chat.id not in users:
        # /stop before /start would otherwise raise KeyError.
        add_user(message.chat.id)
    users[message.chat.id]['need_to_stop'] = True
    bot.send_message(message.chat.id,"⛔ Генерация остановится по завершении создания текущей картинки")
    save_users()

@bot.message_handler(content_types=['photo'])
def photo_processor(message):
    """Full pipeline for an incoming photo: download, analyze, generate batch.

    Flow: download largest photo -> gender detection (BLIP) -> background
    removal + auto color correction -> face embedding (insightface) ->
    (re)load style pipeline if needed -> generate `batch` images, honoring the
    per-user need_to_stop flag between images.

    Fixes over the original:
      * busy flag is released in a ``finally`` — any exception mid-generation
        previously left ``not_available`` True forever, bricking the bot;
      * tolerates make_face_embeds() returning either a bare failure string
        (old behavior, which crashed the 2-value unpacking) or a tuple;
      * completion message is no longer sent after a failed detection or a
        user stop;
      * the generated PIL image is serialized to JPEG bytes before
        send_photo, which expects file-like data.
    """
    global not_available
    global reface_pipeline
    if not_available:
        bot.send_message(message.chat.id, "⛔ Предыдущая генерация не завершена. Дождитесь завершения или остановите ее")
        return
    chat_id = message.chat.id
    if chat_id not in users:
        add_user(chat_id)  # photo may arrive before /start
    users[chat_id]['need_to_stop'] = False
    photo = message.photo[-1]  # last entry = highest resolution
    file_info = bot.get_file(photo.file_id)
    incoming_image = bot.download_file(file_info.file_path)
    repeats = int(users[chat_id]['batch'])
    bot.send_message(chat_id, '🤖 Анализирую фото, подождите...')
    image = Image.open(BytesIO(incoming_image))
    gender = image_detect_gender(image)
    if gender == 'failed':
        bot.send_message(chat_id, "⛔ Лицо не распознано, пришлите другое фото")
        return
    isolated_image = isolate_on_white(image)
    color_corrected_image = image_auto_cc(isolated_image)
    embeds_result = make_face_embeds(color_corrected_image)
    if not isinstance(embeds_result, tuple) or isinstance(embeds_result[0], str):
        bot.send_message(chat_id, "⛔ Лицо не распознано, пришлите другое фото")
        return
    face_embeds, face = embeds_result
    reply = '👧 Начинаю генерацию, подождите...' if gender == 'female' else '👨 Начинаю генерацию, подождите...'
    bot.send_message(chat_id, reply)
    style = users[chat_id]['style']
    if loaded_style != style or reface_pipeline is None:
        bot.send_message(chat_id, f"💡 Загружаю ИИ-модели для стиля «{SETTINGS_MENU['/style']['buttons'][style]}», подождите ...")
        reface_pipeline = reface_preload(style)
    not_available = True
    stopped = False
    try:
        for i in range(1, repeats + 1):
            if users[chat_id]['need_to_stop']:
                users[chat_id]['need_to_stop'] = False
                bot.send_message(chat_id,"🏁 Генерация остановлена")
                stopped = True
                break
            print("Image №", i)
            generated_image = reface(reface_pipeline, face_embeds, face, gender, isolated_image, style)
            final_image = image_apply_instafilter(generated_image, filter_strength = styles.STYLES[style]['instafilter_strength'])
            # Serialize to bytes: send_photo expects file-like data.
            buffer = BytesIO()
            final_image.save(buffer, format='JPEG')
            buffer.seek(0)
            caption = f"Image {i}/{repeats} © AItar"
            bot.send_photo(chat_id, photo = buffer, caption = caption)
    finally:
        # Always release the busy flag, even when generation raises.
        not_available = False
    if not stopped:
        bot.send_message(chat_id,"🏁 Генерация завершена")
# Blocking main loop: keep polling Telegram for updates (auto-reconnects on
# network errors) until the process is terminated.
bot.infinity_polling()
