import os
import pickle
from pathlib import Path
from PIL import Image
import transformers
from transformers import CLIPTextModel, CLIPTokenizer
import shutil
import data.util as Util
import random
from diffusers import UNet2DConditionModel
import torch
# random.seed = 0

# for i in reversed(range(5)):
#     print(i)

# image = Image.open("dataset/celeba_test_16_128/sr_16_128/00005.png")
# # text_encoder = CLIPTextModel.from_pretrained("pretrained/text_encoder/").to("cuda")
# image_encoder = transformers.CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32").to("cuda")
# process = transformers.CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# image_encoder.save_pretrained("pretrained/image_encoder")
# process.save_pretrained("pretrained/processor")
# inputs = process(images=image, return_tensors="pt").to("cuda")
# outputs = image_encoder(**inputs)
#
# print(outputs.last_hidden_state.shape)
# # text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to("cuda")
# tokenizer = CLIPTokenizer.from_pretrained("pretrained/tokenizer/")
#  # Tokenize the input text to get the corresponding token ids
# prompt = ""
# text_input_ids = tokenizer(
#     prompt,
#     padding="max_length",
#     max_length=tokenizer.model_max_length,
#     truncation=True,
#     return_tensors="pt"
# ).input_ids
#
# # Feed the token ids into the text model to get the 77x768 feature
# text_embeddings = text_encoder(text_input_ids.to("cuda"))[0]
# a = torch.clamp(text_embeddings, -1, 1)
# print(text_embeddings.shape)
# print(torch.min(a))



# train_prompt_path = "dataset/test_caption"
# prompt_path = Util.get_paths_from_txts(train_prompt_path)
# with open(prompt_path[0], 'r') as f:
#     lines = f.readlines()
#     lines.append("")
#     print(lines)
#     print(len(lines))
    # number = random.randint(0, 10)
    # print(lines[number])
# print(prompt_path)
# print(len(prompt_path))

# Build the test caption set: for every filename listed in the pickle,
# take the FIRST line of its caption file under `prompt_path` and write
# it to `train_prompt_path` under a zero-padded (width 5) filename.
prompt_path = "dataset/celeba-caption"
train_prompt_path = "dataset/test_200_caption"
with open("dataset/filenames_test.pickle", "rb") as f:
    # NOTE(review): pickle.load is only safe here because the file is a
    # trusted local artifact — never load untrusted pickles.
    filenames = pickle.load(f)
    # print(filenames)
files = []
train_files = []
for name in filenames:
    src = Path(prompt_path) / (str(name) + ".txt")
    # presumably `name` is a numeric string; zfill(5) pads e.g. "5" -> "00005"
    dst = Path(train_prompt_path) / (str(name.zfill(5)) + ".txt")
    files.append(src)
    train_files.append(dst)
    # The original shutil.copy() here was redundant: it duplicated the
    # whole source file only to overwrite it below with the first line.
    # readline() also handles an empty caption file (returns ""), where
    # readlines()[0] would raise IndexError.
    with open(src, 'r') as fin:
        first_line = fin.readline()
    with open(dst, 'w') as fout:
        # write(), not writelines(): writelines on a str iterates it
        # character by character.
        fout.write(first_line)

# print(files[5])

# text_encoder = CLIPTextModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="text_encoder").to("cuda")
# # text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to("cuda")
# tokenizer = CLIPTokenizer.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="tokenizer")
# tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
# text_encoder.save_pretrained("pretrained/text_encoder")
# tokenizer.save_pretrained("pretrained/tokenizer")

# # Tokenize the input text to get the corresponding token ids
# prompt = ""
# text_input_ids = tokenizer(
#     prompt,
#     padding="max_length",
#     max_length=tokenizer.model_max_length,
#     truncation=True,
#     return_tensors="pt"
# ).input_ids
#
# # Feed the token ids into the text model to get the 77x768 feature
# text_embeddings = text_encoder(text_input_ids.to("cuda"))[0]
# print(text_embeddings.shape)
# print(text_embeddings)

# image_path = "dataset/images"
# with open("dataset/filenames_train.pickle", "rb") as f:
#     filenames = pickle.load(f)
#     print(filenames[5])
# files = []
# for i in range(len(data)):
#     files.append(Path(image_path + "/" + str(data[i]) + ".jpg"))
#
# print(files[5])

# device = "cpu"
#
# model = UNet2DConditionModel(
#         in_channels=6,
#         out_channels=3,
#         layers_per_block=2,
#         block_out_channels=(64, 128, 256, 512),
#         cross_attention_dim=768
#     )
#
# model.to(device)
#
# noise = torch.randn((1, 6, 128, 128)).to(device)
# print(noise.shape)
# embdding = torch.randn((1, 77, 768)).to(device)
# # timesteps = torch.linspace(0, 999, 8).long.to(device)
#
# with torch.no_grad():
#     model_predict = model(sample=noise, timestep=5, encoder_hidden_states=embdding).sample
#     print(model_predict.shape)