from diffusers import AutoencoderKL, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from transformers import T5EncoderModel
from PIL import Image
from pipelines.models import TextToImageRequest
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only
import torch
import torch._dynamo
import os

# Allocator and kernel settings tuned for single-GPU inference throughput.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
torch._dynamo.config.suppress_errors = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True

Pipeline = FluxPipeline

def load_pipeline() -> Pipeline:
    ckpt_id = "passfh/flux_enc_vae"
    ckpt_revision = "07c7ccc6fa03bfba9bbd3de12132d68a8acb5bfd"
    # Load the VAE in bfloat16, then quantize its weights to int8 to cut memory.
    vae = AutoencoderKL.from_pretrained(
        ckpt_id, revision=ckpt_revision, subfolder="vae", local_files_only=True, torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "passfh/tf_flux", revision="183b9075737fe1584f7465abb2d43d0535f48453", subfolder="text_encoder_2", torch_dtype=torch.bfloat16,
    )
    # Load the transformer directly from its local HF cache path; the weights
    # are stored as pickled checkpoints, hence use_safetensors=False.
    path = os.path.join(HF_HUB_CACHE, "models--passfh--tf_flux/snapshots/183b9075737fe1584f7465abb2d43d0535f48453/transformer")
    transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16, use_safetensors=False)
    # Pass the quantized VAE explicitly; otherwise the pipeline would reload an
    # unquantized copy and the quantization above would have no effect.
    pipeline = FluxPipeline.from_pretrained(
        ckpt_id, revision=ckpt_revision, vae=vae, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    pipeline.to(memory_format=torch.channels_last)
    # One warmup pass to trigger compilation and kernel caching before serving requests.
    pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
    return pipeline

@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image.Image:
    # 4-step, guidance-free sampling with the same settings as the warmup pass.
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]
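
# --- Usage sketch (not part of the original entry points). ---
# Assumption: a serving harness normally imports load_pipeline()/infer();
# the TextToImageRequest constructor arguments below are illustrative and
# may differ from the actual pipelines.models definition.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(prompt="a watercolor fox in a forest", width=1024, height=1024)
    image = infer(request, pipe, Generator("cuda").manual_seed(0))
    image.save("sample.png")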