Benchmarking Flux across different optimizations.
from diffusers import DiffusionPipeline
from diffusers import FluxTransformer2DModel, BitsAndBytesConfig
from transformers import T5EncoderModel, BitsAndBytesConfig as BnbConfig
from offloader import ModelOffloaderV2
import torch.utils.benchmark as benchmark
from pathlib import Path
import os
import sys
import torch
import json
import argparse
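# Example invocations (the filename benchmark_flux.py is an assumption; the
# gist does not name the script):
#   python benchmark_flux.py --direct_cuda 1
#   python benchmark_flux.py --bit4_dit 1 --bit4_t5 1 --direct_cuda 1
#   python benchmark_flux.py --direct_cuda 0 --advanced_offload 1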
# Clear the cache and reset peak-memory stats so max_memory_allocated() below
# measures only this run. reset_max_memory_allocated() and
# reset_max_memory_cached() are deprecated aliases of reset_peak_memory_stats().
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
prompt = "A whimsical and creative image depicting a hybrid creature that is a mix of a waffle and a hippopotamus, basking in a river of melted butter amidst a breakfast-themed landscape. It features the distinctive, bulky body shape of a hippo. However, instead of the usual grey skin, the creature’s body resembles a golden-brown, crispy waffle fresh off the griddle. The skin is textured with the familiar grid pattern of a waffle, each square filled with a glistening sheen of syrup. The environment combines the natural habitat of a hippo with elements of a breakfast table setting, a river of warm, melted butter, with oversized utensils or plates peeking out from the lush, pancake-like foliage in the background, a towering pepper mill standing in for a tree. As the sun rises in this fantastical world, it casts a warm, buttery glow over the scene. The creature, content in its butter river, lets out a yawn. Nearby, a flock of birds take flight" | |
def load_pipeline(args):
    quantization_config_dit, quantization_config_t5 = None, None
    if args.bit4_dit:
        quantization_config_dit = BitsAndBytesConfig(
            load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
        )
    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        subfolder="transformer",
        quantization_config=quantization_config_dit,
        torch_dtype=torch.bfloat16,
    )
    if args.bit4_t5:
        quantization_config_t5 = BnbConfig(
            load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
        )
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        subfolder="text_encoder_2",
        quantization_config=quantization_config_t5,
        torch_dtype=torch.bfloat16,
    )
    pipeline = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    if args.layerwise_upcasting:
        # Store transformer weights in fp8 and upcast to bf16 per layer at compute time.
        pipeline.transformer.enable_layerwise_casting(
            storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16
        )
    if args.direct_cuda:
        pipeline = pipeline.to("cuda")
    elif args.cpu_offload:
        pipeline.enable_model_cpu_offload()
    elif args.slow_offload:
        pipeline.enable_sequential_cpu_offload()
    elif args.advanced_offload:
        # The bnb-quantized models load on GPU; move them back to CPU so the
        # offloader manages their placement.
        if args.bit4_dit:
            pipeline.transformer = pipeline.transformer.to("cpu")
        if args.bit4_t5:
            pipeline.text_encoder_2 = pipeline.text_encoder_2.to("cpu")
        ModelOffloaderV2(pipeline.text_encoder_2, record_stream=True).cuda()  # T5
        ModelOffloaderV2(pipeline.transformer, record_stream=True).cuda()
        pipeline.text_encoder.cuda()  # CLIP
        pipeline.vae.cuda()
    return pipeline
def benchmark_fn(f, *args, **kwargs):
    # Mean wall-clock time in seconds, measured with torch.utils.benchmark.
    t0 = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"args": args, "kwargs": kwargs, "f": f},
        num_threads=torch.get_num_threads(),
    )
    return f"{(t0.blocked_autorange().mean):.3f}"
def run_inference(pipeline):
    image = pipeline(
        prompt=prompt,
        guidance_scale=3.5,
        num_inference_steps=28,
        max_sequence_length=512,
        generator=torch.manual_seed(0),
    ).images[0]
    return image
def parse_args():
    parser = argparse.ArgumentParser()
    # Use 0 or 1 for booleans
    parser.add_argument(
        "--bit4_dit",
        type=int,
        choices=[0, 1],
        default=0,
        help="Enable 4-bit quantization for the DiT (Flux transformer) model (0 = False, 1 = True).",
    )
    parser.add_argument(
        "--bit4_t5",
        type=int,
        choices=[0, 1],
        default=0,
        help="Enable 4-bit quantization for the T5 text encoder (0 = False, 1 = True).",
    )
    parser.add_argument(
        "--layerwise_upcasting",
        type=int,
        choices=[0, 1],
        default=0,
        help="Enable layerwise upcasting (0 = False, 1 = True).",
    )
    parser.add_argument(
        "--direct_cuda",
        type=int,
        choices=[0, 1],
        default=1,
        help="Move the pipeline directly to CUDA (0 = False, 1 = True).",
    )
    parser.add_argument(
        "--cpu_offload",
        type=int,
        choices=[0, 1],
        default=0,
        help="Enable model-level CPU offload (0 = False, 1 = True).",
    )
    parser.add_argument(
        "--slow_offload",
        type=int,
        choices=[0, 1],
        default=0,
        help="Enable sequential CPU offload (0 = False, 1 = True).",
    )
    parser.add_argument(
        "--advanced_offload",
        type=int,
        choices=[0, 1],
        default=0,
        help="Enable advanced (stream-based) offload (0 = False, 1 = True).",
    )
    args = parser.parse_args()
    # At most one offloading scheme may be enabled at a time.
    if args.cpu_offload + args.slow_offload + args.advanced_offload > 1:
        raise ValueError("Only one offloading scheme can be enabled at a time.")
    if args.direct_cuda and (args.cpu_offload or args.slow_offload or args.advanced_offload):
        if not (args.bit4_dit or args.bit4_t5):
            raise ValueError("CUDA placement cannot be enabled together with offloading.")
    if not (args.cpu_offload or args.slow_offload or args.advanced_offload) and not args.direct_cuda:
        raise ValueError("At least one offloading strategy or CUDA placement needs to be specified.")
    return args
def create_image_filename(args):
    # Encode the full configuration in the filename, e.g.
    # flux_dit4bit@1_t54bit@0_layerwise@0_directCuda@0_cpuOffload@0_slowOffload@0_advOffload@1.png
    filename_parts = [
        "flux",  # Base identifier or model name
        f"dit4bit@{args.bit4_dit}",
        f"t54bit@{args.bit4_t5}",
        f"layerwise@{args.layerwise_upcasting}",
        f"directCuda@{args.direct_cuda}",
        f"cpuOffload@{args.cpu_offload}",
        f"slowOffload@{args.slow_offload}",
        f"advOffload@{args.advanced_offload}",
    ]
    filename = "_".join(filename_parts) + ".png"
    return filename
def main(args):
    print(f"Running with {args=}")
    image_filename = create_image_filename(args)
    if os.path.exists(image_filename):
        print("Configuration already executed. Exiting.")
        sys.exit(0)
    pipeline = load_pipeline(args)
    # One full run to produce the image; it also warms everything up before timing.
    image = run_inference(pipeline)
    time = benchmark_fn(run_inference, pipeline)
    image.save(image_filename)
    max_memory = torch.cuda.max_memory_allocated() / (1024**3)  # GiB
    json_dict = vars(args)
    json_dict.update({"memory": round(max_memory, 3), "time": time})
    json_path = Path(image_filename).with_suffix(".json")
    with open(json_path, "w") as f:
        json.dump(json_dict, f)
if __name__ == "__main__": | |
args = parse_args() | |
main(args) |
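Each run writes a <config>.json next to the image with the peak memory and mean latency, so results across configurations can be collated afterwards. A minimal sketch of such a helper (not part of the gist; it assumes the JSON files sit in the current directory):

import json
from pathlib import Path

# Collect every per-run JSON emitted by the benchmark script and print a
# simple comparison of peak memory (GiB) and mean latency (seconds).
for path in sorted(Path(".").glob("flux_*.json")):
    data = json.loads(path.read_text())
    print(f"{path.stem}: {data['memory']} GiB, {data['time']} s")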
offloader comes from https://gist.github.com/gau-nernst/9408e13c32d3c6e7025d92cce6cba140
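For reference, below is a minimal sketch of what a stream-based offloader in the spirit of ModelOffloaderV2 does; the class name and structure here are assumptions, not the actual gist. Weights live in pinned CPU memory, and each child module's parameters are copied to the GPU on a side stream right before that child runs, so the host can enqueue the transfer for layer i+1 while the GPU is still computing layer i. The record_stream call plays the same role as the record_stream=True flag above: it tells the caching allocator the tensor is consumed on the compute stream, so its memory is not recycled too early.

import torch
import torch.nn as nn

class NaiveStreamOffloader:
    """Keep weights in pinned CPU memory; prefetch each child's parameters
    to the GPU on a side stream right before the child's forward runs.
    (A sketch: handles parameters only, not buffers or quantized layers.)"""

    def __init__(self, module: nn.Module, device: str = "cuda"):
        self.device = torch.device(device)
        self.copy_stream = torch.cuda.Stream()
        # Pinned CPU master copies allow fast, truly asynchronous H2D copies.
        self.cpu_masters = {}
        for p in module.parameters():
            p.data = p.data.detach().cpu().pin_memory()
            self.cpu_masters[p] = p.data
        for child in module.children():
            child.register_forward_pre_hook(self._prefetch)
            child.register_forward_hook(self._release)

    def _prefetch(self, child, _inputs):
        # Issue async H2D copies on the side stream.
        with torch.cuda.stream(self.copy_stream):
            for p in child.parameters():
                p.data = self.cpu_masters[p].to(self.device, non_blocking=True)
        # The compute stream must not run this child before the copies land.
        compute = torch.cuda.current_stream()
        compute.wait_stream(self.copy_stream)
        for p in child.parameters():
            # Mark the GPU tensors as used on the compute stream so the caching
            # allocator does not recycle their memory prematurely.
            p.data.record_stream(compute)

    def _release(self, child, _inputs, _output):
        # Point parameters back at the pinned CPU masters; the GPU copies are freed.
        for p in child.parameters():
            p.data = self.cpu_masters[p]

# Usage sketch (hypothetical): inputs go to CUDA, weights stream in per layer.
if torch.cuda.is_available():
    model = nn.Sequential(nn.Linear(4096, 4096), nn.GELU(), nn.Linear(4096, 4096))
    NaiveStreamOffloader(model)
    with torch.no_grad():
        out = model(torch.randn(8, 4096, device="cuda"))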