Oysiyl committed on
Commit 0d712f8 · 1 Parent(s): 00847d8

torch.compile, bfloat16

Files changed (1)
  1. app.py +167 -48
app.py CHANGED
@@ -1,6 +1,8 @@
+import json
 import os
 import random
 import sys
+import warnings
 from typing import Any, Mapping, Sequence, Union
 
 import gradio as gr
@@ -10,9 +12,14 @@ import torch
 from huggingface_hub import hf_hub_download
 from PIL import Image
 
-# Import FreeU for quality improvements
+# ComfyUI imports (after HF hub downloads)
+from comfy import model_management
+from comfy.cli_args import args
 from comfy_extras.nodes_freelunch import FreeU_V2
 
+# Suppress torchsde floating-point precision warnings (cosmetic only, no functional impact)
+warnings.filterwarnings("ignore", message="Should have tb<=t1 but got")
+
 hf_hub_download(
     repo_id="stable-diffusion-v1-5/stable-diffusion-v1-5",
     filename="v1-5-pruned-emaonly.ckpt",
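The suppression added above works because `warnings.filterwarnings` treats the `message` argument as a regular expression that only has to match the start of the warning text. A minimal, self-contained sketch of that behavior (the `noisy()` function is a hypothetical stand-in for torchsde's internal warning, not code from this repository):

import warnings

# Same filter as in app.py: `message` is a regex matched against the start of the
# warning text, so any message beginning with this prefix is silenced.
warnings.filterwarnings("ignore", message="Should have tb<=t1 but got")

def noisy():
    warnings.warn("Should have tb<=t1 but got tb=1.0000, t1=0.9999")  # suppressed by the filter
    warnings.warn("an unrelated warning")  # still emitted

noisy()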
@@ -163,7 +170,7 @@ def import_custom_nodes() -> None:
     init_extra_nodes()
 
 
-from nodes import NODE_CLASS_MAPPINGS
+from nodes import NODE_CLASS_MAPPINGS  # noqa: E402
 
 # Initialize common nodes
 checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
@@ -196,10 +203,6 @@ latentupscaleby = NODE_CLASS_MAPPINGS["LatentUpscaleBy"]()
 # Additional issue: MPS tensor operations can produce NaN/inf values (PyTorch bug #84364)
 # Solution: Monkey-patch dtype functions to force fp32, enable MPS fallback
 # References: https://civitai.com/articles/11106, https://github.com/pytorch/pytorch/issues/84364
-import os
-
-from comfy import model_management
-from comfy.cli_args import args
 
 # Lazy upscale model loading - only load when needed
 # This is safe for ZeroGPU since upscaling happens inside @spaces.GPU function
@@ -244,7 +247,30 @@ def calculate_vae_tile_size(image_size):
     return 1024, 128
 
 
-if torch.backends.mps.is_available():
+def log_progress(message, gr_progress=None, progress_value=None):
+    """Helper to log progress to both console and Gradio (simple stage-based updates)"""
+    print(f"{message}", flush=True)
+    if gr_progress and progress_value is not None:
+        gr_progress(progress_value, desc=message)
+
+
+# Device-specific optimizations
+if torch.cuda.is_available() and not torch.backends.mps.is_available():
+    # CUDA device - check bfloat16 support
+    print(f"CUDA device detected (PyTorch {torch.__version__})")
+
+    # Check if bfloat16 is supported (requires compute capability >= 8.0, e.g., A100, H100)
+    if torch.cuda.is_bf16_supported():
+        print(" ✓ Using bfloat16 precision for optimal performance")
+        print(" ✓ Memory optimizations enabled")
+        # Note: bfloat16 is handled automatically by model_management on CUDA
+        # No dtype forcing needed - ComfyUI uses optimal dtypes by default
+    else:
+        print(" ⚠️ bfloat16 not supported on this GPU, using default precision")
+        print(" ℹ️ For best performance, use GPU with compute capability >= 8.0")
+
+elif torch.backends.mps.is_available():
+    # MPS device (Apple Silicon) - force fp32 to avoid black image bug
     print(f"MPS device detected (PyTorch {torch.__version__})")
     os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = (
         "1"  # Enable MPS fallback for unsupported ops
@@ -317,6 +343,52 @@ valid_models = [
 model_management.load_models_gpu(valid_models)
 
 
+# Apply torch.compile to diffusion models for 1.5-1.7× speedup
+# Compilation happens once at startup (30-60s), then cached for fast inference
+def _apply_torch_compile_optimizations():
+    """Apply torch.compile to both pipeline models using ComfyUI's infrastructure"""
+    try:
+        from comfy_api.torch_helpers.torch_compile import set_torch_compile_wrapper
+
+        print("\n🔧 Applying torch.compile optimizations...")
+
+        # Compile standard pipeline model (DreamShaper 3.32)
+        standard_model = get_value_at_index(checkpointloadersimple_4, 0)
+        set_torch_compile_wrapper(
+            model=standard_model,
+            backend="inductor",
+            mode="reduce-overhead",  # Best for iterative sampling
+            fullgraph=False,  # ControlNet prevents full graph
+            dynamic=False,  # Fixed image sizes per pipeline
+            keys=["diffusion_model"],  # Compile UNet only
+        )
+        print(" ✓ Compiled standard pipeline diffusion model")
+
+        # Compile artistic pipeline model (DreamShaper 6.31)
+        artistic_model = get_value_at_index(checkpointloadersimple_artistic, 0)
+        set_torch_compile_wrapper(
+            model=artistic_model,
+            backend="inductor",
+            mode="reduce-overhead",
+            fullgraph=False,
+            dynamic=False,
+            keys=["diffusion_model"],
+        )
+        print(" ✓ Compiled artistic pipeline diffusion model")
+        print("✅ torch.compile optimizations applied successfully!\n")
+
+    except Exception as e:
+        print(f"⚠️ torch.compile optimization failed: {e}")
+        print(" Continuing without compilation (slower but functional)\n")
+
+
+# Only apply torch.compile on CUDA (not on MPS for local testing)
+if torch.cuda.is_available():
+    _apply_torch_compile_optimizations()
+else:
+    print("ℹ️ Skipping torch.compile (not on CUDA)")
+
+
 @spaces.GPU(duration=30)
 def generate_qr_code_unified(
     prompt: str,
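The hunk above routes compilation through ComfyUI's `set_torch_compile_wrapper` so that only the UNet (`keys=["diffusion_model"]`) is compiled. For orientation, a minimal standalone sketch of the same `torch.compile` flags on a plain module, combined with the bfloat16 capability probe from the device-detection hunk; `TinyUNet` is a hypothetical stand-in (not the DreamShaper checkpoints app.py loads) and the sketch assumes a CUDA device:

import torch

class TinyUNet(torch.nn.Module):
    """Hypothetical stand-in for the diffusion UNet that app.py compiles."""
    def __init__(self):
        super().__init__()
        self.body = torch.nn.Conv2d(4, 4, kernel_size=3, padding=1)

    def forward(self, x):
        return self.body(x)

# bfloat16 probe: native bf16 needs compute capability >= 8.0 (Ampere or newer)
use_bf16 = torch.cuda.is_available() and torch.cuda.is_bf16_supported()

model = TinyUNet().cuda()
# Same knobs the commit passes through set_torch_compile_wrapper:
# inductor backend, CUDA-graph-friendly mode, partial graphs allowed, static shapes.
compiled = torch.compile(
    model,
    backend="inductor",
    mode="reduce-overhead",
    fullgraph=False,
    dynamic=False,
)

x = torch.randn(1, 4, 64, 64, device="cuda")
with torch.autocast(device_type="cuda", dtype=torch.bfloat16, enabled=use_bf16):
    out = compiled(x)  # first call triggers compilation; later calls reuse the cached graph
print(out.dtype, torch.cuda.get_device_capability())

The `reduce-overhead` mode trades some extra memory for CUDA-graph capture, which pays off when the same UNet is invoked on every one of the 30 sampling steps.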
@@ -342,6 +414,7 @@ def generate_qr_code_unified(
     controlnet_strength_final: float = 0.7,
     controlnet_strength_standard_first: float = 0.45,
     controlnet_strength_standard_final: float = 1.0,
+    progress=gr.Progress(),
 ):
     # Only manipulate the text if it's a URL input type
     qr_text = text_input
@@ -369,6 +442,7 @@
             enable_upscale,
             controlnet_strength_standard_first,
             controlnet_strength_standard_final,
+            progress,
         )
     else:  # artistic
         yield from _pipeline_artistic(
@@ -391,6 +465,7 @@
             sag_blur_sigma,
             controlnet_strength_first,
             controlnet_strength_final,
+            progress,
         )
 
 
@@ -409,6 +484,7 @@ def generate_standard_qr(
     enable_freeu: bool = False,
     controlnet_strength_standard_first: float = 0.45,
     controlnet_strength_standard_final: float = 1.0,
+    progress=gr.Progress(),
 ):
     """Wrapper function for standard QR generation"""
     # Get actual seed used (custom or random)
@@ -450,6 +526,7 @@
         enable_upscale=enable_upscale,
         controlnet_strength_standard_first=controlnet_strength_standard_first,
         controlnet_strength_standard_final=controlnet_strength_standard_final,
+        progress=progress,
     )
 
     final_image = None
@@ -493,6 +570,7 @@ def generate_artistic_qr(
     sag_blur_sigma: float = 0.5,
     controlnet_strength_first: float = 0.45,
     controlnet_strength_final: float = 0.70,
+    progress=gr.Progress(),
 ):
     """Wrapper function for artistic QR generation with FreeU and SAG parameters"""
     # Get actual seed used (custom or random)
@@ -548,6 +626,7 @@
         sag_blur_sigma=sag_blur_sigma,
         controlnet_strength_first=controlnet_strength_first,
         controlnet_strength_final=controlnet_strength_final,
+        progress=progress,
     )
 
     final_image = None
@@ -570,7 +649,6 @@ def generate_artistic_qr(
 
 
 # Helper functions for shareable settings JSON
-import json
 
 
 def generate_settings_json(params_dict: dict) -> str:
@@ -950,6 +1028,7 @@ def _pipeline_standard(
     enable_upscale: bool = False,
     controlnet_strength_first: float = 0.45,
     controlnet_strength_final: float = 1.0,
+    gr_progress=None,
 ):
     emptylatentimage_5 = emptylatentimage.generate(
         width=image_size, height=image_size, batch_size=1
@@ -976,6 +1055,14 @@
     # Set protocol based on input type: None for plain text, Https for URLs
     qr_protocol = "None" if input_type == "Plain Text" else "Https"
 
+    # Test progress bar at the very beginning
+    print(f"DEBUG: gr_progress type: {type(gr_progress)}")
+    print(f"DEBUG: gr_progress value: {gr_progress}")
+    if gr_progress:
+        print("DEBUG: Calling gr_progress(0.0)")
+        gr_progress(0.0, desc="Starting QR generation...")
+        print("DEBUG: Called gr_progress(0.0) successfully")
+
     try:
         comfy_qr_by_module_size_15 = comfy_qr_by_module_size.generate_qr(
             protocol=qr_protocol,
@@ -1001,7 +1088,9 @@
     base_qr_np = (base_qr_tensor.cpu().numpy() * 255).astype(np.uint8)
     base_qr_np = base_qr_np[0]
     base_qr_pil = Image.fromarray(base_qr_np)
-    yield base_qr_pil, "Generated base QR pattern… enhancing with AI (step 1/3)"
+    msg = "Generated base QR pattern… enhancing with AI (step 1/3)"
+    log_progress(msg, gr_progress, 0.05)
+    yield base_qr_pil, msg
 
     emptylatentimage_17 = emptylatentimage.generate(
         width=image_size * 2, height=image_size * 2, batch_size=1
@@ -1011,6 +1100,9 @@
         control_net_name="control_v11f1e_sd15_tile_fp16.safetensors"
     )
 
+    # Simple stage update for first pass
+    log_progress("First pass - preparing controlnets...", gr_progress, 0.1)
+
     for q in range(1):
         controlnetapplyadvanced_11 = controlnetapplyadvanced.apply_controlnet(
             strength=controlnet_strength_first,
@@ -1053,6 +1145,11 @@
             latent_image=get_value_at_index(emptylatentimage_5, 0),
         )
 
+        # Yield progress update after first sampling completes
+        msg = "First pass sampling complete... decoding image"
+        log_progress(msg, gr_progress, 0.4)
+        yield base_qr_pil, msg  # Yield with same image as before
+
         # Calculate optimal tile size for this image
         tile_size, overlap = calculate_vae_tile_size(image_size)
 
@@ -1076,11 +1173,16 @@
         mid_np = (mid_tensor.cpu().numpy() * 255).astype(np.uint8)
         mid_np = mid_np[0]
         mid_pil = Image.fromarray(mid_np)
-        yield mid_pil, "First enhancement pass complete (step 2/3)… refining details"
+        msg = "First enhancement pass complete (step 2/3)… refining details"
+        log_progress(msg, gr_progress, 0.5)
+        yield mid_pil, msg
 
         # Clear cache before second pass to free memory
         model_management.soft_empty_cache()
 
+        # Simple stage update for second pass
+        log_progress("Second pass (refinement)...", gr_progress, 0.5)
+
         controlnetapplyadvanced_20 = controlnetapplyadvanced.apply_controlnet(
             strength=controlnet_strength_final,
             start_percent=0,
@@ -1105,6 +1207,11 @@
             latent_image=get_value_at_index(emptylatentimage_17, 0),
         )
 
+        # Yield progress update after second sampling completes
+        msg = "Second pass sampling complete... decoding final image"
+        log_progress(msg, gr_progress, 0.8)
+        yield mid_pil, msg  # Yield with previous image
+
         # Second pass is always 2x original, calculate based on doubled size
         tile_size_2x, overlap_2x = calculate_vae_tile_size(image_size * 2)
 
@@ -1128,7 +1235,9 @@
             pre_upscale_np = (pre_upscale_tensor.cpu().numpy() * 255).astype(np.uint8)
             pre_upscale_np = pre_upscale_np[0]
             pre_upscale_pil = Image.fromarray(pre_upscale_np)
-            yield pre_upscale_pil, "Enhancement complete (step 3/4)... upscaling image"
+            msg = "Enhancement complete (step 3/4)... upscaling image"
+            log_progress(msg, gr_progress, 0.9)
+            yield pre_upscale_pil, msg
 
             # Upscale the final image (load model on-demand)
             upscale_model = get_upscale_model()
@@ -1141,17 +1250,18 @@
             image_np = (image_tensor.cpu().numpy() * 255).astype(np.uint8)
             image_np = image_np[0]
             pil_image = Image.fromarray(image_np)
-            yield (
-                pil_image,
-                "No errors, all good! Final QR art generated and upscaled. (step 4/4)",
-            )
+            msg = "No errors, all good! Final QR art generated and upscaled. (step 4/4)"
+            log_progress(msg, gr_progress, 1.0)
+            yield (pil_image, msg)
         else:
             # No upscaling
             image_tensor = get_value_at_index(vaedecode_21, 0)
             image_np = (image_tensor.cpu().numpy() * 255).astype(np.uint8)
             image_np = image_np[0]
             pil_image = Image.fromarray(image_np)
-            yield pil_image, "No errors, all good! Final QR art generated."
+            msg = "No errors, all good! Final QR art generated."
+            log_progress(msg, gr_progress, 1.0)
+            yield pil_image, msg
 
 
 def _pipeline_artistic(
@@ -1174,6 +1284,7 @@ def _pipeline_artistic(
     sag_blur_sigma: float = 0.5,
     controlnet_strength_first: float = 0.45,
     controlnet_strength_final: float = 0.7,
+    gr_progress=None,
 ):
     # Generate QR code
     qr_protocol = "None" if input_type == "Plain Text" else "Https"
@@ -1215,10 +1326,9 @@
 
     # Only add noise if there's a border (border_size > 0)
    if border_size > 0:
-        yield (
-            base_qr_pil,
-            f"Generated base QR pattern... adding QR-like cubics to border (step {current_step}/{total_steps})",
-        )
+        msg = f"Generated base QR pattern... adding QR-like cubics to border (step {current_step}/{total_steps})"
+        log_progress(msg, gr_progress, 0.05)
+        yield (base_qr_pil, msg)
         current_step += 1
 
         # Add QR-like cubic patterns ONLY to border region (extends QR structure into border)
@@ -1235,18 +1345,16 @@
         noisy_qr_np = (qr_with_border_noise.cpu().numpy() * 255).astype(np.uint8)
         noisy_qr_np = noisy_qr_np[0]
         noisy_qr_pil = Image.fromarray(noisy_qr_np)
-        yield (
-            noisy_qr_pil,
-            f"Added QR-like cubics to border... enhancing with AI (step {current_step}/{total_steps})",
-        )
+        msg = f"Added QR-like cubics to border... enhancing with AI (step {current_step}/{total_steps})"
+        log_progress(msg, gr_progress, 0.1)
+        yield (noisy_qr_pil, msg)
         current_step += 1
     else:
         # No border, skip noise
         qr_with_border_noise = get_value_at_index(comfy_qr, 0)
-        yield (
-            base_qr_pil,
-            f"Generated base QR pattern (no border)... enhancing with AI (step {current_step}/{total_steps})",
-        )
+        msg = f"Generated base QR pattern (no border)... enhancing with AI (step {current_step}/{total_steps})"
+        log_progress(msg, gr_progress, 0.1)
+        yield (base_qr_pil, msg)
         current_step += 1
 
     # Generate latent image
@@ -1329,6 +1437,8 @@ def _pipeline_artistic(
         enhanced_model = freeu_model
 
     # First sampling pass
+    log_progress("First pass - artistic sampling...", gr_progress, 0.2)
+
     samples = ksampler.sample(
         seed=seed,
         steps=30,
@@ -1342,6 +1452,11 @@
         latent_image=get_value_at_index(latent_image, 0),
     )
 
+    # Yield progress update after first sampling completes
+    msg = f"First pass sampling complete... decoding image (step {current_step}/{total_steps})"
+    log_progress(msg, gr_progress, 0.4)
+    yield (noisy_qr_pil if border_size > 0 else base_qr_pil, msg)
+
     # First decode with dynamic tiling
     tile_size, overlap = calculate_vae_tile_size(image_size)
 
@@ -1363,10 +1478,9 @@
     first_pass_np = (first_pass_tensor.cpu().numpy() * 255).astype(np.uint8)
    first_pass_np = first_pass_np[0]
     first_pass_pil = Image.fromarray(first_pass_np)
-    yield (
-        first_pass_pil,
-        f"First enhancement pass complete (step {current_step}/{total_steps})... final refinement pass",
-    )
+    msg = f"First enhancement pass complete (step {current_step}/{total_steps})... final refinement pass"
+    log_progress(msg, gr_progress, 0.5)
+    yield (first_pass_pil, msg)
     current_step += 1
 
     # Clear cache before second pass to free memory
@@ -1392,6 +1506,8 @@
     )
 
     # Final sampling pass
+    log_progress("Second pass (refinement)...", gr_progress, 0.6)
+
     final_samples = ksampler.sample(
         seed=seed + 1,
         steps=30,
@@ -1405,6 +1521,11 @@
         latent_image=get_value_at_index(upscaled_latent, 0),
     )
 
+    # Yield progress update after second sampling completes
+    msg = f"Second pass sampling complete... decoding final image (step {current_step}/{total_steps})"
+    log_progress(msg, gr_progress, 0.8)
+    yield (first_pass_pil, msg)
+
     # Final decode with dynamic tiling
     tile_size, overlap = calculate_vae_tile_size(image_size)
 
@@ -1428,10 +1549,9 @@
     pre_upscale_np = (pre_upscale_tensor.cpu().numpy() * 255).astype(np.uint8)
     pre_upscale_np = pre_upscale_np[0]
     pre_upscale_pil = Image.fromarray(pre_upscale_np)
-    yield (
-        pre_upscale_pil,
-        f"Final refinement complete (step {current_step}/{total_steps})... upscaling image",
-    )
+    msg = f"Final refinement complete (step {current_step}/{total_steps})... upscaling image"
+    log_progress(msg, gr_progress, 0.9)
+    yield (pre_upscale_pil, msg)
     current_step += 1
 
     # Upscale image with model (load model on-demand)
@@ -1446,24 +1566,23 @@
         image_np = (image_tensor.cpu().numpy() * 255).astype(np.uint8)
         image_np = image_np[0]
         final_image = Image.fromarray(image_np)
-        yield (
-            final_image,
-            f"No errors, all good! Final artistic QR code generated and upscaled. (step {current_step}/{total_steps})",
-        )
+        msg = f"No errors, all good! Final artistic QR code generated and upscaled. (step {current_step}/{total_steps})"
+        log_progress(msg, gr_progress, 1.0)
+        yield (final_image, msg)
     else:
         # No upscaling
         image_tensor = get_value_at_index(final_decoded, 0)
         image_np = (image_tensor.cpu().numpy() * 255).astype(np.uint8)
         image_np = image_np[0]
         final_image = Image.fromarray(image_np)
-        yield (
-            final_image,
-            f"No errors, all good! Final artistic QR code generated. (step {current_step}/{total_steps})",
-        )
+        msg = f"No errors, all good! Final artistic QR code generated. (step {current_step}/{total_steps})"
+        log_progress(msg, gr_progress, 1.0)
+        yield (final_image, msg)
 
 
 if __name__ == "__main__" and not os.environ.get("QR_TESTING_MODE"):
-    # Start your Gradio app
+    # Start your Gradio app with automatic cache cleanup
+    # delete_cache=(3600, 3600) means: check every hour and delete files older than 1 hour
     with gr.Blocks(delete_cache=(3600, 3600)) as app:
         # Add a title and description
         gr.Markdown("# QR Code Art Generator")
@@ -1471,8 +1590,8 @@ if __name__ == "__main__" and not os.environ.get("QR_TESTING_MODE"):
     This is an AI-powered QR code generator that creates artistic QR codes using Stable Diffusion 1.5 and ControlNet models.
     The application uses a custom ComfyUI workflow to generate QR codes.
 
-    **Privacy Notice:** Generated images are temporarily cached during your session.
-    Files are cleared when the server restarts. Download your QR codes immediately after generation.
+    **Privacy Notice:** Generated images are automatically deleted after 1 hour.
+    Temporary files are checked and cleaned every hour. Download your QR codes promptly after generation.
 
     ### Tips:
     - Use detailed prompts for better results
@@ -2707,7 +2826,7 @@
         )
 
         # ARTISTIC QR TAB
-    app.queue()
+    app.queue()  # Required for gr.Progress() to work!
     app.launch(share=False, mcp_server=True)
     # Note: Automatic file cleanup via delete_cache not available in Gradio 5.49.1
     # Files will be cleaned up when the server is restarted
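The last hunk re-adds `app.queue()` with a note that `gr.Progress()` depends on it: progress callbacks and streaming `yield`s are delivered to the browser over Gradio's queue. A minimal, self-contained illustration of that pattern (not this Space's actual UI wiring, and assuming a Gradio release recent enough to support `delete_cache`):

import time
import gradio as gr

def slow_task(steps: float, progress=gr.Progress()):
    """Generator endpoint: yields intermediate results and reports progress."""
    total = int(steps)
    for i in range(total):
        progress((i + 1) / total, desc=f"step {i + 1}/{total}")
        time.sleep(0.5)
        yield f"finished step {i + 1} of {total}"

with gr.Blocks(delete_cache=(3600, 3600)) as demo:  # sweep cached files hourly, drop files older than 1 hour
    steps = gr.Number(value=3, label="steps")
    status = gr.Textbox(label="status")
    gr.Button("Run").click(slow_task, inputs=steps, outputs=status)

demo.queue()  # per the commit's note, progress and streaming updates ride on the queue
demo.launch()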
 