import spaces
import torch
import gradio as gr

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    StableDiffusionControlNetImg2ImgPipeline,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler
)

# -------- CONFIG -------- #
BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
CONTROLNET_MODEL = "monster-labs/control_v1p_sd15_qrcode_monster"
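# The QR Code Monster ControlNet conditions generation on a black-and-white
# pattern so that the finished picture subtly follows it (the "illusion" effect).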

# -------- VAE & CONTROLNET -------- #
vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse",
    torch_dtype=torch.float16
)
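# sd-vae-ft-mse supplies the VAE that the "noVAE" base checkpoint ships without.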

controlnet = ControlNetModel.from_pretrained(
    CONTROLNET_MODEL,
    torch_dtype=torch.float16
)

# -------- MAIN PIPELINE -------- #
main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
    BASE_MODEL,
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
    safety_checker=None,   # safety checker disabled
    feature_extractor=None
).to("cuda")

# -------- IMG2IMG PIPELINE -------- #
image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
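# Reusing main_pipe.components shares the already-loaded UNet, VAE, text encoder
# and ControlNet between both pipelines, so no extra VRAM is consumed.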

# -------- SCHEDULERS -------- #
SAMPLER_MAP = {
    "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(
        config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
    ),
    "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
}
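# Rough guidance: DPM++ SDE with Karras sigmas tends to look cleaner at low step
# counts, while Euler is a simple, predictable default.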

# -------- GRADIO DEMO -------- #
@spaces.GPU  # on ZeroGPU Spaces this requests a GPU for the duration of the call
def generate(prompt, control_image, strength=0.8, guidance=7.0, steps=30, sampler="Euler"):
    if control_image is None:
        raise gr.Error("Please upload a control image first.")

    # Swap in the scheduler matching the selected sampler
    image_pipe.scheduler = SAMPLER_MAP[sampler](image_pipe.scheduler.config)

    # The ControlNet img2img pipeline needs both an init image and a control
    # image; the illusion pattern serves as both here.
    image = image_pipe(
        prompt=prompt,
        image=control_image,
        control_image=control_image,
        strength=strength,
        guidance_scale=guidance,
        num_inference_steps=int(steps)
    ).images[0]
    return image

with gr.Blocks() as demo:
    gr.Markdown("# ✨ Illusion Diffusion (Fixed)")

    with gr.Row():
        prompt = gr.Textbox(label="Prompt")
        control_image = gr.Image(type="pil", label="Control Image")

    with gr.Row():
        strength = gr.Slider(0.0, 1.0, value=0.8, label="Strength")
        guidance = gr.Slider(1.0, 15.0, value=7.0, label="Guidance Scale")
        steps = gr.Slider(5, 50, value=30, step=1, label="Steps")
        sampler = gr.Dropdown(list(SAMPLER_MAP.keys()), value="Euler", label="Sampler")

    btn = gr.Button("Generate")
    output = gr.Image()

    btn.click(
        fn=generate,
        inputs=[prompt, control_image, strength, guidance, steps, sampler],
        outputs=output
    )

demo.queue().launch()
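
# A minimal sketch of driving the running demo from another process with
# gradio_client. The endpoint name "/generate" (Gradio's default, derived from
# the function name) and the local file "pattern.png" are assumptions for
# illustration, not part of this app.
#
# from gradio_client import Client, handle_file
#
# client = Client("http://127.0.0.1:7860")
# result = client.predict(
#     "a medieval village, highly detailed",  # prompt
#     handle_file("pattern.png"),             # control image (hypothetical file)
#     0.8,                                    # strength
#     7.0,                                    # guidance scale
#     30,                                     # steps
#     "Euler",                                # sampler
#     api_name="/generate",
# )
# print(result)  # filepath of the generated image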