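# Gradio demo for LongCat-Image text-to-image generation.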
import gradio as gr
import numpy as np
import os, random, json, spaces, time, subprocess
import torch
from transformers import AutoProcessor, AutoTokenizer
from longcat_image.models import LongCatImageTransformer2DModel
from longcat_image.pipelines import LongCatImagePipeline
from utils.image_utils import rescale_image
from utils.prompt_utils import polish_prompt
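# Optional bootstrap: clone the LongCat-Image repo when it is not vendored locally.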
# GIT_DIR = "LongCat-Image"
# GIT_URL = "https://github.com/yourusername/LongCat-Image.git"
# if not os.path.isdir(GIT_DIR):
#     subprocess.run(["git", "clone", GIT_URL])
# else:
#     print("Folder already exists.")
MODEL_REPO = "meituan-longcat/LongCat-Image"
MAX_SEED = np.iinfo(np.int32).max
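# Load the tokenizer and transformer weights from the Hub, then assemble the
# pipeline and move it to the GPU in bfloat16.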
text_processor = AutoTokenizer.from_pretrained(
    MODEL_REPO,
    subfolder='tokenizer'
)
transformer = LongCatImageTransformer2DModel.from_pretrained(
    MODEL_REPO,
    subfolder='transformer',
    torch_dtype=torch.bfloat16,
    use_safetensors=True
).to("cuda")
pipe = LongCatImagePipeline.from_pretrained(
    MODEL_REPO,
    transformer=transformer,
    text_processor=text_processor
)
pipe.to("cuda", torch.bfloat16)
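# Optionally rewrite the user's prompt before generation; returns the prompt to
# use plus a flag indicating whether it was polished.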
def prepare(prompt, is_polish_prompt):
    if not is_polish_prompt:
        return prompt, False
    polished_prompt = polish_prompt(prompt)
    return polished_prompt, True
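# Runs on a ZeroGPU worker; the @spaces.GPU decorator allocates a GPU for the call.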
@spaces.GPU
def inference(
    prompt,
    negative_prompt="blurry ugly bad",
    width=1024,
    height=1024,
    seed=42,
    randomize_seed=True,
    guidance_scale=1.5,
    num_inference_steps=8,
    progress=gr.Progress(track_tqdm=True),
):
    timestamp = time.time()
    print(f"timestamp: {timestamp}")
    # generation
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,  # honor the requested size instead of hard-coding 1024
        width=width,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        enable_prompt_rewrite=False
    ).images[0]
    return image, seed
def read_file(path: str) -> str:
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content
css = """
#col-container {
margin: 0 auto;
max-width: 960px;
}
"""
with open('static/data.json', 'r') as file:
    data = json.load(file)
examples = data['examples']
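# Build the UI: prompt input and controls on the left, generated image on the right.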
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Column():
            gr.HTML(read_file("static/header.html"))
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(
                    label="Prompt",
                    show_label=False,
                    lines=2,
                    placeholder="Enter your prompt",
                    # container=False,
                )
                is_polish_prompt = gr.Checkbox(label="Polish prompt", value=True)
                run_button = gr.Button("Generate", variant="primary")
                with gr.Accordion("Advanced Settings", open=False):
                    negative_prompt = gr.Textbox(
                        label="Negative prompt",
                        lines=2,
                        container=False,
                        placeholder="Enter your negative prompt",
                        value="blurry ugly bad"
                    )
                    num_inference_steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=30,
                        step=1,
                        value=9,
                    )
                    with gr.Row():
                        width = gr.Slider(
                            label="Width",
                            minimum=512,
                            maximum=1280,
                            step=32,
                            value=768,
                        )
                        height = gr.Slider(
                            label="Height",
                            minimum=512,
                            maximum=1280,
                            step=32,
                            value=1024,
                        )
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=10.0,
                        step=0.1,
                        value=1.0,
                    )
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=42,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
            with gr.Column():
                output_image = gr.Image(label="Generated image", show_label=False)
                polished_prompt = gr.Textbox(label="Polished prompt", interactive=False)
        gr.Examples(examples=examples, inputs=[prompt])
        gr.Markdown(read_file("static/footer.md"))
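    # Two-step event chain: polish the prompt first, then run inference with the result.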
    run_button.click(
        fn=prepare,
        inputs=[prompt, is_polish_prompt],
        outputs=[polished_prompt, is_polish_prompt]
    ).then(
        fn=inference,
        inputs=[
            polished_prompt,  # feed the polished prompt produced by prepare()
            negative_prompt,
            width,   # width/height were missing, shifting every argument after them
            height,
            seed,
            randomize_seed,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[output_image, seed],
    )
if __name__ == "__main__":
    demo.launch(mcp_server=True)  # css is applied via gr.Blocks(css=...); launch() has no css argument