from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

class Tasks(Enum):
    # Each entry: (benchmark key in the dataset, metric name, display column name)
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")
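
# --- Usage sketch (illustrative addition, not from the original file) ---
# Leaderboard code commonly iterates the Tasks enum to derive its column
# layout; TASK_COLUMNS is a hypothetical name used here for illustration.
TASK_COLUMNS = [task.value.col_name for task in Tasks]  # ["ANLI", "LogiQA"]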

NUM_FEWSHOT = 0

TITLE = """<h1 align="center" id="space-title">UnlearnDiffAtk Benchmark</h1>"""

SUB_TITLE = """<h2 align="center" id="space-title">An effective and efficient adversarial prompt generation approach for diffusion models</h2>"""

INTRODUCTION_TEXT = """
This benchmark evaluates the robustness of safety-driven unlearned diffusion models (DMs)
(i.e., DMs after unlearning undesirable concepts, styles, or objects) across a variety of tasks. For more details, please visit the [project page](https://www.optml-group.com/posts/mu_attack),
check the [code](https://github.com/OPTML-Group/Diffusion-MU-Attack), and read the [paper](https://arxiv.org/abs/2310.11868).\\
Demo of our offensive method: [UnlearnDiffAtk](https://huggingface.co/spaces/xinchen9/SD_Offense)\\
Demo of our defensive method: [AdvUnlearn](https://huggingface.co/spaces/xinchen9/SD_Defense)
"""

LLM_BENCHMARKS_TEXT = """
For more details on the unlearning methods used in this benchmark:\\
[Erasing Concepts from Diffusion Models (ESD)](https://github.com/rohitgandikota/erasing)\\
[Forget-Me-Not: Learning to Forget in Text-to-Image Diffusion Models (FMN)](https://github.com/SHI-Labs/Forget-Me-Not)\\
[Concept Ablation (AC)](https://github.com/nupurkmr9/concept-ablation)\\
[Unified Concept Editing in Diffusion Models (UCE)](https://github.com/rohitgandikota/unified-concept-editing)\\
[Safe Latent Diffusion (SLD)](https://github.com/ml-research/safe-latent-diffusion)
"""

EVALUATION_QUEUE_TEXT = """
Evaluation metrics:\\
(1) Attack success rate (ASR), reported in two variants: the pre-attack success rate (pre-ASR) and the post-attack success rate (post-ASR). Both are percentages.\\
(2) Fréchet inception distance (FID), reported in two variants: the FID of images generated by the base model (Pre-FID) and the FID of images generated by the unlearned models (Post-FID).\\
(3) CLIP (Contrastive Language-Image Pretraining) score, an established measure of an image's proximity to a text prompt.\\
A value of -1 means no data has been reported so far.
"""
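
# --- Metric sketch (illustrative addition, not the benchmark's code) ---
# ASR is assumed here to be the share of adversarial prompts that still make
# the model generate the erased concept, expressed as a percentage;
# `attack_success_rate` is a hypothetical helper, not part of this repo.
def attack_success_rate(num_successes: int, num_prompts: int) -> float:
    """Return the attack success rate as a percentage."""
    if num_prompts <= 0:
        raise ValueError("num_prompts must be positive")
    return 100.0 * num_successes / num_prompts
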
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@article{zhang2023generate,
  title={To Generate or Not? Safety-Driven Unlearned Diffusion Models Are Still Easy To Generate Unsafe Images... For Now},
  author={Zhang, Yimeng and Jia, Jinghan and Chen, Xin and Chen, Aochuan and Zhang, Yihua and Liu, Jiancheng and Ding, Ke and Liu, Sijia},
  journal={arXiv preprint arXiv:2310.11868},
  year={2023}
}

@article{zhang2024defensive,
  title={Defensive Unlearning with Adversarial Training for Robust Concept Erasure in Diffusion Models},
  author={Zhang, Yimeng and Chen, Xin and Jia, Jinghan and Zhang, Yihua and Fan, Chongyu and Liu, Jiancheng and Hong, Mingyi and Ding, Ke and Liu, Sijia},
  journal={arXiv preprint arXiv:2405.15234},
  year={2024}
}
"""