unamed666 (Linaqruf) committed a45c6ba (0 parents)

Duplicate from cagliostrolab/animagine-xl-3.0

Co-authored-by: Furqanil Taqwa <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,394 @@
---
license: other
license_name: faipl-1.0-sd
license_link: https://freedevproject.org/faipl-1.0-sd/
language:
- en
tags:
- text-to-image
- stable-diffusion
- safetensors
- stable-diffusion-xl
base_model: Linaqruf/animagine-xl-2.0
widget:
- text: 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality
  parameters:
    negative_prompt: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name
  example_title: 1girl
- text: 1boy, male focus, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck, masterpiece, best quality
  parameters:
    negative_prompt: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name
  example_title: 1boy
---

<style>
  .title-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh; /* Adjust this value to position the title vertically */
  }

  .title {
    font-size: 2.5em;
    text-align: center;
    color: #333;
    font-family: 'Helvetica Neue', sans-serif;
    text-transform: uppercase;
    letter-spacing: 0.1em;
    padding: 0.5em 0;
    background: transparent;
  }

  .title span {
    background: -webkit-linear-gradient(45deg, #7ed56f, #28b485);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
  }

  .custom-table {
    table-layout: fixed;
    width: 100%;
    border-collapse: collapse;
    margin-top: 2em;
  }

  .custom-table td {
    width: 50%;
    vertical-align: top;
    padding: 10px;
    box-shadow: 0px 0px 0px 0px rgba(0, 0, 0, 0.15);
  }

  .custom-image-container {
    position: relative;
    width: 100%;
    margin-bottom: 0em;
    overflow: hidden;
    border-radius: 10px;
    transition: transform .7s; /* Smooth transition for the container */
  }

  .custom-image-container:hover {
    transform: scale(1.05); /* Scale the container on hover */
  }

  .custom-image {
    width: 100%;
    height: auto;
    object-fit: cover;
    border-radius: 10px;
    transition: transform .7s;
    margin-bottom: 0em;
  }

  .nsfw-filter {
    filter: blur(8px); /* Apply a blur effect */
    transition: filter 0.3s ease; /* Smooth transition for the blur effect */
  }

  .custom-image-container:hover .nsfw-filter {
    filter: none; /* Remove the blur effect on hover */
  }

  .overlay {
    position: absolute;
    bottom: 0;
    left: 0;
    right: 0;
    color: white;
    width: 100%;
    height: 40%;
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
    font-size: 1vw;
    font-weight: bold;
    text-align: center;
    opacity: 0; /* Hidden until hover */
    background: linear-gradient(0deg, rgba(0, 0, 0, 0.8) 60%, rgba(0, 0, 0, 0) 100%);
    transition: opacity .5s;
  }

  .custom-image-container:hover .overlay {
    opacity: 1; /* Show the overlay on hover */
  }

  .overlay-text {
    background: linear-gradient(45deg, #7ed56f, #28b485);
    -webkit-background-clip: text;
    color: transparent; /* Fallback for browsers that do not support this effect */
    text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.7); /* Enhanced text shadow for better legibility */
  }

  .overlay-subtext {
    font-size: 0.75em;
    margin-top: 0.5em;
    font-style: italic;
  }

  .overlay,
  .overlay-subtext {
    text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.5);
  }
</style>

<h1 class="title">
  <span>Animagine XL 3.0</span>
</h1>
<table class="custom-table">
  <tr>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/ep_oy_NVSMQaU162w8Gwp.png" alt="sample1">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/FGFZgsqrhOcor5mid5eap.png" alt="sample4">
      </div>
    </td>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/EuvINvBsCKZQuspZHN-uF.png" alt="sample2">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/yyRqdHJfePKl7ytB6ieX9.png" alt="sample3">
      </div>
    </td>
    <td>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/2oWmFh728T0hzEkUtSmgy.png" alt="sample5">
      </div>
      <div class="custom-image-container">
        <img class="custom-image" src="https://cdn-uploads.huggingface.co/production/uploads/6365c8dbf31ef76df4042821/3yaZxWkUOenZSSNtGQR_3.png" alt="sample6">
      </div>
    </td>
  </tr>
</table>

**Animagine XL 3.0** is the latest version of this sophisticated open-source anime text-to-image model, building upon the capabilities of its predecessor, Animagine XL 2.0. Developed on the basis of Stable Diffusion XL, this iteration delivers superior image generation, with notable improvements in hand anatomy, more efficient tag ordering, and enhanced knowledge of anime concepts. Unlike the previous iteration, we focused on making the model learn concepts rather than a specific aesthetic.

## Model Details
- **Developed by**: [Cagliostro Research Lab](https://huggingface.co/cagliostrolab)
- **Model type**: Diffusion-based text-to-image generative model
- **Model Description**: Animagine XL 3.0 is engineered to generate high-quality anime images from textual prompts. It features enhanced hand anatomy, better concept understanding, and improved prompt interpretation, making it the most advanced model in its series.
- **License**: [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/)
- **Finetuned from model**: [Animagine XL 2.0](https://huggingface.co/Linaqruf/animagine-xl-2.0)

## Gradio & Colab Integration

Animagine XL 3.0 is accessible through user-friendly platforms such as Gradio and Google Colab:

- **Gradio Web UI**: [Open In Spaces](https://huggingface.co/spaces/Linaqruf/Animagine-XL)
- **Google Colab**: [Open In Colab](https://colab.research.google.com/#fileId=https%3A//huggingface.co/Linaqruf/animagine-xl/blob/main/Animagine_XL_demo.ipynb)

## 🧨 Diffusers Installation

To use Animagine XL 3.0, install the required libraries as follows:

```bash
pip install diffusers --upgrade
pip install transformers accelerate safetensors
```

Example script for generating images with Animagine XL 3.0:

```python
import torch
from diffusers import (
    StableDiffusionXLPipeline,
    EulerAncestralDiscreteScheduler,
    AutoencoderKL
)

# Load VAE component
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16
)

# Configure the pipeline
pipe = StableDiffusionXLPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.0",
    vae=vae,
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to('cuda')

# Define prompts and generate image
prompt = "1girl, arima kana, oshi no ko, solo, upper body, v, smile, looking at viewer, outdoors, night"
negative_prompt = "nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    width=832,
    height=1216,
    guidance_scale=7,
    num_inference_steps=28
).images[0]
```

## Usage Guidelines

### Tag Ordering

Prompting works a little differently in this iteration. For optimal results, it is recommended to follow the structured prompt template below, because the model was trained with prompts ordered this way:

```
1girl/1boy, character name, from what series, everything else in any order.
```

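For instance, the sample prompt used in the Diffusers example above follows this template:

```
1girl, arima kana, oshi no ko, solo, upper body, v, smile, looking at viewer, outdoors, night
```
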
## Special Tags

Like the previous iteration, this model was trained with special tags that steer the result toward a particular quality level, content rating, and the period in which the source posts were created. The model can still do the job without these special tags, but using them is recommended because they make the output easier to control.

### Quality Modifiers

| Quality Modifier | Score Criterion |
| ---------------- | --------------- |
| `masterpiece`    | > 150           |
| `best quality`   | 100 to 150      |
| `high quality`   | 75 to 100       |
| `medium quality` | 25 to 75        |
| `normal quality` | 0 to 25         |
| `low quality`    | -5 to 0         |
| `worst quality`  | < -5            |

### Rating Modifiers

| Rating Modifier                | Rating Criterion |
| ------------------------------ | ---------------- |
| `rating: general`              | General          |
| `rating: sensitive`            | Sensitive        |
| `rating: questionable`, `nsfw` | Questionable     |
| `rating: explicit`, `nsfw`     | Explicit         |

### Year Modifier

These tags help steer the result toward modern or vintage anime art styles, ranging from `newest` to `oldest`.

| Year Tag | Year Range   |
| -------- | ------------ |
| `newest` | 2022 to 2023 |
| `late`   | 2019 to 2021 |
| `mid`    | 2015 to 2018 |
| `early`  | 2011 to 2014 |
| `oldest` | 2005 to 2010 |

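As an illustration (this exact prompt is not from the original card), the special tags can be combined with the structured template, with the quality and rating tags placed up front as suggested in the Recommended Settings section below:

```
masterpiece, best quality, newest, rating: general, 1girl, arima kana, oshi no ko, solo, upper body, smile, looking at viewer
```
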
## Recommended Settings

To guide the model towards generating high-aesthetic images, use negative prompts like:

```
nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name
```

For higher quality outcomes, prepend prompts with:

```
masterpiece, best quality
```

However, be careful when using `masterpiece` and `best quality`: many of the high-scoring images in the dataset are NSFW. It is better to add `nsfw` and `rating: sensitive` to the negative prompt and `rating: general` to the positive prompt. It is also recommended to use a lower classifier-free guidance (CFG) scale of around 5-7, keep sampling steps below 30, and use Euler Ancestral (Euler a) as the sampler.

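Below is a minimal sketch of how these recommendations map onto the Diffusers pipeline configured earlier (the `pipe` object with the Euler Ancestral scheduler); the prompt text and the CFG value of 6 are illustrative choices within the recommended ranges, not values from the original card:

```python
# Illustrative only: reuses the `pipe` object from the Diffusers example above.
positive_prompt = (
    "masterpiece, best quality, rating: general, "
    "1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night"
)
negative_prompt = (
    "nsfw, rating: sensitive, lowres, bad anatomy, bad hands, text, error, "
    "missing fingers, extra digit, fewer digits, cropped, worst quality, "
    "low quality, normal quality, jpeg artifacts, signature, watermark, "
    "username, blurry, artist name"
)

image = pipe(
    positive_prompt,
    negative_prompt=negative_prompt,
    width=832,                 # a supported 13:19 portrait resolution
    height=1216,
    guidance_scale=6,          # recommended CFG range is roughly 5-7
    num_inference_steps=28,    # keep sampling steps below 30
).images[0]
image.save("animagine_xl_sample.png")
```
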
### Multi Aspect Resolution

This model supports generating images at the following dimensions:

| Dimensions    | Aspect Ratio    |
|---------------|-----------------|
| `1024 x 1024` | 1:1 Square      |
| `1152 x 896`  | 9:7             |
| `896 x 1152`  | 7:9             |
| `1216 x 832`  | 19:13           |
| `832 x 1216`  | 13:19           |
| `1344 x 768`  | 7:4 Horizontal  |
| `768 x 1344`  | 4:7 Vertical    |
| `1536 x 640`  | 12:5 Horizontal |
| `640 x 1536`  | 5:12 Vertical   |

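If you select resolutions programmatically, the table above can be expressed as a simple lookup; the dictionary name and the selection below are illustrative and not part of the original card:

```python
# Supported (width, height) pairs from the table above, keyed by aspect ratio.
SUPPORTED_RESOLUTIONS = {
    "1:1":   (1024, 1024),
    "9:7":   (1152, 896),
    "7:9":   (896, 1152),
    "19:13": (1216, 832),
    "13:19": (832, 1216),
    "7:4":   (1344, 768),
    "4:7":   (768, 1344),
    "12:5":  (1536, 640),
    "5:12":  (640, 1536),
}

# Example: pick the portrait ratio used in the Diffusers script above.
width, height = SUPPORTED_RESOLUTIONS["13:19"]
```
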
## Training and Hyperparameters

- **Animagine XL 3.0** was trained on 2x A100 80GB GPUs for 21 days, or over 500 GPU hours. The training process encompassed three stages:
  - Base:
    - **Feature Alignment Stage**: Used 1.2M images to acquaint the model with basic anime concepts.
    - **Refining UNet Stage**: Used a curated dataset of 2.5k samples to fine-tune only the UNet.
  - Curated:
    - **Aesthetic Tuning Stage**: Used a curated dataset of 3.5k high-quality samples to refine the model's art style.

### Hyperparameters

| Stage                       | Epochs | UNet Learning Rate | Train Text Encoder | Text Encoder Learning Rate | Batch Size | Mixed Precision | Noise Offset |
|-----------------------------|--------|--------------------|--------------------|----------------------------|------------|-----------------|--------------|
| **Feature Alignment Stage** | 10     | 7.5e-6             | True               | 3.75e-6                    | 48 x 2     | fp16            | N/A          |
| **Refining UNet Stage**     | 10     | 2e-6               | False              | N/A                        | 48         | fp16            | 0.0357       |
| **Aesthetic Tuning Stage**  | 10     | 1e-6               | False              | N/A                        | 48         | fp16            | 0.0357       |

## Model Comparison

### Training Config

| Configuration Item     | Animagine XL 2.0  | Animagine XL 3.0         |
|------------------------|-------------------|--------------------------|
| **GPU**                | A100 80G          | 2 x A100 80G             |
| **Dataset**            | 170k + 83k images | 1,271,990 + 3,500 images |
| **Shuffle Separator**  | N/A               | True                     |
| **Global Epochs**      | 20                | 20                       |
| **Learning Rate**      | 1e-6              | 7.5e-6                   |
| **Batch Size**         | 32                | 48 x 2                   |
| **Train Text Encoder** | True              | True                     |
| **Train Special Tags** | True              | True                     |
| **Image Resolution**   | 1024              | 1024                     |
| **Bucket Resolution**  | 2048 x 512        | 2048 x 512               |

Source code and training config are available here: https://github.com/cagliostrolab/sd-scripts/tree/main/notebook

## Limitations

While Animagine XL 3.0 represents a significant advancement in anime text-to-image generation, it is important to acknowledge its limitations in order to understand its best use cases and potential areas for future improvement.

1. **Concept Over Art Style Focus**: The model prioritizes learning concepts rather than specific art styles, which might lead to variations in aesthetic appeal compared to its predecessor.
2. **Non-Photorealistic Design**: Animagine XL 3.0 is not designed for generating photorealistic or realistic images, focusing instead on anime-style artwork.
3. **Anatomical Challenges**: Despite improvements, the model can still struggle with complex anatomical structures, particularly in dynamic poses, resulting in occasional inaccuracies.
4. **Dataset Limitations**: The training dataset of 1.2 million images may not encompass all anime characters or series, limiting the model's ability to generate lesser-known or newer characters.
5. **Natural Language Processing**: The model is not optimized for interpreting natural language, so it requires more structured and specific prompts for best results.
6. **NSFW Content Risk**: Using high-quality tags like `masterpiece` or `best quality` carries a risk of inadvertently generating NSFW content, due to the prevalence of such images in high-scoring training data.

These limitations highlight areas for potential refinement in future iterations and underscore the importance of careful prompt crafting for optimal results. Understanding these constraints can help users better navigate the model's capabilities and tailor their expectations accordingly.

## Acknowledgements

We extend our gratitude to the entire team and community that contributed to the development of Animagine XL 3.0, including our partners and collaborators who provided resources and insights crucial for this iteration.

- **Main:** For the open-source grant supporting our research, thank you so much.
- **Cagliostro Lab Collaborators:** For helping with quality checking during pretraining and curating the dataset during fine-tuning.
- **Kohya SS:** For providing the essential training script and merging our PR for `keep_tokens_separator`, the Shuffle Separator.
- **Camenduru Server Community:** For invaluable insights, support, and quality checking.
- **NovelAI:** For inspiring how we built and labeled the dataset using tag ordering.

## Collaborators

- [Linaqruf](https://huggingface.co/Linaqruf)
- [DamarJati](https://huggingface.co/DamarJati)
- [Asahina2K](https://huggingface.co/Asahina2K)
- [ItsMeBell](https://huggingface.co/ItsMeBell)
- [Zwicky18](https://huggingface.co/Zwicky18)
- [NekoFi](https://huggingface.co/NekoFi)
- [Scipius2121](https://huggingface.co/Scipius2121)
- [Raelina](https://huggingface.co/Raelina)

## License

Animagine XL 3.0 now uses the [Fair AI Public License 1.0-SD](https://freedevproject.org/faipl-1.0-sd/), which is compatible with Stable Diffusion models. Key points:

1. **Modification Sharing:** If you modify Animagine XL 3.0, you must share both your changes and the original license.
2. **Source Code Accessibility:** If your modified version is network-accessible, provide a way (such as a download link) for others to get the source code. This applies to derived models too.
3. **Distribution Terms:** Any distribution must be under this license or another with similar rules.
4. **Compliance:** Non-compliance must be fixed within 30 days to avoid license termination, emphasizing transparency and adherence to open-source values.

The choice of this license aims to keep Animagine XL 3.0 open and modifiable, in line with the spirit of the open-source community. It protects contributors and users, encouraging a collaborative, ethical open-source ecosystem. This ensures the model not only benefits from communal input but also respects open-source development freedoms.

animagine-xl-3.0.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766
size 6938218610
model_index.json ADDED
@@ -0,0 +1,33 @@
{
  "_class_name": "StableDiffusionXLPipeline",
  "_diffusers_version": "0.23.0",
  "force_zeros_for_empty_prompt": true,
  "scheduler": [
    "diffusers",
    "EulerDiscreteScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "text_encoder_2": [
    "transformers",
    "CLIPTextModelWithProjection"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "tokenizer_2": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
{
  "_class_name": "EulerDiscreteScheduler",
  "_diffusers_version": "0.23.0",
  "beta_end": 0.012,
  "beta_schedule": "scaled_linear",
  "beta_start": 0.00085,
  "clip_sample": false,
  "interpolation_type": "linear",
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
  "sample_max_value": 1.0,
  "set_alpha_to_one": false,
  "skip_prk_steps": true,
  "steps_offset": 1,
  "timestep_spacing": "leading",
  "trained_betas": null,
  "use_karras_sigmas": false
}
text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
{
  "architectures": [
    "CLIPTextModel"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "quick_gelu",
  "hidden_size": 768,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "projection_dim": 768,
  "torch_dtype": "float16",
  "transformers_version": "4.35.0",
  "vocab_size": 49408
}
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cace1711b2bf8d9e021086ed4f4320950f0f0bd77e53ff4dad73dd7d1b7fae8e
size 246144152
text_encoder_2/config.json ADDED
@@ -0,0 +1,24 @@
{
  "architectures": [
    "CLIPTextModelWithProjection"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_size": 1280,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 20,
  "num_hidden_layers": 32,
  "pad_token_id": 1,
  "projection_dim": 1280,
  "torch_dtype": "float16",
  "transformers_version": "4.35.0",
  "vocab_size": 49408
}
text_encoder_2/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c93832378a008cdcb4ed174b8ebf7ea3f52abb4a87a69475d374f2d23a60fa29
size 1389382176
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,30 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "49406": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49407": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
}
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "!",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "!",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49406": {
      "content": "<|startoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "49407": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|startoftext|>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": true,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 77,
  "pad_token": "!",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": "<|endoftext|>"
}
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
unet/config.json ADDED
@@ -0,0 +1,72 @@
{
  "_class_name": "UNet2DConditionModel",
  "_diffusers_version": "0.23.0",
  "act_fn": "silu",
  "addition_embed_type": "text_time",
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": 256,
  "attention_head_dim": [
    5,
    10,
    20
  ],
  "attention_type": "default",
  "block_out_channels": [
    320,
    640,
    1280
  ],
  "center_input_sample": false,
  "class_embed_type": null,
  "class_embeddings_concat": false,
  "conv_in_kernel": 3,
  "conv_out_kernel": 3,
  "cross_attention_dim": 2048,
  "cross_attention_norm": null,
  "down_block_types": [
    "DownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D"
  ],
  "downsample_padding": 1,
  "dropout": 0.0,
  "dual_cross_attention": false,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_only_cross_attention": null,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "out_channels": 4,
  "projection_class_embeddings_input_dim": 2816,
  "resnet_out_scale_factor": 1.0,
  "resnet_skip_time_act": false,
  "resnet_time_scale_shift": "default",
  "reverse_transformer_layers_per_block": null,
  "sample_size": 128,
  "time_cond_proj_dim": null,
  "time_embedding_act_fn": null,
  "time_embedding_dim": null,
  "time_embedding_type": "positional",
  "timestep_post_act": null,
  "transformer_layers_per_block": [
    1,
    2,
    10
  ],
  "up_block_types": [
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D",
    "UpBlock2D"
  ],
  "upcast_attention": null,
  "use_linear_projection": true
}
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5e255166e72f0bb8aabb77e03aceaa72d72915da47da342f965874cce036992
size 5135149760
vae/config.json ADDED
@@ -0,0 +1,32 @@
{
  "_class_name": "AutoencoderKL",
  "_diffusers_version": "0.23.0",
  "_name_or_path": "madebyollin/sdxl-vae-fp16-fix",
  "act_fn": "silu",
  "block_out_channels": [
    128,
    256,
    512,
    512
  ],
  "down_block_types": [
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D",
    "DownEncoderBlock2D"
  ],
  "force_upcast": false,
  "in_channels": 3,
  "latent_channels": 4,
  "layers_per_block": 2,
  "norm_num_groups": 32,
  "out_channels": 3,
  "sample_size": 512,
  "scaling_factor": 0.13025,
  "up_block_types": [
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D",
    "UpDecoderBlock2D"
  ]
}
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48
size 167335342