Kohaku-Blueleaf committed on
Commit a1372fa
1 Parent(s): cb688ac
Files changed (5)
  1. .gitignore +162 -0
  2. app.py +302 -0
  3. diff.py +120 -0
  4. meta.py +54 -0
  5. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,162 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
app.py ADDED
@@ -0,0 +1,302 @@
+ import sys
+ import gradio as gr
+
+ import re
+ import random
+ from time import time
+
+ import torch
+ from transformers import set_seed
+ if sys.platform == "win32":
+     # dev env on Windows; @spaces.GPU would cause problems locally
+     def GPU(func):
+         return func
+ else:
+     from spaces import GPU
+
+ import kgen.models as models
+ import kgen.executor.titpop as titpop
+ from kgen.formatter import seperate_tags, apply_format
+ from kgen.generate import generate
+
+ from diff import load_model, encode_prompts
+ from meta import DEFAULT_NEGATIVE_PROMPT
+
+
+ sdxl_pipe = load_model()
+
+ models.load_model(
+     "KBlueLeaf/TITPOP-200M-dev",
+     device="cuda",
+     subfolder="dan-cc-coyo_epoch2",
+ )
+ generate(max_new_tokens=4)  # short warm-up generation after loading the model
+
+
+ DEFAULT_FORMAT = """<|special|>, <|characters|>, <|copyrights|>,
+ <|artist|>,
+
+ <|general|>,
+
+ <|extended|>.
+
+ <|quality|>, <|meta|>, <|rating|>
+ """.strip()
+ DEFAULT_TAGS = """
+ 1girl,
+ ningen mame, ciloranko,
+ solo, dragon girl,
+ masterpiece, absurdres, safe, newest
+ """.strip()
+ DEFAULT_NL = """
+ An illustration of a girl
+ """.strip()
+
+
+ def format_time(timing):
+     total = timing["total"]
+     generate_pass = timing["generate_pass"]
+
+     result = ""
+
+     result += f"""
+ ### Process Time
+ | Total | {total:5.2f} sec / {generate_pass:5} Passes | {generate_pass/total:7.2f} Passes Per Second|
+ |-|-|-|
+ """
+     if "generated_tokens" in timing:
+         total_generated_tokens = timing["generated_tokens"]
+         total_input_tokens = timing["input_tokens"]
+     if "generated_tokens" in timing and "total_sampling" in timing:
+         sampling_time = timing["total_sampling"] / 1000
+         process_time = timing["prompt_process"] / 1000
+         model_time = timing["total_eval"] / 1000
+
+         result += f"""| Process | {process_time:5.2f} sec / {total_input_tokens:5} Tokens | {total_input_tokens/process_time:7.2f} Tokens Per Second|
+ | Sampling | {sampling_time:5.2f} sec / {total_generated_tokens:5} Tokens | {total_generated_tokens/sampling_time:7.2f} Tokens Per Second|
+ | Eval | {model_time:5.2f} sec / {total_generated_tokens:5} Tokens | {total_generated_tokens/model_time:7.2f} Tokens Per Second|
+ """
+
+     if "generated_tokens" in timing:
+         result += f"""
+ ### Processed Tokens:
+ * {total_input_tokens:} Input Tokens
+ * {total_generated_tokens:} Output Tokens
+ """
+     return result
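For reference, format_time consumes the timing dict yielded by titpop_runner_generator below; a minimal sketch with hypothetical values (the keys mirror the ones read above; sampling/process/eval times are in milliseconds):

    # hypothetical timing payload, values invented for illustration
    timing = {
        "total": 2.5,             # seconds, set by the caller
        "generate_pass": 4,
        "generated_tokens": 128,
        "input_tokens": 96,
        "total_sampling": 250.0,  # milliseconds
        "prompt_process": 180.0,  # milliseconds
        "total_eval": 1400.0,     # milliseconds
    }
    print(format_time(timing))  # renders the markdown tables shown in the UI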
+
+
+ # NOTE: shadows the `generate` imported from kgen.generate
+ # (the imported one is only used for the warm-up call above)
+ @GPU
+ @torch.no_grad()
+ def generate(
+     tags,
+     nl_prompt,
+     black_list,
+     temp,
+     target_length,
+     top_p,
+     min_p,
+     top_k,
+     seed,
+     escape_brackets,
+ ):
+     titpop.BAN_TAGS = [t.strip() for t in black_list.split(",") if t.strip()]
+     generation_setting = {
+         "seed": seed,
+         "temperature": temp,
+         "top_p": top_p,
+         "min_p": min_p,
+         "top_k": top_k,
+     }
+     inputs = seperate_tags(tags.split(","))
+     if nl_prompt:
+         if "<|extended|>" in DEFAULT_FORMAT:
+             inputs["extended"] = nl_prompt
+         elif "<|generated|>" in DEFAULT_FORMAT:
+             inputs["generated"] = nl_prompt
+     input_prompt = apply_format(inputs, DEFAULT_FORMAT)
+     if escape_brackets:
+         input_prompt = re.sub(r"([()\[\]])", r"\\\1", input_prompt)
+
+     meta, operations, general, nl_prompt = titpop.parse_titpop_request(
+         seperate_tags(tags.split(",")),
+         nl_prompt,
+         tag_length_target=target_length,
+         generate_extra_nl_prompt="<|generated|>" in DEFAULT_FORMAT or not nl_prompt,
+     )
+     t0 = time()
+     for result, timing in titpop.titpop_runner_generator(
+         meta, operations, general, nl_prompt, **generation_setting
+     ):
+         result = apply_format(result, DEFAULT_FORMAT)
+         if escape_brackets:
+             result = re.sub(r"([()\[\]])", r"\\\1", result)
+         timing["total"] = time() - t0
+         yield result, input_prompt, format_time(timing)
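Because generate is a generator that yields after each refinement pass, Gradio streams every intermediate result to the outputs bound in submit.click further down. A minimal standalone sketch of the same streaming pattern:

    import gradio as gr

    def stream_words(text):
        # Gradio treats generator functions as streaming: each yield
        # replaces the current value of the output component
        acc = ""
        for word in text.split():
            acc += word + " "
            yield acc

    gr.Interface(stream_words, gr.Textbox(), gr.Textbox()).launch()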
+
+
+ @GPU
+ @torch.no_grad()
+ def generate_image(
+     seed,
+     prompt,
+     prompt2,
+ ):
+     torch.cuda.empty_cache()
+     prompt_embeds, negative_prompt_embeds, pooled_embeds2, neg_pooled_embeds2 = (
+         encode_prompts(sdxl_pipe, prompt, DEFAULT_NEGATIVE_PROMPT)
+     )
+     set_seed(seed)
+     result = sdxl_pipe(
+         prompt_embeds=prompt_embeds,
+         negative_prompt_embeds=negative_prompt_embeds,
+         pooled_prompt_embeds=pooled_embeds2,
+         negative_pooled_prompt_embeds=neg_pooled_embeds2,
+         num_inference_steps=24,
+         width=1024,
+         height=1024,
+         guidance_scale=6.0,
+     ).images[0]
+     prompt_embeds, negative_prompt_embeds, pooled_embeds2, neg_pooled_embeds2 = (
+         encode_prompts(sdxl_pipe, prompt2, DEFAULT_NEGATIVE_PROMPT)
+     )
+     set_seed(seed)
+     result2 = sdxl_pipe(
+         prompt_embeds=prompt_embeds,
+         negative_prompt_embeds=negative_prompt_embeds,
+         pooled_prompt_embeds=pooled_embeds2,
+         negative_pooled_prompt_embeds=neg_pooled_embeds2,
+         num_inference_steps=24,
+         width=1024,
+         height=1024,
+         guidance_scale=6.0,
+     ).images[0]
+     torch.cuda.empty_cache()
+     # prompt2 (the original input prompt) is rendered second but returned
+     # first, so img1/img2 in the UI line up as Original/Generated
+     return result2, result
+
+
+ if __name__ == "__main__":
+     with gr.Blocks(theme=gr.themes.Soft()) as demo:
+         gr.Markdown("""# TITPOP DEMO""")
+         with gr.Accordion("Introduction and Instructions", open=False):
+             gr.Markdown(
+                 """
+ ### What is this:
+ TITPOP
+
+ **The implementation is somewhat inefficient, so image generation may be slower than expected.**
+ """
+             )
+         with gr.Row():
+             with gr.Column(scale=5):
+                 with gr.Row():
+                     with gr.Column(scale=3):
+                         tags_input = gr.TextArea(
+                             label="Danbooru Tags",
+                             lines=6,
+                             show_copy_button=True,
+                             interactive=True,
+                             value=DEFAULT_TAGS,
+                             placeholder="Enter Danbooru tags here",
+                         )
+                         nl_prompt_input = gr.Textbox(
+                             label="Natural Language Prompt",
+                             lines=6,
+                             show_copy_button=True,
+                             interactive=True,
+                             value=DEFAULT_NL,
+                             placeholder="Enter Natural Language Prompt here",
+                         )
+                         black_list = gr.TextArea(
+                             label="Black List (separated by commas)",
+                             lines=4,
+                             interactive=True,
+                             value="monochrome",
+                             placeholder="Enter tag/nl black list here",
+                         )
+                     with gr.Column(scale=2):
+                         target_length = gr.Dropdown(
+                             label="Target Length",
+                             choices=["very_short", "short", "long", "very_long"],
+                             value="short",
+                         )
+                         temp = gr.Slider(
+                             label="Temp",
+                             minimum=0.0,
+                             maximum=1.5,
+                             value=0.5,
+                             step=0.05,
+                         )
+                         top_p = gr.Slider(
+                             label="Top P",
+                             minimum=0.0,
+                             maximum=1.0,
+                             value=0.95,
+                             step=0.05,
+                         )
+                         min_p = gr.Slider(
+                             label="Min P",
+                             minimum=0.0,
+                             maximum=0.2,
+                             value=0.05,
+                             step=0.01,
+                         )
+                         top_k = gr.Slider(
+                             label="Top K", minimum=0, maximum=120, value=60, step=1
+                         )
+                 with gr.Row():
+                     seed = gr.Number(
+                         label="Seed",
+                         minimum=0,
+                         maximum=2147483647,
+                         value=20090220,
+                         step=1,
+                     )
+                     escape_brackets = gr.Checkbox(
+                         label="Escape Brackets", value=False
+                     )
+                 submit = gr.Button("TITPOP!", variant="primary")
+                 with gr.Accordion("Speed statistics", open=False):
+                     cost_time = gr.Markdown()
+             with gr.Column(scale=5):
+                 result = gr.TextArea(
+                     label="Result", lines=8, show_copy_button=True, interactive=False
+                 )
+                 input_prompt = gr.Textbox(
+                     label="Input Prompt", lines=1, interactive=False, visible=False
+                 )
+                 gen_img = gr.Button("Generate Image from Result", variant="primary")
+         with gr.Row():
+             with gr.Column():
+                 img1 = gr.Image(label="Original Prompt", interactive=False)
+             with gr.Column():
+                 img2 = gr.Image(label="Generated Prompt", interactive=False)
+         submit.click(
+             generate,
+             [
+                 tags_input,
+                 nl_prompt_input,
+                 black_list,
+                 temp,
+                 target_length,
+                 top_p,
+                 min_p,
+                 top_k,
+                 seed,
+                 escape_brackets,
+             ],
+             [
+                 result,
+                 input_prompt,
+                 cost_time,
+             ],
+             queue=True,
+         )
+         gen_img.click(
+             generate_image,
+             [seed, result, input_prompt],
+             [img1, img2],
+             queue=True,
+         )
+
+     demo.launch()
diff.py ADDED
@@ -0,0 +1,120 @@
+ from functools import partial
+
+ import torch
+ from diffusers import StableDiffusionXLKDiffusionPipeline
+ from k_diffusion.sampling import get_sigmas_polyexponential
+ from k_diffusion.sampling import sample_dpmpp_2m_sde
+
+ torch.set_float32_matmul_precision("medium")
+
+
+ def set_timesteps_polyexponential(self, orig_sigmas, num_inference_steps, device=None):
+     self.num_inference_steps = num_inference_steps
+
+     self.sigmas = get_sigmas_polyexponential(
+         num_inference_steps + 1,
+         sigma_min=orig_sigmas[-2],
+         sigma_max=orig_sigmas[0],
+         rho=0.666666,
+         device=device or "cpu",
+     )
+     self.sigmas = torch.cat([self.sigmas[:-2], self.sigmas.new_zeros([1])])
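This swaps the scheduler's default step spacing for a polyexponential ramp in log-sigma. A sketch of the schedule get_sigmas_polyexponential produces, assuming the k-diffusion implementation (verify against your installed version):

    import math
    import torch

    def polyexp_sigmas(n, sigma_min, sigma_max, rho=0.666666):
        # sigma(t) = exp(t**rho * (ln s_max - ln s_min) + ln s_min),
        # with t falling linearly from 1 to 0; k-diffusion then appends
        # a terminal sigma of 0
        t = torch.linspace(1, 0, n)
        log_min, log_max = math.log(sigma_min), math.log(sigma_max)
        return torch.exp(t**rho * (log_max - log_min) + log_min)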
+
+
+ def model_forward(k_diffusion_model: torch.nn.Module):
+     orig_forward = k_diffusion_model.forward
+
+     def forward(*args, **kwargs):
+         with torch.autocast(device_type="cuda", dtype=torch.float16):
+             result = orig_forward(*args, **kwargs)
+         return result.float()
+
+     return forward
+
+
+ def load_model(model_id="KBlueLeaf/Kohaku-XL-Zeta", device="cuda"):
+     pipe: StableDiffusionXLKDiffusionPipeline
+     pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
+         model_id, torch_dtype=torch.float16
+     ).to(device)
+     pipe.scheduler.set_timesteps = partial(
+         set_timesteps_polyexponential, pipe.scheduler, pipe.scheduler.sigmas
+     )
+     pipe.sampler = partial(sample_dpmpp_2m_sde, eta=0.35, solver_type="heun")
+     pipe.k_diffusion_model.forward = model_forward(pipe.k_diffusion_model)
+     return pipe
+
+
+ def encode_prompts(pipe: StableDiffusionXLKDiffusionPipeline, prompt, neg_prompt):
+     max_length = pipe.tokenizer.model_max_length
+
+     input_ids = pipe.tokenizer(prompt, return_tensors="pt").input_ids.to("cuda")
+     input_ids2 = pipe.tokenizer_2(prompt, return_tensors="pt").input_ids.to("cuda")
+
+     negative_ids = pipe.tokenizer(
+         neg_prompt,
+         truncation=False,
+         padding="max_length",
+         max_length=input_ids.shape[-1],
+         return_tensors="pt",
+     ).input_ids.to("cuda")
+     negative_ids2 = pipe.tokenizer_2(
+         neg_prompt,
+         truncation=False,
+         padding="max_length",
+         max_length=input_ids.shape[-1],
+         return_tensors="pt",
+     ).input_ids.to("cuda")
+
+     # if the negative prompt came out longer, re-pad the positive ids to match
+     if negative_ids.shape[-1] > input_ids.shape[-1]:
+         input_ids = pipe.tokenizer(
+             prompt,
+             truncation=False,
+             padding="max_length",
+             max_length=negative_ids.shape[-1],
+             return_tensors="pt",
+         ).input_ids.to("cuda")
+         input_ids2 = pipe.tokenizer_2(
+             prompt,
+             truncation=False,
+             padding="max_length",
+             max_length=negative_ids.shape[-1],
+             return_tensors="pt",
+         ).input_ids.to("cuda")
+
+     concat_embeds = []
+     neg_embeds = []
+     for i in range(0, input_ids.shape[-1], max_length):
+         concat_embeds.append(pipe.text_encoder(input_ids[:, i : i + max_length])[0])
+         neg_embeds.append(pipe.text_encoder(negative_ids[:, i : i + max_length])[0])
+
+     concat_embeds2 = []
+     neg_embeds2 = []
+     pooled_embeds2 = []
+     neg_pooled_embeds2 = []
+     for i in range(0, input_ids.shape[-1], max_length):
+         hidden_states = pipe.text_encoder_2(
+             input_ids2[:, i : i + max_length], output_hidden_states=True
+         )
+         concat_embeds2.append(hidden_states.hidden_states[-2])
+         pooled_embeds2.append(hidden_states[0])
+
+         hidden_states = pipe.text_encoder_2(
+             negative_ids2[:, i : i + max_length], output_hidden_states=True
+         )
+         neg_embeds2.append(hidden_states.hidden_states[-2])
+         neg_pooled_embeds2.append(hidden_states[0])
+
+     prompt_embeds = torch.cat(concat_embeds, dim=1)
+     negative_prompt_embeds = torch.cat(neg_embeds, dim=1)
+     prompt_embeds2 = torch.cat(concat_embeds2, dim=1)
+     negative_prompt_embeds2 = torch.cat(neg_embeds2, dim=1)
+     prompt_embeds = torch.cat([prompt_embeds, prompt_embeds2], dim=-1)
+     negative_prompt_embeds = torch.cat(
+         [negative_prompt_embeds, negative_prompt_embeds2], dim=-1
+     )
+
+     pooled_embeds2 = torch.mean(torch.stack(pooled_embeds2, dim=0), dim=0)
+     neg_pooled_embeds2 = torch.mean(torch.stack(neg_pooled_embeds2, dim=0), dim=0)
+
+     return prompt_embeds, negative_prompt_embeds, pooled_embeds2, neg_pooled_embeds2
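encode_prompts works around CLIP's 77-token window: prompt and negative prompt are padded to a common length, encoded in max_length-sized chunks, and the chunk embeddings are concatenated along the sequence dimension (pooled embeddings are averaged across chunks). A hypothetical usage, mirroring generate_image in app.py (the prompt text is invented for illustration):

    pipe = load_model()
    prompt_embeds, neg_embeds, pooled, neg_pooled = encode_prompts(
        pipe, "1girl, solo, <a very long tag list>", "low quality, worst quality"
    )
    image = pipe(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=neg_embeds,
        pooled_prompt_embeds=pooled,
        negative_pooled_prompt_embeds=neg_pooled,
        num_inference_steps=24,
        guidance_scale=6.0,
    ).images[0]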
meta.py ADDED
@@ -0,0 +1,54 @@
+ DEFAULT_STYLE_LIST = {
+     "style 1": "ask (askzy), torino aqua, migolu",
+     "style 2": "azuuru, torino aqua, kedama milk, fuzichoco, ask (askzy), chen bin, atdan, hito, mignon",
+     "style 3": "nou (nounknown), shikimi (yurakuru), namiki itsuki, lemon89h, satsuki (miicat), chon (chon33v), omutatsu, mochizuki kei",
+     "style 4": "ciloranko, maccha (mochancc), lobelia (saclia), migolu, ask (askzy), wanke, jiu ye sang, rumoon, mizumi zumi",
+     "style 5": "reoen, alchemaniac, rella, watercolor (medium)",
+     "style 6": "ogipote, misu kasumi, fuzichoco, ciloranko, ninjin nouka, ningen mame, ask (askzy), kita (kitairoha), maccha (mochancc)",
+     "no style": "",
+ }
+
+ MODEL_DEFAULT_QUALITY_LIST = {
+     "KBlueLeaf/Kohaku-XL-Zeta": "masterpiece, newest, absurdres",
+     "KBlueLeaf/Kohaku-XL-Epsilon-rev2": "masterpiece, newest, absurdres",
+     "KBlueLeaf/Kohaku-XL-Epsilon": "masterpiece, newest, absurdres, safe",
+     "cagliostrolab/animagine-xl-3.1": "masterpiece, newest, very aesthetic, absurdres, safe",
+ }
+
+ MODEL_FORMAT_LIST = {
+     "KBlueLeaf/Kohaku-XL-Zeta": """<|special|>,
+ <|characters|>, <|copyrights|>,
+ <|artist|>,
+
+ <|general|>,
+
+ <|quality|>, <|meta|>, <|rating|>""",
+     "KBlueLeaf/Kohaku-XL-Epsilon-rev2": """<|special|>,
+ <|characters|>, <|copyrights|>,
+ <|artist|>,
+
+ <|general|>,
+
+ <|quality|>, <|meta|>, <|rating|>""",
+     "KBlueLeaf/Kohaku-XL-Epsilon": """<|special|>,
+ <|characters|>, <|copyrights|>,
+ <|artist|>,
+
+ <|general|>,
+
+ <|quality|>, <|meta|>, <|rating|>""",
+     "cagliostrolab/animagine-xl-3.1": """<|special|>,
+ <|characters|>, <|copyrights|>,
+ <|artist|>,
+
+ <|general|>,
+
+ <|quality|>, <|meta|>, <|rating|>""",
+ }
+
+
+ DEFAULT_NEGATIVE_PROMPT = """
+ low quality, worst quality, normal quality, text, signature, jpeg artifacts,
+ bad anatomy, old, early, mini skirt, nsfw, chibi, multiple girls, multiple boys,
+ multiple tails, multiple views, copyright name, watermark, artist name, signature
+ """
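These tables are keyed by model id; in this commit app.py imports only DEFAULT_NEGATIVE_PROMPT. A hypothetical lookup for the other entries:

    # hypothetical lookup; not yet wired into app.py in this commit
    model_id = "KBlueLeaf/Kohaku-XL-Zeta"
    format_template = MODEL_FORMAT_LIST[model_id]
    quality_tags = MODEL_DEFAULT_QUALITY_LIST[model_id]
    style_tags = DEFAULT_STYLE_LIST["style 1"]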
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ git+https://${GITHUB_TOKEN}@github.com/KohakuBlueleaf/TITPOP-KGen@titpop
+ gradio
+ spaces