{
  "peft_type": "LORA",
  "r": 16,
  "lora_alpha": 16,
  "target_modules": [
    "down_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q",
    "down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k"
  ],
  "lora_dropout": 0.05,
  "bias": "none",
  "task_type": "IMAGE_GENERATION"
}