Official art, ancient india landscapes, (streets), (flowers), beautiful landscapes, masterpieces, high quality, exquisite graphics, high detail, epic landscapes, colorful, intricate details, wide angle lenses
2024-01-30
View Translation
Like
Reply
M
Manish Gawande
Official art, ancient india landscapes, (streets), (flowers), beautiful landscapes, masterpieces, high quality, exquisite graphics, high detail, epic landscapes, colorful, intricate details, wide angle lenses
2024-01-30
View Translation
Like
Reply
Y
ytvoziury
Um desenho estilo mangá preto e branco no qual uma mulher alta com cabelos ondulados segura um bebê em um quarto amplo, com janelas que mostram uma floresta; no quarto há uma escrivaninha com um cristal gravado com letras desconhecidas que emitia uma luz suave, projetando figuras no teto
# SDXL-specific trainer flags (kohya-ss sd-scripts).
[sdxl_arguments]
# Pre-compute and cache text-encoder outputs to save VRAM; requires captions
# to stay fixed, which is why shuffle_caption is disabled below.
cache_text_encoder_outputs = true
# Keep the VAE out of half precision (the stock SDXL VAE is known to be
# unstable in fp16).
no_half_vae = true
min_timestep = 0
max_timestep = 1000  # train over the full diffusion timestep range
shuffle_caption = false  # must stay false while text-encoder outputs are cached
lowram = true  # low-RAM loading mode for constrained hosts (paths suggest Colab)
# Base checkpoint and external VAE.
[model_arguments]
pretrained_model_name_or_path = "stabilityai/stable-diffusion-xl-base-1.0"
# Separate SDXL VAE weights, used instead of the VAE baked into the base model.
vae = "/content/vae/sdxl_vae.safetensors"
# Dataset configuration (metadata-JSON style, not folder-repeat style).
[dataset_arguments]
debug_dataset = false
# meta_lat.json — NOTE(review): the "_lat" name suggests precomputed latents;
# confirm the bucketing/latent-caching step was run before training.
in_json = "/content/LoRA/meta_lat.json"
train_data_dir = "/content/drive/MyDrive/lora_training/datasets/Stylized_Setting_SDXL"
dataset_repeats = 2  # each image is seen twice per epoch
# Keep the first caption token in place; only relevant if caption
# shuffling/dropping is enabled (it is off in [sdxl_arguments]).
keep_tokens = 1
resolution = "1024,1024"  # native SDXL training resolution
color_aug = false  # must stay off when latents are cached ahead of time
token_warmup_min = 1
token_warmup_step = 0  # 0 disables token-length warmup
# Core training loop settings.
[training_arguments]
output_dir = "/content/drive/MyDrive/kohya-trainer/output/Stylized_Setting_SDXL"
output_name = "Stylized_Setting_SDXL"
save_precision = "fp16"  # saved LoRA weights cast to fp16
save_every_n_epochs = 1  # checkpoint after every epoch
train_batch_size = 4
# 225 allows long captions (3x the 75-token CLIP window).
max_token_length = 225
mem_eff_attn = false
sdpa = true  # use PyTorch scaled-dot-product attention...
xformers = false  # ...instead of xformers (the two are mutually exclusive)
max_train_epochs = 10
max_data_loader_n_workers = 8
persistent_data_loader_workers = true  # avoid worker respawn cost between epochs
gradient_checkpointing = true  # trade compute for VRAM
gradient_accumulation_steps = 1  # no accumulation; effective batch = 4
mixed_precision = "fp16"
# TensorBoard logging.
[logging_arguments]
log_with = "tensorboard"
logging_dir = "/content/LoRA/logs"
log_prefix = "Stylized_Setting_SDXL"  # prefix for per-run log subdirectories
# Sampler used for in-training sample image generation.
[sample_prompt_arguments]
sample_sampler = "euler_a"
# Output serialization format.
[saving_arguments]
save_model_as = "safetensors"
# Optimizer configuration.
[optimizer_arguments]
# Adafactor in "manual LR" mode: relative_step and scale_parameter are
# disabled via optimizer_args so the fixed learning_rate below is actually
# used (otherwise Adafactor computes its own schedule and ignores it).
optimizer_type = "AdaFactor"
learning_rate = 0.0001
max_grad_norm = 0  # 0 disables gradient clipping
optimizer_args = ["scale_parameter=False", "relative_step=False", "warmup_init=False"]
# warmup_init is off in optimizer_args, so warmup is handled by the external
# scheduler instead.
lr_scheduler = "constant_with_warmup"
lr_warmup_steps = 100
# LoRA network settings.
[additional_network_arguments]
no_metadata = false  # embed training metadata in the saved LoRA file
network_module = "networks.lora"
network_dim = 32  # LoRA rank
network_alpha = 16  # alpha = dim/2; scales the effective strength of LoRA updates
network_args = []  # no extra module-specific arguments
# Train the U-Net only; text encoders stay frozen (consistent with
# cache_text_encoder_outputs in [sdxl_arguments]).
network_train_unet_only = true