This video is a step-by-step tutorial to fine-tune Stable Diffusion 3 Medium locally on your own custom image dataset.
Code:
#!/usr/bin/env bash
# Environment setup for fine-tuning Stable Diffusion 3 Medium (DreamBooth LoRA).
set -euo pipefail

# Create an isolated conda environment.
conda create -n sdft python=3.11 -y

# FIX: the original never activated the environment, so every pip install
# below went into whatever env was currently active (usually base).
# The shell hook is needed because `conda activate` is a shell function,
# not available in non-interactive scripts by default.
eval "$(conda shell.bash hook)"
conda activate sdft

# Core training dependencies — a single invocation lets pip resolve
# the dependency set jointly instead of one package at a time.
pip install peft datasets huggingface_hub wandb bitsandbytes pillow \
  accelerate sentencepiece

# Latest transformers from source (SD3 support may require unreleased fixes).
pip install git+https://github.com/huggingface/transformers

# Install diffusers from source plus the SD3 DreamBooth example requirements.
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
cd examples/dreambooth
pip install -r requirements_sd3.txt

# Authenticate with the Hugging Face Hub (SD3 is a gated model) and
# write a default accelerate config.
huggingface-cli login
accelerate config default
from huggingface_hub import snapshot_download
from pathlib import Path

# Download the example "dog" dataset used for DreamBooth fine-tuning.
local_dir = "/home/Ubuntu/dog"

# FIX: the original had a bare shell `mkdir /home/Ubuntu/dog` line inside
# this Python snippet, which is a SyntaxError. Create the directory
# portably, including parents, without failing if it already exists.
Path(local_dir).mkdir(parents=True, exist_ok=True)

snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
============ Training script ============
#!/usr/bin/env bash
# Launch DreamBooth LoRA fine-tuning for Stable Diffusion 3 Medium.
set -euo pipefail

export MODEL_NAME="stabilityai/stable-diffusion-3-medium-diffusers"
export INSTANCE_DIR="/home/Ubuntu/dog"
export OUTPUT_DIR="trained-sd3-lora"

# FIX: all variable expansions are now quoted so paths containing spaces
# or glob characters do not word-split and break the command line.
accelerate launch train_dreambooth_lora_sd3.py \
  --pretrained_model_name_or_path="$MODEL_NAME" \
  --instance_data_dir="$INSTANCE_DIR" \
  --output_dir="$OUTPUT_DIR" \
  --mixed_precision="fp16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --learning_rate=1e-5 \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
No comments:
Post a Comment