[WIP] Federated learning #2418

Draft: wants to merge 9 commits into base: main
7 changes: 7 additions & 0 deletions pyproject.toml
@@ -62,6 +62,13 @@ dev = [
"wandb",
"expecttest",
]
federated = [
"fastapi",
"uvicorn",
"aiofiles",
"requests",
"python-multipart"
]

[tool.setuptools.dynamic]
version = {attr = "torchtune.__version__"}
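For orientation, these optional dependencies suggest the federator runs as a small FastAPI/uvicorn service that accepts multipart checkpoint uploads from participants (python-multipart for form parsing, aiofiles for async file writes, requests on the client side). A minimal sketch of such a receiver, assuming a hypothetical /upload route that is not necessarily what this PR implements:

import aiofiles
import uvicorn
from fastapi import FastAPI, UploadFile

app = FastAPI()

@app.post("/upload")
async def upload(checkpoint: UploadFile):
    # Stream the uploaded participant weights to disk without blocking the event loop.
    async with aiofiles.open(f"/tmp/{checkpoint.filename}", "wb") as out:
        await out.write(await checkpoint.read())
    return {"status": "ok"}

if __name__ == "__main__":
    # Matches the endpoint used by the participant configs below.
    uvicorn.run(app, host="127.0.0.1", port=8000)

The extra itself would presumably be installed from the repository root with something like pip install -e ".[federated]".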
37 changes: 37 additions & 0 deletions recipes/configs/federation_diloco.yaml
@@ -0,0 +1,37 @@
# Config for FederationRecipe in federation.py
#
# To launch, run the following command from root torchtune directory:
# tune run federation --config federation_diloco.yaml

output_dir: /tmp/torchtune/qwen2_5_0_5B/federated # /tmp may be deleted by your system. Change it to your preference.

#
# Model arguments
model:
_component_: torchtune.models.qwen2_5.qwen2_5_0_5b

checkpointer:
_component_: torchtune.training.FullModelHFCheckpointer
checkpoint_dir: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775
checkpoint_files: [model.safetensors]
recipe_checkpoint: null
output_dir: ${output_dir}
model_type: QWEN2
resume_from_checkpoint: False

device: mps
dtype: bf16
seed: 1234

federator:
_component_: torchtune.training.federation.DiLoCoFederator
h: 50
optimizer:
_component_: torch.optim.SGD
lr: 1
nesterov: True
momentum: 0.9

participants:
- uioGMzl7zqtkydxP66mJh9dZCaNnSWOb
- etbefHUtU0XViIPrqo8nN2TlpvIUTdi7
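
For context, the federator block above describes a DiLoCo-style outer loop: every h = 50 inner steps, each participant listed under participants (the tokens match the federation.token entries in the participant configs below) contributes its current weights, and the federator takes an outer Nesterov-SGD step (lr=1, momentum=0.9) on the averaged weight delta. A minimal sketch of that outer update, assuming plain state dicts of matching tensors; the names are illustrative, not the actual torchtune.training.federation.DiLoCoFederator interface:

import torch

def diloco_outer_step(global_model, participant_state_dicts, outer_opt):
    # One outer step: move the global weights toward the participant average.
    with torch.no_grad():
        for name, param in global_model.named_parameters():
            avg = torch.stack([sd[name] for sd in participant_state_dicts]).mean(dim=0)
            # Outer "gradient" = global - mean(participants); with lr=1 and
            # momentum=0.9 (as configured above) this pulls the global model
            # toward the averaged participant weights, smoothed across rounds.
            param.grad = param.detach() - avg
    outer_opt.step()
    outer_opt.zero_grad()

# Matching the federator.optimizer block above:
# outer_opt = torch.optim.SGD(global_model.parameters(), lr=1.0, momentum=0.9, nesterov=True)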
114 changes: 114 additions & 0 deletions recipes/configs/qwen2_5/0.5B_full_single_device_diloco_1.yaml
@@ -0,0 +1,114 @@
# Config for single device full finetuning in full_finetune_single_device.py
# using a Qwen2.5 0.5B
#
# This config assumes that you've run the following command before launching
# this run:
# tune download Qwen/Qwen2.5-0.5B-Instruct --output-dir /tmp/Qwen2.5-0.5B-Instruct
#
#
# To launch on a single device, run the following command from root:
# tune run full_finetune_single_device --config qwen2_5/0.5B_full_single_device_diloco_1
#
# You can add specific overrides through the command line. For example
# to override the checkpointer directory while launching training
# you can run:
# tune run full_finetune_single_device --config qwen2_5/0.5B_full_single_device_diloco_1 checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
#
# This config works only for training on single device.

output_dir: /tmp/torchtune/qwen2_5_0_5B/full_single_device # /tmp may be deleted by your system. Change it to your preference.

# Model arguments
model:
_component_: torchtune.models.qwen2_5.qwen2_5_0_5b

# Tokenizer
tokenizer:
_component_: torchtune.models.qwen2_5.qwen2_5_tokenizer
path: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/vocab.json
merges_file: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/merges.txt
max_seq_len: 4096

# Checkpointer
checkpointer:
_component_: torchtune.training.FullModelHFCheckpointer
checkpoint_dir: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/
checkpoint_files: [model.safetensors]
recipe_checkpoint: null
output_dir: ${output_dir}
model_type: QWEN2
resume_from_checkpoint: False

# Dataset
dataset:
_component_: torchtune.datasets.alpaca_cleaned_dataset
packed: False # True increases speed

seed: 4321
shuffle: True

# Fine-tuning arguments
batch_size: 4
epochs: 1
optimizer:
_component_: torch.optim.AdamW
lr: 2e-5

optimizer_in_bwd: True # True saves memory. Requires gradient_accumulation_steps=1

loss:
_component_: torchtune.modules.loss.CEWithChunkedOutputLoss

max_steps_per_epoch: null
gradient_accumulation_steps: 1 # Use to increase effective batch size
clip_grad_norm: null
compile: False # torch.compile the model + loss, True increases speed + decreases memory

# Training environment
device: mps

# Memory management
enable_activation_checkpointing: True # True reduces memory
enable_activation_offloading: False # True reduces memory

# Reduced precision
dtype: bf16

# Logging
metric_logger:
_component_: torchtune.training.metric_logging.DiskLogger
log_dir: ${output_dir}/logs
log_every_n_steps: 1
log_peak_memory_stats: True

# Federation
federation:
_component_: torchtune.training.federation.TuneParticipant
endpoint: http://127.0.0.1:8000
token: uioGMzl7zqtkydxP66mJh9dZCaNnSWOb
temporary_dir: ${output_dir}/federation

# Profiler (disabled)
profiler:
_component_: torchtune.training.setup_torch_profiler
enabled: False

#Output directory of trace artifacts
output_dir: ${output_dir}/profiling_outputs

#`torch.profiler.ProfilerActivity` types to trace
cpu: True
cuda: False

#trace options passed to `torch.profiler.profile`
profile_memory: False
with_stack: False
record_shapes: True
with_flops: False

# `torch.profiler.schedule` options:
# wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
wait_steps: 5
warmup_steps: 3
active_steps: 2
num_cycles: 1
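
The federation block in this config points the participant at the federator started from federation_diloco.yaml. A minimal sketch of what the client side of TuneParticipant might do after every h inner steps, assuming the hypothetical /upload route from the server sketch above (route, field, and header names are assumptions, not the actual torchtune API):

import requests

def upload_checkpoint(endpoint: str, token: str, checkpoint_path: str) -> None:
    # Send the locally saved weights (e.g. from temporary_dir) to the federator.
    with open(checkpoint_path, "rb") as f:
        resp = requests.post(
            f"{endpoint}/upload",
            headers={"Authorization": f"Bearer {token}"},
            files={"checkpoint": f},  # multipart/form-data, hence python-multipart on the server
            timeout=60,
        )
    resp.raise_for_status()

# upload_checkpoint(cfg.federation.endpoint, cfg.federation.token,
#                   f"{cfg.federation.temporary_dir}/model.safetensors")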
113 changes: 113 additions & 0 deletions recipes/configs/qwen2_5/0.5B_full_single_device_diloco_2.yaml
@@ -0,0 +1,113 @@
# Config for single device full finetuning in full_finetune_single_device.py
# using a Qwen2.5 0.5B
#
# This config assumes that you've run the following command before launching
# this run:
# tune download Qwen/Qwen2.5-0.5B-Instruct --output-dir /tmp/Qwen2.5-0.5B-Instruct
#
#
# To launch on a single device, run the following command from root:
# tune run full_finetune_single_device --config qwen2_5/0.5B_full_single_device_diloco_2
#
# You can add specific overrides through the command line. For example
# to override the checkpointer directory while launching training
# you can run:
# tune run full_finetune_single_device --config qwen2_5/0.5B_full_single_device_diloco_2 checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
#
# This config works only for training on single device.

output_dir: /tmp/torchtune/qwen2_5_0_5B/full_single_device # /tmp may be deleted by your system. Change it to your preference.

# Model arguments
model:
_component_: torchtune.models.qwen2_5.qwen2_5_0_5b

# Tokenizer
tokenizer:
_component_: torchtune.models.qwen2_5.qwen2_5_tokenizer
path: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/vocab.json
merges_file: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/merges.txt
max_seq_len: 4096

# Checkpointer
checkpointer:
_component_: torchtune.training.FullModelHFCheckpointer
checkpoint_dir: /Users/maxime/.cache/huggingface/hub/models--Qwen--Qwen2.5-0.5B-Instruct/snapshots/7ae557604adf67be50417f59c2c2f167def9a775/
checkpoint_files: [model.safetensors]
recipe_checkpoint: null
output_dir: ${output_dir}
model_type: QWEN2
resume_from_checkpoint: False

# Dataset
dataset:
_component_: torchtune.datasets.cnn_dailymail_articles_dataset

seed: 1234
shuffle: True

# Fine-tuning arguments
batch_size: 1
epochs: 1
optimizer:
_component_: torch.optim.AdamW
lr: 2e-5

optimizer_in_bwd: True # True saves memory. Requires gradient_accumulation_steps=1

loss:
_component_: torchtune.modules.loss.CEWithChunkedOutputLoss

max_steps_per_epoch: null
gradient_accumulation_steps: 1 # Use to increase effective batch size
clip_grad_norm: null
compile: False # torch.compile the model + loss, True increases speed + decreases memory

# Training environment
device: cuda

# Memory management
enable_activation_checkpointing: True # True reduces memory
enable_activation_offloading: False # True reduces memory

# Reduced precision
dtype: bf16

# Logging
metric_logger:
_component_: torchtune.training.metric_logging.DiskLogger
log_dir: ${output_dir}/logs
log_every_n_steps: 1
log_peak_memory_stats: True

# Federation
federation:
_component_: torchtune.training.federation.TuneParticipant
endpoint: http://127.0.0.1:8000
token: etbefHUtU0XViIPrqo8nN2TlpvIUTdi7
temporary_dir: ${output_dir}/federation

# Profiler (disabled)
profiler:
_component_: torchtune.training.setup_torch_profiler
enabled: False

#Output directory of trace artifacts
output_dir: ${output_dir}/profiling_outputs

#`torch.profiler.ProfilerActivity` types to trace
cpu: True
cuda: False

#trace options passed to `torch.profiler.profile`
profile_memory: False
with_stack: False
record_shapes: True
with_flops: False

# `torch.profiler.schedule` options:
# wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
wait_steps: 5
warmup_steps: 3
active_steps: 2
num_cycles: 1
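
To exercise the two configs locally, one would presumably start the federator first (tune run federation --config federation_diloco.yaml) and then launch the two participants in separate terminals with tune run full_finetune_single_device --config qwen2_5/0.5B_full_single_device_diloco_1 and the matching _diloco_2 config, so that each participant's federation.token matches the participants list in the federator config.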