Mirror of https://github.com/pese-git/llm-arch-research.git, synced 2026-01-24 05:21:16 +00:00
update and expand scientific docstrings for optimizer, scheduler, trainer
- Expanded module-level and function/class docstrings in optimizer.py, scheduler.py, and trainer.py
- Described mathematical foundations and theoretical motivations, and provided detailed usage examples for students
- All docstrings are in Russian, in a clear scientific style

test(training): add comprehensive tests for optimizer, scheduler, and trainer modules

- Added new test files for get_optimizer, get_linear_schedule_with_warmup, and Trainer
- Tests cover parameter handling, edge cases, and expected learning dynamics (lr schedules and loss behavior)
- Trainer now logs average epoch losses to self.loss_history for testability and analysis (see the sketch below)

refactor(training/trainer): log epoch loss to loss_history for downstream analysis and tests

BREAKING CHANGE: Trainer.loss_history is a new attribute consolidating average losses per epoch, enabling robust learning-dynamics assertions in tests
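A minimal sketch of the loss_history behavior described above, assuming a conventional epoch/batch training loop. Everything except the loss_history attribute itself (constructor arguments, the model returning a scalar loss) is an illustrative assumption, not the actual Trainer from trainer.py:

class Trainer:
    """Illustrative fragment (not the real trainer.py): accumulates per-batch
    losses and appends the average loss of each epoch to self.loss_history."""

    def __init__(self, model, optimizer, train_loader, num_epochs=1):
        self.model = model
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.num_epochs = num_epochs
        self.loss_history = []  # one averaged loss value per epoch

    def train(self):
        for _ in range(self.num_epochs):
            epoch_losses = []
            for batch in self.train_loader:
                self.optimizer.zero_grad()
                loss = self.model(**batch)  # assumed: forward pass returns a scalar loss
                loss.backward()
                self.optimizer.step()
                epoch_losses.append(loss.item())
            # average epoch loss, used by tests to assert on learning dynamics
            self.loss_history.append(sum(epoch_losses) / len(epoch_losses))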
35	llm/tests/training/test_optimizer.py	Normal file
@@ -0,0 +1,35 @@
import pytest
import torch.nn as nn
from llm.training.optimizer import get_optimizer


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(10, 1)


def test_get_optimizer_adamw():
    model = DummyModel()
    optimizer = get_optimizer(model, lr=1e-3, weight_decay=0.02, optimizer_type="adamw")
    assert optimizer.__class__.__name__ == 'AdamW'
    assert optimizer.defaults['lr'] == 1e-3
    assert optimizer.defaults['weight_decay'] == 0.02


def test_get_optimizer_adam():
    model = DummyModel()
    optimizer = get_optimizer(model, lr=1e-4, weight_decay=0.01, optimizer_type="adam")
    assert optimizer.__class__.__name__ == 'Adam'
    assert optimizer.defaults['lr'] == 1e-4
    assert optimizer.defaults['weight_decay'] == 0.01


def test_get_optimizer_sgd():
    model = DummyModel()
    optimizer = get_optimizer(model, lr=0.1, optimizer_type="sgd")
    assert optimizer.__class__.__name__ == 'SGD'
    assert optimizer.defaults['lr'] == 0.1
    # SGD: weight_decay defaults to 0 for this call; get_optimizer sets momentum=0.9
    assert optimizer.defaults['momentum'] == 0.9


def test_get_optimizer_invalid():
    model = DummyModel()
    with pytest.raises(ValueError):
        get_optimizer(model, optimizer_type="nonexistent")
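The commit message also mentions tests for get_linear_schedule_with_warmup; a sketch of what such a test could look like, assuming the scheduler follows the usual warmup-then-linear-decay contract and takes (optimizer, num_warmup_steps, num_training_steps). The exact signature in llm.training.scheduler is an assumption here, not taken from the diff:

import torch.nn as nn
from llm.training.optimizer import get_optimizer
from llm.training.scheduler import get_linear_schedule_with_warmup


def test_linear_schedule_warmup_then_decay():
    model = nn.Linear(10, 1)
    optimizer = get_optimizer(model, lr=1e-3, optimizer_type="adamw")
    # assumed signature: (optimizer, num_warmup_steps, num_training_steps)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

    lrs = []
    for _ in range(100):
        optimizer.step()
        scheduler.step()
        lrs.append(optimizer.param_groups[0]["lr"])

    # lr should rise during warmup and then decay towards zero
    assert lrs[9] > lrs[0]
    assert lrs[-1] < lrs[9]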