Mirror of https://github.com/pese-git/llm-arch-research.git (synced 2026-01-23 21:10:54 +00:00)
test(core): fix FeedForward and MultiHeadAttention tests for unified interface and tuple outputs
@@ -98,7 +98,7 @@ def test_multi_head_attention():
     batch_size, seq_len = 2, 16
     inputs = torch.randn(batch_size, seq_len, emb_size)
 
-    output = attention(inputs)
+    output, _ = attention(inputs)
 
     assert output.shape == inputs.shape
     print("✅ Multi-head attention test passed")
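The change reflects the unified interface named in the commit message: attention modules now return a tuple (output, attention weights) instead of a bare tensor, so the test unpacks the result. Below is a minimal, hypothetical sketch of such an interface; the class body, nn.MultiheadAttention usage, and the emb_size/num_heads values are assumptions for illustration, not the repository's actual implementation.

import torch
from torch import nn


class MultiHeadAttention(nn.Module):
    def __init__(self, emb_size: int, num_heads: int):
        super().__init__()
        # Illustrative only; the repo's own attention layer may be hand-rolled.
        self.attn = nn.MultiheadAttention(emb_size, num_heads, batch_first=True)

    def forward(self, x: torch.Tensor):
        # Self-attention: query, key, and value are all the input sequence.
        output, weights = self.attn(x, x, x)
        return output, weights  # tuple output, hence `output, _ = attention(inputs)`


emb_size, num_heads = 32, 4            # assumed sizes, not taken from the repo
attention = MultiHeadAttention(emb_size, num_heads)
batch_size, seq_len = 2, 16
inputs = torch.randn(batch_size, seq_len, emb_size)
output, _ = attention(inputs)          # unpack the tuple as the updated test does
assert output.shape == inputs.shape
print("✅ Multi-head attention test passed")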