Mirror of https://github.com/pese-git/llm-arch-research.git (synced 2026-01-23 21:10:54 +00:00)
feat(gemma): initial implementation of Gemma model and configs
- Add core Gemma model (architecture, attention, GeGLU, RoPE, RMSNorm, etc.)
- Add configs for training and generation: gemma_train.json, gemma_generate.json
- Add Gemma notebook for exploratory analysis and demonstration
- Add __init__.py for the Gemma submodule
- Update run_llm_experiment.py to support Gemma experiment configs

test(gemma): add comprehensive unit tests for Gemma

- Test forward pass (with/without cache)
- Test autoregressive generation (greedy, top-k, top-p)
- Test shape correctness and max-sequence-length errors
- Test multi-layer stack and token embeddings

docs: add documentation notebook for Gemma usage and analysis

Closes: #issue (if applicable)
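The commit lists GeGLU, RoPE, RMSNorm and grouped-query attention as the model's building blocks. As a rough orientation, the sketch below shows how two of them (RMSNorm and a GeGLU feed-forward) are typically wired; the class names, the 4x hidden-dim multiplier, and the smoke test are illustrative assumptions, not the repository's actual API.

```python
# Minimal sketch of two of the listed building blocks (RMSNorm, GeGLU).
# Names and shapes are illustrative; the repo's Gemma modules may differ.
import torch
import torch.nn as nn
import torch.nn.functional as F


class RMSNorm(nn.Module):
    """Root-mean-square norm: rescale by the RMS of the features,
    apply a learned per-feature gain, no mean subtraction."""

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        rms = torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        return x * rms * self.weight


class GeGLU(nn.Module):
    """GELU-gated feed-forward: gelu(gate(x)) * up(x), projected back down."""

    def __init__(self, dim: int, hidden_dim: int):
        super().__init__()
        self.gate_proj = nn.Linear(dim, hidden_dim, bias=False)
        self.up_proj = nn.Linear(dim, hidden_dim, bias=False)
        self.down_proj = nn.Linear(hidden_dim, dim, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.down_proj(F.gelu(self.gate_proj(x)) * self.up_proj(x))


# Smoke test with the embed_dim from gemma_train.json (256).
x = torch.randn(2, 16, 256)
y = GeGLU(256, 4 * 256)(RMSNorm(256)(x))
assert y.shape == x.shape
```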
experiments/llm_only/configs/gemma_train.json | 28 (new file)
@@ -0,0 +1,28 @@
{
    "bpe_tokenizer": "checkpoints/bpe_tokenizer.json",
    "bpe_vocab_size": 1000,
    "bpe_special_tokens": ["<pad>", "<unk>", "<bos>", "<eos>"],
    "test_prompts": ["Open source AI", "What is Llama?"],
    "model_config": {
        "vocab_size": null,
        "embed_dim": 256,
        "num_q_heads": 4,
        "num_kv_heads": 2,
        "head_size": 64,
        "num_layers": 4,
        "max_position_embeddings": 512,
        "num_experts": 8,
        "top_k_experts": 2,
        "window_size": 16,
        "dropout": 0.1
    },
    "model_weights": "checkpoints/gemma-bpe/model.pt",
    "model_config_path": "checkpoints/gemma-bpe/config.json",
    "training": {
        "learning_rate": 0.0003,
        "batch_size": 2,
        "num_epochs": 3,
        "warmup_steps": 50
    },
    "log_path": "checkpoints/gemma_only_training_logs.json"
}
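The config above pairs grouped-query attention (num_q_heads=4 sharing num_kv_heads=2) with what look like mixture-of-experts settings (num_experts=8, top_k_experts=2) and a sliding attention window (window_size=16). As a rough guide, the sketch below shows how such a config might be read and turned into model kwargs; the Gemma import path and the way vocab_size gets filled in are assumptions for illustration, not the repository's actual code (see run_llm_experiment.py for the real wiring).

```python
# Hypothetical loading sketch for gemma_train.json.
import json

with open("experiments/llm_only/configs/gemma_train.json") as f:
    cfg = json.load(f)

model_cfg = dict(cfg["model_config"])

# "vocab_size" is null in the file; presumably it is filled in from the
# trained BPE tokenizer at runtime. One plausible choice:
model_cfg["vocab_size"] = cfg["bpe_vocab_size"] + len(cfg["bpe_special_tokens"])

# Grouped-query attention: 4 query heads share 2 KV heads (2 queries per KV head).
assert model_cfg["num_q_heads"] % model_cfg["num_kv_heads"] == 0

# from gemma.model import Gemma    # assumed import path
# model = Gemma(**model_cfg)
# logits = model(input_ids)        # forward pass; KV cache optional per the tests
```

The "training" block (learning rate, batch size, epochs, warmup steps) would then presumably drive the training loop invoked by run_llm_experiment.py, with logs written to the "log_path" given above.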