mirror of
https://github.com/pese-git/llm-arch-research.git
synced 2026-01-23 21:10:54 +00:00
- implement Mistral model in llm/models/mistral/mistral.py with GroupedQueryAttention, SwiGLU, RoPE, sliding window attention - add __init__.py for module export - add config files for mistral training and generation - update universal experiment runner to support Mistral model - add notebook for Mistral experiments
26 lines
778 B
JSON
{
  "bpe_tokenizer": "checkpoints/bpe_tokenizer.json",
  "bpe_vocab_size": 1000,
  "bpe_special_tokens": ["<pad>", "<unk>", "<bos>", "<eos>"],
  "test_prompts": ["Open source AI", "What is Llama?"],
  "model_config": {
    "vocab_size": null,
    "embed_dim": 256,
    "num_q_heads": 4,
    "num_kv_heads": 2,
    "head_size": 64,
    "num_layers": 4,
    "max_position_embeddings": 512,
    "window_size": 16,
    "dropout": 0.1
  },
  "model_weights": "checkpoints/mistral-bpe/model.pt",
  "model_config_path": "checkpoints/mistral-bpe/config.json",
  "training": {
    "learning_rate": 0.0003,
    "batch_size": 2,
    "num_epochs": 3,
    "warmup_steps": 50
  },
  "log_path": "checkpoints/mistral_only_training_logs.json"
}