mirror of
https://github.com/pese-git/llm-arch-research.git
synced 2026-01-23 21:10:54 +00:00
- implement Mistral model in llm/models/mistral/mistral.py with GroupedQueryAttention, SwiGLU, RoPE, sliding window attention - add __init__.py for module export - add config files for mistral training and generation - update universal experiment runner to support Mistral model - add notebook for Mistral experiments
19 lines
505 B
JSON
{
  "bpe_tokenizer": "checkpoints/bpe_tokenizer.json",
  "test_prompts": [
    "Open weights",
    "The Llama model is",
    "Efficient transformers"
  ],
  "model_config_path": "checkpoints/mistral-bpe/config.json",
  "model_weights": "checkpoints/mistral-bpe/model.pt",
  "generation": {
    "max_new_tokens": 40,
    "temperature": 0.8,
    "do_sample": true,
    "top_k": null,
    "top_p": null
  },
  "log_path": "checkpoints/mistral_only_generation_logs.json"
}