Mirror of https://github.com/pese-git/llm-arch-research.git, synced 2026-01-24 05:21:16 +00:00
feat(mixtral): initial implementation of Mixtral MoE model, configs, and tests
- Add Mixtral architecture implementation with MoE support (llm/src/llm/models/mixtral/mixtral.py)
- Introduce a generic Mixture-of-Experts (MoE) block (llm/src/llm/core/moe.py); a sketch of the idea follows this message
- Create dedicated configuration files for Mixtral training and generation experiments
- Register and test Mixtral support in the experiment runner (run_llm_experiment.py)
- Add unit tests for the Mixtral API covering forward, caching, and generation modes
- Include the Jupyter notebook mixstral.ipynb for architectural exploration and research
- Ensure correct handling of torch bool masks in sampling (top-k, top-p) during generation

BREAKING CHANGE: adds new model code and test coverage and modifies the experiment runner logic to register Mixtral.
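For orientation, the following is a minimal sketch of a Mixtral-style top-k routed MoE layer, written against the hyperparameters used in mixtral_train.json below (num_experts=8, top_k_experts=2, embed_dim=256). The class name, the expert MLP shape (4x expansion, SiLU), and the loop-based dispatch are assumptions for illustration, not the actual contents of llm/src/llm/core/moe.py.

import torch
import torch.nn as nn
import torch.nn.functional as F

class MoESketch(nn.Module):
    """Sparse MoE layer: a linear router scores all experts per token, the
    top-k experts are evaluated, and their outputs are mixed with the
    renormalized routing weights."""

    def __init__(self, embed_dim: int, num_experts: int = 8, top_k: int = 2):
        super().__init__()
        self.top_k = top_k
        self.router = nn.Linear(embed_dim, num_experts, bias=False)
        # Expert MLP shape (4x expansion, SiLU) is an assumption for the sketch.
        self.experts = nn.ModuleList(
            nn.Sequential(
                nn.Linear(embed_dim, 4 * embed_dim),
                nn.SiLU(),
                nn.Linear(4 * embed_dim, embed_dim),
            )
            for _ in range(num_experts)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq_len, embed_dim)
        router_logits = self.router(x)                          # (B, T, num_experts)
        weights, indices = torch.topk(router_logits, self.top_k, dim=-1)
        weights = F.softmax(weights, dim=-1)                    # renormalize over the selected experts
        out = torch.zeros_like(x)
        for slot in range(self.top_k):
            for e, expert in enumerate(self.experts):
                token_mask = indices[..., slot] == e            # bool mask of tokens routed to expert e
                if token_mask.any():
                    out[token_mask] += weights[..., slot][token_mask].unsqueeze(-1) * expert(x[token_mask])
        return out

x = torch.randn(2, 16, 256)
y = MoESketch(embed_dim=256, num_experts=8, top_k=2)(x)  # output has the same shape as x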
experiments/llm_only/configs/mixtral_generate.json (new file, 19 lines)
@@ -0,0 +1,19 @@
{
  "bpe_tokenizer": "checkpoints/bpe_tokenizer.json",
  "test_prompts": [
    "Open weights",
    "The Llama model is",
    "Efficient transformers"
  ],
  "model_config_path": "checkpoints/mixtral-bpe/config.json",
  "model_weights": "checkpoints/mixtral-bpe/model.pt",
  "generation": {
    "max_new_tokens": 40,
    "temperature": 0.8,
    "do_sample": true,
    "top_k": null,
    "top_p": null
  },
  "log_path": "checkpoints/mixtral_only_generation_logs.json"
}
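The generation block above leaves top_k and top_p as null, while the commit message mentions fixing how torch bool masks are handled in top-k/top-p sampling. Below is a hedged sketch of the common masking pattern (masked_fill with boolean masks instead of mixed-dtype arithmetic); the function name and signature are illustrative, not the repository's actual sampling code.

import torch

def sample_next_token(logits, temperature=0.8, top_k=None, top_p=None):
    # logits: (batch, vocab_size) for the last position
    logits = logits / max(temperature, 1e-8)

    if top_k is not None:
        kth = torch.topk(logits, top_k, dim=-1).values[..., -1:]
        logits = logits.masked_fill(logits < kth, float("-inf"))    # bool mask -> -inf

    if top_p is not None:
        sorted_logits, sorted_idx = torch.sort(logits, descending=True, dim=-1)
        cum_probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
        remove_sorted = cum_probs > top_p                           # bool mask in sorted order
        remove_sorted[..., 1:] = remove_sorted[..., :-1].clone()    # shift right: always keep the top token
        remove_sorted[..., 0] = False
        remove = remove_sorted.scatter(-1, sorted_idx, remove_sorted)  # map mask back to vocab order
        logits = logits.masked_fill(remove, float("-inf"))

    probs = torch.softmax(logits, dim=-1)
    return torch.multinomial(probs, num_samples=1)                  # (batch, 1)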
experiments/llm_only/configs/mixtral_train.json (new file, 28 lines)
@@ -0,0 +1,28 @@
{
  "bpe_tokenizer": "checkpoints/bpe_tokenizer.json",
  "bpe_vocab_size": 1000,
  "bpe_special_tokens": ["<pad>", "<unk>", "<bos>", "<eos>"],
  "test_prompts": ["Open source AI", "What is Llama?"],
  "model_config": {
    "vocab_size": null,
    "embed_dim": 256,
    "num_q_heads": 4,
    "num_kv_heads": 2,
    "head_size": 64,
    "num_layers": 4,
    "max_position_embeddings": 512,
    "num_experts": 8,
    "top_k_experts": 2,
    "window_size": 16,
    "dropout": 0.1
  },
  "model_weights": "checkpoints/mixtral-bpe/model.pt",
  "model_config_path": "checkpoints/mixtral-bpe/config.json",
  "training": {
    "learning_rate": 0.0003,
    "batch_size": 2,
    "num_epochs": 3,
    "warmup_steps": 50
  },
  "log_path": "checkpoints/mixtral_only_training_logs.json"
}
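For illustration, this is how a runner might turn the model_config above into a model instance. Only the import path is confirmed by the diff below; the keyword-argument constructor and filling the null vocab_size from the trained BPE tokenizer (bpe_vocab_size is 1000 here) are assumptions.

import json
from llm.models.mixtral import Mixtral  # import path taken from run_llm_experiment.py below

with open("experiments/llm_only/configs/mixtral_train.json") as f:
    cfg = json.load(f)

model_cfg = dict(cfg["model_config"])
model_cfg["vocab_size"] = cfg["bpe_vocab_size"]  # assumption: null vocab_size is filled from the tokenizer
model = Mixtral(**model_cfg)                     # assumed keyword-argument constructor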
run_llm_experiment.py
@@ -45,6 +45,9 @@ def load_model_class(model_name):
    elif model_name.lower() == 'mistral':
        from llm.models.mistral import Mistral
        return Mistral
    elif model_name.lower() == 'mixtral':
        from llm.models.mixtral import Mixtral
        return Mixtral
    else:
        raise ValueError(f"Модель '{model_name}' не поддерживается.")
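A short usage sketch of the new branch, assuming run_llm_experiment.py is importable as a module; load_model_class and its behaviour are taken directly from the hunk above (the ValueError message translates to "Model '...' is not supported.").

from run_llm_experiment import load_model_class

MixtralCls = load_model_class("mixtral")   # resolves to llm.models.mixtral.Mixtral
MistralCls = load_model_class("mistral")   # pre-existing branch, unchanged

try:
    load_model_class("gpt-j")              # hypothetical unsupported name
except ValueError as err:
    print(err)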