Mirror of https://github.com/pese-git/llm-arch-research.git, synced 2026-01-23 21:10:54 +00:00
- add universal runner run_llm_experiment.py with JSON-config driven LLM training / generation
- add configs for gpt, gpt2, llama (training/generation)
- remove individual train/generate scripts for each model
- update README with a simple how-to for the experiments block

BREAKING CHANGE: all llm_only experiments now run only through run_llm_experiment.py; legacy scripts removed
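As a rough illustration of the new workflow, an experiment would be launched by pointing the runner at one of the JSON configs. The flag name and config path below are assumptions for illustration only; the runner's actual CLI is not shown in this excerpt.

python run_llm_experiment.py --config configs/gpt2_generate.json   # hypothetical flag and path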
19 lines · 542 B · JSON
{
  "bpe_tokenizer": "checkpoints/bpe_tokenizer.json",
  "test_prompts": [
    "Нейронные сети",
    "Обработка естественного языка",
    "GPT-2 — это"
  ],
  "model_config_path": "checkpoints/gpt2-bpe/config.json",
  "model_weights": "checkpoints/gpt2-bpe/model.pt",
  "generation": {
    "max_new_tokens": 40,
    "temperature": 0.8,
    "do_sample": true,
    "top_k": null,
    "top_p": null
  },
  "log_path": "checkpoints/llm_only_generation_logs.json"
}
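Below is a minimal sketch of how a config of this shape could drive a generation run. It only parses the file and echoes the parameters; the commented block marks where model and tokenizer loading would plug in. The helper names (load_tokenizer, load_model) are hypothetical placeholders, not the actual run_llm_experiment.py API.

# sketch_generate.py
# Illustrative only: reads a generation config of the shape shown above.
# This is not the repository's run_llm_experiment.py.
import argparse
import json


def main() -> None:
    parser = argparse.ArgumentParser(description="JSON-config-driven generation (sketch)")
    parser.add_argument("--config", required=True, help="path to a generation config JSON")
    args = parser.parse_args()

    # Load the experiment description.
    with open(args.config, encoding="utf-8") as fh:
        cfg = json.load(fh)

    gen = cfg.get("generation", {})
    print("tokenizer :", cfg["bpe_tokenizer"])
    print("model cfg :", cfg["model_config_path"])
    print("weights   :", cfg["model_weights"])
    print("sampling  :", gen)
    print("prompts   :", cfg["test_prompts"])

    # Where the real pipeline would plug in (hypothetical helpers, not the repo's API):
    #   tokenizer = load_tokenizer(cfg["bpe_tokenizer"])
    #   model = load_model(cfg["model_config_path"], cfg["model_weights"])
    #   for prompt in cfg["test_prompts"]:
    #       ids = tokenizer.encode(prompt)
    #       out = model.generate(ids,
    #                            max_new_tokens=gen["max_new_tokens"],
    #                            temperature=gen["temperature"],
    #                            do_sample=gen["do_sample"],
    #                            top_k=gen["top_k"],
    #                            top_p=gen["top_p"])
    #       ...  # decode and collect
    # Generated samples would then be written to cfg["log_path"].


if __name__ == "__main__":
    main()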