base_model: allura-org/Gemma-3-Glitter-12B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
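
# No 8-bit/4-bit quantized loading; the base model is trained in full (bf16) precision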
load_in_8bit: false
load_in_4bit: false
strict: false
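
# ShareGPT-format RP/creative-writing datasets; loss is computed only on "gpt"
# turns, and the EOS token is trained at the end of each turn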
datasets:
  - path: NewEden/LIMARP-Complexity
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/Creative_Writing-Complexity
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/wata-Oshi-No-prefix
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/No-Prefix-LN
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/Storium-No-Prefix
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/PIPPA-Mega-Filtered
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
  - path: NewEden/OpenCAI-ShareGPT
    type: chat_template
    roles_to_train: ["gpt"]
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    train_on_eos: turn
val_set_size: 0.01
output_dir: ./outputs
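
# 16k-token context with sample packing for throughput; packing stays off for eval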
sequence_len: 16384
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true
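
# Liger kernels and Cut Cross-Entropy reduce activation memory; APOLLO
# (rank-1 random-projection AdamW) keeps optimizer state small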
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
liger_rope: true
liger_rms_norm: true
liger_glu_activation: true
liger_fused_linear_cross_entropy: false
cut_cross_entropy: true
optimizer: apollo_adamw
optim_args: proj=random,rank=1,scale=128.0,scale_type=tensor,update_proj_gap=200
optim_target_modules:
  - .*.attn.*
  - .*.mlp.*
gradient_checkpointing: unsloth
flash_attention: true
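
# LoRA over every attention and MLP projection (r=64, alpha=32)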
adapter: lora
lora_model_dir:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.1
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
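
# Effective batch size 2 per device (micro-batch 1 x 2 accumulation steps);
# REX learning-rate schedule decaying from 1e-5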
gradient_accumulation_steps: 2
micro_batch_size: 1
num_epochs: 4
lr_scheduler: rex
learning_rate: 1e-5
weight_decay: 0.02
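
# bf16 autocast where the hardware supports it; TF32 matmuls on Ampere+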
train_on_inputs: false
group_by_length: false
bf16: auto
fp16:
tf32: true
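
# Checkpointing/logging boilerplate; keys left empty fall back to axolotl defaults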
early_stopping_patience:
resume_from_checkpoint:

local_rank:
logging_steps: 1
xformers_attention:
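
# Short warmup; evaluate four times and checkpoint once per epoch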
warmup_steps: 35
evals_per_epoch: 4
eval_table_size:
eval_max_new_tokens: 128
saves_per_epoch: 1

debug:
deepspeed:
fsdp:
fsdp_config:
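
# A minimal launch sketch (assumes this config is saved as config.yml and that
# axolotl and flash-attn are installed; adjust for multi-GPU setups):
#   accelerate launch -m axolotl.cli.train config.yml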