HectorHe committed

Commit be8799e · verified · 1 parent: 67ccd92

Training in progress, step 3000

config.json CHANGED
@@ -53,7 +53,7 @@
  "topk_group": 1,
  "topk_method": "greedy",
  "torch_dtype": "bfloat16",
- "transformers_version": "4.55.0",
+ "transformers_version": "4.55.2",
  "use_cache": false,
  "v_head_dim": 128,
  "vocab_size": 102400
model-00001-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a4e91d8b9519cf6d61e2659f1f3f8266908ebfb67eb807a884a25f46cf080fae
+ oid sha256:fbea0aa3847b87d17b12ecd5ba95424eec4849f32286b2ba6302b51ffbfe55a6
  size 4994763632
model-00002-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54440705c364d027c842803d7f94611fae5ac7007dfa353257c6fd93de652879
+ oid sha256:9b06e0f345a91a3ac6912263a8405ac1bc046a97c5a415c6d15be111c0ca3d4d
  size 4995044944
model-00003-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:10a39d68a2fc4a9d92626f9c78b3ef5f2989297acc78e3f935bd81e79bce9784
+ oid sha256:8ff55ce1ae37d684e0d2a9295399bfb8eaa3e147229e7aee892a4791986b0d97
  size 4996085000
model-00004-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d4eef70d5e7ee2474bbc7653b90649456640d70e8c3acd6d968f0ab22c79875b
+ oid sha256:75c8ad4c896a8ef507d006317a99693a48362e2c5b2f263478babe83fa299a93
  size 4996085224
model-00005-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ffcd09d1b525e1ec22e37136e6808e9ef2fdc8f96c884034eeeedabba6ae5cc1
+ oid sha256:5843a23f7e43a27c77ca47bac0a8742a6c5c90cfe39abaa0df65e2bb4aeb32f9
  size 4996085224
model-00006-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69bf52ea5b557f18fb6dc9f3ef39053eecb82ab5cb2037e41825016655df9b19
+ oid sha256:a072cdcc5c5f1050c699ec66cd1da54a8cb73113e3c48b176c4cc113b602cec2
  size 4995045792
model-00007-of-00007.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:882f838140c6911bb360712ce4d57b1492df9e0eda3b7c28d4236b3dd1c5505b
+ oid sha256:d78a00fafcb1800fbe740bf1a65b7196b7052e8355a858510ba7b4cda1a95054
  size 1440515736
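
Every shard diff touches only its Git LFS pointer: the sha256 digest changes while the byte size stays identical, as expected when further-trained weights of the same shapes are re-serialized. A minimal sketch, with the filename and digest taken from the first shard above, for verifying a downloaded blob against its pointer:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file through SHA-256 so large shards never sit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Expected value copied from the updated LFS pointer of the first shard.
expected = "fbea0aa3847b87d17b12ecd5ba95424eec4849f32286b2ba6302b51ffbfe55a6"
actual = sha256_of("model-00001-of-00007.safetensors")
assert actual == expected, f"digest mismatch: {actual}"
```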
tokenizer_config.json CHANGED
@@ -152,6 +152,7 @@
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|end▁of▁sentence|>",
  "extra_special_tokens": {},
+ "fast_tokenizer": true,
  "legacy": true,
  "model_max_length": 16384,
  "pad_token": "<|end▁of▁sentence|>",
training.log CHANGED
@@ -216,3 +216,434 @@ weight_decay=0.0,
  )
  (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
  )
+ 2025-08-15 08:24:09 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+ 2025-08-15 08:24:09 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='autoprogrammer/nemotron_code_lf_filtered', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+ 2025-08-15 08:24:09 - INFO - __main__ - Training parameters SFTConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ activation_offloading=False,
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ average_tokens_across_devices=True,
+ batch_eval_metrics=False,
+ benchmarks=[],
+ bf16=True,
+ bf16_full_eval=False,
+ callbacks=[],
+ chat_template=None,
+ completion_only_loss=None,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_kwargs=None,
+ dataset_num_proc=None,
+ dataset_text_field=text,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800000000,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eos_token=<EOS_TOKEN>,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_packing=None,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.NO,
+ eval_use_gather_object=False,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=True,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-sft-nemotron-code,
+ hub_model_revision=main,
+ hub_private_repo=None,
+ hub_revision=None,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_for_metrics=[],
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=length,
+ liger_kernel_config=None,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/runs/Aug15_08-24-07_ip-172-31-35-111,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=1,
+ logging_strategy=IntervalStrategy.STEPS,
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+ max_grad_norm=1.0,
+ max_length=8192,
+ max_seq_length=None,
+ max_steps=-1,
+ metric_for_best_model=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=1,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code,
+ overwrite_hub_revision=False,
+ overwrite_output_dir=True,
+ packing=True,
+ pad_to_multiple_of=None,
+ pad_token=<PAD_TOKEN>,
+ padding_free=False,
+ past_index=-1,
+ per_device_eval_batch_size=16,
+ per_device_train_batch_size=1,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_revision=False,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=['wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=None,
+ run_name=None,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=SaveStrategy.STEPS,
+ save_total_limit=1,
+ seed=1234,
+ skip_memory_metrics=True,
+ system_prompt=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_liger_kernel=False,
+ use_mps_device=False,
+ wandb_entity=None,
+ wandb_project=None,
+ wandb_run_group=None,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ weight_decay=0.0,
+ )
+ 2025-08-15 08:24:09 - INFO - __main__ - Checkpoint detected, resuming training at last_checkpoint='data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/checkpoint-2500'.
+ 2025-08-15 08:24:10 - INFO - __main__ - *** Initializing model kwargs ***
+ 2025-08-15 08:58:30 - INFO - __main__ - *** Train ***
+ 2025-08-15 08:58:30 - INFO - __main__ - DeepseekV2ForCausalLM(
+ (model): DeepseekV2Model(
+ (embed_tokens): Embedding(102400, 2048)
+ (layers): ModuleList(
+ (0): DeepseekV2DecoderLayer(
+ (self_attn): DeepseekV2FlashAttention2(
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+ (kv_a_layernorm): DeepseekV2RMSNorm()
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+ )
+ (mlp): DeepseekV2MLP(
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
+ (act_fn): SiLU()
+ )
+ (input_layernorm): DeepseekV2RMSNorm()
+ (post_attention_layernorm): DeepseekV2RMSNorm()
+ )
+ (1-26): 26 x DeepseekV2DecoderLayer(
+ (self_attn): DeepseekV2FlashAttention2(
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+ (kv_a_layernorm): DeepseekV2RMSNorm()
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+ )
+ (mlp): DeepseekV2MoE(
+ (experts): ModuleList(
+ (0-63): 64 x DeepseekV2MLP(
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
+ (act_fn): SiLU()
+ )
+ )
+ (gate): MoEGate()
+ (shared_experts): DeepseekV2MLP(
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
+ (act_fn): SiLU()
+ )
+ )
+ (input_layernorm): DeepseekV2RMSNorm()
+ (post_attention_layernorm): DeepseekV2RMSNorm()
+ )
+ )
+ (norm): DeepseekV2RMSNorm()
+ )
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
+ )
+ 2025-08-15 09:01:57 - INFO - __main__ - Model parameters ModelConfig(model_name_or_path='deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct', model_revision='main', torch_dtype='bfloat16', trust_remote_code=True, attn_implementation='flash_attention_2', use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, lora_task_type='CAUSAL_LM', use_rslora=False, use_dora=False, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False)
+ 2025-08-15 09:01:57 - INFO - __main__ - Script parameters ScriptArguments(dataset_name='autoprogrammer/nemotron_code_lf_filtered', dataset_config=None, dataset_train_split='train', dataset_test_split='test', gradient_checkpointing_use_reentrant=False, ignore_bias_buffers=False)
+ 2025-08-15 09:01:57 - INFO - __main__ - Training parameters SFTConfig(
+ _n_gpu=1,
+ accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+ activation_offloading=False,
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ average_tokens_across_devices=True,
+ batch_eval_metrics=False,
+ benchmarks=[],
+ bf16=True,
+ bf16_full_eval=False,
+ callbacks=[],
+ chat_template=None,
+ completion_only_loss=None,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_persistent_workers=False,
+ dataloader_pin_memory=True,
+ dataloader_prefetch_factor=None,
+ dataset_kwargs=None,
+ dataset_num_proc=None,
+ dataset_text_field=text,
+ ddp_backend=None,
+ ddp_broadcast_buffers=None,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800000000,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=False,
+ eos_token=<EOS_TOKEN>,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_do_concat_batches=True,
+ eval_on_start=False,
+ eval_packing=None,
+ eval_steps=None,
+ eval_strategy=IntervalStrategy.NO,
+ eval_use_gather_object=False,
+ fp16=False,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=True,
+ gradient_checkpointing_kwargs={'use_reentrant': False},
+ greater_is_better=None,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_always_push=False,
+ hub_model_id=Deepseek-Coder-V2-Lite-13B-Instruct-sft-nemotron-code,
+ hub_model_revision=main,
+ hub_private_repo=None,
+ hub_revision=None,
+ hub_strategy=HubStrategy.EVERY_SAVE,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_for_metrics=[],
+ include_inputs_for_metrics=False,
+ include_num_input_tokens_seen=False,
+ include_tokens_per_second=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=length,
+ liger_kernel_config=None,
+ load_best_model_at_end=False,
+ local_rank=0,
+ log_level=info,
+ log_level_replica=warning,
+ log_on_each_node=True,
+ logging_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/runs/Aug15_09-01-55_ip-172-31-35-111,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=1,
+ logging_strategy=IntervalStrategy.STEPS,
+ lr_scheduler_kwargs={'min_lr_rate': 0.1},
+ lr_scheduler_type=SchedulerType.COSINE_WITH_MIN_LR,
+ max_grad_norm=1.0,
+ max_length=8192,
+ max_seq_length=None,
+ max_steps=-1,
+ metric_for_best_model=None,
+ model_init_kwargs=None,
+ mp_parameters=,
+ neftune_noise_alpha=None,
+ no_cuda=False,
+ num_train_epochs=1,
+ optim=OptimizerNames.ADAMW_TORCH,
+ optim_args=None,
+ optim_target_modules=None,
+ output_dir=data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code,
+ overwrite_hub_revision=False,
+ overwrite_output_dir=True,
+ packing=True,
+ pad_to_multiple_of=None,
+ pad_token=<PAD_TOKEN>,
+ padding_free=False,
+ past_index=-1,
+ per_device_eval_batch_size=16,
+ per_device_train_batch_size=1,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_revision=False,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=['wandb'],
+ restore_callback_states_from_checkpoint=False,
+ resume_from_checkpoint=/home/ubuntu/efs/hector/data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code/checkpoint-2500,
+ run_name=None,
+ save_on_each_node=False,
+ save_only_model=False,
+ save_safetensors=True,
+ save_steps=500,
+ save_strategy=SaveStrategy.STEPS,
+ save_total_limit=1,
+ seed=1234,
+ skip_memory_metrics=True,
+ system_prompt=None,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torch_empty_cache_steps=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_cpu=False,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_liger_kernel=False,
+ use_mps_device=False,
+ wandb_entity=None,
+ wandb_project=None,
+ wandb_run_group=None,
+ warmup_ratio=0.1,
+ warmup_steps=0,
+ weight_decay=0.0,
+ )
+ 2025-08-15 09:01:58 - INFO - __main__ - *** Initializing model kwargs ***
+ 2025-08-15 09:02:23 - INFO - __main__ - *** Train ***
+ 2025-08-15 09:02:23 - INFO - __main__ - DeepseekV2ForCausalLM(
+ (model): DeepseekV2Model(
+ (embed_tokens): Embedding(102400, 2048)
+ (layers): ModuleList(
+ (0): DeepseekV2DecoderLayer(
+ (self_attn): DeepseekV2FlashAttention2(
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+ (kv_a_layernorm): DeepseekV2RMSNorm()
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+ )
+ (mlp): DeepseekV2MLP(
+ (gate_proj): Linear(in_features=2048, out_features=10944, bias=False)
+ (up_proj): Linear(in_features=2048, out_features=10944, bias=False)
+ (down_proj): Linear(in_features=10944, out_features=2048, bias=False)
+ (act_fn): SiLU()
+ )
+ (input_layernorm): DeepseekV2RMSNorm()
+ (post_attention_layernorm): DeepseekV2RMSNorm()
+ )
+ (1-26): 26 x DeepseekV2DecoderLayer(
+ (self_attn): DeepseekV2FlashAttention2(
+ (q_proj): Linear(in_features=2048, out_features=3072, bias=False)
+ (kv_a_proj_with_mqa): Linear(in_features=2048, out_features=576, bias=False)
+ (kv_a_layernorm): DeepseekV2RMSNorm()
+ (kv_b_proj): Linear(in_features=512, out_features=4096, bias=False)
+ (o_proj): Linear(in_features=2048, out_features=2048, bias=False)
+ (rotary_emb): DeepseekV2YarnRotaryEmbedding()
+ )
+ (mlp): DeepseekV2MoE(
+ (experts): ModuleList(
+ (0-63): 64 x DeepseekV2MLP(
+ (gate_proj): Linear(in_features=2048, out_features=1408, bias=False)
+ (up_proj): Linear(in_features=2048, out_features=1408, bias=False)
+ (down_proj): Linear(in_features=1408, out_features=2048, bias=False)
+ (act_fn): SiLU()
+ )
+ )
+ (gate): MoEGate()
+ (shared_experts): DeepseekV2MLP(
+ (gate_proj): Linear(in_features=2048, out_features=2816, bias=False)
+ (up_proj): Linear(in_features=2048, out_features=2816, bias=False)
+ (down_proj): Linear(in_features=2816, out_features=2048, bias=False)
+ (act_fn): SiLU()
+ )
+ )
+ (input_layernorm): DeepseekV2RMSNorm()
+ (post_attention_layernorm): DeepseekV2RMSNorm()
+ )
+ )
+ (norm): DeepseekV2RMSNorm()
+ )
+ (lm_head): Linear(in_features=2048, out_features=102400, bias=False)
+ )
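
The log shows the run restarting twice and resuming from checkpoint-2500 on its way to the step-3000 save in this commit. Below is a minimal sketch of what that resume looks like with TRL's SFTTrainer, assuming recent trl/datasets releases; the hyperparameters are copied from the SFTConfig dump above, while the model/dataset wiring is an assumption, since the training script itself is not part of this commit.

```python
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

# Hyperparameters copied from the logged SFTConfig; everything else is an
# assumption about how the original script was wired.
args = SFTConfig(
    output_dir="data/DeepSeek-Coder-V2-Lite-Instruct/sft/nemotron_code",
    learning_rate=1e-5,
    lr_scheduler_type="cosine_with_min_lr",
    lr_scheduler_kwargs={"min_lr_rate": 0.1},
    warmup_ratio=0.1,
    num_train_epochs=1,
    per_device_train_batch_size=1,
    gradient_checkpointing=True,
    bf16=True,
    packing=True,
    max_length=8192,
    save_steps=500,
    save_total_limit=1,
    seed=1234,
    # The log shows model_init_kwargs=None because the original script loads
    # the model itself; passing them here is the self-contained alternative.
    model_init_kwargs={"torch_dtype": "bfloat16", "trust_remote_code": True},
)

train_dataset = load_dataset("autoprogrammer/nemotron_code_lf_filtered", split="train")

trainer = SFTTrainer(
    model="deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct",
    args=args,
    train_dataset=train_dataset,
)

# With resume_from_checkpoint=True the Trainer restores model, optimizer,
# scheduler and step counter from the latest checkpoint in output_dir,
# here checkpoint-2500, which is why training continues toward step 3000.
trainer.train(resume_from_checkpoint=True)
```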
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ff576f8162e1a2a3a7d78df03d3a799feb5a9463dceeb2565ebf309d4c6fe8a
- size 7736
+ oid sha256:b513e306e2fb4cd08cc41c8172b5350595afa4ceba42965cad86229a50070180
+ size 7800
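
training_args.bin is the pickled training-arguments object the Trainer writes next to each checkpoint; the new digest and slightly larger size (7736 to 7800 bytes) are consistent with updated field values such as the resume_from_checkpoint path seen in the log. A minimal sketch for inspecting it locally; unpickle only files you trust:

```python
import torch

# weights_only=False is required because this is a pickled config object,
# not a tensor file. Only do this for files from a trusted source.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)          # e.g. SFTConfig
print(args.resume_from_checkpoint)
print(args.seed, args.learning_rate)
```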