Trained L2R token ranking model on MIMIC-IV
Files changed:
- config.json +2 -3
- lookup_table.pt +2 -2
- special_tokens_map.json +1 -7
- training_l2r_log_2025-06-11_15-29-36.log +434 -0
config.json
CHANGED
@@ -1,8 +1,7 @@
 {
-  "_attn_implementation_autoset": true,
   "_name_or_path": "mistralai/Mistral-7B-Instruct-v0.3",
   "architectures": [
-    "
+    "LTRModel"
   ],
   "attention_dropout": 0.0,
   "bos_token_id": 1,
@@ -15926,7 +15925,7 @@
   "rope_theta": 1000000.0,
   "sliding_window": null,
   "tie_word_embeddings": false,
-  "torch_dtype": "
+  "torch_dtype": "float32",
   "transformers_version": "4.49.0",
   "use_cache": true,
   "vocab_size": 32768
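Note: after this change the config advertises the custom LTRModel architecture (registered with transformers AutoModel later in the training log) and a float32 torch_dtype. A minimal way to inspect the updated fields, assuming the config's model_type still resolves through AutoConfig (repo id taken from the log below):

```python
# Inspect the fields this commit changed in config.json.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("deb101/mistral-7b-instruct-v0.3-mimic4-adapt-l2r")
print(cfg.architectures)  # ["LTRModel"]
print(cfg.torch_dtype)    # float32
print(cfg.vocab_size)     # 32768
```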
lookup_table.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:019cdc69e465557bb4648008851bd0938d508c07bb7da22b65f9fbd07614fd56
+size 1040975029
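Note: this is a Git LFS pointer file, so the diff records only the new object hash and byte size, not the tensor itself. The 1,040,975,029-byte size is consistent with the 32768 × 7942 lookup table the training log describes, stored as float32. A quick check of that arithmetic (the float32 layout is an assumption, not stated in the diff):

```python
# Sanity-check the LFS byte size against the table dimensions from the log.
vocab_size, num_labels = 32768, 7942
tensor_bytes = vocab_size * num_labels * 4  # float32 => 1,040,973,824 bytes
lfs_size = 1040975029                       # from the pointer above
print(lfs_size - tensor_bytes)              # ~1.2 KB of torch.save overhead
```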
special_tokens_map.json
CHANGED
@@ -13,13 +13,7 @@
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "pad_token": "</s>",
   "unk_token": {
     "content": "<unk>",
     "lstrip": false,
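Note: the pad token collapses from a full AddedToken specification to the plain string "</s>", i.e. the tokenizer now pads with the EOS token, matching the log line "Setting pad token to eos token". That setup is typically just the following (a sketch, not the exact code in learning2rank.py; "out_dir" is a placeholder):

```python
# Reuse EOS as the padding token; this serializes as "pad_token": "</s>".
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
tokenizer.pad_token = tokenizer.eos_token  # "</s>" for Mistral-7B-Instruct-v0.3
tokenizer.save_pretrained("out_dir")       # writes the simplified special_tokens_map.json
```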
training_l2r_log_2025-06-11_15-29-36.log
ADDED
@@ -0,0 +1,434 @@
2025-06-11 15:29:36,035 - INFO - 📝 Logging initialized. Log file created at: ../tmp/logs/training_l2r_log_2025-06-11_15-29-36.log - [learning2rank.py:287:setup_logger]
2025-06-11 15:29:36,035 - INFO - ================================================================================ - [learning2rank.py:109:log_section]
2025-06-11 15:29:36,036 - INFO - = 📌 INITIALIZING TRAINING ENVIRONMENT = - [learning2rank.py:110:log_section]
2025-06-11 15:29:36,036 - INFO - ================================================================================ - [learning2rank.py:113:log_section]
2025-06-11 15:29:36,036 - INFO - 🚀 Setting up data paths and environment variables... - [learning2rank.py:3751:main]
2025-06-11 15:29:36,036 - INFO - 🛠️ Command-line Arguments: - [learning2rank.py:303:print_args]
2025-06-11 15:29:36,036 - INFO -
🔹 output_dir: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b
🔹 source_url: XURLs.MIMIC4_DEMO
🔹 data: mimic4_icd10_full
🔹 data_l2r_fname_prefix: mimic4_icd10
🔹 logfile: training_l2r_log
🔹 base_dir: ../tmp/MIMIC4_DEMO
🔹 l2r_boot_dir: ../tmp/MIMIC4_DEMO/mimic4_l2rboot_mistral7b
🔹 hub_model_id: deb101/mistral-7b-instruct-v0.3-mimic4-adapt
🔹 model_name: mistralai/Mistral-7B-Instruct-v0.3
🔹 max_length: 512
🔹 do_fresh_training: True
🔹 load_from_checkpoint: False
🔹 task: l2r
🔹 num_train_epochs: 2
🔹 metric_for_best_model: ndcg@25
🔹 learning_rate: 0.0001
🔹 warmup_steps: 0
🔹 generate_report: False
🔹 logfile_path: ../tmp/logs/training_l2r_log_2025-06-11_15-29-36.log
🔹 source: /home/ubuntu/.xcube/data/mimic4_demo - [learning2rank.py:304:print_args]
2025-06-11 15:29:36,036 - INFO - ➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖➖ - [learning2rank.py:305:print_args]
2025-06-11 15:29:36,036 - INFO - 📁 Using L2R boot directory: ../tmp/MIMIC4_DEMO/mimic4_l2rboot_mistral7b - [learning2rank.py:3765:main]
2025-06-11 15:29:36,036 - INFO - 📂 Using output directory: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:3767:main]
2025-06-11 15:29:36,037 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:29:36,037 - INFO - + ✨ LOADING DATASETS + - [learning2rank.py:110:log_section]
2025-06-11 15:29:36,037 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:29:36,037 - INFO - 📊 Loading main dataset and L2R dataset... - [learning2rank.py:3770:main]
2025-06-11 15:29:36,037 - INFO - 📂 Loading main data from: /home/ubuntu/.xcube/data/mimic4_demo/mimic4_icd10_full.csv - [learning2rank.py:333:get_data]
2025-06-11 15:29:44,310 - INFO - ✅ Successfully loaded main data: 122279 rows - [learning2rank.py:346:get_data]
2025-06-11 15:29:44,310 - INFO - 📂 Loading L2R data from: ../tmp/MIMIC4_DEMO/mimic4_l2rboot_mistral7b/mimic4_icd10_tok_rank_per_lbl.ft - [learning2rank.py:352:get_data]
2025-06-11 15:29:44,310 - INFO - 📂 Loading L2R tokens from: ../tmp/MIMIC4_DEMO/mimic4_l2rboot_mistral7b/mimic4_icd10_tok.ft - [learning2rank.py:355:get_data]
2025-06-11 15:29:44,310 - INFO - 📂 Loading L2R labels from: ../tmp/MIMIC4_DEMO/mimic4_l2rboot_mistral7b/mimic4_icd10_lbl.ft - [learning2rank.py:358:get_data]
2025-06-11 15:29:46,024 - INFO - ✅ Successfully loaded L2R data: 260243456 rows - [learning2rank.py:365:get_data]
2025-06-11 15:29:46,024 - INFO - ✅ Successfully loaded L2R tokens: 32768 tokens - [learning2rank.py:366:get_data]
2025-06-11 15:29:46,024 - INFO - ✅ Successfully loaded L2R labels: 7942 rows - [learning2rank.py:367:get_data]
2025-06-11 15:29:46,024 - INFO - 🔄 Total data loaded: 122279 main rows, 260243456 L2R rows - [learning2rank.py:374:get_data]
2025-06-11 15:29:46,025 - INFO - ✨ Successfully loaded both datasets: - [learning2rank.py:398:load_datasets]
2025-06-11 15:29:46,025 - INFO - - 📄 Main dataset: 122279 records - [learning2rank.py:399:load_datasets]
2025-06-11 15:29:46,025 - INFO - - 📄 L2R dataset: 260243456 records - [learning2rank.py:400:load_datasets]
2025-06-11 15:29:46,025 - INFO - - 🔤 Tokens: 32768 items - [learning2rank.py:401:load_datasets]
2025-06-11 15:29:46,025 - INFO - - 🏷️ Labels: 7942 items - [learning2rank.py:402:load_datasets]
2025-06-11 15:29:46,032 - INFO - ✅ Data loading completed successfully - [learning2rank.py:410:load_datasets]
2025-06-11 15:29:47,314 - INFO - ⏳ Starting quantization of ranks for DataFrame with 260243456 rows. Containing 32768 unique tokens & 7942 unique labels - [learning2rank.py:529:quantize_l2r_data]
2025-06-11 15:29:47,314 - INFO - 🔄 Quantizing those 32768 unique token ranks into 101 quantization levels for each label - [learning2rank.py:554:quantize_l2r_data]
2025-06-11 15:30:33,284 - INFO - ✅ Completed quantization: Produced tensor of shape torch.Size([7942, 32768, 4]) with 101 quantization levels per label - [learning2rank.py:608:quantize_l2r_data]
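Note: the quantization step above maps each label's 32768 token ranks onto 101 discrete relevance levels. The exact binning in learning2rank.py is not shown in this log; the sketch below is one plausible reading, assuming uniform binning with the best rank landing in the highest level. Variable names are illustrative.

```python
# Hedged sketch: quantize 32768 per-label token ranks into 101 levels.
import torch

NUM_LEVELS = 101    # from the log
VOCAB_SIZE = 32768  # unique tokens per label

ranks = torch.arange(VOCAB_SIZE)  # 0 = most relevant token for this label
levels = (NUM_LEVELS - 1) - (ranks * NUM_LEVELS) // VOCAB_SIZE
assert levels.max() == 100 and levels.min() == 0  # 101 levels, best rank -> 100
```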
2025-06-11 15:30:33,309 - WARNING - Label 457: Only 1 tokens with top relevance score (need 50) - [learning2rank.py:724:test_scored_tokens]
2025-06-11 15:30:33,314 - WARNING - Label 2871: Only 1 tokens with top relevance score (need 50) - [learning2rank.py:724:test_scored_tokens]
2025-06-11 15:30:33,318 - WARNING - Label 4330: Only 1 tokens with top relevance score (need 50) - [learning2rank.py:724:test_scored_tokens]
2025-06-11 15:30:33,322 - WARNING - Label 3865: Only 1 tokens with top relevance score (need 50) - [learning2rank.py:724:test_scored_tokens]
2025-06-11 15:30:33,325 - WARNING - Label 6626: Only 1 tokens with top relevance score (need 50) - [learning2rank.py:724:test_scored_tokens]
2025-06-11 15:30:33,353 - INFO - ******************************************************************************** - [learning2rank.py:109:log_section]
2025-06-11 15:30:33,353 - INFO - * 🌟 STARTING LEARNING TO RANK MODEL TRAINING * - [learning2rank.py:110:log_section]
2025-06-11 15:30:33,353 - INFO - ******************************************************************************** - [learning2rank.py:113:log_section]
2025-06-11 15:30:33,353 - INFO - 🔐 Loaded authentication token from environment - [learning2rank.py:3799:main]
2025-06-11 15:30:33,353 - INFO - 🏷️ Hub Model ID for this Learning to Rank task: deb101/mistral-7b-instruct-v0.3-mimic4-adapt-l2r - [learning2rank.py:3803:main]
2025-06-11 15:30:33,353 - INFO - -------------------------------------------------------------------------------- - [learning2rank.py:109:log_section]
2025-06-11 15:30:33,353 - INFO - - 📋 MODEL EXISTENCE CHECK - - [learning2rank.py:110:log_section]
2025-06-11 15:30:33,353 - INFO - -------------------------------------------------------------------------------- - [learning2rank.py:113:log_section]
2025-06-11 15:30:33,353 - INFO - 🔍 Checking model existence locally and on Hugging Face Hub... - [learning2rank.py:430:check_model_existence]
2025-06-11 15:30:33,354 - INFO - ❌ Model not found locally at: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:437:check_model_existence]
2025-06-11 15:30:33,411 - INFO - ✅ Model exists on Hugging Face Hub with ID: deb101/mistral-7b-instruct-v0.3-mimic4-adapt-l2r - [learning2rank.py:449:check_model_existence]
2025-06-11 15:30:33,411 - INFO - 📁 Model exists either locally or on Hub - [learning2rank.py:475:check_model_existence]
2025-06-11 15:30:33,411 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:30:33,411 - INFO - + ✨ STARTING FRESH TRAINING + - [learning2rank.py:110:log_section]
2025-06-11 15:30:33,411 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:30:33,411 - INFO - 🔄 Starting fresh training (either forced or model not found)... - [learning2rank.py:3816:main]
2025-06-11 15:30:33,681 - WARNING - Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured. - [_login.py:415:_login]
2025-06-11 15:30:33,681 - INFO - 🔑 Successfully authenticated with Hugging Face Hub - [learning2rank.py:311:authenticate_hf]
2025-06-11 15:30:33,681 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:30:33,681 - INFO - + ✨ LOADING BASE MODEL + - [learning2rank.py:110:log_section]
2025-06-11 15:30:33,681 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:30:33,681 - INFO - 📥 Loading pretrained model and tokenizer... - [learning2rank.py:3858:main]
2025-06-11 15:30:33,681 - INFO - 🚀 Starting model and tokenizer loading process... - [learning2rank.py:939:load_base_model_and_tokenizer]
2025-06-11 15:30:33,682 - INFO - 📊 Quantization config: BitsAndBytesConfig {
  "_load_in_4bit": true,
  "_load_in_8bit": false,
  "bnb_4bit_compute_dtype": "bfloat16",
  "bnb_4bit_quant_storage": "uint8",
  "bnb_4bit_quant_type": "nf4",
  "bnb_4bit_use_double_quant": true,
  "llm_int8_enable_fp32_cpu_offload": false,
  "llm_int8_has_fp16_weight": false,
  "llm_int8_skip_modules": null,
  "llm_int8_threshold": 6.0,
  "load_in_4bit": true,
  "load_in_8bit": false,
  "quant_method": "bitsandbytes"
}
- [learning2rank.py:948:load_base_model_and_tokenizer]
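Note: the BitsAndBytesConfig printed above corresponds to roughly this construction (a sketch using the public transformers kwargs, not the exact call site in learning2rank.py; fields the library derives itself are omitted):

```python
# Reconstruction of the 4-bit quantization config printed in the log.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
)
```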
2025-06-11 15:30:33,682 - INFO - 🔤 Loading tokenizer for model: mistralai/Mistral-7B-Instruct-v0.3... - [learning2rank.py:952:load_base_model_and_tokenizer]
2025-06-11 15:30:33,981 - INFO - 📝 Setting pad token to eos token... - [learning2rank.py:956:load_base_model_and_tokenizer]
2025-06-11 15:30:33,981 - INFO - 🧠 Loading base model with quantization... - [learning2rank.py:964:load_base_model_and_tokenizer]
2025-06-11 15:30:34,497 - INFO - We will use 90% of the memory on device 0 for storing the model, and 10% for the buffer to avoid OOM. You can set `max_memory` in to a higher value to use more memory (at your own risk). - [modeling.py:991:get_balanced_memory]
2025-06-11 15:30:39,731 - INFO - 🔧 Setting up default LoRA configuration... - [learning2rank.py:987:load_base_model_and_tokenizer]
2025-06-11 15:30:39,731 - INFO - 🔍 LoRA config: r=16, alpha=32, targets={'k_proj', 'q_proj', 'o_proj', 'v_proj'}, dropout=0.05 - [learning2rank.py:1010:load_base_model_and_tokenizer]
2025-06-11 15:30:39,731 - INFO - 🧩 Applying LoRA adapters to model... - [learning2rank.py:1017:load_base_model_and_tokenizer]
2025-06-11 15:30:39,906 - INFO - 📊 trainable params: 13,631,488 || all params: 7,261,655,040 || trainable%: 0.1877 - [learning2rank.py:135:log_print_output]
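Note: the LoRA settings logged above map onto a peft LoraConfig along these lines (a sketch; peft is assumed from the adapter workflow, and the trainable-parameter line matches peft's print_trainable_parameters format):

```python
# LoRA adapter setup matching the logged r/alpha/targets/dropout.
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
# model = get_peft_model(base_model, lora_config)
# model.print_trainable_parameters()
# -> trainable params: 13,631,488 || all params: 7,261,655,040 || trainable%: 0.1877
```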
2025-06-11 15:30:39,907 - INFO - ✅ Model and tokenizer successfully loaded! - [learning2rank.py:1024:load_base_model_and_tokenizer]
2025-06-11 15:30:39,907 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:30:39,907 - INFO - + ✨ DATA PREPROCESSING + - [learning2rank.py:110:log_section]
2025-06-11 15:30:39,907 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:30:39,907 - INFO - 🔄 Loading and preprocessing training data... - [learning2rank.py:3866:main]
2025-06-11 15:30:40,975 - INFO - 🔍 Verifying uniqueness of token IDs per label in scored_tokens... - [learning2rank.py:1302:preprocess_data]
2025-06-11 15:30:41,803 - INFO - ✅ All labels have unique token IDs in scored_tokens. - [learning2rank.py:1314:preprocess_data]
2025-06-11 15:30:41,803 - INFO - 🚀 Building a 2D lookup table for efficient token-to-relevance mapping across all labels! 🚀 - [learning2rank.py:1317:preprocess_data]
2025-06-11 15:30:41,803 - INFO - 🔢 Total labels = 7942 - [learning2rank.py:1320:preprocess_data]
2025-06-11 15:30:41,803 - INFO - 🔍 Precomputing token indices and corresponding relevance_values for each label... - [learning2rank.py:1321:preprocess_data]
2025-06-11 15:30:42,036 - INFO - 📊 Lookup table dimensions: 32768 vocabulary size × 7942 labels - [learning2rank.py:1328:preprocess_data]
2025-06-11 15:30:42,036 - INFO - ⚡ This approach eliminates token comparison broadcasting and provides O(1) lookup time for relevance scores! - [learning2rank.py:1331:preprocess_data]
2025-06-11 15:30:42,036 - INFO - 🧮 Processing relevance calculations vectorized for maximum speed 🔥 - [learning2rank.py:1334:preprocess_data]
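Note: structurally, the lookup table described above is a dense [vocab_size, num_labels] matrix, so fetching relevance scores for a batch of token IDs is a single indexing (gather) operation rather than a broadcasted comparison. A minimal sketch, with assumed names:

```python
# Sketch of O(1) token-to-relevance lookup via a dense 2D table.
import torch

VOCAB_SIZE, NUM_LABELS = 32768, 7942               # dimensions from the log
lookup_table = torch.zeros(VOCAB_SIZE, NUM_LABELS)  # filled from per-label relevance values

input_ids = torch.tensor([[1, 415, 29871]])         # hypothetical token IDs
relevance = lookup_table[input_ids]                 # shape [1, 3, 7942]; one gather, no broadcasting
```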
2025-06-11 15:30:42,202 - INFO - 🔍 Verifying token mappings with 10 samples... - [learning2rank.py:1367:verify_token_mappings]
2025-06-11 15:30:42,375 - INFO - ✅ Token mappings verification completed successfully! 🎉 - [learning2rank.py:1458:verify_token_mappings]
2025-06-11 15:30:42,380 - INFO - 🔄 Processing dataset with map... - [learning2rank.py:1522:preprocess_data]
2025-06-11 15:30:42,697 - INFO - ✅ Dataset built in 0h 0m 0.32s - [learning2rank.py:1545:preprocess_data]
2025-06-11 15:30:42,709 - INFO - The size of Training set: 173 🏋️ (Training Data) - [learning2rank.py:1576:preprocess_data]
2025-06-11 15:30:42,709 - INFO - The size of Evaluation set: 35 🔍 (Test Data) - [learning2rank.py:1577:preprocess_data]
2025-06-11 15:30:42,709 - INFO - 🚀 Created HuggingFace Dataset with 208 samples, 7942 labels - [learning2rank.py:1585:preprocess_data]
2025-06-11 15:30:42,710 - INFO - 🏷️ Number of unique ICD-10 codes: 7942 - [learning2rank.py:3879:main]
2025-06-11 15:30:42,710 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:30:42,710 - INFO - + ✨ MODEL INITIALIZATION + - [learning2rank.py:110:log_section]
2025-06-11 15:30:42,710 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:30:42,710 - INFO - 🧠 Initializing custom L2R model for outputting per-token relevance scores per ICD-10 codes. - [learning2rank.py:3882:main]
2025-06-11 15:30:44,257 - INFO - 🔧 Registering LTRModel with transformers AutoModel 🚀 - [learning2rank.py:1725:define_model]
2025-06-11 15:30:44,258 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:30:44,258 - INFO - + ✨ TRAINING PREPARATION + - [learning2rank.py:110:log_section]
2025-06-11 15:30:44,258 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:30:44,258 - INFO - ⚙️ Preparing training components and optimizers... - [learning2rank.py:3889:main]
2025-06-11 15:30:44,289 - INFO - 🖥️ Device: NVIDIA GH200 480GB - [learning2rank.py:1887:log_training_configuration]
2025-06-11 15:30:44,289 - INFO - 🔋 CUDA Available: True - [learning2rank.py:1890:log_training_configuration]
2025-06-11 15:30:44,289 - INFO - 💾 CUDA Device Count: 1 - [learning2rank.py:1891:log_training_configuration]
2025-06-11 15:30:44,290 - INFO -
📋 Training Configuration 📋
+----------+-----------------------------+--------------------------------------------------+
| 🌟 Emoji | 🏷️ Parameter                | 📊 Value                                         |
+----------+-----------------------------+--------------------------------------------------+
| 📁       | Output Directory            | ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b     |
| 🔁       | Training Epochs             | 2                                                |
| 🏋️       | Train Batch Size            | 1                                                |
| 🔍       | Eval Batch Size             | 1                                                |
| 📊       | Gradient Accumulation Steps | 4                                                |
| 🚀       | Learning Rate               | 0.0001                                           |
| 🌅       | Warmup Steps                | 0                                                |
| 💾       | Save Strategy               | epoch                                            |
| 💾       | Save Total Limit            | 10                                               |
| 📊       | Evaluation Strategy         | epoch                                            |
| 🎯       | Best Model Metric           | ndcg@25                                          |
| 📝       | Logging Strategy            | steps (every 10 steps)                           |
| 🌐       | Push to Hub                 | True                                             |
| 🌐       | Hub Model ID                | deb101/mistral-7b-instruct-v0.3-mimic4-adapt-l2r |
| 🔢       | Steps per Epoch             | 43                                               |
| 🔢       | Total Training Steps        | 86                                               |
| 🔢       | Evaluation Steps            | 35                                               |
| 📊       | Training Dataset Size       | 173 samples 🏋️                                   |
| 📊       | Evaluation Dataset Size     | 35 samples 🔍                                    |
+----------+-----------------------------+--------------------------------------------------+ - [learning2rank.py:1879:log_training_args]
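Note: read as transformers TrainingArguments, the configuration table corresponds roughly to the following sketch (argument names are the standard transformers ones and are assumed, not taken from learning2rank.py):

```python
# The run configuration from the table above, as a TrainingArguments sketch.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b",
    num_train_epochs=2,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=4,
    learning_rate=1e-4,
    warmup_steps=0,
    save_strategy="epoch",
    save_total_limit=10,
    eval_strategy="epoch",
    metric_for_best_model="ndcg@25",
    load_best_model_at_end=True,  # implied by the "Loading best model" step later in the log
    logging_steps=10,
    push_to_hub=True,
    hub_model_id="deb101/mistral-7b-instruct-v0.3-mimic4-adapt-l2r",
)
```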
2025-06-11 15:30:44,290 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:30:44,290 - INFO - + ✨ MODEL TRAINING + - [learning2rank.py:110:log_section]
2025-06-11 15:30:44,290 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:30:44,290 - INFO - 🏋️ Starting model training process... - [learning2rank.py:3909:main]
2025-06-11 15:30:44,290 - INFO - 🏁 Preparing Custom Trainer 🛠️ - [learning2rank.py:3080:train_model]
2025-06-11 15:30:44,329 - INFO - We are registering the tokenizer mistralai/Mistral-7B-Instruct-v0.3 in Custom Trainer - [learning2rank.py:2519:__init__]
2025-06-11 15:30:44,329 - INFO - 🏋️ Commencing Model Training 💪 - [learning2rank.py:3121:train_model]
2025-06-11 15:30:44,595 - INFO - 🚀 Starting Training... - [learning2rank.py:2269:on_train_begin]
2025-06-11 15:31:06,023 - INFO -
🚂 Training Metrics (Step 10) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -4.73834e+16 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 0.0001       |
+---------------+--------------+
| epoch         | 0.231214     |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:31:20,408 - INFO -
🚂 Training Metrics (Step 20) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -3.59497e+17 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 0.0001       |
+---------------+--------------+
| epoch         | 0.462428     |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:31:34,863 - INFO -
🚂 Training Metrics (Step 30) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -6.37696e+16 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 0.0001       |
+---------------+--------------+
| epoch         | 0.693642     |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:31:49,256 - INFO -
🚂 Training Metrics (Step 40) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -8.24219e+17 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 0.0001       |
+---------------+--------------+
| epoch         | 0.924855     |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:31:53,966 - INFO - Removing 'token_type_ids' from eval_dataset as they are not needed. - [learning2rank.py:2635:evaluate]
2025-06-11 15:35:08,172 - WARNING - No valid samples found for metric 'precision@25'. - [learning2rank.py:2739:evaluate]
2025-06-11 15:35:08,172 - INFO -
🔍 Evaluation Metrics 🔍
+-------------------+-------------+
| Metric            | Value       |
+===================+=============+
| eval_loss         | -1.2019e+17 |
+-------------------+-------------+
| eval_ndcg         | 0.955488    |
+-------------------+-------------+
| eval_ndcg@25      | 0.212566    |
+-------------------+-------------+
| eval_precision@25 | 0           |
+-------------------+-------------+ - [learning2rank.py:2206:on_evaluate]
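Note: ndcg@25 is the model-selection metric for this run. As a reference point, one common formulation (not necessarily the exact one in learning2rank.py) is sketched below; it ranks tokens by predicted score and compares the discounted gain of the top 25 against the ideal ordering.

```python
# Hedged sketch of NDCG@k with linear gains and log2 discounts.
import torch

def ndcg_at_k(scores: torch.Tensor, relevance: torch.Tensor, k: int = 25) -> float:
    k = min(k, scores.numel())
    topk = relevance[scores.topk(k).indices]             # true relevance of predicted top-k
    discounts = 1.0 / torch.log2(torch.arange(2, k + 2).float())
    dcg = (topk * discounts).sum()
    idcg = (relevance.topk(k).values * discounts).sum()  # best achievable ordering
    return (dcg / idcg).item() if idcg > 0 else 0.0
```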
2025-06-11 15:35:09,356 - INFO - 💾 Model weights saved in safetensors format: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-44 - [learning2rank.py:2759:_save]
2025-06-11 15:35:09,383 - INFO - ⚙️ Config saved in checkpoint: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-44 - [learning2rank.py:2764:_save]
2025-06-11 15:35:09,384 - INFO - 📋 Saved files in ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-44:
+---------+-------------------+------------+
| Index   | Saved File        | Size       |
+=========+===================+============+
| 1       | training_args.bin | 0.01 MB    |
+---------+-------------------+------------+
| 2       | model.safetensors | 4122.74 MB |
+---------+-------------------+------------+
| 3       | config.json       | 0.38 MB    |
+---------+-------------------+------------+ - [learning2rank.py:2781:_save]
2025-06-11 15:35:22,810 - INFO -
🚂 Training Metrics (Step 50) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -1.24339e+18 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 0.0001       |
+---------------+--------------+
| epoch         | 1.13873      |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:35:37,308 - INFO -
🚂 Training Metrics (Step 60) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -8.42207e+16 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 9.7e-05      |
+---------------+--------------+
| epoch         | 1.36994      |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:35:51,736 - INFO -
🚂 Training Metrics (Step 70) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -6.54621e+18 |
+---------------+--------------+
| grad_norm     | 1.53209e+18  |
+---------------+--------------+
| learning_rate | 8.8e-05      |
+---------------+--------------+
| epoch         | 1.60116      |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:36:06,062 - INFO -
🚂 Training Metrics (Step 80) 🚂
+---------------+--------------+
| Metric        | Value        |
+===============+==============+
| loss          | -9.11572e+16 |
+---------------+--------------+
| grad_norm     | nan          |
+---------------+--------------+
| learning_rate | 7.8e-05      |
+---------------+--------------+
| epoch         | 1.83237      |
+---------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:36:15,873 - INFO - 💾 Model weights saved in safetensors format: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86 - [learning2rank.py:2759:_save]
2025-06-11 15:36:15,900 - INFO - ⚙️ Config saved in checkpoint: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86 - [learning2rank.py:2764:_save]
2025-06-11 15:36:15,901 - INFO - 📋 Saved files in ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86:
+---------+-------------------+------------+
| Index   | Saved File        | Size       |
+=========+===================+============+
| 1       | training_args.bin | 0.01 MB    |
+---------+-------------------+------------+
| 2       | model.safetensors | 4122.74 MB |
+---------+-------------------+------------+
| 3       | config.json       | 0.38 MB    |
+---------+-------------------+------------+ - [learning2rank.py:2781:_save]
2025-06-11 15:36:16,120 - INFO - Removing 'token_type_ids' from eval_dataset as they are not needed. - [learning2rank.py:2635:evaluate]
2025-06-11 15:39:29,774 - INFO -
🔍 Evaluation Metrics 🔍
+-------------------+--------------+
| Metric            | Value        |
+===================+==============+
| eval_loss         | -2.89696e+17 |
+-------------------+--------------+
| eval_ndcg         | 0.956037     |
+-------------------+--------------+
| eval_ndcg@25      | 0.620746     |
+-------------------+--------------+
| eval_precision@25 | 0.2464       |
+-------------------+--------------+ - [learning2rank.py:2206:on_evaluate]
2025-06-11 15:39:33,224 - INFO - 💾 Model weights saved in safetensors format: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86 - [learning2rank.py:2759:_save]
2025-06-11 15:39:33,252 - INFO - ⚙️ Config saved in checkpoint: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86 - [learning2rank.py:2764:_save]
2025-06-11 15:39:33,253 - INFO - 📋 Saved files in ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86:
+---------+--------------------+------------+
| Index   | Saved File         | Size       |
+=========+====================+============+
| 1       | training_args.bin  | 0.01 MB    |
+---------+--------------------+------------+
| 2       | optimizer.pt       | 352.39 MB  |
+---------+--------------------+------------+
| 3       | model.safetensors  | 4122.74 MB |
+---------+--------------------+------------+
| 4       | scaler.pt          | 0.00 MB    |
+---------+--------------------+------------+
| 5       | config.json        | 0.38 MB    |
+---------+--------------------+------------+
| 6       | scheduler.pt       | 0.00 MB    |
+---------+--------------------+------------+
| 7       | trainer_state.json | 0.00 MB    |
+---------+--------------------+------------+
| 8       | rng_state.pth      | 0.01 MB    |
+---------+--------------------+------------+ - [learning2rank.py:2781:_save]
2025-06-11 15:39:33,735 - INFO - 📂 Loading best model from ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86 - [learning2rank.py:2837:_load_best_model]
2025-06-11 15:39:33,735 - INFO - 🖥️ Model is on device: cuda:0 - [learning2rank.py:2847:_load_best_model]
2025-06-11 15:39:33,798 - INFO - 🔑 Key order comparison:
+---------+--------------------------------------------------------------------------+-------------------------------------------------------------------------------------------+
| Index   | Saved state_dict Keys                                                    | Model state_dict Keys                                                                     |
+=========+==========================================================================+===========================================================================================+
| 1       | ground_model.base_model.model.lm_head.weight                             | label_embeddings.weight                                                                   |
+---------+--------------------------------------------------------------------------+-------------------------------------------------------------------------------------------+
| 2       | ground_model.base_model.model.model.embed_tokens.weight                  | ground_model.base_model.model.model.embed_tokens.weight                                   |
+---------+--------------------------------------------------------------------------+-------------------------------------------------------------------------------------------+
| 3       | ground_model.base_model.model.model.layers.0.input_layernorm.weight      | ground_model.base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight           |
+---------+--------------------------------------------------------------------------+-------------------------------------------------------------------------------------------+
| 4       | ground_model.base_model.model.model.layers.0.mlp.down_proj.weight        | ground_model.base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight.absmax    |
+---------+--------------------------------------------------------------------------+-------------------------------------------------------------------------------------------+
| 5       | ground_model.base_model.model.model.layers.0.mlp.down_proj.weight.absmax | ground_model.base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight.quant_map |
+---------+--------------------------------------------------------------------------+-------------------------------------------------------------------------------------------+ - [learning2rank.py:2871:_load_best_model]
2025-06-11 15:39:34,630 - INFO - ✅ Loaded best model weights from ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/checkpoint-86/model.safetensors - [learning2rank.py:2888:_load_best_model]
2025-06-11 15:39:34,670 - INFO - ✔️ Weight for label_embeddings.weight matches between saved and loaded state_dict - [learning2rank.py:2900:_load_best_model]
2025-06-11 15:39:34,725 - INFO - ✔️ Weight for ground_model.base_model.model.model.embed_tokens.weight matches between saved and loaded state_dict - [learning2rank.py:2900:_load_best_model]
2025-06-11 15:39:34,740 - INFO -
🚂 Training Metrics (Step 86) 🚂
+--------------------------+--------------+
| Metric                   | Value        |
+==========================+==============+
| train_runtime            | 530.146      |
+--------------------------+--------------+
| train_samples_per_second | 0.653        |
+--------------------------+--------------+
| train_steps_per_second   | 0.162        |
+--------------------------+--------------+
| total_flos               | 3.81076e+15  |
+--------------------------+--------------+
| train_loss               | -1.15684e+18 |
+--------------------------+--------------+
| epoch                    | 1.9711       |
+--------------------------+--------------+ - [learning2rank.py:2187:on_log]
2025-06-11 15:39:34,740 - INFO - ✨ Training Completed! ✨ - [learning2rank.py:2336:on_train_end]
2025-06-11 15:39:34,814 - INFO - 📊 Training loss plot saved as '../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/train_loss_plot.png' - [learning2rank.py:2439:on_train_end]
2025-06-11 15:39:34,870 - INFO - 📊 Evaluation loss plot saved as '../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/eval_loss_plot.png' - [learning2rank.py:2453:on_train_end]
2025-06-11 15:39:34,928 - INFO - 📊 Evaluation metric plot saved as '../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/eval_ndcg@25_plot.png' - [learning2rank.py:2474:on_train_end]
2025-06-11 15:39:34,928 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:109:log_section]
2025-06-11 15:39:34,928 - INFO - + ✨ MODEL SAVING + - [learning2rank.py:110:log_section]
2025-06-11 15:39:34,928 - INFO - ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - [learning2rank.py:113:log_section]
2025-06-11 15:39:34,928 - INFO - 💾 Saving trained model and pushing to Hugging Face Hub... - [learning2rank.py:3924:main]
2025-06-11 15:39:34,928 - INFO - 📁 Creating/using output directory: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:3155:save_and_push]
2025-06-11 15:39:36,118 - INFO - 💾 Model weights saved in safetensors format: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:2759:_save]
2025-06-11 15:39:36,144 - INFO - ⚙️ Config saved in checkpoint: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:2764:_save]
2025-06-11 15:39:36,145 - INFO - 📋 Saved files in ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b:
+---------+-----------------------+------------+
| Index   | Saved File            | Size       |
+=========+=======================+============+
| 1       | eval_loss_plot.png    | 0.03 MB    |
+---------+-----------------------+------------+
| 2       | training_args.bin     | 0.01 MB    |
+---------+-----------------------+------------+
| 3       | model.safetensors     | 4122.74 MB |
+---------+-----------------------+------------+
| 4       | eval_ndcg@25_plot.png | 0.04 MB    |
+---------+-----------------------+------------+
| 5       | config.json           | 0.38 MB    |
+---------+-----------------------+------------+
| 6       | train_loss_plot.png   | 0.03 MB    |
+---------+-----------------------+------------+ - [learning2rank.py:2781:_save]
2025-06-11 15:39:39,545 - INFO - 💾 Model weights saved in safetensors format: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:2759:_save]
2025-06-11 15:39:39,573 - INFO - ⚙️ Config saved in checkpoint: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:2764:_save]
2025-06-11 15:39:39,575 - INFO - 📋 Saved files in ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b:
+---------+-----------------------+------------+
| Index   | Saved File            | Size       |
+=========+=======================+============+
| 1       | eval_loss_plot.png    | 0.03 MB    |
+---------+-----------------------+------------+
| 2       | training_args.bin     | 0.01 MB    |
+---------+-----------------------+------------+
| 3       | model.safetensors     | 4122.74 MB |
+---------+-----------------------+------------+
| 4       | eval_ndcg@25_plot.png | 0.04 MB    |
+---------+-----------------------+------------+
| 5       | config.json           | 0.38 MB    |
+---------+-----------------------+------------+
| 6       | train_loss_plot.png   | 0.03 MB    |
+---------+-----------------------+------------+ - [learning2rank.py:2781:_save]
2025-06-11 15:41:03,613 - INFO - 💾 Model saved to: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:3159:save_and_push]
2025-06-11 15:41:07,306 - INFO - 💾 Model and config explicitly saved to: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:3163:save_and_push]
2025-06-11 15:41:07,337 - INFO - 🖌️ Tokenizer saved to: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b - [learning2rank.py:3167:save_and_push]
2025-06-11 15:41:07,771 - INFO - 📊 Lookup table saved to: ../tmp/MIMIC4_DEMO/mimic4_l2rtrain_mistral7b/lookup_table.pt - [learning2rank.py:3172:save_and_push]
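Note: everything the run saved here (model.safetensors, config.json, the tokenizer files, and lookup_table.pt) was then pushed to the Hub repo named in the log. A sketch of pulling an artifact back down with the standard huggingface_hub call (the tensor layout is an assumption, as above):

```python
# Fetch the lookup table this commit added to the Hub repo.
import torch
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="deb101/mistral-7b-instruct-v0.3-mimic4-adapt-l2r",
    filename="lookup_table.pt",
)
table = torch.load(path, map_location="cpu")  # expected shape [32768, 7942]
```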