aquiro1994 committed
Commit c1ce904 · verified · 1 Parent(s): eccaf36

End of training

README.md CHANGED
@@ -19,7 +19,7 @@ should probably proofread and complete it, then remove this comment. -->
  This model is a fine-tuned version of [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
  - Loss: nan
- - F1: 0.0176
+ - F1: 0.0021

  ## Model description

@@ -50,11 +50,11 @@ The following hyperparameters were used during training:

  | Training Loss | Epoch | Step | Validation Loss | F1     |
  |:-------------:|:-----:|:----:|:---------------:|:------:|
- | No log        | 1.0   | 29   | nan             | 0.0176 |
- | No log        | 2.0   | 58   | nan             | 0.0176 |
- | No log        | 3.0   | 87   | nan             | 0.0176 |
- | 0.0           | 4.0   | 116  | nan             | 0.0176 |
- | 0.0           | 5.0   | 145  | nan             | 0.0176 |
+ | No log        | 1.0   | 29   | nan             | 0.0021 |
+ | No log        | 2.0   | 58   | nan             | 0.0021 |
+ | No log        | 3.0   | 87   | nan             | 0.0021 |
+ | 0.0           | 4.0   | 116  | nan             | 0.0021 |
+ | 0.0           | 5.0   | 145  | nan             | 0.0021 |


  ### Framework versions
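
For reference, a minimal sketch of loading a fine-tuned checkpoint like this with transformers; the local path and the sequence-classification head are assumptions, since the commit does not show the repository id or the task type.

```python
# Minimal sketch, not the author's code: load the fine-tuned checkpoint.
# "./modernbert-finetuned" and the sequence-classification head are assumptions;
# the diff only shows that the base model is answerdotai/ModernBERT-base.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "./modernbert-finetuned"  # hypothetical local output directory
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("Example input text", return_tensors="pt", truncation=True)
logits = model(**inputs).logits
print(logits)
```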
runs/Jun06_19-54-48_5aefe4f9a840/events.out.tfevents.1749239689.5aefe4f9a840.5036.2 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fef1ed808175acccca612e46d4d41b1c61b982d360a82c5513d029a905d40aeb
- size 8999
+ oid sha256:1c571d5d047991772aa4ed107b8794ae97fed060f2eebaea479a592cef6c589d
+ size 9353
tokenizer.json CHANGED
@@ -1,7 +1,19 @@
  {
    "version": "1.0",
-   "truncation": null,
-   "padding": null,
+   "truncation": {
+     "direction": "Right",
+     "max_length": 8192,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": {
+     "strategy": "BatchLongest",
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 50283,
+     "pad_type_id": 0,
+     "pad_token": "[PAD]"
+   },
    "added_tokens": [
      {
        "id": 0,
tokenizer_config.json CHANGED
@@ -933,20 +933,13 @@
    "cls_token": "[CLS]",
    "extra_special_tokens": {},
    "mask_token": "[MASK]",
-   "max_length": 512,
    "model_input_names": [
      "input_ids",
      "attention_mask"
    ],
-   "model_max_length": 512,
-   "pad_to_multiple_of": null,
+   "model_max_length": 8192,
    "pad_token": "[PAD]",
-   "pad_token_type_id": 0,
-   "padding_side": "right",
    "sep_token": "[SEP]",
-   "stride": 0,
    "tokenizer_class": "PreTrainedTokenizerFast",
-   "truncation_side": "right",
-   "truncation_strategy": "longest_first",
    "unk_token": "[UNK]"
  }
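
Net effect of the tokenizer_config.json change: model_max_length goes from 512 to 8192 (ModernBERT's native context length), and the per-call truncation/padding defaults removed here now live in tokenizer.json. A quick hedged check, assuming the files from this commit sit in a local directory:

```python
# Sketch: confirm the updated limits after loading the saved tokenizer files.
# "./modernbert-finetuned" is a placeholder for a directory containing the
# tokenizer.json / tokenizer_config.json shown in this commit.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./modernbert-finetuned")
print(tokenizer.model_max_length)                   # expected: 8192
print(tokenizer.pad_token, tokenizer.pad_token_id)  # expected: [PAD] 50283
```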