commit files to HF hub
README.md
CHANGED
@@ -34,22 +34,22 @@ model-index:
       value: 0.0
     - name: ROUGE-L (Question Answering)
       type: rouge_l_question_answering
-      value:
+      value: 63.77
     - name: METEOR (Question Answering)
       type: meteor_question_answering
-      value:
+      value: 49.75
     - name: BERTScore (Question Answering)
       type: bertscore_question_answering
-      value:
+      value: 96.29
     - name: MoverScore (Question Answering)
       type: moverscore_question_answering
-      value:
+      value: 88.92
     - name: AnswerF1Score (Question Answering)
       type: answer_f1_score__question_answering
-      value:
+      value: 65.7
     - name: AnswerExactMatch (Question Answering)
       type: answer_exact_match_question_answering
-      value:
+      value: 65.7
 ---
 
 # Model Card of `lmqg/mt5-small-jaquad-qa`
@@ -93,16 +93,16 @@ output = pipe("question: 新型車両として6000系が構想されたのは、
 
 | | Score | Type | Dataset |
 |:-----------------|--------:|:--------|:-----------------------------------------------------------------|
-| AnswerExactMatch |
-| AnswerF1Score |
-| BERTScore |
-| Bleu_1 |
+| AnswerExactMatch | 65.7 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
+| AnswerF1Score | 65.7 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
+| BERTScore | 96.29 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
+| Bleu_1 | 61.42 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
 | Bleu_2 | 0 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
 | Bleu_3 | 0 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
 | Bleu_4 | 0 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
-| METEOR |
-| MoverScore |
-| ROUGE_L |
+| METEOR | 49.75 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
+| MoverScore | 88.92 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
+| ROUGE_L | 63.77 | default | [lmqg/qg_jaquad](https://huggingface.co/datasets/lmqg/qg_jaquad) |
 
 
 
@@ -117,9 +117,9 @@ The following hyperparameters were used during fine-tuning:
 - model: google/mt5-small
 - max_length: 512
 - max_length_output: 32
-- epoch:
+- epoch: 14
 - batch: 16
-- lr: 0.
+- lr: 0.0006
 - fp16: False
 - random_seed: 1
 - gradient_accumulation_steps: 4
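The second hunk's context line shows the model card's question-answering example (`output = pipe("question: ...`, truncated above). A minimal usage sketch, assuming `pipe` is the standard `transformers` text2text-generation pipeline and that inputs follow a `"question: ..., context: ..."` format (the card's actual example string is not shown in full here):

```python
# Minimal sketch, not copied from the card. Assumptions: `pipe` is a standard
# transformers text2text-generation pipeline, and the input follows the
# "question: ..., context: ..." format suggested by the truncated hunk header.
from transformers import pipeline

pipe = pipeline("text2text-generation", model="lmqg/mt5-small-jaquad-qa")
output = pipe("question: <question text>, context: <paragraph text>")  # placeholders, not the card's example
print(output)
```

For the hyperparameters in the last hunk, batch: 16 together with gradient_accumulation_steps: 4 corresponds to an effective batch size of 16 × 4 = 64; this commit also fills in epoch: 14 and lr: 0.0006.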
eval/metric.first.answer.paragraph_question.answer.lmqg_qg_jaquad.default.json
CHANGED
@@ -1 +1 @@
-{"validation": {"Bleu_1": 0.
+{"validation": {"Bleu_1": 0.590103397341066, "Bleu_2": 2.1903398357857222e-09, "Bleu_3": 5.4279259757280845e-12, "Bleu_4": 3.5561120535260716e-13, "AnswerF1Score": 62.266226622662266, "AnswerExactMatch": 62.24930185326225, "METEOR": 0.47933309303466093, "ROUGE_L": 0.6084562839110386, "BERTScore": 0.9603767614583735, "MoverScore": 0.883987196500161}, "test": {"Bleu_1": 0.6141809290952044, "Bleu_2": 2.0167854054074478e-09, "Bleu_3": 5.137273746966832e-12, "Bleu_4": 3.8771421012861e-13, "AnswerF1Score": 65.7019548108657, "AnswerExactMatch": 65.7019548108657, "METEOR": 0.4975267797624061, "ROUGE_L": 0.6377253109926377, "BERTScore": 0.962933681752059, "MoverScore": 0.8892355901795232}}
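The values now reported in README.md (ROUGE_L 63.77, METEOR 49.75, BERTScore 96.29, MoverScore 88.92, AnswerF1Score/AnswerExactMatch 65.7, Bleu_1 61.42) are the test-split entries of this JSON rounded to two decimals: metrics stored on a 0-1 scale are multiplied by 100, while AnswerF1Score and AnswerExactMatch are already on a 0-100 scale. A small sketch of that conversion (not a script from this repo):

```python
# Sketch (not part of this repo): reproduce the README's rounded scores
# from the committed eval JSON.
import json

path = "eval/metric.first.answer.paragraph_question.answer.lmqg_qg_jaquad.default.json"
with open(path) as f:
    test_scores = json.load(f)["test"]

already_percent = {"AnswerF1Score", "AnswerExactMatch"}  # stored on a 0-100 scale
for name, score in test_scores.items():
    reported = round(score if name in already_percent else score * 100, 2)
    print(f"{name}: {reported}")  # e.g. ROUGE_L: 63.77, BERTScore: 96.29, Bleu_2: 0.0
```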
eval/samples.test.hyp.paragraph_question.answer.lmqg_qg_jaquad.default.txt
CHANGED
The diff for this file is too large to render. See raw diff.
eval/samples.validation.hyp.paragraph_question.answer.lmqg_qg_jaquad.default.txt
CHANGED
The diff for this file is too large to render. See raw diff.
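Since the two sample-prediction files are too large for the diff viewer, they (and the metric JSON) can be fetched directly from the repo. A sketch using `huggingface_hub`, assuming the files keep the paths shown in this commit:

```python
# Sketch: download the committed eval artifacts from the Hub for local inspection.
from huggingface_hub import hf_hub_download

repo_id = "lmqg/mt5-small-jaquad-qa"
for filename in [
    "eval/metric.first.answer.paragraph_question.answer.lmqg_qg_jaquad.default.json",
    "eval/samples.test.hyp.paragraph_question.answer.lmqg_qg_jaquad.default.txt",
    "eval/samples.validation.hyp.paragraph_question.answer.lmqg_qg_jaquad.default.txt",
]:
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    print(local_path)
```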