will-rads committed on
Commit
0e7f697
·
verified ·
1 Parent(s): 192710c

Update README.md

Browse files

fix: correct README front matter

Files changed (1) hide show
  1. README.md +26 -25
README.md CHANGED
@@ -1,42 +1,43 @@
1
  ---
2
- language: en
3
- license: mit # Or choose another like 'apache-2.0', 'cc-by-sa-4.0', etc.
4
  library_name: transformers
 
 
5
  tags:
6
- - text-classification
7
- - hate-speech
8
- - offensive-language
9
- - distilbert
10
  - tensorflow
11
- pipeline_tag: text-classification
 
 
 
12
  widget:
13
- - text: "I love this beautiful day, it's fantastic!"
14
  example_title: "Positive Example"
15
  - text: "You are a terrible person and I wish you the worst."
16
  example_title: "Offensive Example"
17
  - text: "This is a completely neutral statement about clouds."
18
  example_title: "Neutral Example"
19
- - text: "Kill all of them, they don't belong in our country." # Potentially strong hate speech
20
  example_title: "Hate Speech Example"
 
 
21
  model-index:
22
- - name: distilbert-hatespeech-classifier # Should match your model name
23
  results:
24
- - task:
25
- type: text-classification
26
- name: Text Classification
27
- dataset:
28
- name: tdavidson/hate_speech_offensive # Or the specific name you used
29
- type: hf # Indicates it's from Hugging Face datasets
30
- metrics:
31
- - name: Validation Accuracy
32
- type: accuracy
33
- value: 0.7137 # Your best validation accuracy (from Epoch 2)
34
- - name: Validation Loss
35
- type: loss
36
- value: 0.7337 # Your best validation loss (from Epoch 2)
37
  ---
38
-
39
-
40
  # Ethical-Content-Moderation
41
  Fine-Tuning DistilBERT for Ethical Content Moderation
42
 
 
1
  ---
2
+ pipeline_tag: text-classification
 
3
  library_name: transformers
4
+ license: mit
5
+ language: en
6
  tags:
7
+ - transformers
 
 
 
8
  - tensorflow
9
+ - distilbert
10
+ - text-classification
11
+
12
+ # Widget examples shown on the model page:
13
  widget:
14
+ - text: "I love this community."
15
  example_title: "Positive Example"
16
  - text: "You are a terrible person and I wish you the worst."
17
  example_title: "Offensive Example"
18
  - text: "This is a completely neutral statement about clouds."
19
  example_title: "Neutral Example"
20
+ - text: "Kill all of them, they don't belong in our country."
21
  example_title: "Hate Speech Example"
22
+
23
+ # Optional: results for the model card
24
  model-index:
25
+ - name: distilbert-hatespeech-classifier
26
  results:
27
+ - task:
28
+ type: text-classification
29
+ name: Text Classification
30
+ dataset:
31
+ name: tdavidson/hate_speech_offensive
32
+ type: hf
33
+ metrics:
34
+ - name: Validation Accuracy
35
+ type: accuracy
36
+ value: 0.7137
37
+ - name: Validation Loss
38
+ type: loss
39
+ value: 0.7337
40
  ---
 
 
41
  # Ethical-Content-Moderation
42
  Fine-Tuning DistilBERT for Ethical Content Moderation
43