Modalities: Text
Languages: English
Libraries: Datasets
Louis Thomson committed · Commit 12f7f0d · Parent: 29dc9e8

Delete clean.py


Testing whether deleting clean.py will make load_dataset able to parse the data files directly.

Files changed (1): clean.py (+0, -153)
clean.py DELETED
@@ -1,153 +0,0 @@
import re
import math

import pandas as pd

from tqdm import tqdm

seed = 7497

TOXIC_COLUMNS = [
    "toxic",
    "severe_toxic",
    "obscene",
    "threat",
    "insult",
    "identity_hate",
]

# Time and date regexes
TIME = r"([0-9]{1,2}:[0-9]{2}( (am|AM|pm|PM))?)"
DAY = r"([23]?(1(st)?|2(nd)?|3(rd)?|[4-9](th)?)|1[0-9](th)?)"
MONTH = r"(January|February|March|April|May|June|July|August|September|October|November|December|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)"
YEAR = r"('?[0-9]{2}|[0-9]{4})"
DATE = rf"(({DAY} {MONTH}|{MONTH} {DAY})(,? {YEAR})?)"
TIMESTAMP = rf"((({TIME},? (\(UTC\) )?)?{DATE}|({DATE},? )?{TIME})(\s+\(UTC\))?)"

# The 'talk' part at the end of a signature
TALK = r"((\|\s*|\(\s*)?[tT]alk((\s*[-|•, ]\s*|\s+)[cC]ontribs)?(\s*[-|)])?)"

# IP addresses
IP = r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})"

# Username and the username part of the signature
USERNAME = r"([^#<>[\]|{}/@\s]+)"
USER_SIG = rf"((((?:\s)[-–—]\s*)?(\((User:)?{USERNAME}\)|User:{USERNAME})|(?:\s)[-–—]\s*{USERNAME})(\s+{TALK})?)"

# A full signature
SIGNATURE = rf"(((([-–—]\s*)?{IP}(\s+{USER_SIG})?|(?:\s)[-–—]\s*[uU]nsigned|{TALK}|{USER_SIG})(\s+{TIMESTAMP})?)|{TIMESTAMP}(\s+{TALK})?)"

# List of the patterns to remove
REGEX_REMOVE = [
    r"^(\"+|'+)",  # Initial quotation marks
    r"(\"+|'+)$",  # Final quotation marks
    r"^REDIRECT.*$",  # The whole comment is a redirect
    rf"^\s*{SIGNATURE}",  # Initial signature
    rf"{SIGNATURE}\s*$",  # Final signature
    r" \[[0-9]+\]|\[[0-9]+\] ",  # Citations
    r"‖\s+[tT]alk - [-a-zA-Z0-9._()\s]+‖",
    r"==[^=]+==",
    r"^::+",
    r"^\s*\(UTC\)",
    rf"Unblock {IP}",
    r"2nd Unblock Request",
    r":Category:",
    r"File:[^\s]+",
    r"\{\|.+\|\}",  # Embedded code
    # r"\{\{.+\s.+\}\}",  # Embedded code
    r"^\s+",  # Initial whitespace
    r"\s+$",  # Trailing whitespace
]

# List of patterns to replace
REGEX_REPLACE = {
    "\n+": "\n",
    "\\'": "'",
    '""+': '"',
    "''+": "'",
    # r"(WP|Wikipedia):[^\s]+": "URL",  # Wikipedia internal links
    r"[^\s]+#[^\s]+": "URL",  # Wikipedia internal links
    r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)": "URL",  # URLs
    r"([uU]ser_[tT]alk|[tT]alk):[^\s]+": "URL",  # Talk links
}


def clean_sentence(sentence):
    """Preprocess a sentence using the regex rules"""
    for pattern in REGEX_REMOVE:
        sentence = re.sub(pattern, "", sentence)
    for pattern, repl in REGEX_REPLACE.items():
        sentence = re.sub(pattern, repl, sentence)
    return sentence


def make_binary_label(row):
    """Make a row's label binary by combining all toxicity types"""
    for column in TOXIC_COLUMNS:
        if row[column] == 1:
            return 1
    return 0


print("Loading original data...")

# Load up the original data
train_df = pd.read_csv("orig_train.csv").set_index("id")
test_text_df = pd.read_csv("orig_test.csv").set_index("id")
test_labels_df = pd.read_csv("orig_test_labels.csv").set_index("id")

# Remove the datapoints which have no label
test_text_df = test_text_df.loc[test_labels_df["toxic"] != -1]
test_labels_df = test_labels_df.loc[test_labels_df["toxic"] != -1]

# Join the test text and labels to make a complete dataset
test_df = test_text_df.join(test_labels_df)

print("Cleaning train split...")
for index, row in tqdm(train_df.iterrows(), total=len(train_df)):
    # Write back with .at: mutating the row copy yielded by iterrows() would
    # not change the DataFrame itself
    train_df.at[index, "comment_text"] = clean_sentence(row["comment_text"])

print("Cleaning test split...")
for index, row in tqdm(test_df.iterrows(), total=len(test_df)):
    test_df.at[index, "comment_text"] = clean_sentence(row["comment_text"])


# Some texts will get reduced to the empty string. Let's remove them first
print("Removing empty texts...")
train_df = train_df.loc[train_df["comment_text"] != ""]
test_df = test_df.loc[test_df["comment_text"] != ""]

# Get rid of any duplicates we made
print("Removing duplicate entries...")
train_df = train_df.drop_duplicates(subset=["comment_text"])
test_df = test_df.drop_duplicates(subset=["comment_text"])

print("Creating binary column...")

# Make the new binary column
train_df["label"] = train_df.apply(make_binary_label, axis=1)
test_df["label"] = test_df.apply(make_binary_label, axis=1)

# Remove all other classification columns
train_df = train_df.drop(columns=TOXIC_COLUMNS)
test_df = test_df.drop(columns=TOXIC_COLUMNS)

print("Creating eval split...")

# Shuffle the current train split
train_df = train_df.sample(frac=1, random_state=seed)

# The new size of the train split
train_size = math.floor(len(train_df) * 0.8)

# Separate into train and eval splits
eval_df = train_df[train_size:]
train_df = train_df[:train_size]

print("Saving to disk...")
with open("train.csv", "w") as f:
    train_df.to_csv(f)
with open("evaluation.csv", "w") as f:
    eval_df.to_csv(f)
with open("test.csv", "w") as f:
    test_df.to_csv(f)