holylovenia committed
Commit 733393c · verified · 1 Parent(s): b1be6d2

Upload filipino_hatespeech_election.py with huggingface_hub

Files changed (1)
  1. filipino_hatespeech_election.py +124 -0
filipino_hatespeech_election.py ADDED
@@ -0,0 +1,124 @@
+ import csv
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """
+ @article{Cabasag-2019-hate-speech,
+   title={Hate speech in Philippine election-related tweets: Automatic detection and classification using natural language processing},
+   author={Neil Vicente Cabasag and Vicente Raphael Chan and Sean Christian Lim and Mark Edward Gonzales and Charibeth Cheng},
+   journal={Philippine Computing Journal},
+   volume={XIV},
+   number={1},
+   month={August},
+   year={2019},
+ }
+ """
+
+ _DATASETNAME = "filipino_hatespeech_election"
+
+ _DESCRIPTION = """
+ The dataset is a subset of a corpus of 1,696,613 tweets crawled by Andrade et al., posted from November 2015 to May 2016 during the campaign period
+ for the Philippine presidential election. Tweets were culled based on the presence of candidate names (e.g., Binay, Duterte, Poe, Roxas, and Santiago)
+ and election-related hashtags (e.g., #Halalan2016, #Eleksyon2016, and #PiliPinas2016). Data preprocessing was performed to prepare the tweets for
+ feature extraction and classification; it consisted of the following steps: data de-identification, uniform resource locator (URL) removal,
+ special character processing, normalization, hashtag processing, and tokenization.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/hate_speech_filipino"
+
+ _LANGUAGES = ["fil"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {_DATASETNAME: "https://s3.us-east-2.amazonaws.com/blaisecruz.com/datasets/hatenonhate/hatespeech_raw.zip"}
+
+ _SUPPORTED_TASKS = [Tasks.ABUSIVE_LANGUAGE_PREDICTION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+ _CLASSES = ["0", "1"]  # corresponds to ["non-hate-containing", "hate-containing"]
+
+
+ class FilipinoHatespeechElectionDataset(datasets.GeneratorBasedBuilder):
+     """Hate Speech Text Classification Dataset in Filipino."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_text",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_text",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         features = schemas.text_features(label_names=_CLASSES)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = dl_manager.download_and_extract(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "hatespeech", "train.csv"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "hatespeech", "test.csv"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "hatespeech", "valid.csv"),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         with open(filepath, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
+             next(csv_reader)  # skip the header row
+             for i, row in enumerate(csv_reader):
+                 try:
+                     text, label = row
+                     # The source CSVs label each tweet "0" (non-hate) or "1" (hate),
+                     # so the label indexes _CLASSES directly.
+                     yield i, {"id": str(i), "text": text, "label": _CLASSES[int(label.strip())]}
+                 except ValueError:
+                     # Skip malformed rows that do not unpack into (text, label).
+                     pass
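
The preprocessing pipeline named in _DESCRIPTION (de-identification, URL removal, special character processing, normalization, hashtag processing, tokenization) was applied by the dataset authors before release; the loader above only reads the resulting CSVs. For orientation, here is a minimal, hypothetical sketch of what such a pipeline could look like. The regex patterns and the [USER] placeholder are illustrative assumptions, not the authors' actual implementation.

import re

def preprocess_tweet(tweet: str) -> str:
    """Illustrative approximation of the preprocessing steps listed in _DESCRIPTION."""
    # De-identification: replace @-mentions with a placeholder.
    tweet = re.sub(r"@\w+", "[USER]", tweet)
    # URL removal.
    tweet = re.sub(r"https?://\S+|www\.\S+", "", tweet)
    # Hashtag processing: keep the tag text, drop the '#' marker.
    tweet = re.sub(r"#(\w+)", r"\1", tweet)
    # Special character processing: drop anything outside alphanumerics and basic punctuation.
    tweet = re.sub(r"[^\w\s.,!?\[\]]", " ", tweet)
    # Normalization: lowercase and collapse repeated whitespace.
    tweet = re.sub(r"\s+", " ", tweet.lower()).strip()
    return tweet

print(preprocess_tweet("Grabe si @juan!!! #Halalan2016 https://t.co/xyz"))
# -> "grabe si [user]!!! halalan2016"

Tokenization would then typically follow as a final split over the normalized string, e.g. preprocess_tweet(...).split().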
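To exercise the loader end to end, the script can be passed directly to datasets.load_dataset. A minimal sketch, assuming the seacrowd package is installed (the script imports it) and this file sits in the working directory; the config name follows BUILDER_CONFIGS above:

import datasets

# Script-based datasets require trust_remote_code in recent versions of `datasets`.
dset = datasets.load_dataset(
    "filipino_hatespeech_election.py",
    name="filipino_hatespeech_election_source",
    trust_remote_code=True,
)

print(dset)              # DatasetDict with train, test, and validation splits
print(dset["train"][0])  # e.g. {"id": "0", "text": "...", "label": "..."}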