wjbmattingly
committed
Upload 4 files

- README.md +1 -12
- analyze.ipynb +0 -0
- convert.ipynb +28 -67
- home-alcar-ner.jsonl +2 -2
README.md
CHANGED
@@ -2,19 +2,8 @@
 license: cc-by-4.0
 ---
 This NER dataset comes from the following publication.
-
 ```
-
-  author = {Stutzmann, Dominique and
-            Torres Aguilar, Sergio and
-            Chaffenet, Paul},
-  title = {HOME-Alcar: Aligned and Annotated Cartularies},
-  month = nov,
-  year = 2021,
-  publisher = {Zenodo},
-  doi = {10.5281/zenodo.5600884},
-  url = {https://doi.org/10.5281/zenodo.5600884}
-}
+Stutzmann, D., Torres Aguilar, S., & Chaffenet, P. (2021). HOME-Alcar: Aligned and Annotated Cartularies [Data set]. Zenodo. https://doi.org/10.5281/zenodo.5600884
 ```
 
 I used the notebook `convert.ipynb` to convert it from the original format to spaCy's format. The notebook expects the database downloaded from the link above to be in the root directory.
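For context, a minimal sketch of reading the converted dataset, assuming the records keep the `{"text", "spans"}` shape that `convert.ipynb` builds (where `start`/`end` are token indices from `span.start`/`span.end`, not character offsets):

```python
# Sketch: preview the converted dataset with srsly (imported in convert.ipynb).
# Assumes each line of home-alcar-ner.jsonl is one record of the form
# {"text": ..., "spans": [{"text": ..., "label": ..., "start": ..., "end": ...}]}.
import srsly

for record in srsly.read_jsonl("home-alcar-ner.jsonl"):
    print(record["text"][:80])
    for span in record["spans"]:
        # start/end are token indices into the whitespace-joined text
        print(f'  {span["label"]:>4} tokens {span["start"]}-{span["end"]}: {span["text"]}')
    break  # only preview the first record
```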
analyze.ipynb
ADDED
The diff for this file is too large to render; see the raw diff.
convert.ipynb
CHANGED
@@ -2,26 +2,25 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 108,
    "metadata": {},
    "outputs": [],
    "source": [
     "import pandas as pd\n",
     "import glob\n",
     "import spacy\n",
-    "from spacy.tokens import Span\n",
-    "import os"
+    "from spacy.tokens import Span, Doc\n",
+    "import os\n",
+    "from spacy.training import biluo_tags_to_offsets, biluo_tags_to_spans, iob_to_biluo\n",
+    "import srsly"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 113,
    "metadata": {},
    "outputs": [],
    "source": [
-    "def join_words(group):\n",
-    "    return ' '.join(group.astype(str))\n",
-    "\n",
     "def create_spacy_training_data(file_path):\n",
     "    # Load data from the Excel file\n",
     "    data = pd.read_excel(file_path)\n",
@@ -33,79 +32,41 @@
     "            group_col = \"Original_Act_ID\"\n",
     "        else:\n",
     "            \"unknown\"\n",
-    "    data = data[~data['Word_x'].apply(lambda x: isinstance(x, int))]\n",
-    "    data = data[~data['Word_x'].apply(lambda x: isinstance(x, float))]\n",
-    "\n",
-    "\n",
-    "\n",
-    "\n",
+    "    # data = data[~data['Word_x'].apply(lambda x: isinstance(x, int))]\n",
+    "    # data = data[~data['Word_x'].apply(lambda x: isinstance(x, float))]\n",
+    "    data['Word_x'] = data['Word_x'].astype(str).str.strip()\n",
     "    \n",
     "    # Combine words into sentences, assumed by unique 'Line_ID'\n",
-    "    grouped_data = data.groupby(group_col)
+    "    grouped_data = data.groupby(group_col)\n",
     "    \n",
     "    # Prepare training data in spaCy format\n",
     "    training_data = []\n",
-    "
-    "
-    "
-    "
-    "
+    "    for _, item in grouped_data:\n",
+    "        bilo_loc = item[\"LOC_x\"].tolist()\n",
+    "        bilo_person = item[\"PERS_x\"].tolist()\n",
+    "        tokens = item[\"Word_x\"].tolist()\n",
+    "        doc = Doc(nlp.vocab, words=tokens, spaces=[True for i in range(len(tokens))])\n",
+    "        # doc = nlp(\" \".join(tokens))\n",
     "\n",
-    "
-    "
-    "            start = current_position\n",
-    "            end = start + len(word_data['Word_x'])\n",
-    "            # Check if there's a named entity\n",
-    "            if word_data['LOC_x'] != 'O':\n",
-    "                entities.append((start, end, 'LOC'))\n",
-    "            if word_data['PERS_x'] != 'O':\n",
-    "                entities.append((start, end, 'PER'))\n",
-    "            \n",
-    "            current_position = end + 1  # Update position, accounting for space\n",
+    "        spans = iob_to_biluo(bilo_person)\n",
+    "        spans = biluo_tags_to_spans(doc, spans)\n",
     "\n",
-    "        # Append to training data\n",
-    "        training_data.append({\"text\": text, \"entities\": entities})\n",
-    "    training_data = convert_to_spacy_docs(training_data)\n",
-    "    return training_data\n",
     "\n",
-    "
-    "
-    "
-    "
-    "
-    "    spacy_docs = []\n",
-    "    \n",
-    "    for record in training_data:\n",
-    "        # Create a doc from the text\n",
-    "        doc = nlp(record['text'])\n",
-    "        \n",
-    "        # Create a list to collect entity spans\n",
-    "        spans = []\n",
-    "        \n",
-    "        for start, end, label in record['entities']:\n",
-    "            span = doc.char_span(start, end, label=label)\n",
-    "            if span is not None:  # Only add the span if it's correctly aligned with token boundaries\n",
-    "                spans.append(span)\n",
-    "        \n",
-    "        # Overwrite the doc's 'ents' with our list of spans\n",
-    "        try:\n",
-    "            doc.spans[\"sc\"] = spans\n",
-    "        except:\n",
-    "            ValueError\n",
-    "            print(spans)\n",
-    "        \n",
+    "        loc_spans = iob_to_biluo(bilo_loc)\n",
+    "        loc_spans = biluo_tags_to_spans(doc, loc_spans)\n",
+    "\n",
+    "        spans = loc_spans + spans\n",
+    "        doc.spans[\"sc\"] = spans\n",
     "        span_ents = []\n",
     "        for span in doc.spans[\"sc\"]:\n",
     "            span_ents.append({\"text\": span.text, \"label\": span.label_, \"start\": span.start, \"end\": span.end})\n",
-    "
-    "
-    "    \n",
-    "    return spacy_docs"
+    "        training_data.append({\"text\": doc.text, \"spans\": span_ents})\n",
+    "    return training_data"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 98,
    "metadata": {},
    "outputs": [
    {
@@ -123,7 +84,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 117,
    "metadata": {},
    "outputs": [
    {
@@ -163,7 +124,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": 120,
    "metadata": {},
    "outputs": [],
    "source": [
home-alcar-ner.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:28834247380066b3f52a1b46ecf3fc34ef83021e143eeabfcb060fc072c02ac0
+size 10718349
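Note that this last diff only updates a Git LFS pointer file (a new sha256 and byte size); the actual ~10.7 MB JSONL lives in LFS storage. One way to fetch the real file is via `huggingface_hub`; the `repo_id` below is a hypothetical placeholder, so adjust it to the actual dataset repository:

```python
# Sketch: download the LFS-backed JSONL from the Hugging Face Hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="wjbmattingly/home-alcar-ner",  # assumption: hypothetical repo id
    filename="home-alcar-ner.jsonl",
    repo_type="dataset",
)
print(path)  # local cache path to the resolved (non-pointer) file
```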